{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9345794392523364,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04672897196261682,
      "grad_norm": 55.02521133123827,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7216238975524902,
      "logits/rejected": -2.7209055423736572,
      "logps/chosen": -268.4510192871094,
      "logps/rejected": -203.9590606689453,
      "loss": 0.6914,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": 0.017813727259635925,
      "rewards/margins": 0.009322145953774452,
      "rewards/rejected": 0.008491581305861473,
      "step": 5
    },
    {
      "epoch": 0.09345794392523364,
      "grad_norm": 50.12554517439661,
      "learning_rate": 1e-06,
      "logits/chosen": -2.660832166671753,
      "logits/rejected": -2.6669700145721436,
      "logps/chosen": -256.93609619140625,
      "logps/rejected": -211.64938354492188,
      "loss": 0.6467,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 0.4671781659126282,
      "rewards/margins": 0.18480566143989563,
      "rewards/rejected": 0.28237253427505493,
      "step": 10
    },
    {
      "epoch": 0.14018691588785046,
      "grad_norm": 48.457281147169425,
      "learning_rate": 9.993623730611148e-07,
      "logits/chosen": -2.494657278060913,
      "logits/rejected": -2.5133018493652344,
      "logps/chosen": -232.6891632080078,
      "logps/rejected": -212.8677215576172,
      "loss": 0.6204,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 1.2435152530670166,
      "rewards/margins": 0.6126660108566284,
      "rewards/rejected": 0.6308490037918091,
      "step": 15
    },
    {
      "epoch": 0.18691588785046728,
      "grad_norm": 42.23668651632898,
      "learning_rate": 9.97451118516912e-07,
      "logits/chosen": -2.3121209144592285,
      "logits/rejected": -2.302377462387085,
      "logps/chosen": -234.3399658203125,
      "logps/rejected": -191.3181610107422,
      "loss": 0.6223,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 1.3283790349960327,
      "rewards/margins": 0.9071598052978516,
      "rewards/rejected": 0.4212193489074707,
      "step": 20
    },
    {
      "epoch": 0.2336448598130841,
      "grad_norm": 49.68505743626908,
      "learning_rate": 9.94271111036929e-07,
      "logits/chosen": -2.2619333267211914,
      "logits/rejected": -2.2323482036590576,
      "logps/chosen": -230.17385864257812,
      "logps/rejected": -205.64108276367188,
      "loss": 0.616,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 1.3353136777877808,
      "rewards/margins": 1.208017110824585,
      "rewards/rejected": 0.127296581864357,
      "step": 25
    },
    {
      "epoch": 0.2803738317757009,
      "grad_norm": 39.50573463077347,
      "learning_rate": 9.898304612549066e-07,
      "logits/chosen": -2.308243989944458,
      "logits/rejected": -2.2968266010284424,
      "logps/chosen": -241.26632690429688,
      "logps/rejected": -207.3319854736328,
      "loss": 0.5998,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": 0.9092999696731567,
      "rewards/margins": 0.9556086659431458,
      "rewards/rejected": -0.046308644115924835,
      "step": 30
    },
    {
      "epoch": 0.32710280373831774,
      "grad_norm": 42.78736900246308,
      "learning_rate": 9.841404950825536e-07,
      "logits/chosen": -2.3728604316711426,
      "logits/rejected": -2.3580102920532227,
      "logps/chosen": -256.8548583984375,
      "logps/rejected": -205.154052734375,
      "loss": 0.5996,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 1.0152027606964111,
      "rewards/margins": 1.2137099504470825,
      "rewards/rejected": -0.1985071450471878,
      "step": 35
    },
    {
      "epoch": 0.37383177570093457,
      "grad_norm": 41.73908597429494,
      "learning_rate": 9.77215724822721e-07,
      "logits/chosen": -2.4492850303649902,
      "logits/rejected": -2.4539356231689453,
      "logps/chosen": -243.1707763671875,
      "logps/rejected": -213.95166015625,
      "loss": 0.6098,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 1.0353302955627441,
      "rewards/margins": 1.2659428119659424,
      "rewards/rejected": -0.2306123673915863,
      "step": 40
    },
    {
      "epoch": 0.4205607476635514,
      "grad_norm": 41.530064757148224,
      "learning_rate": 9.69073812155662e-07,
      "logits/chosen": -2.5637125968933105,
      "logits/rejected": -2.5535428524017334,
      "logps/chosen": -244.7168731689453,
      "logps/rejected": -205.80923461914062,
      "loss": 0.5974,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.8133939504623413,
      "rewards/margins": 0.9837163686752319,
      "rewards/rejected": -0.17032238841056824,
      "step": 45
    },
    {
      "epoch": 0.4672897196261682,
      "grad_norm": 38.26706141308248,
      "learning_rate": 9.597355230927788e-07,
      "logits/chosen": -2.5823917388916016,
      "logits/rejected": -2.562842607498169,
      "logps/chosen": -240.04067993164062,
      "logps/rejected": -209.23428344726562,
      "loss": 0.572,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.9298027753829956,
      "rewards/margins": 1.2456680536270142,
      "rewards/rejected": -0.3158652186393738,
      "step": 50
    },
    {
      "epoch": 0.4672897196261682,
      "eval_logits/chosen": -2.530949115753174,
      "eval_logits/rejected": -2.529101610183716,
      "eval_logps/chosen": -245.5291748046875,
      "eval_logps/rejected": -217.46429443359375,
      "eval_loss": 0.5720326900482178,
      "eval_rewards/accuracies": 0.7578125,
      "eval_rewards/chosen": 1.0708366632461548,
      "eval_rewards/margins": 1.28933846950531,
      "eval_rewards/rejected": -0.2185017466545105,
      "eval_runtime": 202.2601,
      "eval_samples_per_second": 15.03,
      "eval_steps_per_second": 0.237,
      "step": 50
    },
    {
      "epoch": 0.514018691588785,
      "grad_norm": 40.54073508413725,
      "learning_rate": 9.4922467501275e-07,
      "logits/chosen": -2.495945930480957,
      "logits/rejected": -2.487422466278076,
      "logps/chosen": -250.51620483398438,
      "logps/rejected": -228.90200805664062,
      "loss": 0.5176,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 1.0155770778656006,
      "rewards/margins": 1.9236654043197632,
      "rewards/rejected": -0.9080885648727417,
      "step": 55
    },
    {
      "epoch": 0.5607476635514018,
      "grad_norm": 38.23797310786567,
      "learning_rate": 9.375680759151206e-07,
      "logits/chosen": -2.474236249923706,
      "logits/rejected": -2.4737977981567383,
      "logps/chosen": -255.09298706054688,
      "logps/rejected": -200.73593139648438,
      "loss": 0.5654,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 1.0740002393722534,
      "rewards/margins": 1.5434155464172363,
      "rewards/rejected": -0.4694152772426605,
      "step": 60
    },
    {
      "epoch": 0.6074766355140186,
      "grad_norm": 42.648181943788025,
      "learning_rate": 9.247954560462927e-07,
      "logits/chosen": -2.505916118621826,
      "logits/rejected": -2.506608724594116,
      "logps/chosen": -255.432861328125,
      "logps/rejected": -205.4224090576172,
      "loss": 0.5628,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": 1.1411590576171875,
      "rewards/margins": 1.7762504816055298,
      "rewards/rejected": -0.6350914239883423,
      "step": 65
    },
    {
      "epoch": 0.6542056074766355,
      "grad_norm": 38.81572593341751,
      "learning_rate": 9.109393920723001e-07,
      "logits/chosen": -2.4328043460845947,
      "logits/rejected": -2.4342734813690186,
      "logps/chosen": -233.8389129638672,
      "logps/rejected": -212.91085815429688,
      "loss": 0.5378,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.6836588978767395,
      "rewards/margins": 1.282029390335083,
      "rewards/rejected": -0.5983705520629883,
      "step": 70
    },
    {
      "epoch": 0.7009345794392523,
      "grad_norm": 32.317612654080975,
      "learning_rate": 8.960352239917699e-07,
      "logits/chosen": -2.450084924697876,
      "logits/rejected": -2.401425361633301,
      "logps/chosen": -240.6315460205078,
      "logps/rejected": -227.21084594726562,
      "loss": 0.5154,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": 0.6731350421905518,
      "rewards/margins": 1.5562646389007568,
      "rewards/rejected": -0.8831297755241394,
      "step": 75
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 30.17721204804764,
      "learning_rate": 8.801209650009814e-07,
      "logits/chosen": -2.4172046184539795,
      "logits/rejected": -2.400567054748535,
      "logps/chosen": -245.4665985107422,
      "logps/rejected": -214.18515014648438,
      "loss": 0.5206,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": 0.9583255052566528,
      "rewards/margins": 1.7562158107757568,
      "rewards/rejected": -0.7978904843330383,
      "step": 80
    },
    {
      "epoch": 0.794392523364486,
      "grad_norm": 32.24535114623233,
      "learning_rate": 8.632372045409141e-07,
      "logits/chosen": -2.320589065551758,
      "logits/rejected": -2.3311946392059326,
      "logps/chosen": -245.598388671875,
      "logps/rejected": -234.7646026611328,
      "loss": 0.5472,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": 1.1685658693313599,
      "rewards/margins": 1.754003882408142,
      "rewards/rejected": -0.5854381322860718,
      "step": 85
    },
    {
      "epoch": 0.8411214953271028,
      "grad_norm": 34.061291659967246,
      "learning_rate": 8.454270047735642e-07,
      "logits/chosen": -2.329784870147705,
      "logits/rejected": -2.304997682571411,
      "logps/chosen": -238.0483856201172,
      "logps/rejected": -195.24313354492188,
      "loss": 0.5291,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.6335947513580322,
      "rewards/margins": 1.5654070377349854,
      "rewards/rejected": -0.9318124055862427,
      "step": 90
    },
    {
      "epoch": 0.8878504672897196,
      "grad_norm": 31.735542564732725,
      "learning_rate": 8.267357907515661e-07,
      "logits/chosen": -2.298316478729248,
      "logits/rejected": -2.2975010871887207,
      "logps/chosen": -246.3526153564453,
      "logps/rejected": -206.03524780273438,
      "loss": 0.5418,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": 0.5418449640274048,
      "rewards/margins": 1.539952039718628,
      "rewards/rejected": -0.9981070756912231,
      "step": 95
    },
    {
      "epoch": 0.9345794392523364,
      "grad_norm": 32.65915062987667,
      "learning_rate": 8.072112345612433e-07,
      "logits/chosen": -2.2663910388946533,
      "logits/rejected": -2.218681812286377,
      "logps/chosen": -246.5704803466797,
      "logps/rejected": -218.6560821533203,
      "loss": 0.4997,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.8927062153816223,
      "rewards/margins": 2.236736297607422,
      "rewards/rejected": -1.3440301418304443,
      "step": 100
    },
    {
      "epoch": 0.9345794392523364,
      "eval_logits/chosen": -2.2007782459259033,
      "eval_logits/rejected": -2.177567720413208,
      "eval_logps/chosen": -247.5850067138672,
      "eval_logps/rejected": -224.4142608642578,
      "eval_loss": 0.5101521015167236,
      "eval_rewards/accuracies": 0.7864583134651184,
      "eval_rewards/chosen": 0.8652558326721191,
      "eval_rewards/margins": 1.7787574529647827,
      "eval_rewards/rejected": -0.9135015606880188,
      "eval_runtime": 201.8599,
      "eval_samples_per_second": 15.06,
      "eval_steps_per_second": 0.238,
      "step": 100
    }
  ],
  "logging_steps": 5,
  "max_steps": 321,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1178822762299392.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}