{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9158878504672896,
  "eval_steps": 50,
  "global_step": 78,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18691588785046728,
      "grad_norm": 57.02260636434872,
      "learning_rate": 5e-07,
      "logits/chosen": -2.726503372192383,
      "logits/rejected": -2.7297565937042236,
      "logps/chosen": -246.4221649169922,
      "logps/rejected": -233.8533172607422,
      "loss": 0.6919,
      "rewards/accuracies": 0.2874999940395355,
      "rewards/chosen": 0.01179208792746067,
      "rewards/margins": 0.0005273699061945081,
      "rewards/rejected": 0.011264720931649208,
      "step": 5
    },
    {
      "epoch": 0.37383177570093457,
      "grad_norm": 46.98073487199951,
      "learning_rate": 1e-06,
      "logits/chosen": -2.6742746829986572,
      "logits/rejected": -2.6755404472351074,
      "logps/chosen": -254.0735321044922,
      "logps/rejected": -213.7585906982422,
      "loss": 0.6511,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.4740002751350403,
      "rewards/margins": 0.21410906314849854,
      "rewards/rejected": 0.25989121198654175,
      "step": 10
    },
    {
      "epoch": 0.5607476635514018,
      "grad_norm": 42.555481546926174,
      "learning_rate": 9.867190271803463e-07,
      "logits/chosen": -2.536228895187378,
      "logits/rejected": -2.540681838989258,
      "logps/chosen": -239.7574462890625,
      "logps/rejected": -213.6060791015625,
      "loss": 0.6036,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 1.3793785572052002,
      "rewards/margins": 0.558377206325531,
      "rewards/rejected": 0.8210013508796692,
      "step": 15
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 54.32236085820561,
      "learning_rate": 9.475816456775312e-07,
      "logits/chosen": -2.464298963546753,
      "logits/rejected": -2.4543182849884033,
      "logps/chosen": -253.5270538330078,
      "logps/rejected": -187.60568237304688,
      "loss": 0.6198,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 1.651815414428711,
      "rewards/margins": 1.00961172580719,
      "rewards/rejected": 0.6422036290168762,
      "step": 20
    },
    {
      "epoch": 0.9345794392523364,
      "grad_norm": 60.5416226517713,
      "learning_rate": 8.846669854914395e-07,
      "logits/chosen": -2.3736038208007812,
      "logits/rejected": -2.3714096546173096,
      "logps/chosen": -237.95022583007812,
      "logps/rejected": -209.83383178710938,
      "loss": 0.6654,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 1.2289361953735352,
      "rewards/margins": 0.9275712966918945,
      "rewards/rejected": 0.30136504769325256,
      "step": 25
    },
    {
      "epoch": 1.1214953271028036,
      "grad_norm": 19.1123319245986,
      "learning_rate": 8.013173181896282e-07,
      "logits/chosen": -2.379423141479492,
      "logits/rejected": -2.363684892654419,
      "logps/chosen": -256.1286926269531,
      "logps/rejected": -219.66744995117188,
      "loss": 0.3766,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": 2.1505281925201416,
      "rewards/margins": 2.1115310192108154,
      "rewards/rejected": 0.0389973521232605,
      "step": 30
    },
    {
      "epoch": 1.308411214953271,
      "grad_norm": 17.202654912868127,
      "learning_rate": 7.019605024359474e-07,
      "logits/chosen": -2.384429454803467,
      "logits/rejected": -2.3521230220794678,
      "logps/chosen": -237.07943725585938,
      "logps/rejected": -216.90951538085938,
      "loss": 0.2353,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 2.1183865070343018,
      "rewards/margins": 2.790973424911499,
      "rewards/rejected": -0.6725870370864868,
      "step": 35
    },
    {
      "epoch": 1.4953271028037383,
      "grad_norm": 20.863387078156986,
      "learning_rate": 5.918747589082852e-07,
      "logits/chosen": -2.4055588245391846,
      "logits/rejected": -2.3711154460906982,
      "logps/chosen": -236.8618927001953,
      "logps/rejected": -210.20046997070312,
      "loss": 0.2308,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 1.564582109451294,
      "rewards/margins": 2.503974199295044,
      "rewards/rejected": -0.9393919706344604,
      "step": 40
    },
    {
      "epoch": 1.6822429906542056,
      "grad_norm": 19.065414773504852,
      "learning_rate": 4.769082706771303e-07,
      "logits/chosen": -2.3858325481414795,
      "logits/rejected": -2.362159252166748,
      "logps/chosen": -242.7309112548828,
      "logps/rejected": -240.0617218017578,
      "loss": 0.2041,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 2.287132740020752,
      "rewards/margins": 3.6341636180877686,
      "rewards/rejected": -1.3470308780670166,
      "step": 45
    },
    {
      "epoch": 1.8691588785046729,
      "grad_norm": 16.23221592573633,
      "learning_rate": 3.6316850496395855e-07,
      "logits/chosen": -2.3492136001586914,
      "logits/rejected": -2.354576587677002,
      "logps/chosen": -222.81600952148438,
      "logps/rejected": -216.98867797851562,
      "loss": 0.219,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 1.7240461111068726,
      "rewards/margins": 3.0606493949890137,
      "rewards/rejected": -1.336603045463562,
      "step": 50
    },
    {
      "epoch": 1.8691588785046729,
      "eval_logits/chosen": -2.394383430480957,
      "eval_logits/rejected": -2.345257520675659,
      "eval_logps/chosen": -233.8548126220703,
      "eval_logps/rejected": -205.1439971923828,
      "eval_loss": 0.6225951313972473,
      "eval_rewards/accuracies": 0.7395833134651184,
      "eval_rewards/chosen": 1.014007329940796,
      "eval_rewards/margins": 1.1295721530914307,
      "eval_rewards/rejected": -0.11556478589773178,
      "eval_runtime": 51.6397,
      "eval_samples_per_second": 14.717,
      "eval_steps_per_second": 0.232,
      "step": 50
    },
    {
      "epoch": 2.05607476635514,
      "grad_norm": 23.0307089839972,
      "learning_rate": 2.566977607165719e-07,
      "logits/chosen": -2.359964370727539,
      "logits/rejected": -2.3656833171844482,
      "logps/chosen": -237.4097137451172,
      "logps/rejected": -227.85302734375,
      "loss": 0.2486,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 1.7794231176376343,
      "rewards/margins": 3.446876049041748,
      "rewards/rejected": -1.6674525737762451,
      "step": 55
    },
    {
      "epoch": 2.2429906542056073,
      "grad_norm": 14.01114954383873,
      "learning_rate": 1.631521781767214e-07,
      "logits/chosen": -2.347280263900757,
      "logits/rejected": -2.336236000061035,
      "logps/chosen": -222.9915008544922,
      "logps/rejected": -242.89633178710938,
      "loss": 0.1282,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 2.146198034286499,
      "rewards/margins": 3.6815218925476074,
      "rewards/rejected": -1.5353240966796875,
      "step": 60
    },
    {
      "epoch": 2.4299065420560746,
      "grad_norm": 10.13439083687081,
      "learning_rate": 8.75012627008489e-08,
      "logits/chosen": -2.3028604984283447,
      "logits/rejected": -2.2970237731933594,
      "logps/chosen": -230.75216674804688,
      "logps/rejected": -218.7707061767578,
      "loss": 0.1274,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 2.042752504348755,
      "rewards/margins": 3.5585684776306152,
      "rewards/rejected": -1.5158164501190186,
      "step": 65
    },
    {
      "epoch": 2.616822429906542,
      "grad_norm": 14.37288741381732,
      "learning_rate": 3.376388529782215e-08,
      "logits/chosen": -2.2832436561584473,
      "logits/rejected": -2.289004325866699,
      "logps/chosen": -238.990966796875,
      "logps/rejected": -225.0963134765625,
      "loss": 0.1198,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 2.3161845207214355,
      "rewards/margins": 3.950852632522583,
      "rewards/rejected": -1.6346677541732788,
      "step": 70
    },
    {
      "epoch": 2.803738317757009,
      "grad_norm": 10.412412508913112,
      "learning_rate": 4.794784562397458e-09,
      "logits/chosen": -2.3055803775787354,
      "logits/rejected": -2.287616729736328,
      "logps/chosen": -224.54293823242188,
      "logps/rejected": -259.7159423828125,
      "loss": 0.1229,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": 2.4232189655303955,
      "rewards/margins": 4.429986000061035,
      "rewards/rejected": -2.0067672729492188,
      "step": 75
    },
    {
      "epoch": 2.9158878504672896,
      "step": 78,
      "total_flos": 919378820333568.0,
      "train_loss": 0.34083553231679475,
      "train_runtime": 2719.7499,
      "train_samples_per_second": 7.543,
      "train_steps_per_second": 0.029
    }
  ],
  "logging_steps": 5,
  "max_steps": 78,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 919378820333568.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}