|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.2461059190031152,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06230529595015576,
      "grad_norm": 51.38829302969845,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7407402992248535,
      "logits/rejected": -2.726320266723633,
      "logps/chosen": -261.4151916503906,
      "logps/rejected": -221.7285614013672,
      "loss": 0.692,
      "rewards/accuracies": 0.28125,
      "rewards/chosen": 0.0067472741939127445,
      "rewards/margins": -0.002469523111358285,
      "rewards/rejected": 0.009216798469424248,
      "step": 5
    },
    {
      "epoch": 0.12461059190031153,
      "grad_norm": 54.32144262101448,
      "learning_rate": 1e-06,
      "logits/chosen": -2.679516315460205,
      "logits/rejected": -2.6851108074188232,
      "logps/chosen": -244.4600067138672,
      "logps/rejected": -194.29598999023438,
      "loss": 0.6422,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.4551638066768646,
      "rewards/margins": 0.20188376307487488,
      "rewards/rejected": 0.2532801032066345,
      "step": 10
    },
    {
      "epoch": 0.18691588785046728,
      "grad_norm": 44.80793063993453,
      "learning_rate": 9.988343845952696e-07,
      "logits/chosen": -2.4403023719787598,
      "logits/rejected": -2.412696361541748,
      "logps/chosen": -237.53103637695312,
      "logps/rejected": -197.9112091064453,
      "loss": 0.6663,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 1.520341396331787,
      "rewards/margins": 0.5765382051467896,
      "rewards/rejected": 0.9438031315803528,
      "step": 15
    },
    {
      "epoch": 0.24922118380062305,
      "grad_norm": 49.11699733468642,
      "learning_rate": 9.953429730181652e-07,
      "logits/chosen": -2.2657809257507324,
      "logits/rejected": -2.2547507286071777,
      "logps/chosen": -225.1350860595703,
      "logps/rejected": -232.2615203857422,
      "loss": 0.669,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 1.3284717798233032,
      "rewards/margins": 0.6173279285430908,
      "rewards/rejected": 0.7111440896987915,
      "step": 20
    },
    {
      "epoch": 0.3115264797507788,
      "grad_norm": 43.743825005172326,
      "learning_rate": 9.895420438411615e-07,
      "logits/chosen": -2.223740577697754,
      "logits/rejected": -2.239903688430786,
      "logps/chosen": -245.8108673095703,
      "logps/rejected": -246.0238037109375,
      "loss": 0.6389,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 1.2945916652679443,
      "rewards/margins": 0.8869680166244507,
      "rewards/rejected": 0.407623827457428,
      "step": 25
    },
    {
      "epoch": 0.37383177570093457,
      "grad_norm": 38.88842184904586,
      "learning_rate": 9.814586436738997e-07,
      "logits/chosen": -2.302748680114746,
      "logits/rejected": -2.31158185005188,
      "logps/chosen": -246.70578002929688,
      "logps/rejected": -217.460205078125,
      "loss": 0.6192,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": 1.3854382038116455,
      "rewards/margins": 1.0373018980026245,
      "rewards/rejected": 0.34813636541366577,
      "step": 30
    },
    {
      "epoch": 0.43613707165109034,
      "grad_norm": 51.72443769242076,
      "learning_rate": 9.711304610594102e-07,
      "logits/chosen": -2.3440041542053223,
      "logits/rejected": -2.31417179107666,
      "logps/chosen": -224.3390350341797,
      "logps/rejected": -194.77621459960938,
      "loss": 0.5888,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.972150981426239,
      "rewards/margins": 0.8106788396835327,
      "rewards/rejected": 0.1614721715450287,
      "step": 35
    },
    {
      "epoch": 0.4984423676012461,
      "grad_norm": 41.52935591539919,
      "learning_rate": 9.586056507527264e-07,
      "logits/chosen": -2.383333683013916,
      "logits/rejected": -2.34763765335083,
      "logps/chosen": -246.3532257080078,
      "logps/rejected": -218.7850341796875,
      "loss": 0.6391,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 1.0201754570007324,
      "rewards/margins": 1.0443416833877563,
      "rewards/rejected": -0.02416619285941124,
      "step": 40
    },
    {
      "epoch": 0.5607476635514018,
      "grad_norm": 37.760052208675,
      "learning_rate": 9.439426092011875e-07,
      "logits/chosen": -2.4347915649414062,
      "logits/rejected": -2.3998470306396484,
      "logps/chosen": -232.26266479492188,
      "logps/rejected": -218.72866821289062,
      "loss": 0.5939,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 1.1044284105300903,
      "rewards/margins": 0.9438824653625488,
      "rewards/rejected": 0.16054585576057434,
      "step": 45
    },
    {
      "epoch": 0.6230529595015576,
      "grad_norm": 37.571273896281745,
      "learning_rate": 9.272097022732443e-07,
      "logits/chosen": -2.45988130569458,
      "logits/rejected": -2.4507269859313965,
      "logps/chosen": -249.6595916748047,
      "logps/rejected": -218.61581420898438,
      "loss": 0.5547,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 1.192905306816101,
      "rewards/margins": 1.339156985282898,
      "rewards/rejected": -0.14625166356563568,
      "step": 50
    },
    {
      "epoch": 0.6230529595015576,
      "eval_logits/chosen": -2.489163637161255,
      "eval_logits/rejected": -2.487579107284546,
      "eval_logps/chosen": -237.33294677734375,
      "eval_logps/rejected": -211.63194274902344,
      "eval_loss": 0.5780755281448364,
      "eval_rewards/accuracies": 0.7222222089767456,
      "eval_rewards/chosen": 0.9537798762321472,
      "eval_rewards/margins": 1.0286411046981812,
      "eval_rewards/rejected": -0.07486122101545334,
      "eval_runtime": 152.1375,
      "eval_samples_per_second": 14.986,
      "eval_steps_per_second": 0.237,
      "step": 50
    },
    {
      "epoch": 0.6853582554517134,
      "grad_norm": 54.80320093683614,
      "learning_rate": 9.084849465052209e-07,
      "logits/chosen": -2.485079765319824,
      "logits/rejected": -2.4753665924072266,
      "logps/chosen": -235.5215301513672,
      "logps/rejected": -220.1900177001953,
      "loss": 0.5909,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 0.7905256152153015,
      "rewards/margins": 1.0766956806182861,
      "rewards/rejected": -0.286170095205307,
      "step": 55
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 39.23815457631208,
      "learning_rate": 8.878556453522099e-07,
      "logits/chosen": -2.4538919925689697,
      "logits/rejected": -2.454207181930542,
      "logps/chosen": -251.35598754882812,
      "logps/rejected": -231.8341522216797,
      "loss": 0.5733,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.9427865743637085,
      "rewards/margins": 1.1441007852554321,
      "rewards/rejected": -0.2013140171766281,
      "step": 60
    },
    {
      "epoch": 0.8099688473520249,
      "grad_norm": 47.13645309746499,
      "learning_rate": 8.654179821390621e-07,
      "logits/chosen": -2.4390835762023926,
      "logits/rejected": -2.449341297149658,
      "logps/chosen": -262.5836486816406,
      "logps/rejected": -209.0801239013672,
      "loss": 0.5441,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 1.3532841205596924,
      "rewards/margins": 1.4332042932510376,
      "rewards/rejected": -0.07991998642683029,
      "step": 65
    },
    {
      "epoch": 0.8722741433021807,
      "grad_norm": 40.00645275405077,
      "learning_rate": 8.41276571609327e-07,
      "logits/chosen": -2.4375357627868652,
      "logits/rejected": -2.4228920936584473,
      "logps/chosen": -246.0580291748047,
      "logps/rejected": -219.20260620117188,
      "loss": 0.5816,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 1.0882443189620972,
      "rewards/margins": 1.3165982961654663,
      "rewards/rejected": -0.22835393249988556,
      "step": 70
    },
    {
      "epoch": 0.9345794392523364,
      "grad_norm": 34.63673701954024,
      "learning_rate": 8.155439721630264e-07,
      "logits/chosen": -2.4634451866149902,
      "logits/rejected": -2.4578163623809814,
      "logps/chosen": -227.31924438476562,
      "logps/rejected": -217.36508178710938,
      "loss": 0.5334,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.8839371800422668,
      "rewards/margins": 1.3334705829620361,
      "rewards/rejected": -0.4495334029197693,
      "step": 75
    },
    {
      "epoch": 0.9968847352024922,
      "grad_norm": 36.51497796461376,
      "learning_rate": 7.883401610574336e-07,
      "logits/chosen": -2.4859111309051514,
      "logits/rejected": -2.465956211090088,
      "logps/chosen": -242.3914794921875,
      "logps/rejected": -210.991943359375,
      "loss": 0.5281,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.5210741758346558,
      "rewards/margins": 1.2326009273529053,
      "rewards/rejected": -0.7115266919136047,
      "step": 80
    },
    {
      "epoch": 1.0591900311526479,
      "grad_norm": 20.825789256058748,
      "learning_rate": 7.597919750177168e-07,
      "logits/chosen": -2.437802791595459,
      "logits/rejected": -2.436861515045166,
      "logps/chosen": -260.0554504394531,
      "logps/rejected": -234.86740112304688,
      "loss": 0.2507,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 0.9926292300224304,
      "rewards/margins": 2.598191261291504,
      "rewards/rejected": -1.6055622100830078,
      "step": 85
    },
    {
      "epoch": 1.1214953271028036,
      "grad_norm": 17.02799613983615,
      "learning_rate": 7.30032518865576e-07,
      "logits/chosen": -2.417109489440918,
      "logits/rejected": -2.404921770095825,
      "logps/chosen": -233.2266387939453,
      "logps/rejected": -226.57186889648438,
      "loss": 0.2109,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 1.7625309228897095,
      "rewards/margins": 3.178365707397461,
      "rewards/rejected": -1.4158347845077515,
      "step": 90
    },
    {
      "epoch": 1.1838006230529594,
      "grad_norm": 20.45306812774076,
      "learning_rate": 6.992005449231207e-07,
      "logits/chosen": -2.3889553546905518,
      "logits/rejected": -2.38620924949646,
      "logps/chosen": -251.99356079101562,
      "logps/rejected": -243.1147918701172,
      "loss": 0.2198,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 1.9874979257583618,
      "rewards/margins": 3.548366069793701,
      "rewards/rejected": -1.5608683824539185,
      "step": 95
    },
    {
      "epoch": 1.2461059190031152,
      "grad_norm": 16.12410058725181,
      "learning_rate": 6.67439806085493e-07,
      "logits/chosen": -2.338783025741577,
      "logits/rejected": -2.325284004211426,
      "logps/chosen": -240.7113800048828,
      "logps/rejected": -222.7356414794922,
      "loss": 0.2103,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 2.1200156211853027,
      "rewards/margins": 3.5902743339538574,
      "rewards/rejected": -1.4702587127685547,
      "step": 100
    },
    {
      "epoch": 1.2461059190031152,
      "eval_logits/chosen": -2.3138608932495117,
      "eval_logits/rejected": -2.2990024089813232,
      "eval_logps/chosen": -233.84922790527344,
      "eval_logps/rejected": -214.24310302734375,
      "eval_loss": 0.6053553819656372,
      "eval_rewards/accuracies": 0.7777777910232544,
      "eval_rewards/chosen": 1.3021522760391235,
      "eval_rewards/margins": 1.638129472732544,
      "eval_rewards/rejected": -0.33597710728645325,
      "eval_runtime": 151.0351,
      "eval_samples_per_second": 15.096,
      "eval_steps_per_second": 0.238,
      "step": 100
    }
  ],
  "logging_steps": 5,
  "max_steps": 240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1178822762299392.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|