{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.4922118380062304,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06230529595015576,
      "grad_norm": 51.38829302969845,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7407402992248535,
      "logits/rejected": -2.726320266723633,
      "logps/chosen": -261.4151916503906,
      "logps/rejected": -221.7285614013672,
      "loss": 0.692,
      "rewards/accuracies": 0.28125,
      "rewards/chosen": 0.0067472741939127445,
      "rewards/margins": -0.002469523111358285,
      "rewards/rejected": 0.009216798469424248,
      "step": 5
    },
    {
      "epoch": 0.12461059190031153,
      "grad_norm": 54.32144262101448,
      "learning_rate": 1e-06,
      "logits/chosen": -2.679516315460205,
      "logits/rejected": -2.6851108074188232,
      "logps/chosen": -244.4600067138672,
      "logps/rejected": -194.29598999023438,
      "loss": 0.6422,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.4551638066768646,
      "rewards/margins": 0.20188376307487488,
      "rewards/rejected": 0.2532801032066345,
      "step": 10
    },
    {
      "epoch": 0.18691588785046728,
      "grad_norm": 44.80793063993453,
      "learning_rate": 9.988343845952696e-07,
      "logits/chosen": -2.4403023719787598,
      "logits/rejected": -2.412696361541748,
      "logps/chosen": -237.53103637695312,
      "logps/rejected": -197.9112091064453,
      "loss": 0.6663,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 1.520341396331787,
      "rewards/margins": 0.5765382051467896,
      "rewards/rejected": 0.9438031315803528,
      "step": 15
    },
    {
      "epoch": 0.24922118380062305,
      "grad_norm": 49.11699733468642,
      "learning_rate": 9.953429730181652e-07,
      "logits/chosen": -2.2657809257507324,
      "logits/rejected": -2.2547507286071777,
      "logps/chosen": -225.1350860595703,
      "logps/rejected": -232.2615203857422,
      "loss": 0.669,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 1.3284717798233032,
      "rewards/margins": 0.6173279285430908,
      "rewards/rejected": 0.7111440896987915,
      "step": 20
    },
    {
      "epoch": 0.3115264797507788,
      "grad_norm": 43.743825005172326,
      "learning_rate": 9.895420438411615e-07,
      "logits/chosen": -2.223740577697754,
      "logits/rejected": -2.239903688430786,
      "logps/chosen": -245.8108673095703,
      "logps/rejected": -246.0238037109375,
      "loss": 0.6389,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 1.2945916652679443,
      "rewards/margins": 0.8869680166244507,
      "rewards/rejected": 0.407623827457428,
      "step": 25
    },
    {
      "epoch": 0.37383177570093457,
      "grad_norm": 38.88842184904586,
      "learning_rate": 9.814586436738997e-07,
      "logits/chosen": -2.302748680114746,
      "logits/rejected": -2.31158185005188,
      "logps/chosen": -246.70578002929688,
      "logps/rejected": -217.460205078125,
      "loss": 0.6192,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": 1.3854382038116455,
      "rewards/margins": 1.0373018980026245,
      "rewards/rejected": 0.34813636541366577,
      "step": 30
    },
    {
      "epoch": 0.43613707165109034,
      "grad_norm": 51.72443769242076,
      "learning_rate": 9.711304610594102e-07,
      "logits/chosen": -2.3440041542053223,
      "logits/rejected": -2.31417179107666,
      "logps/chosen": -224.3390350341797,
      "logps/rejected": -194.77621459960938,
      "loss": 0.5888,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.972150981426239,
      "rewards/margins": 0.8106788396835327,
      "rewards/rejected": 0.1614721715450287,
      "step": 35
    },
    {
      "epoch": 0.4984423676012461,
      "grad_norm": 41.52935591539919,
      "learning_rate": 9.586056507527264e-07,
      "logits/chosen": -2.383333683013916,
      "logits/rejected": -2.34763765335083,
      "logps/chosen": -246.3532257080078,
      "logps/rejected": -218.7850341796875,
      "loss": 0.6391,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 1.0201754570007324,
      "rewards/margins": 1.0443416833877563,
      "rewards/rejected": -0.02416619285941124,
      "step": 40
    },
    {
      "epoch": 0.5607476635514018,
      "grad_norm": 37.760052208675,
      "learning_rate": 9.439426092011875e-07,
      "logits/chosen": -2.4347915649414062,
      "logits/rejected": -2.3998470306396484,
      "logps/chosen": -232.26266479492188,
      "logps/rejected": -218.72866821289062,
      "loss": 0.5939,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 1.1044284105300903,
      "rewards/margins": 0.9438824653625488,
      "rewards/rejected": 0.16054585576057434,
      "step": 45
    },
    {
      "epoch": 0.6230529595015576,
      "grad_norm": 37.571273896281745,
      "learning_rate": 9.272097022732443e-07,
      "logits/chosen": -2.45988130569458,
      "logits/rejected": -2.4507269859313965,
      "logps/chosen": -249.6595916748047,
      "logps/rejected": -218.61581420898438,
      "loss": 0.5547,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 1.192905306816101,
      "rewards/margins": 1.339156985282898,
      "rewards/rejected": -0.14625166356563568,
      "step": 50
    },
    {
      "epoch": 0.6230529595015576,
      "eval_logits/chosen": -2.489163637161255,
      "eval_logits/rejected": -2.487579107284546,
      "eval_logps/chosen": -237.33294677734375,
      "eval_logps/rejected": -211.63194274902344,
      "eval_loss": 0.5780755281448364,
      "eval_rewards/accuracies": 0.7222222089767456,
      "eval_rewards/chosen": 0.9537798762321472,
      "eval_rewards/margins": 1.0286411046981812,
      "eval_rewards/rejected": -0.07486122101545334,
      "eval_runtime": 152.1375,
      "eval_samples_per_second": 14.986,
      "eval_steps_per_second": 0.237,
      "step": 50
    },
    {
      "epoch": 0.6853582554517134,
      "grad_norm": 54.80320093683614,
      "learning_rate": 9.084849465052209e-07,
      "logits/chosen": -2.485079765319824,
      "logits/rejected": -2.4753665924072266,
      "logps/chosen": -235.5215301513672,
      "logps/rejected": -220.1900177001953,
      "loss": 0.5909,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 0.7905256152153015,
      "rewards/margins": 1.0766956806182861,
      "rewards/rejected": -0.286170095205307,
      "step": 55
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 39.23815457631208,
      "learning_rate": 8.878556453522099e-07,
      "logits/chosen": -2.4538919925689697,
      "logits/rejected": -2.454207181930542,
      "logps/chosen": -251.35598754882812,
      "logps/rejected": -231.8341522216797,
      "loss": 0.5733,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.9427865743637085,
      "rewards/margins": 1.1441007852554321,
      "rewards/rejected": -0.2013140171766281,
      "step": 60
    },
    {
      "epoch": 0.8099688473520249,
      "grad_norm": 47.13645309746499,
      "learning_rate": 8.654179821390621e-07,
      "logits/chosen": -2.4390835762023926,
      "logits/rejected": -2.449341297149658,
      "logps/chosen": -262.5836486816406,
      "logps/rejected": -209.0801239013672,
      "loss": 0.5441,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 1.3532841205596924,
      "rewards/margins": 1.4332042932510376,
      "rewards/rejected": -0.07991998642683029,
      "step": 65
    },
    {
      "epoch": 0.8722741433021807,
      "grad_norm": 40.00645275405077,
      "learning_rate": 8.41276571609327e-07,
      "logits/chosen": -2.4375357627868652,
      "logits/rejected": -2.4228920936584473,
      "logps/chosen": -246.0580291748047,
      "logps/rejected": -219.20260620117188,
      "loss": 0.5816,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 1.0882443189620972,
      "rewards/margins": 1.3165982961654663,
      "rewards/rejected": -0.22835393249988556,
      "step": 70
    },
    {
      "epoch": 0.9345794392523364,
      "grad_norm": 34.63673701954024,
      "learning_rate": 8.155439721630264e-07,
      "logits/chosen": -2.4634451866149902,
      "logits/rejected": -2.4578163623809814,
      "logps/chosen": -227.31924438476562,
      "logps/rejected": -217.36508178710938,
      "loss": 0.5334,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.8839371800422668,
      "rewards/margins": 1.3334705829620361,
      "rewards/rejected": -0.4495334029197693,
      "step": 75
    },
    {
      "epoch": 0.9968847352024922,
      "grad_norm": 36.51497796461376,
      "learning_rate": 7.883401610574336e-07,
      "logits/chosen": -2.4859111309051514,
      "logits/rejected": -2.465956211090088,
      "logps/chosen": -242.3914794921875,
      "logps/rejected": -210.991943359375,
      "loss": 0.5281,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.5210741758346558,
      "rewards/margins": 1.2326009273529053,
      "rewards/rejected": -0.7115266919136047,
      "step": 80
    },
    {
      "epoch": 1.0591900311526479,
      "grad_norm": 20.825789256058748,
      "learning_rate": 7.597919750177168e-07,
      "logits/chosen": -2.437802791595459,
      "logits/rejected": -2.436861515045166,
      "logps/chosen": -260.0554504394531,
      "logps/rejected": -234.86740112304688,
      "loss": 0.2507,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 0.9926292300224304,
      "rewards/margins": 2.598191261291504,
      "rewards/rejected": -1.6055622100830078,
      "step": 85
    },
    {
      "epoch": 1.1214953271028036,
      "grad_norm": 17.02799613983615,
      "learning_rate": 7.30032518865576e-07,
      "logits/chosen": -2.417109489440918,
      "logits/rejected": -2.404921770095825,
      "logps/chosen": -233.2266387939453,
      "logps/rejected": -226.57186889648438,
      "loss": 0.2109,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 1.7625309228897095,
      "rewards/margins": 3.178365707397461,
      "rewards/rejected": -1.4158347845077515,
      "step": 90
    },
    {
      "epoch": 1.1838006230529594,
      "grad_norm": 20.45306812774076,
      "learning_rate": 6.992005449231207e-07,
      "logits/chosen": -2.3889553546905518,
      "logits/rejected": -2.38620924949646,
      "logps/chosen": -251.99356079101562,
      "logps/rejected": -243.1147918701172,
      "loss": 0.2198,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 1.9874979257583618,
      "rewards/margins": 3.548366069793701,
      "rewards/rejected": -1.5608683824539185,
      "step": 95
    },
    {
      "epoch": 1.2461059190031152,
      "grad_norm": 16.12410058725181,
      "learning_rate": 6.67439806085493e-07,
      "logits/chosen": -2.338783025741577,
      "logits/rejected": -2.325284004211426,
      "logps/chosen": -240.7113800048828,
      "logps/rejected": -222.7356414794922,
      "loss": 0.2103,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 2.1200156211853027,
      "rewards/margins": 3.5902743339538574,
      "rewards/rejected": -1.4702587127685547,
      "step": 100
    },
    {
      "epoch": 1.2461059190031152,
      "eval_logits/chosen": -2.3138608932495117,
      "eval_logits/rejected": -2.2990024089813232,
      "eval_logps/chosen": -233.84922790527344,
      "eval_logps/rejected": -214.24310302734375,
      "eval_loss": 0.6053553819656372,
      "eval_rewards/accuracies": 0.7777777910232544,
      "eval_rewards/chosen": 1.3021522760391235,
      "eval_rewards/margins": 1.638129472732544,
      "eval_rewards/rejected": -0.33597710728645325,
      "eval_runtime": 151.0351,
      "eval_samples_per_second": 15.096,
      "eval_steps_per_second": 0.238,
      "step": 100
    },
    {
      "epoch": 1.308411214953271,
      "grad_norm": 24.653682630335922,
      "learning_rate": 6.348983855785121e-07,
      "logits/chosen": -2.2989373207092285,
      "logits/rejected": -2.290553569793701,
      "logps/chosen": -242.45651245117188,
      "logps/rejected": -214.2149658203125,
      "loss": 0.2427,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 1.9658845663070679,
      "rewards/margins": 3.365657091140747,
      "rewards/rejected": -1.3997727632522583,
      "step": 105
    },
    {
      "epoch": 1.3707165109034267,
      "grad_norm": 23.99763993646845,
      "learning_rate": 6.01728006526317e-07,
      "logits/chosen": -2.2708828449249268,
      "logits/rejected": -2.22404408454895,
      "logps/chosen": -240.62039184570312,
      "logps/rejected": -212.97265625,
      "loss": 0.2437,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.018810749053955,
      "rewards/margins": 3.461576461791992,
      "rewards/rejected": -1.442765474319458,
      "step": 110
    },
    {
      "epoch": 1.4330218068535825,
      "grad_norm": 28.797618169798415,
      "learning_rate": 5.680833245481234e-07,
      "logits/chosen": -2.270700693130493,
      "logits/rejected": -2.254040002822876,
      "logps/chosen": -244.4936981201172,
      "logps/rejected": -235.03982543945312,
      "loss": 0.3121,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 2.1543164253234863,
      "rewards/margins": 3.647374391555786,
      "rewards/rejected": -1.4930576086044312,
      "step": 115
    },
    {
      "epoch": 1.4953271028037383,
      "grad_norm": 23.473075262058984,
      "learning_rate": 5.341212066823355e-07,
      "logits/chosen": -2.3658766746520996,
      "logits/rejected": -2.3159148693084717,
      "logps/chosen": -232.2969970703125,
      "logps/rejected": -243.86593627929688,
      "loss": 0.2509,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 2.186516284942627,
      "rewards/margins": 3.8642489910125732,
      "rewards/rejected": -1.6777331829071045,
      "step": 120
    },
    {
      "epoch": 1.557632398753894,
      "grad_norm": 20.1682623902596,
      "learning_rate": 5e-07,
      "logits/chosen": -2.3667149543762207,
      "logits/rejected": -2.360989809036255,
      "logps/chosen": -233.6787109375,
      "logps/rejected": -218.07400512695312,
      "loss": 0.2581,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 2.068718671798706,
      "rewards/margins": 3.0430657863616943,
      "rewards/rejected": -0.9743471145629883,
      "step": 125
    },
    {
      "epoch": 1.6199376947040498,
      "grad_norm": 25.280683256501696,
      "learning_rate": 4.6587879331766457e-07,
      "logits/chosen": -2.388986825942993,
      "logits/rejected": -2.3711230754852295,
      "logps/chosen": -235.60311889648438,
      "logps/rejected": -224.765625,
      "loss": 0.2676,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 2.094482183456421,
      "rewards/margins": 3.472238063812256,
      "rewards/rejected": -1.377756118774414,
      "step": 130
    },
    {
      "epoch": 1.6822429906542056,
      "grad_norm": 27.72459722923311,
      "learning_rate": 4.3191667545187675e-07,
      "logits/chosen": -2.3894925117492676,
      "logits/rejected": -2.373619556427002,
      "logps/chosen": -227.37002563476562,
      "logps/rejected": -210.1580047607422,
      "loss": 0.3106,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 1.8648887872695923,
      "rewards/margins": 3.196420192718506,
      "rewards/rejected": -1.3315311670303345,
      "step": 135
    },
    {
      "epoch": 1.7445482866043613,
      "grad_norm": 31.87683565676033,
      "learning_rate": 3.9827199347368317e-07,
      "logits/chosen": -2.4143717288970947,
      "logits/rejected": -2.3826475143432617,
      "logps/chosen": -239.6693572998047,
      "logps/rejected": -235.18310546875,
      "loss": 0.3051,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 2.101874589920044,
      "rewards/margins": 3.821915864944458,
      "rewards/rejected": -1.7200415134429932,
      "step": 140
    },
    {
      "epoch": 1.8068535825545171,
      "grad_norm": 22.52672353938926,
      "learning_rate": 3.651016144214878e-07,
      "logits/chosen": -2.409569263458252,
      "logits/rejected": -2.3817169666290283,
      "logps/chosen": -234.31216430664062,
      "logps/rejected": -235.0973358154297,
      "loss": 0.2524,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 2.0497496128082275,
      "rewards/margins": 3.438047409057617,
      "rewards/rejected": -1.3882980346679688,
      "step": 145
    },
    {
      "epoch": 1.8691588785046729,
      "grad_norm": 18.620721977843708,
      "learning_rate": 3.325601939145069e-07,
      "logits/chosen": -2.389521360397339,
      "logits/rejected": -2.376431465148926,
      "logps/chosen": -227.772216796875,
      "logps/rejected": -229.7588348388672,
      "loss": 0.2095,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 1.8472219705581665,
      "rewards/margins": 3.8442161083221436,
      "rewards/rejected": -1.9969940185546875,
      "step": 150
    },
    {
      "epoch": 1.8691588785046729,
      "eval_logits/chosen": -2.387160539627075,
      "eval_logits/rejected": -2.3737096786499023,
      "eval_logps/chosen": -232.7999725341797,
      "eval_logps/rejected": -216.12271118164062,
      "eval_loss": 0.5997537970542908,
      "eval_rewards/accuracies": 0.7743055820465088,
      "eval_rewards/chosen": 1.4070783853530884,
      "eval_rewards/margins": 1.9310154914855957,
      "eval_rewards/rejected": -0.5239372253417969,
      "eval_runtime": 151.4198,
      "eval_samples_per_second": 15.057,
      "eval_steps_per_second": 0.238,
      "step": 150
    },
    {
      "epoch": 1.9314641744548287,
      "grad_norm": 21.107555847149946,
      "learning_rate": 3.007994550768793e-07,
      "logits/chosen": -2.34552264213562,
      "logits/rejected": -2.3452506065368652,
      "logps/chosen": -232.08834838867188,
      "logps/rejected": -241.10519409179688,
      "loss": 0.2781,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 1.9657140970230103,
      "rewards/margins": 3.589655637741089,
      "rewards/rejected": -1.6239420175552368,
      "step": 155
    },
    {
      "epoch": 1.9937694704049844,
      "grad_norm": 27.047203132356024,
      "learning_rate": 2.699674811344239e-07,
      "logits/chosen": -2.3365378379821777,
      "logits/rejected": -2.3133881092071533,
      "logps/chosen": -262.26690673828125,
      "logps/rejected": -240.2264862060547,
      "loss": 0.285,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": 2.5078001022338867,
      "rewards/margins": 3.6130905151367188,
      "rewards/rejected": -1.105290174484253,
      "step": 160
    },
    {
      "epoch": 2.05607476635514,
      "grad_norm": 15.009687081782982,
      "learning_rate": 2.4020802498228334e-07,
      "logits/chosen": -2.3232502937316895,
      "logits/rejected": -2.25954008102417,
      "logps/chosen": -228.8788604736328,
      "logps/rejected": -219.2600860595703,
      "loss": 0.1447,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 2.339390277862549,
      "rewards/margins": 3.8433260917663574,
      "rewards/rejected": -1.5039361715316772,
      "step": 165
    },
    {
      "epoch": 2.1183800623052957,
      "grad_norm": 13.575529236402222,
      "learning_rate": 2.1165983894256646e-07,
      "logits/chosen": -2.3263421058654785,
      "logits/rejected": -2.2757723331451416,
      "logps/chosen": -231.72366333007812,
      "logps/rejected": -209.7088623046875,
      "loss": 0.1306,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": 2.5836586952209473,
      "rewards/margins": 4.131028175354004,
      "rewards/rejected": -1.5473694801330566,
      "step": 170
    },
    {
      "epoch": 2.1806853582554515,
      "grad_norm": 21.912200560703297,
      "learning_rate": 1.8445602783697373e-07,
      "logits/chosen": -2.274022340774536,
      "logits/rejected": -2.27970290184021,
      "logps/chosen": -220.47201538085938,
      "logps/rejected": -237.96298217773438,
      "loss": 0.1521,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 2.145082950592041,
      "rewards/margins": 4.457543849945068,
      "rewards/rejected": -2.3124611377716064,
      "step": 175
    },
    {
      "epoch": 2.2429906542056073,
      "grad_norm": 13.769383169619562,
      "learning_rate": 1.5872342839067304e-07,
      "logits/chosen": -2.27915620803833,
      "logits/rejected": -2.2511637210845947,
      "logps/chosen": -234.8832550048828,
      "logps/rejected": -230.9884796142578,
      "loss": 0.1365,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 2.4758851528167725,
      "rewards/margins": 4.525899887084961,
      "rewards/rejected": -2.050014019012451,
      "step": 180
    },
    {
      "epoch": 2.305295950155763,
      "grad_norm": 15.714968300704413,
      "learning_rate": 1.3458201786093794e-07,
      "logits/chosen": -2.2769675254821777,
      "logits/rejected": -2.2372546195983887,
      "logps/chosen": -254.29257202148438,
      "logps/rejected": -231.6654815673828,
      "loss": 0.1485,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 2.103557586669922,
      "rewards/margins": 4.048580169677734,
      "rewards/rejected": -1.9450223445892334,
      "step": 185
    },
    {
      "epoch": 2.367601246105919,
      "grad_norm": 17.400239039982335,
      "learning_rate": 1.1214435464779003e-07,
      "logits/chosen": -2.257563591003418,
      "logits/rejected": -2.221086025238037,
      "logps/chosen": -246.0885009765625,
      "logps/rejected": -243.9921417236328,
      "loss": 0.1361,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 2.082960605621338,
      "rewards/margins": 4.483183860778809,
      "rewards/rejected": -2.4002232551574707,
      "step": 190
    },
    {
      "epoch": 2.4299065420560746,
      "grad_norm": 18.536501030355655,
      "learning_rate": 9.1515053494779e-08,
      "logits/chosen": -2.2309863567352295,
      "logits/rejected": -2.231677532196045,
      "logps/chosen": -252.19198608398438,
      "logps/rejected": -236.55419921875,
      "loss": 0.153,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 2.158655881881714,
      "rewards/margins": 4.387732982635498,
      "rewards/rejected": -2.229076862335205,
      "step": 195
    },
    {
      "epoch": 2.4922118380062304,
      "grad_norm": 16.28538830494891,
      "learning_rate": 7.279029772675571e-08,
      "logits/chosen": -2.2156031131744385,
      "logits/rejected": -2.206735849380493,
      "logps/chosen": -233.7646942138672,
      "logps/rejected": -236.1497039794922,
      "loss": 0.1498,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 2.1195907592773438,
      "rewards/margins": 3.97522234916687,
      "rewards/rejected": -1.8556314706802368,
      "step": 200
    },
    {
      "epoch": 2.4922118380062304,
      "eval_logits/chosen": -2.2468643188476562,
      "eval_logits/rejected": -2.225817918777466,
      "eval_logps/chosen": -235.95494079589844,
      "eval_logps/rejected": -220.96902465820312,
      "eval_loss": 0.5972098112106323,
      "eval_rewards/accuracies": 0.7847222089767456,
      "eval_rewards/chosen": 1.0915825366973877,
      "eval_rewards/margins": 2.100149393081665,
      "eval_rewards/rejected": -1.008566975593567,
      "eval_runtime": 151.4569,
      "eval_samples_per_second": 15.054,
      "eval_steps_per_second": 0.238,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2358113407598592.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}