{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9977426636568847,
  "eval_steps": 50,
  "global_step": 996,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015048908954100828,
      "grad_norm": 18.3067113765928,
      "learning_rate": 5e-07,
      "loss": 1.7267,
      "step": 5
    },
    {
      "epoch": 0.030097817908201655,
      "grad_norm": 12.425894386937017,
      "learning_rate": 1e-06,
      "loss": 1.5935,
      "step": 10
    },
    {
      "epoch": 0.045146726862302484,
      "grad_norm": 7.656122541492926,
      "learning_rate": 9.999365521737421e-07,
      "loss": 1.2846,
      "step": 15
    },
    {
      "epoch": 0.06019563581640331,
      "grad_norm": 4.2346112922619135,
      "learning_rate": 9.99746224797475e-07,
      "loss": 1.147,
      "step": 20
    },
    {
      "epoch": 0.07524454477050414,
      "grad_norm": 3.7327484351825992,
      "learning_rate": 9.99429066174632e-07,
      "loss": 1.0702,
      "step": 25
    },
    {
      "epoch": 0.09029345372460497,
      "grad_norm": 3.53430215111439,
      "learning_rate": 9.989851567973138e-07,
      "loss": 1.0299,
      "step": 30
    },
    {
      "epoch": 0.1053423626787058,
      "grad_norm": 3.65219200982227,
      "learning_rate": 9.984146093258608e-07,
      "loss": 0.996,
      "step": 35
    },
    {
      "epoch": 0.12039127163280662,
      "grad_norm": 3.475510536585853,
      "learning_rate": 9.9771756856026e-07,
      "loss": 0.9736,
      "step": 40
    },
    {
      "epoch": 0.13544018058690746,
      "grad_norm": 3.5641491900715456,
      "learning_rate": 9.968942114033973e-07,
      "loss": 0.9431,
      "step": 45
    },
    {
      "epoch": 0.1504890895410083,
      "grad_norm": 3.310177183723562,
      "learning_rate": 9.959447468161596e-07,
      "loss": 0.9273,
      "step": 50
    },
    {
      "epoch": 0.1504890895410083,
      "eval_loss": 0.936109185218811,
      "eval_runtime": 166.5093,
      "eval_samples_per_second": 56.754,
      "eval_steps_per_second": 0.889,
      "step": 50
    },
    {
      "epoch": 0.1655379984951091,
      "grad_norm": 3.7634083628541792,
      "learning_rate": 9.948694157644042e-07,
      "loss": 0.9287,
      "step": 55
    },
    {
      "epoch": 0.18058690744920994,
      "grad_norm": 3.4603396193970557,
      "learning_rate": 9.936684911578017e-07,
      "loss": 0.9232,
      "step": 60
    },
    {
      "epoch": 0.19563581640331076,
      "grad_norm": 3.709365715188229,
      "learning_rate": 9.923422777805751e-07,
      "loss": 0.9329,
      "step": 65
    },
    {
      "epoch": 0.2106847253574116,
      "grad_norm": 3.3261104359806892,
      "learning_rate": 9.908911122141486e-07,
      "loss": 0.8932,
      "step": 70
    },
    {
      "epoch": 0.22573363431151242,
      "grad_norm": 3.333593966518852,
      "learning_rate": 9.893153627517248e-07,
      "loss": 0.8696,
      "step": 75
    },
    {
      "epoch": 0.24078254326561324,
      "grad_norm": 3.75439485313761,
      "learning_rate": 9.876154293048163e-07,
      "loss": 0.8977,
      "step": 80
    },
    {
      "epoch": 0.2558314522197141,
      "grad_norm": 3.665753400039767,
      "learning_rate": 9.857917433017508e-07,
      "loss": 0.8997,
      "step": 85
    },
    {
      "epoch": 0.2708803611738149,
      "grad_norm": 3.8591465347932377,
      "learning_rate": 9.838447675781793e-07,
      "loss": 0.8567,
      "step": 90
    },
    {
      "epoch": 0.28592927012791575,
      "grad_norm": 3.4759441065159047,
      "learning_rate": 9.817749962596114e-07,
      "loss": 0.8692,
      "step": 95
    },
    {
      "epoch": 0.3009781790820166,
      "grad_norm": 3.5256398646315157,
      "learning_rate": 9.795829546360113e-07,
      "loss": 0.8938,
      "step": 100
    },
    {
      "epoch": 0.3009781790820166,
      "eval_loss": 0.8804982900619507,
      "eval_runtime": 165.1088,
      "eval_samples_per_second": 57.235,
      "eval_steps_per_second": 0.896,
      "step": 100
    },
    {
      "epoch": 0.3160270880361174,
      "grad_norm": 3.1624855062390287,
      "learning_rate": 9.77269199028483e-07,
      "loss": 0.8744,
      "step": 105
    },
    {
      "epoch": 0.3310759969902182,
      "grad_norm": 3.483610150725716,
      "learning_rate": 9.748343166480822e-07,
      "loss": 0.8506,
      "step": 110
    },
    {
      "epoch": 0.34612490594431905,
      "grad_norm": 3.530971931358358,
      "learning_rate": 9.722789254467854e-07,
      "loss": 0.8583,
      "step": 115
    },
    {
      "epoch": 0.3611738148984199,
      "grad_norm": 3.6587864102748275,
      "learning_rate": 9.696036739606606e-07,
      "loss": 0.8699,
      "step": 120
    },
    {
      "epoch": 0.3762227238525207,
      "grad_norm": 3.559601017867941,
      "learning_rate": 9.668092411452735e-07,
      "loss": 0.87,
      "step": 125
    },
    {
      "epoch": 0.3912716328066215,
      "grad_norm": 3.470737417836157,
      "learning_rate": 9.638963362033756e-07,
      "loss": 0.8627,
      "step": 130
    },
    {
      "epoch": 0.40632054176072235,
      "grad_norm": 3.688913494188485,
      "learning_rate": 9.608656984049132e-07,
      "loss": 0.842,
      "step": 135
    },
    {
      "epoch": 0.4213694507148232,
      "grad_norm": 3.5434282594897843,
      "learning_rate": 9.577180968994081e-07,
      "loss": 0.8367,
      "step": 140
    },
    {
      "epoch": 0.436418359668924,
      "grad_norm": 3.5450302290364517,
      "learning_rate": 9.544543305207546e-07,
      "loss": 0.8565,
      "step": 145
    },
    {
      "epoch": 0.45146726862302483,
      "grad_norm": 3.6995845437807677,
      "learning_rate": 9.510752275844809e-07,
      "loss": 0.8476,
      "step": 150
    },
    {
      "epoch": 0.45146726862302483,
      "eval_loss": 0.8552336692810059,
      "eval_runtime": 164.2631,
      "eval_samples_per_second": 57.53,
      "eval_steps_per_second": 0.901,
      "step": 150
    },
    {
      "epoch": 0.46651617757712566,
      "grad_norm": 3.5832071794600626,
      "learning_rate": 9.475816456775312e-07,
      "loss": 0.8419,
      "step": 155
    },
    {
      "epoch": 0.4815650865312265,
      "grad_norm": 3.4551108533194355,
      "learning_rate": 9.439744714406166e-07,
      "loss": 0.8571,
      "step": 160
    },
    {
      "epoch": 0.4966139954853273,
      "grad_norm": 3.463691802146643,
      "learning_rate": 9.402546203431947e-07,
      "loss": 0.8552,
      "step": 165
    },
    {
      "epoch": 0.5116629044394282,
      "grad_norm": 3.6122617820665965,
      "learning_rate": 9.364230364511295e-07,
      "loss": 0.8284,
      "step": 170
    },
    {
      "epoch": 0.526711813393529,
      "grad_norm": 3.5227479646686626,
      "learning_rate": 9.324806921870975e-07,
      "loss": 0.8445,
      "step": 175
    },
    {
      "epoch": 0.5417607223476298,
      "grad_norm": 3.1608919125871213,
      "learning_rate": 9.284285880837946e-07,
      "loss": 0.8467,
      "step": 180
    },
    {
      "epoch": 0.5568096313017307,
      "grad_norm": 3.397778366526389,
      "learning_rate": 9.242677525300088e-07,
      "loss": 0.8444,
      "step": 185
    },
    {
      "epoch": 0.5718585402558315,
      "grad_norm": 3.623712344003,
      "learning_rate": 9.199992415096259e-07,
      "loss": 0.8329,
      "step": 190
    },
    {
      "epoch": 0.5869074492099323,
      "grad_norm": 3.6817969267423,
      "learning_rate": 9.156241383336278e-07,
      "loss": 0.8393,
      "step": 195
    },
    {
      "epoch": 0.6019563581640331,
      "grad_norm": 3.7179065033691803,
      "learning_rate": 9.111435533651595e-07,
      "loss": 0.8413,
      "step": 200
    },
    {
      "epoch": 0.6019563581640331,
      "eval_loss": 0.8378457427024841,
      "eval_runtime": 164.1905,
      "eval_samples_per_second": 57.555,
      "eval_steps_per_second": 0.901,
      "step": 200
    },
    {
      "epoch": 0.617005267118134,
      "grad_norm": 3.461402352739981,
      "learning_rate": 9.065586237377274e-07,
      "loss": 0.8383,
      "step": 205
    },
    {
      "epoch": 0.6320541760722348,
      "grad_norm": 3.6508042748668013,
      "learning_rate": 9.018705130666049e-07,
      "loss": 0.846,
      "step": 210
    },
    {
      "epoch": 0.6471030850263356,
      "grad_norm": 3.860811653516243,
      "learning_rate": 8.970804111535175e-07,
      "loss": 0.8348,
      "step": 215
    },
    {
      "epoch": 0.6621519939804364,
      "grad_norm": 3.503371889729855,
      "learning_rate": 8.921895336846812e-07,
      "loss": 0.8237,
      "step": 220
    },
    {
      "epoch": 0.6772009029345373,
      "grad_norm": 3.534504520782972,
      "learning_rate": 8.871991219222712e-07,
      "loss": 0.8363,
      "step": 225
    },
    {
      "epoch": 0.6922498118886381,
      "grad_norm": 3.4768514044708128,
      "learning_rate": 8.821104423894014e-07,
      "loss": 0.8115,
      "step": 230
    },
    {
      "epoch": 0.7072987208427389,
      "grad_norm": 3.364918499512913,
      "learning_rate": 8.769247865486915e-07,
      "loss": 0.8129,
      "step": 235
    },
    {
      "epoch": 0.7223476297968398,
      "grad_norm": 3.76176938510827,
      "learning_rate": 8.716434704745046e-07,
      "loss": 0.8243,
      "step": 240
    },
    {
      "epoch": 0.7373965387509406,
      "grad_norm": 3.4565749939546704,
      "learning_rate": 8.662678345189396e-07,
      "loss": 0.8394,
      "step": 245
    },
    {
      "epoch": 0.7524454477050414,
      "grad_norm": 3.246463585410726,
      "learning_rate": 8.607992429716608e-07,
      "loss": 0.8254,
      "step": 250
    },
    {
      "epoch": 0.7524454477050414,
      "eval_loss": 0.8255200982093811,
      "eval_runtime": 164.5909,
      "eval_samples_per_second": 57.415,
      "eval_steps_per_second": 0.899,
      "step": 250
    },
    {
      "epoch": 0.7674943566591422,
      "grad_norm": 3.5687284671280004,
      "learning_rate": 8.55239083713654e-07,
      "loss": 0.8209,
      "step": 255
    },
    {
      "epoch": 0.782543265613243,
      "grad_norm": 3.309161579948898,
      "learning_rate": 8.495887678649932e-07,
      "loss": 0.8235,
      "step": 260
    },
    {
      "epoch": 0.7975921745673439,
      "grad_norm": 3.6601438215929787,
      "learning_rate": 8.438497294267116e-07,
      "loss": 0.8148,
      "step": 265
    },
    {
      "epoch": 0.8126410835214447,
      "grad_norm": 3.424592236767194,
      "learning_rate": 8.38023424916864e-07,
      "loss": 0.8237,
      "step": 270
    },
    {
      "epoch": 0.8276899924755455,
      "grad_norm": 3.658233989032351,
      "learning_rate": 8.321113330008756e-07,
      "loss": 0.8199,
      "step": 275
    },
    {
      "epoch": 0.8427389014296464,
      "grad_norm": 3.6686108116776386,
      "learning_rate": 8.261149541162691e-07,
      "loss": 0.8198,
      "step": 280
    },
    {
      "epoch": 0.8577878103837472,
      "grad_norm": 3.564688018245365,
      "learning_rate": 8.20035810091867e-07,
      "loss": 0.833,
      "step": 285
    },
    {
      "epoch": 0.872836719337848,
      "grad_norm": 3.4267054682208222,
      "learning_rate": 8.13875443761565e-07,
      "loss": 0.8336,
      "step": 290
    },
    {
      "epoch": 0.8878856282919488,
      "grad_norm": 3.625662003888318,
      "learning_rate": 8.076354185727734e-07,
      "loss": 0.8211,
      "step": 295
    },
    {
      "epoch": 0.9029345372460497,
      "grad_norm": 3.659547833287384,
      "learning_rate": 8.013173181896282e-07,
      "loss": 0.8091,
      "step": 300
    },
    {
      "epoch": 0.9029345372460497,
      "eval_loss": 0.8148868680000305,
      "eval_runtime": 164.4984,
      "eval_samples_per_second": 57.447,
      "eval_steps_per_second": 0.9,
      "step": 300
    },
    {
      "epoch": 0.9179834462001505,
      "grad_norm": 3.415432660275148,
      "learning_rate": 7.94922746091071e-07,
      "loss": 0.8271,
      "step": 305
    },
    {
      "epoch": 0.9330323551542513,
      "grad_norm": 3.433626617159138,
      "learning_rate": 7.884533251638999e-07,
      "loss": 0.7997,
      "step": 310
    },
    {
      "epoch": 0.9480812641083521,
      "grad_norm": 3.4616411740611226,
      "learning_rate": 7.819106972908949e-07,
      "loss": 0.8131,
      "step": 315
    },
    {
      "epoch": 0.963130173062453,
      "grad_norm": 3.595962547526837,
      "learning_rate": 7.752965229341219e-07,
      "loss": 0.8244,
      "step": 320
    },
    {
      "epoch": 0.9781790820165538,
      "grad_norm": 3.5100709203373666,
      "learning_rate": 7.686124807135228e-07,
      "loss": 0.8056,
      "step": 325
    },
    {
      "epoch": 0.9932279909706546,
      "grad_norm": 3.7018983277934456,
      "learning_rate": 7.618602669808957e-07,
      "loss": 0.8193,
      "step": 330
    },
    {
      "epoch": 1.0082768999247556,
      "grad_norm": 3.5410406166783015,
      "learning_rate": 7.550415953893756e-07,
      "loss": 0.781,
      "step": 335
    },
    {
      "epoch": 1.0233258088788564,
      "grad_norm": 3.576616810803158,
      "learning_rate": 7.481581964585244e-07,
      "loss": 0.7508,
      "step": 340
    },
    {
      "epoch": 1.0383747178329572,
      "grad_norm": 3.5692261780454806,
      "learning_rate": 7.412118171351395e-07,
      "loss": 0.7483,
      "step": 345
    },
    {
      "epoch": 1.053423626787058,
      "grad_norm": 3.364552676871854,
      "learning_rate": 7.342042203498951e-07,
      "loss": 0.7271,
      "step": 350
    },
    {
      "epoch": 1.053423626787058,
      "eval_loss": 0.8104769587516785,
      "eval_runtime": 164.6059,
      "eval_samples_per_second": 57.41,
      "eval_steps_per_second": 0.899,
      "step": 350
    },
    {
      "epoch": 1.0684725357411589,
      "grad_norm": 3.5560550995930544,
      "learning_rate": 7.271371845699241e-07,
      "loss": 0.7538,
      "step": 355
    },
    {
      "epoch": 1.0835214446952597,
      "grad_norm": 3.6783603046250546,
      "learning_rate": 7.200125033474598e-07,
      "loss": 0.7428,
      "step": 360
    },
    {
      "epoch": 1.0985703536493605,
      "grad_norm": 3.4366708935207977,
      "learning_rate": 7.128319848646477e-07,
      "loss": 0.7528,
      "step": 365
    },
    {
      "epoch": 1.1136192626034613,
      "grad_norm": 3.5209701182053026,
      "learning_rate": 7.055974514746445e-07,
      "loss": 0.7583,
      "step": 370
    },
    {
      "epoch": 1.1286681715575622,
      "grad_norm": 3.7508612585686234,
      "learning_rate": 6.983107392391202e-07,
      "loss": 0.7333,
      "step": 375
    },
    {
      "epoch": 1.143717080511663,
      "grad_norm": 3.804536934008722,
      "learning_rate": 6.909736974622826e-07,
      "loss": 0.7367,
      "step": 380
    },
    {
      "epoch": 1.1587659894657638,
      "grad_norm": 3.380810090226087,
      "learning_rate": 6.835881882215395e-07,
      "loss": 0.7389,
      "step": 385
    },
    {
      "epoch": 1.1738148984198646,
      "grad_norm": 3.50522357566982,
      "learning_rate": 6.761560858949192e-07,
      "loss": 0.7272,
      "step": 390
    },
    {
      "epoch": 1.1888638073739655,
      "grad_norm": 3.8495609198880003,
      "learning_rate": 6.686792766853705e-07,
      "loss": 0.7411,
      "step": 395
    },
    {
      "epoch": 1.2039127163280663,
      "grad_norm": 3.4673446441183264,
      "learning_rate": 6.611596581420599e-07,
      "loss": 0.7383,
      "step": 400
    },
    {
      "epoch": 1.2039127163280663,
      "eval_loss": 0.8055551648139954,
      "eval_runtime": 164.521,
      "eval_samples_per_second": 57.439,
      "eval_steps_per_second": 0.9,
      "step": 400
    },
    {
      "epoch": 1.2189616252821671,
      "grad_norm": 3.703622757288821,
      "learning_rate": 6.53599138678791e-07,
      "loss": 0.7447,
      "step": 405
    },
    {
      "epoch": 1.234010534236268,
      "grad_norm": 3.5785402373111386,
      "learning_rate": 6.459996370896652e-07,
      "loss": 0.7143,
      "step": 410
    },
    {
      "epoch": 1.2490594431903688,
      "grad_norm": 3.5402461359971684,
      "learning_rate": 6.383630820621081e-07,
      "loss": 0.7486,
      "step": 415
    },
    {
      "epoch": 1.2641083521444696,
      "grad_norm": 3.243470804610217,
      "learning_rate": 6.306914116873862e-07,
      "loss": 0.7428,
      "step": 420
    },
    {
      "epoch": 1.2791572610985704,
      "grad_norm": 3.431888635712123,
      "learning_rate": 6.22986572968736e-07,
      "loss": 0.7466,
      "step": 425
    },
    {
      "epoch": 1.2942061700526712,
      "grad_norm": 3.6294844548998335,
      "learning_rate": 6.152505213272307e-07,
      "loss": 0.7568,
      "step": 430
    },
    {
      "epoch": 1.309255079006772,
      "grad_norm": 3.5614062494190653,
      "learning_rate": 6.074852201055121e-07,
      "loss": 0.7345,
      "step": 435
    },
    {
      "epoch": 1.324303987960873,
      "grad_norm": 3.3812248567300762,
      "learning_rate": 5.996926400695113e-07,
      "loss": 0.7423,
      "step": 440
    },
    {
      "epoch": 1.3393528969149737,
      "grad_norm": 3.641361393772205,
      "learning_rate": 5.918747589082852e-07,
      "loss": 0.7435,
      "step": 445
    },
    {
      "epoch": 1.3544018058690745,
      "grad_norm": 3.6883754790740704,
      "learning_rate": 5.840335607320963e-07,
      "loss": 0.7543,
      "step": 450
    },
    {
      "epoch": 1.3544018058690745,
      "eval_loss": 0.8005452752113342,
      "eval_runtime": 164.8286,
      "eval_samples_per_second": 57.332,
      "eval_steps_per_second": 0.898,
      "step": 450
    },
    {
      "epoch": 1.3694507148231754,
      "grad_norm": 3.624423891962332,
      "learning_rate": 5.761710355688627e-07,
      "loss": 0.7147,
      "step": 455
    },
    {
      "epoch": 1.3844996237772762,
      "grad_norm": 3.493220831680645,
      "learning_rate": 5.682891788591065e-07,
      "loss": 0.7449,
      "step": 460
    },
    {
      "epoch": 1.399548532731377,
      "grad_norm": 3.5099359704812483,
      "learning_rate": 5.603899909495283e-07,
      "loss": 0.7245,
      "step": 465
    },
    {
      "epoch": 1.4145974416854779,
      "grad_norm": 3.4941852068840817,
      "learning_rate": 5.52475476585336e-07,
      "loss": 0.7431,
      "step": 470
    },
    {
      "epoch": 1.4296463506395787,
      "grad_norm": 3.6975807488475083,
      "learning_rate": 5.445476444014591e-07,
      "loss": 0.7467,
      "step": 475
    },
    {
      "epoch": 1.4446952595936795,
      "grad_norm": 3.8488861949889817,
      "learning_rate": 5.366085064127734e-07,
      "loss": 0.7494,
      "step": 480
    },
    {
      "epoch": 1.4597441685477803,
      "grad_norm": 3.7475349702165035,
      "learning_rate": 5.286600775034699e-07,
      "loss": 0.7488,
      "step": 485
    },
    {
      "epoch": 1.4747930775018812,
      "grad_norm": 3.536002827555251,
      "learning_rate": 5.207043749156944e-07,
      "loss": 0.7455,
      "step": 490
    },
    {
      "epoch": 1.489841986455982,
      "grad_norm": 3.5359647799654454,
      "learning_rate": 5.127434177375893e-07,
      "loss": 0.7245,
      "step": 495
    },
    {
      "epoch": 1.5048908954100828,
      "grad_norm": 3.530405750992252,
      "learning_rate": 5.047792263908659e-07,
      "loss": 0.7373,
      "step": 500
    },
    {
      "epoch": 1.5048908954100828,
      "eval_loss": 0.7960706949234009,
      "eval_runtime": 164.4972,
      "eval_samples_per_second": 57.448,
      "eval_steps_per_second": 0.9,
      "step": 500
    },
    {
      "epoch": 1.5199398043641836,
      "grad_norm": 3.6777960709655773,
      "learning_rate": 4.968138221180401e-07,
      "loss": 0.7177,
      "step": 505
    },
    {
      "epoch": 1.5349887133182845,
      "grad_norm": 3.640353250330635,
      "learning_rate": 4.888492264694565e-07,
      "loss": 0.7274,
      "step": 510
    },
    {
      "epoch": 1.5500376222723853,
      "grad_norm": 3.573323133504263,
      "learning_rate": 4.808874607902397e-07,
      "loss": 0.7248,
      "step": 515
    },
    {
      "epoch": 1.565086531226486,
      "grad_norm": 3.758313365256897,
      "learning_rate": 4.7293054570729126e-07,
      "loss": 0.7228,
      "step": 520
    },
    {
      "epoch": 1.580135440180587,
      "grad_norm": 3.482110494824545,
      "learning_rate": 4.649805006164743e-07,
      "loss": 0.7345,
      "step": 525
    },
    {
      "epoch": 1.5951843491346878,
      "grad_norm": 3.5548114132456803,
      "learning_rate": 4.5703934317010727e-07,
      "loss": 0.7336,
      "step": 530
    },
    {
      "epoch": 1.6102332580887886,
      "grad_norm": 3.47435512141577,
      "learning_rate": 4.491090887649024e-07,
      "loss": 0.7402,
      "step": 535
    },
    {
      "epoch": 1.6252821670428894,
      "grad_norm": 3.6201869053811877,
      "learning_rate": 4.4119175003047407e-07,
      "loss": 0.7417,
      "step": 540
    },
    {
      "epoch": 1.6403310759969902,
      "grad_norm": 3.5658610125401236,
      "learning_rate": 4.3328933631855195e-07,
      "loss": 0.7414,
      "step": 545
    },
    {
      "epoch": 1.655379984951091,
      "grad_norm": 3.6931102044498787,
      "learning_rate": 4.2540385319302524e-07,
      "loss": 0.7145,
      "step": 550
    },
    {
      "epoch": 1.655379984951091,
      "eval_loss": 0.7923677563667297,
      "eval_runtime": 164.3846,
      "eval_samples_per_second": 57.487,
      "eval_steps_per_second": 0.9,
      "step": 550
    },
    {
      "epoch": 1.670428893905192,
      "grad_norm": 3.6634192829178778,
      "learning_rate": 4.175373019209468e-07,
      "loss": 0.736,
      "step": 555
    },
    {
      "epoch": 1.6854778028592927,
      "grad_norm": 3.663246949776699,
      "learning_rate": 4.0969167896463046e-07,
      "loss": 0.7271,
      "step": 560
    },
    {
      "epoch": 1.7005267118133935,
      "grad_norm": 3.833602468227867,
      "learning_rate": 4.018689754749648e-07,
      "loss": 0.7381,
      "step": 565
    },
    {
      "epoch": 1.7155756207674944,
      "grad_norm": 3.599494624246003,
      "learning_rate": 3.9407117678607756e-07,
      "loss": 0.7131,
      "step": 570
    },
    {
      "epoch": 1.7306245297215952,
      "grad_norm": 3.496269655645928,
      "learning_rate": 3.8630026191147405e-07,
      "loss": 0.7202,
      "step": 575
    },
    {
      "epoch": 1.745673438675696,
      "grad_norm": 3.7034173777972894,
      "learning_rate": 3.78558203041782e-07,
      "loss": 0.7279,
      "step": 580
    },
    {
      "epoch": 1.7607223476297968,
      "grad_norm": 4.001879136240641,
      "learning_rate": 3.7084696504422525e-07,
      "loss": 0.7217,
      "step": 585
    },
    {
      "epoch": 1.7757712565838977,
      "grad_norm": 3.8933985790258343,
      "learning_rate": 3.6316850496395855e-07,
      "loss": 0.7405,
      "step": 590
    },
    {
      "epoch": 1.7908201655379985,
      "grad_norm": 3.6403289474674567,
      "learning_rate": 3.555247715273867e-07,
      "loss": 0.7349,
      "step": 595
    },
    {
      "epoch": 1.8058690744920993,
      "grad_norm": 3.5876701469901375,
      "learning_rate": 3.4791770464759347e-07,
      "loss": 0.7176,
      "step": 600
    },
    {
      "epoch": 1.8058690744920993,
      "eval_loss": 0.7886707186698914,
      "eval_runtime": 164.4006,
      "eval_samples_per_second": 57.482,
      "eval_steps_per_second": 0.9,
      "step": 600
    },
    {
      "epoch": 1.8209179834462002,
      "grad_norm": 3.511529577298137,
      "learning_rate": 3.4034923493201007e-07,
      "loss": 0.7286,
      "step": 605
    },
    {
      "epoch": 1.835966892400301,
      "grad_norm": 3.5025696000503546,
      "learning_rate": 3.3282128319244237e-07,
      "loss": 0.7274,
      "step": 610
    },
    {
      "epoch": 1.8510158013544018,
      "grad_norm": 3.7129442617829587,
      "learning_rate": 3.2533575995758694e-07,
      "loss": 0.7503,
      "step": 615
    },
    {
      "epoch": 1.8660647103085026,
      "grad_norm": 3.323612660156572,
      "learning_rate": 3.178945649881543e-07,
      "loss": 0.7278,
      "step": 620
    },
    {
      "epoch": 1.8811136192626035,
      "grad_norm": 3.651561344831796,
      "learning_rate": 3.1049958679472645e-07,
      "loss": 0.7189,
      "step": 625
    },
    {
      "epoch": 1.8961625282167043,
      "grad_norm": 3.5915287409182843,
      "learning_rate": 3.031527021584701e-07,
      "loss": 0.7344,
      "step": 630
    },
    {
      "epoch": 1.911211437170805,
      "grad_norm": 3.7127611003852317,
      "learning_rate": 2.9585577565482484e-07,
      "loss": 0.7278,
      "step": 635
    },
    {
      "epoch": 1.926260346124906,
      "grad_norm": 3.6103190459559844,
      "learning_rate": 2.886106591802908e-07,
      "loss": 0.7482,
      "step": 640
    },
    {
      "epoch": 1.9413092550790068,
      "grad_norm": 3.629391029578448,
      "learning_rate": 2.814191914824332e-07,
      "loss": 0.71,
      "step": 645
    },
    {
      "epoch": 1.9563581640331076,
      "grad_norm": 3.876524735419322,
      "learning_rate": 2.7428319769322415e-07,
      "loss": 0.7384,
      "step": 650
    },
    {
      "epoch": 1.9563581640331076,
      "eval_loss": 0.7857681512832642,
      "eval_runtime": 164.3713,
      "eval_samples_per_second": 57.492,
      "eval_steps_per_second": 0.9,
      "step": 650
    },
    {
      "epoch": 1.9714070729872084,
      "grad_norm": 3.3899320851149786,
      "learning_rate": 2.672044888658399e-07,
      "loss": 0.7152,
      "step": 655
    },
    {
      "epoch": 1.9864559819413092,
      "grad_norm": 3.496799757255535,
      "learning_rate": 2.6018486151503213e-07,
      "loss": 0.715,
      "step": 660
    },
    {
      "epoch": 2.00150489089541,
      "grad_norm": 3.848643688599015,
      "learning_rate": 2.532260971611867e-07,
      "loss": 0.7306,
      "step": 665
    },
    {
      "epoch": 2.016553799849511,
      "grad_norm": 3.700900714630044,
      "learning_rate": 2.4632996187819034e-07,
      "loss": 0.6819,
      "step": 670
    },
    {
      "epoch": 2.0316027088036117,
      "grad_norm": 3.6340025634573396,
      "learning_rate": 2.394982058452165e-07,
      "loss": 0.6779,
      "step": 675
    },
    {
      "epoch": 2.0466516177577128,
      "grad_norm": 3.9705869092986608,
      "learning_rate": 2.3273256290254402e-07,
      "loss": 0.6818,
      "step": 680
    },
    {
      "epoch": 2.0617005267118134,
      "grad_norm": 3.7843167139607377,
      "learning_rate": 2.2603475011152517e-07,
      "loss": 0.701,
      "step": 685
    },
    {
      "epoch": 2.0767494356659144,
      "grad_norm": 3.69408051045692,
      "learning_rate": 2.1940646731880885e-07,
      "loss": 0.6814,
      "step": 690
    },
    {
      "epoch": 2.091798344620015,
      "grad_norm": 3.822497413729019,
      "learning_rate": 2.1284939672493506e-07,
      "loss": 0.6986,
      "step": 695
    },
    {
      "epoch": 2.106847253574116,
      "grad_norm": 3.7297282781569816,
      "learning_rate": 2.0636520245740708e-07,
      "loss": 0.6877,
      "step": 700
    },
    {
      "epoch": 2.106847253574116,
      "eval_loss": 0.7906577587127686,
      "eval_runtime": 164.5279,
      "eval_samples_per_second": 57.437,
      "eval_steps_per_second": 0.9,
      "step": 700
    },
    {
      "epoch": 2.1218961625282167,
      "grad_norm": 3.653634589505017,
      "learning_rate": 1.9995553014834986e-07,
      "loss": 0.6616,
      "step": 705
    },
    {
      "epoch": 2.1369450714823177,
      "grad_norm": 3.6094055740564577,
      "learning_rate": 1.9362200651686406e-07,
      "loss": 0.6807,
      "step": 710
    },
    {
      "epoch": 2.1519939804364183,
      "grad_norm": 3.7245551119478484,
      "learning_rate": 1.873662389561771e-07,
      "loss": 0.6789,
      "step": 715
    },
    {
      "epoch": 2.1670428893905194,
      "grad_norm": 3.8994746222238943,
      "learning_rate": 1.8118981512570254e-07,
      "loss": 0.6798,
      "step": 720
    },
    {
      "epoch": 2.18209179834462,
      "grad_norm": 3.9399523583825657,
      "learning_rate": 1.750943025481046e-07,
      "loss": 0.6752,
      "step": 725
    },
    {
      "epoch": 2.197140707298721,
      "grad_norm": 3.9639982002498435,
      "learning_rate": 1.6908124821147517e-07,
      "loss": 0.6593,
      "step": 730
    },
    {
      "epoch": 2.2121896162528216,
      "grad_norm": 3.686217265583731,
      "learning_rate": 1.631521781767214e-07,
      "loss": 0.6708,
      "step": 735
    },
    {
      "epoch": 2.2272385252069227,
      "grad_norm": 3.839667134049121,
      "learning_rate": 1.5730859719026535e-07,
      "loss": 0.67,
      "step": 740
    },
    {
      "epoch": 2.2422874341610233,
      "grad_norm": 3.61654343340027,
      "learning_rate": 1.5155198830215144e-07,
      "loss": 0.668,
      "step": 745
    },
    {
      "epoch": 2.2573363431151243,
      "grad_norm": 3.621797995269719,
      "learning_rate": 1.4588381248966185e-07,
      "loss": 0.6796,
      "step": 750
    },
    {
      "epoch": 2.2573363431151243,
      "eval_loss": 0.7899430990219116,
      "eval_runtime": 164.4792,
      "eval_samples_per_second": 57.454,
      "eval_steps_per_second": 0.9,
      "step": 750
    },
    {
      "epoch": 2.272385252069225,
      "grad_norm": 4.012254837738426,
      "learning_rate": 1.4030550828653354e-07,
      "loss": 0.6911,
      "step": 755
    },
    {
      "epoch": 2.287434161023326,
      "grad_norm": 3.9520876419590256,
      "learning_rate": 1.3481849141786977e-07,
      "loss": 0.6795,
      "step": 760
    },
    {
      "epoch": 2.3024830699774266,
      "grad_norm": 3.883255647541917,
      "learning_rate": 1.294241544408425e-07,
      "loss": 0.6694,
      "step": 765
    },
    {
      "epoch": 2.3175319789315276,
      "grad_norm": 3.799844419656149,
      "learning_rate": 1.241238663912727e-07,
      "loss": 0.7048,
      "step": 770
    },
    {
      "epoch": 2.3325808878856282,
      "grad_norm": 3.8884134747643793,
      "learning_rate": 1.1891897243618183e-07,
      "loss": 0.6814,
      "step": 775
    },
    {
      "epoch": 2.3476297968397293,
      "grad_norm": 3.5975780782982825,
      "learning_rate": 1.1381079353239915e-07,
      "loss": 0.6822,
      "step": 780
    },
    {
      "epoch": 2.36267870579383,
      "grad_norm": 3.992733447714394,
      "learning_rate": 1.0880062609131485e-07,
      "loss": 0.689,
      "step": 785
    },
    {
      "epoch": 2.377727614747931,
      "grad_norm": 3.613464796202204,
      "learning_rate": 1.0388974164986247e-07,
      "loss": 0.6743,
      "step": 790
    },
    {
      "epoch": 2.3927765237020315,
      "grad_norm": 3.659276093531672,
      "learning_rate": 9.907938654781306e-08,
      "loss": 0.6926,
      "step": 795
    },
    {
      "epoch": 2.4078254326561326,
      "grad_norm": 3.9248896041140897,
      "learning_rate": 9.437078161146589e-08,
      "loss": 0.6837,
      "step": 800
    },
    {
      "epoch": 2.4078254326561326,
      "eval_loss": 0.788821280002594,
      "eval_runtime": 164.5032,
      "eval_samples_per_second": 57.446,
      "eval_steps_per_second": 0.9,
      "step": 800
    },
    {
      "epoch": 2.422874341610233,
      "grad_norm": 3.876935336075608,
      "learning_rate": 8.976512184381246e-08,
      "loss": 0.6791,
      "step": 805
    },
    {
      "epoch": 2.4379232505643342,
      "grad_norm": 3.7240893931507033,
      "learning_rate": 8.526357612125573e-08,
      "loss": 0.6858,
      "step": 810
    },
    {
      "epoch": 2.452972159518435,
      "grad_norm": 3.8422906773804293,
      "learning_rate": 8.086728689695921e-08,
      "loss": 0.6946,
      "step": 815
    },
    {
      "epoch": 2.468021068472536,
      "grad_norm": 3.8826146265577135,
      "learning_rate": 7.657736991090263e-08,
      "loss": 0.6841,
      "step": 820
    },
    {
      "epoch": 2.4830699774266365,
      "grad_norm": 3.9083521097106693,
      "learning_rate": 7.239491390671631e-08,
      "loss": 0.6812,
      "step": 825
    },
    {
      "epoch": 2.4981188863807375,
      "grad_norm": 3.9311253338807477,
      "learning_rate": 6.832098035536759e-08,
      "loss": 0.6757,
      "step": 830
    },
    {
      "epoch": 2.513167795334838,
      "grad_norm": 3.829030585152858,
      "learning_rate": 6.435660318576935e-08,
      "loss": 0.6754,
      "step": 835
    },
    {
      "epoch": 2.528216704288939,
      "grad_norm": 3.8440317620257582,
      "learning_rate": 6.0502788522377e-08,
      "loss": 0.6729,
      "step": 840
    },
    {
      "epoch": 2.54326561324304,
      "grad_norm": 4.046477181331474,
      "learning_rate": 5.676051442984325e-08,
      "loss": 0.6832,
      "step": 845
    },
    {
      "epoch": 2.558314522197141,
      "grad_norm": 3.8109396018056083,
      "learning_rate": 5.313073066479379e-08,
      "loss": 0.6653,
      "step": 850
    },
    {
      "epoch": 2.558314522197141,
      "eval_loss": 0.7885056138038635,
      "eval_runtime": 164.2633,
      "eval_samples_per_second": 57.53,
      "eval_steps_per_second": 0.901,
      "step": 850
    },
    {
      "epoch": 2.5733634311512414,
      "grad_norm": 4.016744376255009,
      "learning_rate": 4.961435843478751e-08,
      "loss": 0.6845,
      "step": 855
    },
    {
      "epoch": 2.5884123401053425,
      "grad_norm": 3.736617322184166,
      "learning_rate": 4.621229016452155e-08,
      "loss": 0.6734,
      "step": 860
    },
    {
      "epoch": 2.603461249059443,
      "grad_norm": 3.86816117493287,
      "learning_rate": 4.2925389269341916e-08,
      "loss": 0.6777,
      "step": 865
    },
    {
      "epoch": 2.618510158013544,
      "grad_norm": 3.7170069049525596,
      "learning_rate": 3.975448993611652e-08,
      "loss": 0.6719,
      "step": 870
    },
    {
      "epoch": 2.6335590669676447,
      "grad_norm": 3.7507043912522664,
      "learning_rate": 3.67003969115251e-08,
      "loss": 0.6779,
      "step": 875
    },
    {
      "epoch": 2.648607975921746,
      "grad_norm": 3.941235995809754,
      "learning_rate": 3.376388529782215e-08,
      "loss": 0.6781,
      "step": 880
    },
    {
      "epoch": 2.6636568848758464,
      "grad_norm": 3.9252013397629355,
      "learning_rate": 3.094570035612226e-08,
      "loss": 0.6942,
      "step": 885
    },
    {
      "epoch": 2.6787057938299474,
      "grad_norm": 3.8180128091085748,
      "learning_rate": 2.8246557317259723e-08,
      "loss": 0.6914,
      "step": 890
    },
    {
      "epoch": 2.693754702784048,
      "grad_norm": 3.6630851255135166,
      "learning_rate": 2.5667141200268694e-08,
      "loss": 0.6892,
      "step": 895
    },
    {
      "epoch": 2.708803611738149,
      "grad_norm": 3.80576717661997,
      "learning_rate": 2.3208106638531842e-08,
      "loss": 0.6563,
      "step": 900
    },
    {
      "epoch": 2.708803611738149,
      "eval_loss": 0.7879343032836914,
      "eval_runtime": 164.5206,
      "eval_samples_per_second": 57.44,
      "eval_steps_per_second": 0.9,
      "step": 900
    },
    {
      "epoch": 2.7238525206922497,
      "grad_norm": 3.6414312258428794,
      "learning_rate": 2.087007771363969e-08,
      "loss": 0.6736,
      "step": 905
    },
    {
      "epoch": 2.7389014296463507,
      "grad_norm": 4.106134375603198,
      "learning_rate": 1.8653647797004236e-08,
      "loss": 0.6797,
      "step": 910
    },
    {
      "epoch": 2.7539503386004514,
      "grad_norm": 3.7480063289957264,
      "learning_rate": 1.655937939926655e-08,
      "loss": 0.6666,
      "step": 915
    },
    {
      "epoch": 2.7689992475545524,
      "grad_norm": 3.806194015893575,
      "learning_rate": 1.4587804027536454e-08,
      "loss": 0.6797,
      "step": 920
    },
    {
      "epoch": 2.784048156508653,
      "grad_norm": 3.850885472228141,
      "learning_rate": 1.2739422050500436e-08,
      "loss": 0.6643,
      "step": 925
    },
    {
      "epoch": 2.799097065462754,
      "grad_norm": 3.8694606829300593,
      "learning_rate": 1.101470257143261e-08,
      "loss": 0.6811,
      "step": 930
    },
    {
      "epoch": 2.8141459744168547,
      "grad_norm": 3.7451798850830484,
      "learning_rate": 9.414083309140453e-09,
      "loss": 0.691,
      "step": 935
    },
    {
      "epoch": 2.8291948833709557,
      "grad_norm": 3.805433129820117,
      "learning_rate": 7.93797048687539e-09,
      "loss": 0.6758,
      "step": 940
    },
    {
      "epoch": 2.8442437923250563,
      "grad_norm": 3.848550417844875,
      "learning_rate": 6.5867387292369295e-09,
      "loss": 0.6712,
      "step": 945
    },
    {
      "epoch": 2.8592927012791574,
      "grad_norm": 3.8372836117275524,
      "learning_rate": 5.360730967096272e-09,
      "loss": 0.6829,
      "step": 950
    },
    {
      "epoch": 2.8592927012791574,
      "eval_loss": 0.7878710031509399,
      "eval_runtime": 164.3818,
      "eval_samples_per_second": 57.488,
      "eval_steps_per_second": 0.9,
      "step": 950
    },
    {
      "epoch": 2.874341610233258,
      "grad_norm": 3.690622616479554,
      "learning_rate": 4.260258350563317e-09,
      "loss": 0.6704,
      "step": 955
    },
    {
      "epoch": 2.889390519187359,
      "grad_norm": 3.9735203009322824,
      "learning_rate": 3.285600170019609e-09,
      "loss": 0.6802,
      "step": 960
    },
    {
      "epoch": 2.9044394281414596,
      "grad_norm": 3.9338847655040836,
      "learning_rate": 2.437003785236702e-09,
      "loss": 0.6778,
      "step": 965
    },
    {
      "epoch": 2.9194883370955607,
      "grad_norm": 3.828474240996448,
      "learning_rate": 1.714684562598545e-09,
      "loss": 0.6714,
      "step": 970
    },
    {
      "epoch": 2.9345372460496613,
      "grad_norm": 3.819052850746861,
      "learning_rate": 1.1188258204433144e-09,
      "loss": 0.6755,
      "step": 975
    },
    {
      "epoch": 2.9495861550037623,
      "grad_norm": 3.6976061918723917,
      "learning_rate": 6.49578782538851e-10,
      "loss": 0.6792,
      "step": 980
    },
    {
      "epoch": 2.964635063957863,
      "grad_norm": 3.964592057252041,
      "learning_rate": 3.070625397031401e-10,
      "loss": 0.6723,
      "step": 985
    },
    {
      "epoch": 2.979683972911964,
      "grad_norm": 4.047836254173495,
      "learning_rate": 9.136401958059759e-11,
      "loss": 0.6766,
      "step": 990
    },
    {
      "epoch": 2.9947328818660646,
      "grad_norm": 4.038781331246751,
      "learning_rate": 2.5379645800516215e-12,
      "loss": 0.6835,
      "step": 995
    },
    {
      "epoch": 2.9977426636568847,
      "step": 996,
      "total_flos": 5872633472090112.0,
      "train_loss": 0.7696134887905006,
      "train_runtime": 16213.1344,
      "train_samples_per_second": 15.737,
      "train_steps_per_second": 0.061
    }
  ],
  "logging_steps": 5,
  "max_steps": 996,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 10000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5872633472090112.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}