{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9878213802435725,
"eval_steps": 50,
"global_step": 552,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02706359945872801,
"grad_norm": 18.65970657349142,
"learning_rate": 5e-07,
"loss": 1.7373,
"step": 5
},
{
"epoch": 0.05412719891745602,
"grad_norm": 12.123851504493118,
"learning_rate": 1e-06,
"loss": 1.5834,
"step": 10
},
{
"epoch": 0.08119079837618404,
"grad_norm": 7.621307641306264,
"learning_rate": 9.997900331216397e-07,
"loss": 1.2794,
"step": 15
},
{
"epoch": 0.10825439783491204,
"grad_norm": 4.267249289625919,
"learning_rate": 9.991603088309193e-07,
"loss": 1.1358,
"step": 20
},
{
"epoch": 0.13531799729364005,
"grad_norm": 3.917907714316784,
"learning_rate": 9.981113560128126e-07,
"loss": 1.0736,
"step": 25
},
{
"epoch": 0.16238159675236807,
"grad_norm": 3.7859846750996846,
"learning_rate": 9.966440556487147e-07,
"loss": 1.035,
"step": 30
},
{
"epoch": 0.18944519621109607,
"grad_norm": 3.7025125719803813,
"learning_rate": 9.947596400765342e-07,
"loss": 1.0091,
"step": 35
},
{
"epoch": 0.2165087956698241,
"grad_norm": 3.370534729114571,
"learning_rate": 9.924596919556916e-07,
"loss": 0.9798,
"step": 40
},
{
"epoch": 0.2435723951285521,
"grad_norm": 3.494521506906822,
"learning_rate": 9.897461429378964e-07,
"loss": 0.9557,
"step": 45
},
{
"epoch": 0.2706359945872801,
"grad_norm": 3.2135593780998435,
"learning_rate": 9.866212720448147e-07,
"loss": 0.9274,
"step": 50
},
{
"epoch": 0.2706359945872801,
"eval_loss": 0.9371856451034546,
"eval_runtime": 92.1867,
"eval_samples_per_second": 56.95,
"eval_steps_per_second": 0.9,
"step": 50
},
{
"epoch": 0.2976995940460081,
"grad_norm": 3.6113816994688874,
"learning_rate": 9.830877037539935e-07,
"loss": 0.9243,
"step": 55
},
{
"epoch": 0.32476319350473615,
"grad_norm": 3.3612765440218966,
"learning_rate": 9.791484057946465e-07,
"loss": 0.9356,
"step": 60
},
{
"epoch": 0.35182679296346414,
"grad_norm": 3.4973229455252706,
"learning_rate": 9.748066866551555e-07,
"loss": 0.9136,
"step": 65
},
{
"epoch": 0.37889039242219213,
"grad_norm": 3.515361088136603,
"learning_rate": 9.700661928043785e-07,
"loss": 0.9077,
"step": 70
},
{
"epoch": 0.4059539918809202,
"grad_norm": 3.6572315608142527,
"learning_rate": 9.649309056290984e-07,
"loss": 0.9102,
"step": 75
},
{
"epoch": 0.4330175913396482,
"grad_norm": 3.84703859120007,
"learning_rate": 9.594051380901858e-07,
"loss": 0.8847,
"step": 80
},
{
"epoch": 0.46008119079837617,
"grad_norm": 3.429617493706216,
"learning_rate": 9.534935311002834e-07,
"loss": 0.8902,
"step": 85
},
{
"epoch": 0.4871447902571042,
"grad_norm": 3.5683074263183254,
"learning_rate": 9.472010496260544e-07,
"loss": 0.8873,
"step": 90
},
{
"epoch": 0.5142083897158322,
"grad_norm": 3.575422711992299,
"learning_rate": 9.405329785182678e-07,
"loss": 0.881,
"step": 95
},
{
"epoch": 0.5412719891745602,
"grad_norm": 3.669206503451801,
"learning_rate": 9.334949180732244e-07,
"loss": 0.8893,
"step": 100
},
{
"epoch": 0.5412719891745602,
"eval_loss": 0.8818467855453491,
"eval_runtime": 91.9423,
"eval_samples_per_second": 57.101,
"eval_steps_per_second": 0.903,
"step": 100
},
{
"epoch": 0.5683355886332883,
"grad_norm": 3.914942398465802,
"learning_rate": 9.260927793292497e-07,
"loss": 0.8724,
"step": 105
},
{
"epoch": 0.5953991880920162,
"grad_norm": 3.5380899440058564,
"learning_rate": 9.183327791022047e-07,
"loss": 0.8944,
"step": 110
},
{
"epoch": 0.6224627875507442,
"grad_norm": 3.4498048009589968,
"learning_rate": 9.102214347641843e-07,
"loss": 0.876,
"step": 115
},
{
"epoch": 0.6495263870094723,
"grad_norm": 3.7226989372850356,
"learning_rate": 9.017655587697883e-07,
"loss": 0.8822,
"step": 120
},
{
"epoch": 0.6765899864682002,
"grad_norm": 3.698396988748353,
"learning_rate": 8.929722529345623e-07,
"loss": 0.8689,
"step": 125
},
{
"epoch": 0.7036535859269283,
"grad_norm": 3.7148159292863028,
"learning_rate": 8.83848902470413e-07,
"loss": 0.8583,
"step": 130
},
{
"epoch": 0.7307171853856563,
"grad_norm": 3.5813361420522827,
"learning_rate": 8.744031697830088e-07,
"loss": 0.8662,
"step": 135
},
{
"epoch": 0.7577807848443843,
"grad_norm": 3.135451465920447,
"learning_rate": 8.646429880363746e-07,
"loss": 0.8554,
"step": 140
},
{
"epoch": 0.7848443843031123,
"grad_norm": 3.5589111641165405,
"learning_rate": 8.545765544900846e-07,
"loss": 0.859,
"step": 145
},
{
"epoch": 0.8119079837618404,
"grad_norm": 3.4347954694969465,
"learning_rate": 8.442123236146508e-07,
"loss": 0.8684,
"step": 150
},
{
"epoch": 0.8119079837618404,
"eval_loss": 0.8576312065124512,
"eval_runtime": 91.9678,
"eval_samples_per_second": 57.085,
"eval_steps_per_second": 0.902,
"step": 150
},
{
"epoch": 0.8389715832205683,
"grad_norm": 3.42544344763707,
"learning_rate": 8.33558999990887e-07,
"loss": 0.8259,
"step": 155
},
{
"epoch": 0.8660351826792964,
"grad_norm": 3.748411527272942,
"learning_rate": 8.22625530999215e-07,
"loss": 0.8591,
"step": 160
},
{
"epoch": 0.8930987821380244,
"grad_norm": 3.3899159655842626,
"learning_rate": 8.114210993050502e-07,
"loss": 0.8411,
"step": 165
},
{
"epoch": 0.9201623815967523,
"grad_norm": 3.4095342832234445,
"learning_rate": 7.999551151465791e-07,
"loss": 0.8509,
"step": 170
},
{
"epoch": 0.9472259810554804,
"grad_norm": 3.486474439832698,
"learning_rate": 7.88237208431406e-07,
"loss": 0.8407,
"step": 175
},
{
"epoch": 0.9742895805142084,
"grad_norm": 3.236038581839862,
"learning_rate": 7.762772206487065e-07,
"loss": 0.8106,
"step": 180
},
{
"epoch": 1.0013531799729365,
"grad_norm": 3.4903154970716432,
"learning_rate": 7.640851966036805e-07,
"loss": 0.84,
"step": 185
},
{
"epoch": 1.0284167794316643,
"grad_norm": 3.5863433980686557,
"learning_rate": 7.516713759812464e-07,
"loss": 0.7865,
"step": 190
},
{
"epoch": 1.0554803788903924,
"grad_norm": 3.362482872884403,
"learning_rate": 7.390461847460628e-07,
"loss": 0.7834,
"step": 195
},
{
"epoch": 1.0825439783491204,
"grad_norm": 3.510512158864217,
"learning_rate": 7.262202263860988e-07,
"loss": 0.7885,
"step": 200
},
{
"epoch": 1.0825439783491204,
"eval_loss": 0.8443654775619507,
"eval_runtime": 91.8778,
"eval_samples_per_second": 57.141,
"eval_steps_per_second": 0.903,
"step": 200
},
{
"epoch": 1.1096075778078485,
"grad_norm": 3.4220527353768824,
"learning_rate": 7.1320427300711e-07,
"loss": 0.7751,
"step": 205
},
{
"epoch": 1.1366711772665765,
"grad_norm": 3.52648434908435,
"learning_rate": 7.000092562854959e-07,
"loss": 0.7738,
"step": 210
},
{
"epoch": 1.1637347767253043,
"grad_norm": 3.4548324057171147,
"learning_rate": 6.866462582871401e-07,
"loss": 0.7716,
"step": 215
},
{
"epoch": 1.1907983761840324,
"grad_norm": 3.459942778474347,
"learning_rate": 6.731265021599436e-07,
"loss": 0.7802,
"step": 220
},
{
"epoch": 1.2178619756427604,
"grad_norm": 3.597273563743811,
"learning_rate": 6.594613427078674e-07,
"loss": 0.7696,
"step": 225
},
{
"epoch": 1.2449255751014885,
"grad_norm": 3.8266943969656317,
"learning_rate": 6.456622568544011e-07,
"loss": 0.7735,
"step": 230
},
{
"epoch": 1.2719891745602165,
"grad_norm": 3.780461192372918,
"learning_rate": 6.317408340034684e-07,
"loss": 0.7793,
"step": 235
},
{
"epoch": 1.2990527740189446,
"grad_norm": 3.4361198088375673,
"learning_rate": 6.177087663058625e-07,
"loss": 0.7539,
"step": 240
},
{
"epoch": 1.3261163734776726,
"grad_norm": 3.7110817877487383,
"learning_rate": 6.035778388393893e-07,
"loss": 0.7605,
"step": 245
},
{
"epoch": 1.3531799729364005,
"grad_norm": 3.7117782049022483,
"learning_rate": 5.893599197109624e-07,
"loss": 0.7588,
"step": 250
},
{
"epoch": 1.3531799729364005,
"eval_loss": 0.8342949748039246,
"eval_runtime": 91.9385,
"eval_samples_per_second": 57.103,
"eval_steps_per_second": 0.903,
"step": 250
},
{
"epoch": 1.3802435723951285,
"grad_norm": 3.6963553360558032,
"learning_rate": 5.750669500889666e-07,
"loss": 0.7692,
"step": 255
},
{
"epoch": 1.4073071718538566,
"grad_norm": 3.50195752763911,
"learning_rate": 5.607109341742578e-07,
"loss": 0.7721,
"step": 260
},
{
"epoch": 1.4343707713125846,
"grad_norm": 3.7055534906664165,
"learning_rate": 5.463039291182256e-07,
"loss": 0.7651,
"step": 265
},
{
"epoch": 1.4614343707713127,
"grad_norm": 3.5346162965379695,
"learning_rate": 5.318580348963825e-07,
"loss": 0.7785,
"step": 270
},
{
"epoch": 1.4884979702300405,
"grad_norm": 3.5657020524687986,
"learning_rate": 5.173853841459877e-07,
"loss": 0.763,
"step": 275
},
{
"epoch": 1.5155615696887685,
"grad_norm": 3.5651769161124074,
"learning_rate": 5.028981319762399e-07,
"loss": 0.755,
"step": 280
},
{
"epoch": 1.5426251691474966,
"grad_norm": 3.4198506759540233,
"learning_rate": 4.884084457595956e-07,
"loss": 0.755,
"step": 285
},
{
"epoch": 1.5696887686062246,
"grad_norm": 3.5240822496367556,
"learning_rate": 4.7392849491278817e-07,
"loss": 0.772,
"step": 290
},
{
"epoch": 1.5967523680649527,
"grad_norm": 3.8692497705578335,
"learning_rate": 4.5947044067613e-07,
"loss": 0.7595,
"step": 295
},
{
"epoch": 1.6238159675236807,
"grad_norm": 3.471893348202751,
"learning_rate": 4.4504642589968217e-07,
"loss": 0.7736,
"step": 300
},
{
"epoch": 1.6238159675236807,
"eval_loss": 0.8263227343559265,
"eval_runtime": 91.9869,
"eval_samples_per_second": 57.073,
"eval_steps_per_second": 0.902,
"step": 300
},
{
"epoch": 1.6508795669824088,
"grad_norm": 3.5822544768320417,
"learning_rate": 4.3066856484486847e-07,
"loss": 0.751,
"step": 305
},
{
"epoch": 1.6779431664411368,
"grad_norm": 3.5379322852796924,
"learning_rate": 4.1634893301010165e-07,
"loss": 0.7659,
"step": 310
},
{
"epoch": 1.7050067658998647,
"grad_norm": 3.6759115180695465,
"learning_rate": 4.0209955698896445e-07,
"loss": 0.7859,
"step": 315
},
{
"epoch": 1.7320703653585927,
"grad_norm": 3.325644714682765,
"learning_rate": 3.8793240436946385e-07,
"loss": 0.7501,
"step": 320
},
{
"epoch": 1.7591339648173205,
"grad_norm": 3.7071013179144257,
"learning_rate": 3.738593736828426e-07,
"loss": 0.7652,
"step": 325
},
{
"epoch": 1.7861975642760486,
"grad_norm": 3.7366222242171303,
"learning_rate": 3.598922844103902e-07,
"loss": 0.7653,
"step": 330
},
{
"epoch": 1.8132611637347766,
"grad_norm": 3.699329762945061,
"learning_rate": 3.4604286705664397e-07,
"loss": 0.7667,
"step": 335
},
{
"epoch": 1.8403247631935047,
"grad_norm": 3.452541395212277,
"learning_rate": 3.323227532973193e-07,
"loss": 0.7478,
"step": 340
},
{
"epoch": 1.8673883626522327,
"grad_norm": 3.665296534909908,
"learning_rate": 3.187434662102434e-07,
"loss": 0.7678,
"step": 345
},
{
"epoch": 1.8944519621109608,
"grad_norm": 3.842830417177158,
"learning_rate": 3.0531641059749634e-07,
"loss": 0.7499,
"step": 350
},
{
"epoch": 1.8944519621109608,
"eval_loss": 0.8207802176475525,
"eval_runtime": 91.9974,
"eval_samples_per_second": 57.067,
"eval_steps_per_second": 0.902,
"step": 350
},
{
"epoch": 1.9215155615696888,
"grad_norm": 3.561775197313456,
"learning_rate": 2.920528634068885e-07,
"loss": 0.7482,
"step": 355
},
{
"epoch": 1.9485791610284169,
"grad_norm": 3.83958742018717,
"learning_rate": 2.789639642608184e-07,
"loss": 0.7501,
"step": 360
},
{
"epoch": 1.975642760487145,
"grad_norm": 3.798708193385439,
"learning_rate": 2.6606070610046526e-07,
"loss": 0.7639,
"step": 365
},
{
"epoch": 2.002706359945873,
"grad_norm": 3.686196257239308,
"learning_rate": 2.533539259531757e-07,
"loss": 0.7545,
"step": 370
},
{
"epoch": 2.029769959404601,
"grad_norm": 3.66828576372959,
"learning_rate": 2.408542958307957e-07,
"loss": 0.7153,
"step": 375
},
{
"epoch": 2.0568335588633286,
"grad_norm": 3.7385059480327767,
"learning_rate": 2.2857231376659514e-07,
"loss": 0.7111,
"step": 380
},
{
"epoch": 2.0838971583220567,
"grad_norm": 3.738542782092733,
"learning_rate": 2.1651829499831043e-07,
"loss": 0.7091,
"step": 385
},
{
"epoch": 2.1109607577807847,
"grad_norm": 3.6847092660306067,
"learning_rate": 2.0470236330471126e-07,
"loss": 0.6938,
"step": 390
},
{
"epoch": 2.138024357239513,
"grad_norm": 3.8317927714979008,
"learning_rate": 1.9313444250296846e-07,
"loss": 0.725,
"step": 395
},
{
"epoch": 2.165087956698241,
"grad_norm": 3.77803814777597,
"learning_rate": 1.818242481139613e-07,
"loss": 0.6949,
"step": 400
},
{
"epoch": 2.165087956698241,
"eval_loss": 0.823599100112915,
"eval_runtime": 91.9806,
"eval_samples_per_second": 57.077,
"eval_steps_per_second": 0.902,
"step": 400
},
{
"epoch": 2.192151556156969,
"grad_norm": 3.6134354332095238,
"learning_rate": 1.7078127920252783e-07,
"loss": 0.7078,
"step": 405
},
{
"epoch": 2.219215155615697,
"grad_norm": 3.7411384166319346,
"learning_rate": 1.600148103995087e-07,
"loss": 0.7089,
"step": 410
},
{
"epoch": 2.246278755074425,
"grad_norm": 3.8076732272754406,
"learning_rate": 1.4953388411228602e-07,
"loss": 0.7351,
"step": 415
},
{
"epoch": 2.273342354533153,
"grad_norm": 3.561250617360428,
"learning_rate": 1.3934730293035936e-07,
"loss": 0.7127,
"step": 420
},
{
"epoch": 2.300405953991881,
"grad_norm": 3.857897941251079,
"learning_rate": 1.2946362223233614e-07,
"loss": 0.7155,
"step": 425
},
{
"epoch": 2.3274695534506087,
"grad_norm": 3.87623198142243,
"learning_rate": 1.198911430005478e-07,
"loss": 0.7248,
"step": 430
},
{
"epoch": 2.3545331529093367,
"grad_norm": 3.94878669421802,
"learning_rate": 1.1063790484932462e-07,
"loss": 0.7125,
"step": 435
},
{
"epoch": 2.381596752368065,
"grad_norm": 3.9196427575075288,
"learning_rate": 1.0171167927278368e-07,
"loss": 0.7087,
"step": 440
},
{
"epoch": 2.408660351826793,
"grad_norm": 3.8012419710191123,
"learning_rate": 9.311996311780446e-08,
"loss": 0.7013,
"step": 445
},
{
"epoch": 2.435723951285521,
"grad_norm": 3.872302135767634,
"learning_rate": 8.486997228767012e-08,
"loss": 0.7056,
"step": 450
},
{
"epoch": 2.435723951285521,
"eval_loss": 0.8227203488349915,
"eval_runtime": 91.9356,
"eval_samples_per_second": 57.105,
"eval_steps_per_second": 0.903,
"step": 450
},
{
"epoch": 2.462787550744249,
"grad_norm": 3.700203865295363,
"learning_rate": 7.696863568166518e-08,
"loss": 0.7216,
"step": 455
},
{
"epoch": 2.489851150202977,
"grad_norm": 3.6078275253514183,
"learning_rate": 6.942258937571771e-08,
"loss": 0.6922,
"step": 460
},
{
"epoch": 2.516914749661705,
"grad_norm": 3.8271435786761088,
"learning_rate": 6.2238171048975e-08,
"loss": 0.7127,
"step": 465
},
{
"epoch": 2.543978349120433,
"grad_norm": 4.00275667268857,
"learning_rate": 5.5421414660992705e-08,
"loss": 0.7072,
"step": 470
},
{
"epoch": 2.571041948579161,
"grad_norm": 4.262739343601447,
"learning_rate": 4.8978045384008125e-08,
"loss": 0.6983,
"step": 475
},
{
"epoch": 2.598105548037889,
"grad_norm": 3.627717683448711,
"learning_rate": 4.2913474794554036e-08,
"loss": 0.718,
"step": 480
},
{
"epoch": 2.6251691474966172,
"grad_norm": 3.627655916654691,
"learning_rate": 3.723279632845155e-08,
"loss": 0.6992,
"step": 485
},
{
"epoch": 2.6522327469553453,
"grad_norm": 4.157793979172056,
"learning_rate": 3.194078100299863e-08,
"loss": 0.7154,
"step": 490
},
{
"epoch": 2.6792963464140733,
"grad_norm": 3.6512611965864314,
"learning_rate": 2.7041873409947734e-08,
"loss": 0.715,
"step": 495
},
{
"epoch": 2.706359945872801,
"grad_norm": 3.726128068146228,
"learning_rate": 2.2540187982637627e-08,
"loss": 0.7001,
"step": 500
},
{
"epoch": 2.706359945872801,
"eval_loss": 0.8219273686408997,
"eval_runtime": 91.8886,
"eval_samples_per_second": 57.134,
"eval_steps_per_second": 0.903,
"step": 500
},
{
"epoch": 2.733423545331529,
"grad_norm": 3.728833279635095,
"learning_rate": 1.8439505540414458e-08,
"loss": 0.7019,
"step": 505
},
{
"epoch": 2.760487144790257,
"grad_norm": 3.6477799057801645,
"learning_rate": 1.4743270113244277e-08,
"loss": 0.6896,
"step": 510
},
{
"epoch": 2.787550744248985,
"grad_norm": 3.7765174476962233,
"learning_rate": 1.1454586049184589e-08,
"loss": 0.7095,
"step": 515
},
{
"epoch": 2.814614343707713,
"grad_norm": 3.844174292857732,
"learning_rate": 8.576215407142651e-09,
"loss": 0.6985,
"step": 520
},
{
"epoch": 2.841677943166441,
"grad_norm": 3.756024935666749,
"learning_rate": 6.110575637112425e-09,
"loss": 0.7077,
"step": 525
},
{
"epoch": 2.8687415426251692,
"grad_norm": 3.9062256497537864,
"learning_rate": 4.059737549836517e-09,
"loss": 0.707,
"step": 530
},
{
"epoch": 2.8958051420838973,
"grad_norm": 3.8224427448415494,
"learning_rate": 2.425423577599783e-09,
"loss": 0.6974,
"step": 535
},
{
"epoch": 2.9228687415426253,
"grad_norm": 3.7396928746940383,
"learning_rate": 1.209006327614226e-09,
"loss": 0.7088,
"step": 540
},
{
"epoch": 2.949932341001353,
"grad_norm": 3.6670690549497804,
"learning_rate": 4.115074292109777e-10,
"loss": 0.7058,
"step": 545
},
{
"epoch": 2.976995940460081,
"grad_norm": 3.7461282178146655,
"learning_rate": 3.3596675806824013e-11,
"loss": 0.7045,
"step": 550
},
{
"epoch": 2.976995940460081,
"eval_loss": 0.8217999339103699,
"eval_runtime": 91.9491,
"eval_samples_per_second": 57.097,
"eval_steps_per_second": 0.903,
"step": 550
},
{
"epoch": 2.9878213802435725,
"step": 552,
"total_flos": 3254608239525888.0,
"train_loss": 0.81000138948793,
"train_runtime": 8272.8839,
"train_samples_per_second": 17.134,
"train_steps_per_second": 0.067
}
],
"logging_steps": 5,
"max_steps": 552,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3254608239525888.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}