{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9941972920696323,
"eval_steps": 50,
"global_step": 774,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019342359767891684,
"grad_norm": 18.527687354581595,
"learning_rate": 5e-07,
"loss": 1.7356,
"step": 5
},
{
"epoch": 0.03868471953578337,
"grad_norm": 12.936955115293438,
"learning_rate": 1e-06,
"loss": 1.5854,
"step": 10
},
{
"epoch": 0.058027079303675046,
"grad_norm": 7.321839613340823,
"learning_rate": 9.998943236640677e-07,
"loss": 1.2859,
"step": 15
},
{
"epoch": 0.07736943907156674,
"grad_norm": 4.100947188595897,
"learning_rate": 9.995773393262229e-07,
"loss": 1.1468,
"step": 20
},
{
"epoch": 0.09671179883945841,
"grad_norm": 3.5921467819144097,
"learning_rate": 9.99049180977439e-07,
"loss": 1.0832,
"step": 25
},
{
"epoch": 0.11605415860735009,
"grad_norm": 3.700008683460077,
"learning_rate": 9.983100718730718e-07,
"loss": 1.0181,
"step": 30
},
{
"epoch": 0.13539651837524178,
"grad_norm": 3.413586927958152,
"learning_rate": 9.973603244384906e-07,
"loss": 0.9956,
"step": 35
},
{
"epoch": 0.15473887814313347,
"grad_norm": 3.668981471014926,
"learning_rate": 9.9620034013701e-07,
"loss": 0.9636,
"step": 40
},
{
"epoch": 0.17408123791102514,
"grad_norm": 3.55289668457361,
"learning_rate": 9.948306093001933e-07,
"loss": 0.9538,
"step": 45
},
{
"epoch": 0.19342359767891681,
"grad_norm": 3.3904476145405393,
"learning_rate": 9.932517109205849e-07,
"loss": 0.9216,
"step": 50
},
{
"epoch": 0.19342359767891681,
"eval_loss": 0.9377365708351135,
"eval_runtime": 129.4079,
"eval_samples_per_second": 56.797,
"eval_steps_per_second": 0.889,
"step": 50
},
{
"epoch": 0.2127659574468085,
"grad_norm": 3.4681388676869433,
"learning_rate": 9.914643124069666e-07,
"loss": 0.923,
"step": 55
},
{
"epoch": 0.23210831721470018,
"grad_norm": 3.29526552312586,
"learning_rate": 9.89469169302242e-07,
"loss": 0.9115,
"step": 60
},
{
"epoch": 0.2514506769825919,
"grad_norm": 3.3040218603649225,
"learning_rate": 9.872671249640626e-07,
"loss": 0.9168,
"step": 65
},
{
"epoch": 0.27079303675048355,
"grad_norm": 3.315572544161604,
"learning_rate": 9.848591102083375e-07,
"loss": 0.8954,
"step": 70
},
{
"epoch": 0.2901353965183752,
"grad_norm": 3.2544174349742825,
"learning_rate": 9.822461429157716e-07,
"loss": 0.9181,
"step": 75
},
{
"epoch": 0.30947775628626695,
"grad_norm": 3.4125559699409056,
"learning_rate": 9.794293276016023e-07,
"loss": 0.9121,
"step": 80
},
{
"epoch": 0.3288201160541586,
"grad_norm": 3.6466175557331986,
"learning_rate": 9.764098549487155e-07,
"loss": 0.8976,
"step": 85
},
{
"epoch": 0.3481624758220503,
"grad_norm": 3.4442797818420234,
"learning_rate": 9.731890013043367e-07,
"loss": 0.8697,
"step": 90
},
{
"epoch": 0.36750483558994196,
"grad_norm": 3.4952948347778796,
"learning_rate": 9.697681281405128e-07,
"loss": 0.8797,
"step": 95
},
{
"epoch": 0.38684719535783363,
"grad_norm": 3.3555597902003944,
"learning_rate": 9.6614868147861e-07,
"loss": 0.872,
"step": 100
},
{
"epoch": 0.38684719535783363,
"eval_loss": 0.8798893690109253,
"eval_runtime": 127.7553,
"eval_samples_per_second": 57.532,
"eval_steps_per_second": 0.9,
"step": 100
},
{
"epoch": 0.40618955512572535,
"grad_norm": 3.5367127004254324,
"learning_rate": 9.623321912780744e-07,
"loss": 0.8812,
"step": 105
},
{
"epoch": 0.425531914893617,
"grad_norm": 3.578326025613384,
"learning_rate": 9.583202707897073e-07,
"loss": 0.8779,
"step": 110
},
{
"epoch": 0.4448742746615087,
"grad_norm": 3.296044526686552,
"learning_rate": 9.54114615873738e-07,
"loss": 0.8618,
"step": 115
},
{
"epoch": 0.46421663442940037,
"grad_norm": 3.4854773143444535,
"learning_rate": 9.497170042829736e-07,
"loss": 0.8771,
"step": 120
},
{
"epoch": 0.4835589941972921,
"grad_norm": 3.49978332955871,
"learning_rate": 9.451292949113329e-07,
"loss": 0.8678,
"step": 125
},
{
"epoch": 0.5029013539651838,
"grad_norm": 3.8225654739598895,
"learning_rate": 9.403534270080829e-07,
"loss": 0.8884,
"step": 130
},
{
"epoch": 0.5222437137330754,
"grad_norm": 3.2475969413565635,
"learning_rate": 9.353914193581072e-07,
"loss": 0.8575,
"step": 135
},
{
"epoch": 0.5415860735009671,
"grad_norm": 3.6725820783332774,
"learning_rate": 9.302453694285548e-07,
"loss": 0.8553,
"step": 140
},
{
"epoch": 0.5609284332688588,
"grad_norm": 3.2370725122807484,
"learning_rate": 9.249174524822305e-07,
"loss": 0.868,
"step": 145
},
{
"epoch": 0.5802707930367504,
"grad_norm": 3.669185365661775,
"learning_rate": 9.19409920658098e-07,
"loss": 0.8452,
"step": 150
},
{
"epoch": 0.5802707930367504,
"eval_loss": 0.854381799697876,
"eval_runtime": 127.6808,
"eval_samples_per_second": 57.565,
"eval_steps_per_second": 0.901,
"step": 150
},
{
"epoch": 0.5996131528046421,
"grad_norm": 3.538171559620098,
"learning_rate": 9.137251020192907e-07,
"loss": 0.8327,
"step": 155
},
{
"epoch": 0.6189555125725339,
"grad_norm": 3.45820833220196,
"learning_rate": 9.078653995690246e-07,
"loss": 0.8577,
"step": 160
},
{
"epoch": 0.6382978723404256,
"grad_norm": 3.52275765783797,
"learning_rate": 9.018332902348388e-07,
"loss": 0.8288,
"step": 165
},
{
"epoch": 0.6576402321083172,
"grad_norm": 3.5694200958956097,
"learning_rate": 8.956313238215823e-07,
"loss": 0.8297,
"step": 170
},
{
"epoch": 0.6769825918762089,
"grad_norm": 3.623437037037063,
"learning_rate": 8.892621219336e-07,
"loss": 0.8376,
"step": 175
},
{
"epoch": 0.6963249516441006,
"grad_norm": 3.4631672962599755,
"learning_rate": 8.827283768665648e-07,
"loss": 0.8401,
"step": 180
},
{
"epoch": 0.7156673114119922,
"grad_norm": 3.5857126637962726,
"learning_rate": 8.760328504694317e-07,
"loss": 0.8447,
"step": 185
},
{
"epoch": 0.7350096711798839,
"grad_norm": 3.4134278990568436,
"learning_rate": 8.691783729769873e-07,
"loss": 0.8223,
"step": 190
},
{
"epoch": 0.7543520309477756,
"grad_norm": 3.527262510580844,
"learning_rate": 8.621678418134963e-07,
"loss": 0.8527,
"step": 195
},
{
"epoch": 0.7736943907156673,
"grad_norm": 3.524465193791854,
"learning_rate": 8.550042203679439e-07,
"loss": 0.8444,
"step": 200
},
{
"epoch": 0.7736943907156673,
"eval_loss": 0.8374524116516113,
"eval_runtime": 127.8349,
"eval_samples_per_second": 57.496,
"eval_steps_per_second": 0.9,
"step": 200
},
{
"epoch": 0.793036750483559,
"grad_norm": 3.5379630468540806,
"learning_rate": 8.476905367413957e-07,
"loss": 0.8291,
"step": 205
},
{
"epoch": 0.8123791102514507,
"grad_norm": 3.427913081867794,
"learning_rate": 8.402298824670029e-07,
"loss": 0.822,
"step": 210
},
{
"epoch": 0.8317214700193424,
"grad_norm": 3.6293945120927464,
"learning_rate": 8.326254112031949e-07,
"loss": 0.8253,
"step": 215
},
{
"epoch": 0.851063829787234,
"grad_norm": 3.1613190541906797,
"learning_rate": 8.248803374006113e-07,
"loss": 0.8266,
"step": 220
},
{
"epoch": 0.8704061895551257,
"grad_norm": 3.516439765275824,
"learning_rate": 8.169979349433358e-07,
"loss": 0.8281,
"step": 225
},
{
"epoch": 0.8897485493230174,
"grad_norm": 3.358282538404413,
"learning_rate": 8.089815357650089e-07,
"loss": 0.8358,
"step": 230
},
{
"epoch": 0.9090909090909091,
"grad_norm": 3.496336629563945,
"learning_rate": 8.008345284404003e-07,
"loss": 0.8189,
"step": 235
},
{
"epoch": 0.9284332688588007,
"grad_norm": 3.550035619548321,
"learning_rate": 7.925603567530418e-07,
"loss": 0.8191,
"step": 240
},
{
"epoch": 0.9477756286266924,
"grad_norm": 3.6931325798521857,
"learning_rate": 7.841625182395206e-07,
"loss": 0.8241,
"step": 245
},
{
"epoch": 0.9671179883945842,
"grad_norm": 3.3871785206976863,
"learning_rate": 7.756445627110522e-07,
"loss": 0.8236,
"step": 250
},
{
"epoch": 0.9671179883945842,
"eval_loss": 0.8247936964035034,
"eval_runtime": 127.7676,
"eval_samples_per_second": 57.526,
"eval_steps_per_second": 0.9,
"step": 250
},
{
"epoch": 0.9864603481624759,
"grad_norm": 3.4188093303718965,
"learning_rate": 7.670100907529557e-07,
"loss": 0.8353,
"step": 255
},
{
"epoch": 1.0058027079303675,
"grad_norm": 3.636501521011555,
"learning_rate": 7.582627522026685e-07,
"loss": 0.8063,
"step": 260
},
{
"epoch": 1.0251450676982592,
"grad_norm": 3.621680508418713,
"learning_rate": 7.49406244606939e-07,
"loss": 0.7737,
"step": 265
},
{
"epoch": 1.0444874274661509,
"grad_norm": 3.581741353253455,
"learning_rate": 7.404443116588547e-07,
"loss": 0.7829,
"step": 270
},
{
"epoch": 1.0638297872340425,
"grad_norm": 3.383739671366194,
"learning_rate": 7.31380741615363e-07,
"loss": 0.7543,
"step": 275
},
{
"epoch": 1.0831721470019342,
"grad_norm": 3.943776092351626,
"learning_rate": 7.222193656959546e-07,
"loss": 0.7892,
"step": 280
},
{
"epoch": 1.1025145067698259,
"grad_norm": 3.449730443330265,
"learning_rate": 7.129640564631863e-07,
"loss": 0.7594,
"step": 285
},
{
"epoch": 1.1218568665377175,
"grad_norm": 3.5920269631980477,
"learning_rate": 7.036187261857288e-07,
"loss": 0.759,
"step": 290
},
{
"epoch": 1.1411992263056092,
"grad_norm": 3.590692121258084,
"learning_rate": 6.941873251846293e-07,
"loss": 0.7602,
"step": 295
},
{
"epoch": 1.1605415860735009,
"grad_norm": 3.7238466219935935,
"learning_rate": 6.846738401634898e-07,
"loss": 0.776,
"step": 300
},
{
"epoch": 1.1605415860735009,
"eval_loss": 0.8193865418434143,
"eval_runtime": 127.7613,
"eval_samples_per_second": 57.529,
"eval_steps_per_second": 0.9,
"step": 300
},
{
"epoch": 1.1798839458413926,
"grad_norm": 3.3793218130154643,
"learning_rate": 6.750822925232663e-07,
"loss": 0.7495,
"step": 305
},
{
"epoch": 1.1992263056092844,
"grad_norm": 3.56289301999189,
"learning_rate": 6.654167366624008e-07,
"loss": 0.7579,
"step": 310
},
{
"epoch": 1.218568665377176,
"grad_norm": 3.55989595423253,
"learning_rate": 6.556812582630059e-07,
"loss": 0.7715,
"step": 315
},
{
"epoch": 1.2379110251450678,
"grad_norm": 3.5614329702070995,
"learning_rate": 6.458799725638248e-07,
"loss": 0.7583,
"step": 320
},
{
"epoch": 1.2572533849129595,
"grad_norm": 3.3734819969541463,
"learning_rate": 6.36017022620698e-07,
"loss": 0.758,
"step": 325
},
{
"epoch": 1.2765957446808511,
"grad_norm": 3.3947723934851015,
"learning_rate": 6.260965775552713e-07,
"loss": 0.7671,
"step": 330
},
{
"epoch": 1.2959381044487428,
"grad_norm": 3.5148140089765434,
"learning_rate": 6.161228307926858e-07,
"loss": 0.7435,
"step": 335
},
{
"epoch": 1.3152804642166345,
"grad_norm": 3.79895410453979,
"learning_rate": 6.060999982889954e-07,
"loss": 0.7515,
"step": 340
},
{
"epoch": 1.3346228239845261,
"grad_norm": 3.2855087244873533,
"learning_rate": 5.960323167490588e-07,
"loss": 0.7294,
"step": 345
},
{
"epoch": 1.3539651837524178,
"grad_norm": 3.8271832901061886,
"learning_rate": 5.859240418356614e-07,
"loss": 0.7598,
"step": 350
},
{
"epoch": 1.3539651837524178,
"eval_loss": 0.8137449622154236,
"eval_runtime": 127.7492,
"eval_samples_per_second": 57.535,
"eval_steps_per_second": 0.9,
"step": 350
},
{
"epoch": 1.3733075435203095,
"grad_norm": 3.932445955468858,
"learning_rate": 5.757794463706253e-07,
"loss": 0.7626,
"step": 355
},
{
"epoch": 1.3926499032882012,
"grad_norm": 3.5885849985690035,
"learning_rate": 5.656028185286637e-07,
"loss": 0.7548,
"step": 360
},
{
"epoch": 1.4119922630560928,
"grad_norm": 3.654106726967214,
"learning_rate": 5.553984600247463e-07,
"loss": 0.7446,
"step": 365
},
{
"epoch": 1.4313346228239845,
"grad_norm": 3.512502382810576,
"learning_rate": 5.451706842957421e-07,
"loss": 0.7378,
"step": 370
},
{
"epoch": 1.4506769825918762,
"grad_norm": 3.7978244792994285,
"learning_rate": 5.349238146771061e-07,
"loss": 0.7436,
"step": 375
},
{
"epoch": 1.4700193423597678,
"grad_norm": 3.542579158199173,
"learning_rate": 5.246621825753827e-07,
"loss": 0.7549,
"step": 380
},
{
"epoch": 1.4893617021276595,
"grad_norm": 3.538084352067931,
"learning_rate": 5.143901256372967e-07,
"loss": 0.7495,
"step": 385
},
{
"epoch": 1.5087040618955512,
"grad_norm": 3.617738720729932,
"learning_rate": 5.041119859162068e-07,
"loss": 0.7396,
"step": 390
},
{
"epoch": 1.528046421663443,
"grad_norm": 3.673050568373933,
"learning_rate": 4.938321080366968e-07,
"loss": 0.7436,
"step": 395
},
{
"epoch": 1.5473887814313345,
"grad_norm": 3.6088143658238208,
"learning_rate": 4.835548373580792e-07,
"loss": 0.7539,
"step": 400
},
{
"epoch": 1.5473887814313345,
"eval_loss": 0.8074547648429871,
"eval_runtime": 127.7746,
"eval_samples_per_second": 57.523,
"eval_steps_per_second": 0.9,
"step": 400
},
{
"epoch": 1.5667311411992264,
"grad_norm": 3.605893887637476,
"learning_rate": 4.73284518137589e-07,
"loss": 0.7371,
"step": 405
},
{
"epoch": 1.5860735009671179,
"grad_norm": 3.5111892276140098,
"learning_rate": 4.630254916940423e-07,
"loss": 0.7502,
"step": 410
},
{
"epoch": 1.6054158607350097,
"grad_norm": 3.7061953647378085,
"learning_rate": 4.5278209457273825e-07,
"loss": 0.7455,
"step": 415
},
{
"epoch": 1.6247582205029012,
"grad_norm": 3.7197608595379426,
"learning_rate": 4.425586567123779e-07,
"loss": 0.7348,
"step": 420
},
{
"epoch": 1.644100580270793,
"grad_norm": 3.950557508016625,
"learning_rate": 4.3235949961477627e-07,
"loss": 0.7528,
"step": 425
},
{
"epoch": 1.6634429400386848,
"grad_norm": 3.4882683728185966,
"learning_rate": 4.2218893451814e-07,
"loss": 0.7233,
"step": 430
},
{
"epoch": 1.6827852998065764,
"grad_norm": 3.7564603432326535,
"learning_rate": 4.120512605746842e-07,
"loss": 0.762,
"step": 435
},
{
"epoch": 1.702127659574468,
"grad_norm": 3.735016928935352,
"learning_rate": 4.019507630333577e-07,
"loss": 0.7333,
"step": 440
},
{
"epoch": 1.7214700193423598,
"grad_norm": 3.666531076735895,
"learning_rate": 3.9189171142844553e-07,
"loss": 0.7343,
"step": 445
},
{
"epoch": 1.7408123791102514,
"grad_norm": 3.751741635827122,
"learning_rate": 3.8187835777481375e-07,
"loss": 0.7273,
"step": 450
},
{
"epoch": 1.7408123791102514,
"eval_loss": 0.8021160364151001,
"eval_runtime": 127.7286,
"eval_samples_per_second": 57.544,
"eval_steps_per_second": 0.9,
"step": 450
},
{
"epoch": 1.760154738878143,
"grad_norm": 3.6597963453482656,
"learning_rate": 3.7191493477056086e-07,
"loss": 0.7416,
"step": 455
},
{
"epoch": 1.7794970986460348,
"grad_norm": 3.7522608563050537,
"learning_rate": 3.620056540078323e-07,
"loss": 0.7464,
"step": 460
},
{
"epoch": 1.7988394584139265,
"grad_norm": 3.6374420398476457,
"learning_rate": 3.5215470419255897e-07,
"loss": 0.7279,
"step": 465
},
{
"epoch": 1.8181818181818183,
"grad_norm": 3.7809155789430045,
"learning_rate": 3.423662493738687e-07,
"loss": 0.7464,
"step": 470
},
{
"epoch": 1.8375241779497098,
"grad_norm": 3.5450074284083444,
"learning_rate": 3.3264442718392014e-07,
"loss": 0.7295,
"step": 475
},
{
"epoch": 1.8568665377176017,
"grad_norm": 3.676696426875523,
"learning_rate": 3.229933470889038e-07,
"loss": 0.7281,
"step": 480
},
{
"epoch": 1.8762088974854931,
"grad_norm": 3.589693765306483,
"learning_rate": 3.134170886519486e-07,
"loss": 0.7319,
"step": 485
},
{
"epoch": 1.895551257253385,
"grad_norm": 3.6200404091466933,
"learning_rate": 3.039196998086687e-07,
"loss": 0.7344,
"step": 490
},
{
"epoch": 1.9148936170212765,
"grad_norm": 3.583752869993935,
"learning_rate": 2.9450519515607963e-07,
"loss": 0.7269,
"step": 495
},
{
"epoch": 1.9342359767891684,
"grad_norm": 3.769352285404306,
"learning_rate": 2.8517755425560663e-07,
"loss": 0.7314,
"step": 500
},
{
"epoch": 1.9342359767891684,
"eval_loss": 0.7983231544494629,
"eval_runtime": 127.8024,
"eval_samples_per_second": 57.511,
"eval_steps_per_second": 0.9,
"step": 500
},
{
"epoch": 1.9535783365570598,
"grad_norm": 3.5961562887933747,
"learning_rate": 2.7594071995090283e-07,
"loss": 0.7424,
"step": 505
},
{
"epoch": 1.9729206963249517,
"grad_norm": 3.821907492658478,
"learning_rate": 2.667985967011878e-07,
"loss": 0.7332,
"step": 510
},
{
"epoch": 1.9922630560928434,
"grad_norm": 3.8816672682352626,
"learning_rate": 2.577550489308123e-07,
"loss": 0.7328,
"step": 515
},
{
"epoch": 2.011605415860735,
"grad_norm": 4.108838347130274,
"learning_rate": 2.488138993957452e-07,
"loss": 0.7194,
"step": 520
},
{
"epoch": 2.0309477756286265,
"grad_norm": 3.5548470436404433,
"learning_rate": 2.3997892756767394e-07,
"loss": 0.6922,
"step": 525
},
{
"epoch": 2.0502901353965184,
"grad_norm": 3.7279675160489827,
"learning_rate": 2.3125386803640183e-07,
"loss": 0.6936,
"step": 530
},
{
"epoch": 2.0696324951644103,
"grad_norm": 3.884838129631813,
"learning_rate": 2.226424089312174e-07,
"loss": 0.6973,
"step": 535
},
{
"epoch": 2.0889748549323017,
"grad_norm": 3.592031200352461,
"learning_rate": 2.1414819036190157e-07,
"loss": 0.6941,
"step": 540
},
{
"epoch": 2.1083172147001936,
"grad_norm": 3.807636152221673,
"learning_rate": 2.057748028800344e-07,
"loss": 0.7032,
"step": 545
},
{
"epoch": 2.127659574468085,
"grad_norm": 3.791470897609836,
"learning_rate": 1.9752578596124952e-07,
"loss": 0.7094,
"step": 550
},
{
"epoch": 2.127659574468085,
"eval_loss": 0.8028796911239624,
"eval_runtime": 127.4685,
"eval_samples_per_second": 57.661,
"eval_steps_per_second": 0.902,
"step": 550
},
{
"epoch": 2.147001934235977,
"grad_norm": 3.801001026224913,
"learning_rate": 1.8940462650907912e-07,
"loss": 0.6816,
"step": 555
},
{
"epoch": 2.1663442940038684,
"grad_norm": 3.7897529261988403,
"learning_rate": 1.8141475738102086e-07,
"loss": 0.6795,
"step": 560
},
{
"epoch": 2.1856866537717603,
"grad_norm": 3.873193867336735,
"learning_rate": 1.735595559374508e-07,
"loss": 0.6968,
"step": 565
},
{
"epoch": 2.2050290135396517,
"grad_norm": 4.010411940489978,
"learning_rate": 1.6584234261399532e-07,
"loss": 0.6982,
"step": 570
},
{
"epoch": 2.2243713733075436,
"grad_norm": 3.689366962655481,
"learning_rate": 1.5826637951796474e-07,
"loss": 0.684,
"step": 575
},
{
"epoch": 2.243713733075435,
"grad_norm": 3.702408168586401,
"learning_rate": 1.5083486904944387e-07,
"loss": 0.678,
"step": 580
},
{
"epoch": 2.263056092843327,
"grad_norm": 3.7789654460337467,
"learning_rate": 1.4355095254761974e-07,
"loss": 0.7011,
"step": 585
},
{
"epoch": 2.2823984526112184,
"grad_norm": 3.757564264693129,
"learning_rate": 1.3641770896292082e-07,
"loss": 0.7105,
"step": 590
},
{
"epoch": 2.3017408123791103,
"grad_norm": 3.6208442620229238,
"learning_rate": 1.2943815355552851e-07,
"loss": 0.6939,
"step": 595
},
{
"epoch": 2.3210831721470018,
"grad_norm": 3.661781095521188,
"learning_rate": 1.226152366208104e-07,
"loss": 0.7073,
"step": 600
},
{
"epoch": 2.3210831721470018,
"eval_loss": 0.8018428683280945,
"eval_runtime": 127.8893,
"eval_samples_per_second": 57.472,
"eval_steps_per_second": 0.899,
"step": 600
},
{
"epoch": 2.3404255319148937,
"grad_norm": 3.680478717422678,
"learning_rate": 1.1595184224221466e-07,
"loss": 0.6853,
"step": 605
},
{
"epoch": 2.359767891682785,
"grad_norm": 4.069897412011099,
"learning_rate": 1.0945078707215221e-07,
"loss": 0.6988,
"step": 610
},
{
"epoch": 2.379110251450677,
"grad_norm": 3.616210595264312,
"learning_rate": 1.0311481914138371e-07,
"loss": 0.6789,
"step": 615
},
{
"epoch": 2.398452611218569,
"grad_norm": 3.8468836483394195,
"learning_rate": 9.6946616697411e-08,
"loss": 0.6861,
"step": 620
},
{
"epoch": 2.4177949709864603,
"grad_norm": 3.719763619027049,
"learning_rate": 9.094878707236841e-08,
"loss": 0.701,
"step": 625
},
{
"epoch": 2.437137330754352,
"grad_norm": 3.817198136247863,
"learning_rate": 8.512386558088919e-08,
"loss": 0.7061,
"step": 630
},
{
"epoch": 2.4564796905222437,
"grad_norm": 3.9942569258409915,
"learning_rate": 7.947431444841452e-08,
"loss": 0.7032,
"step": 635
},
{
"epoch": 2.4758220502901356,
"grad_norm": 3.752182756744305,
"learning_rate": 7.400252177039784e-08,
"loss": 0.6791,
"step": 640
},
{
"epoch": 2.495164410058027,
"grad_norm": 3.7498129520839885,
"learning_rate": 6.871080050284394e-08,
"loss": 0.6837,
"step": 645
},
{
"epoch": 2.514506769825919,
"grad_norm": 3.796492759569209,
"learning_rate": 6.360138748461013e-08,
"loss": 0.6944,
"step": 650
},
{
"epoch": 2.514506769825919,
"eval_loss": 0.8010697364807129,
"eval_runtime": 127.6709,
"eval_samples_per_second": 57.57,
"eval_steps_per_second": 0.901,
"step": 650
},
{
"epoch": 2.5338491295938104,
"grad_norm": 3.761294248355449,
"learning_rate": 5.867644249188247e-08,
"loss": 0.69,
"step": 655
},
{
"epoch": 2.5531914893617023,
"grad_norm": 3.972673839505174,
"learning_rate": 5.3938047325226944e-08,
"loss": 0.7015,
"step": 660
},
{
"epoch": 2.5725338491295937,
"grad_norm": 3.6874503743702918,
"learning_rate": 4.9388204929601326e-08,
"loss": 0.6942,
"step": 665
},
{
"epoch": 2.5918762088974856,
"grad_norm": 3.8115465970994156,
"learning_rate": 4.5028838547699346e-08,
"loss": 0.6851,
"step": 670
},
{
"epoch": 2.611218568665377,
"grad_norm": 3.7125576734640138,
"learning_rate": 4.0861790906985884e-08,
"loss": 0.6952,
"step": 675
},
{
"epoch": 2.630560928433269,
"grad_norm": 3.7316375539263666,
"learning_rate": 3.6888823440766214e-08,
"loss": 0.6766,
"step": 680
},
{
"epoch": 2.6499032882011604,
"grad_norm": 3.821854585997512,
"learning_rate": 3.311161554361874e-08,
"loss": 0.6766,
"step": 685
},
{
"epoch": 2.6692456479690523,
"grad_norm": 3.790693563798963,
"learning_rate": 2.9531763861505964e-08,
"loss": 0.6832,
"step": 690
},
{
"epoch": 2.6885880077369437,
"grad_norm": 3.528383170970932,
"learning_rate": 2.6150781616863794e-08,
"loss": 0.6797,
"step": 695
},
{
"epoch": 2.7079303675048356,
"grad_norm": 3.703907558068266,
"learning_rate": 2.2970097968953994e-08,
"loss": 0.6841,
"step": 700
},
{
"epoch": 2.7079303675048356,
"eval_loss": 0.8003172278404236,
"eval_runtime": 127.7485,
"eval_samples_per_second": 57.535,
"eval_steps_per_second": 0.9,
"step": 700
},
{
"epoch": 2.7272727272727275,
"grad_norm": 4.045927762778011,
"learning_rate": 1.9991057409751267e-08,
"loss": 0.7023,
"step": 705
},
{
"epoch": 2.746615087040619,
"grad_norm": 3.7217800342328053,
"learning_rate": 1.7214919195619125e-08,
"loss": 0.6809,
"step": 710
},
{
"epoch": 2.7659574468085104,
"grad_norm": 3.849855952936902,
"learning_rate": 1.4642856815015758e-08,
"loss": 0.6918,
"step": 715
},
{
"epoch": 2.7852998065764023,
"grad_norm": 3.4541052934042704,
"learning_rate": 1.2275957492453692e-08,
"loss": 0.6672,
"step": 720
},
{
"epoch": 2.804642166344294,
"grad_norm": 3.752950947257332,
"learning_rate": 1.0115221728924706e-08,
"loss": 0.6954,
"step": 725
},
{
"epoch": 2.8239845261121856,
"grad_norm": 3.8044625608243603,
"learning_rate": 8.161562878982398e-09,
"loss": 0.7087,
"step": 730
},
{
"epoch": 2.843326885880077,
"grad_norm": 3.877886572852321,
"learning_rate": 6.415806764662524e-09,
"loss": 0.694,
"step": 735
},
{
"epoch": 2.862669245647969,
"grad_norm": 3.905892889322174,
"learning_rate": 4.8786913264033945e-09,
"loss": 0.6904,
"step": 740
},
{
"epoch": 2.882011605415861,
"grad_norm": 3.6119800064006884,
"learning_rate": 3.5508663111147306e-09,
"loss": 0.6895,
"step": 745
},
{
"epoch": 2.9013539651837523,
"grad_norm": 3.5602581412399625,
"learning_rate": 2.432892997526026e-09,
"loss": 0.6832,
"step": 750
},
{
"epoch": 2.9013539651837523,
"eval_loss": 0.8001306056976318,
"eval_runtime": 127.7405,
"eval_samples_per_second": 57.539,
"eval_steps_per_second": 0.9,
"step": 750
},
{
"epoch": 2.920696324951644,
"grad_norm": 4.020301585342686,
"learning_rate": 1.5252439589311107e-09,
"loss": 0.6995,
"step": 755
},
{
"epoch": 2.9400386847195357,
"grad_norm": 3.794246694730058,
"learning_rate": 8.283028634287203e-10,
"loss": 0.69,
"step": 760
},
{
"epoch": 2.9593810444874276,
"grad_norm": 3.7696382426574115,
"learning_rate": 3.4236431174428094e-10,
"loss": 0.6655,
"step": 765
},
{
"epoch": 2.978723404255319,
"grad_norm": 3.7732134199260745,
"learning_rate": 6.763371270035457e-11,
"loss": 0.678,
"step": 770
},
{
"epoch": 2.9941972920696323,
"step": 774,
"total_flos": 4563620855808000.0,
"train_loss": 0.7853651687464357,
"train_runtime": 12073.1716,
"train_samples_per_second": 16.437,
"train_steps_per_second": 0.064
}
],
"logging_steps": 5,
"max_steps": 774,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4563620855808000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}