{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9941972920696323,
"eval_steps": 50,
"global_step": 774,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019342359767891684,
"grad_norm": 18.29429795265829,
"learning_rate": 5e-07,
"loss": 1.7218,
"step": 5
},
{
"epoch": 0.03868471953578337,
"grad_norm": 12.374896347790715,
"learning_rate": 1e-06,
"loss": 1.5873,
"step": 10
},
{
"epoch": 0.058027079303675046,
"grad_norm": 7.9779911036982005,
"learning_rate": 9.998943236640677e-07,
"loss": 1.2946,
"step": 15
},
{
"epoch": 0.07736943907156674,
"grad_norm": 4.26597805437282,
"learning_rate": 9.995773393262229e-07,
"loss": 1.1398,
"step": 20
},
{
"epoch": 0.09671179883945841,
"grad_norm": 3.7901464214490552,
"learning_rate": 9.99049180977439e-07,
"loss": 1.0768,
"step": 25
},
{
"epoch": 0.11605415860735009,
"grad_norm": 3.719211053411099,
"learning_rate": 9.983100718730718e-07,
"loss": 1.0361,
"step": 30
},
{
"epoch": 0.13539651837524178,
"grad_norm": 3.467191764227361,
"learning_rate": 9.973603244384906e-07,
"loss": 0.9909,
"step": 35
},
{
"epoch": 0.15473887814313347,
"grad_norm": 3.3499969223078385,
"learning_rate": 9.9620034013701e-07,
"loss": 0.9577,
"step": 40
},
{
"epoch": 0.17408123791102514,
"grad_norm": 3.53015393903578,
"learning_rate": 9.948306093001933e-07,
"loss": 0.9516,
"step": 45
},
{
"epoch": 0.19342359767891681,
"grad_norm": 3.39919184233192,
"learning_rate": 9.932517109205849e-07,
"loss": 0.9353,
"step": 50
},
{
"epoch": 0.19342359767891681,
"eval_loss": 0.9258183836936951,
"eval_runtime": 127.8238,
"eval_samples_per_second": 57.501,
"eval_steps_per_second": 0.9,
"step": 50
},
{
"epoch": 0.2127659574468085,
"grad_norm": 3.2060428601111997,
"learning_rate": 9.914643124069666e-07,
"loss": 0.9063,
"step": 55
},
{
"epoch": 0.23210831721470018,
"grad_norm": 3.666917330552863,
"learning_rate": 9.89469169302242e-07,
"loss": 0.9336,
"step": 60
},
{
"epoch": 0.2514506769825919,
"grad_norm": 3.9838409320760357,
"learning_rate": 9.872671249640626e-07,
"loss": 0.9231,
"step": 65
},
{
"epoch": 0.27079303675048355,
"grad_norm": 3.518667919651293,
"learning_rate": 9.848591102083375e-07,
"loss": 0.9306,
"step": 70
},
{
"epoch": 0.2901353965183752,
"grad_norm": 3.2598329874793666,
"learning_rate": 9.822461429157716e-07,
"loss": 0.9022,
"step": 75
},
{
"epoch": 0.30947775628626695,
"grad_norm": 3.496771288813608,
"learning_rate": 9.794293276016023e-07,
"loss": 0.8818,
"step": 80
},
{
"epoch": 0.3288201160541586,
"grad_norm": 3.5451094564430847,
"learning_rate": 9.764098549487155e-07,
"loss": 0.8897,
"step": 85
},
{
"epoch": 0.3481624758220503,
"grad_norm": 3.2267999853614593,
"learning_rate": 9.731890013043367e-07,
"loss": 0.8789,
"step": 90
},
{
"epoch": 0.36750483558994196,
"grad_norm": 3.5002585177526213,
"learning_rate": 9.697681281405128e-07,
"loss": 0.8748,
"step": 95
},
{
"epoch": 0.38684719535783363,
"grad_norm": 3.365687264198066,
"learning_rate": 9.6614868147861e-07,
"loss": 0.8526,
"step": 100
},
{
"epoch": 0.38684719535783363,
"eval_loss": 0.8687371015548706,
"eval_runtime": 127.6254,
"eval_samples_per_second": 57.59,
"eval_steps_per_second": 0.901,
"step": 100
},
{
"epoch": 0.40618955512572535,
"grad_norm": 3.379303752602587,
"learning_rate": 9.623321912780744e-07,
"loss": 0.865,
"step": 105
},
{
"epoch": 0.425531914893617,
"grad_norm": 3.544869473720659,
"learning_rate": 9.583202707897073e-07,
"loss": 0.8807,
"step": 110
},
{
"epoch": 0.4448742746615087,
"grad_norm": 3.7015145875218574,
"learning_rate": 9.54114615873738e-07,
"loss": 0.8707,
"step": 115
},
{
"epoch": 0.46421663442940037,
"grad_norm": 3.6640282382892218,
"learning_rate": 9.497170042829736e-07,
"loss": 0.8644,
"step": 120
},
{
"epoch": 0.4835589941972921,
"grad_norm": 3.5307683407864134,
"learning_rate": 9.451292949113329e-07,
"loss": 0.8794,
"step": 125
},
{
"epoch": 0.5029013539651838,
"grad_norm": 3.402033918475512,
"learning_rate": 9.403534270080829e-07,
"loss": 0.872,
"step": 130
},
{
"epoch": 0.5222437137330754,
"grad_norm": 3.310826176140146,
"learning_rate": 9.353914193581072e-07,
"loss": 0.8677,
"step": 135
},
{
"epoch": 0.5415860735009671,
"grad_norm": 3.2406150003457452,
"learning_rate": 9.302453694285548e-07,
"loss": 0.8474,
"step": 140
},
{
"epoch": 0.5609284332688588,
"grad_norm": 3.4974393211031907,
"learning_rate": 9.249174524822305e-07,
"loss": 0.8465,
"step": 145
},
{
"epoch": 0.5802707930367504,
"grad_norm": 3.4284093385969925,
"learning_rate": 9.19409920658098e-07,
"loss": 0.8383,
"step": 150
},
{
"epoch": 0.5802707930367504,
"eval_loss": 0.8431525826454163,
"eval_runtime": 127.7056,
"eval_samples_per_second": 57.554,
"eval_steps_per_second": 0.901,
"step": 150
},
{
"epoch": 0.5996131528046421,
"grad_norm": 3.6968279151532384,
"learning_rate": 9.137251020192907e-07,
"loss": 0.8423,
"step": 155
},
{
"epoch": 0.6189555125725339,
"grad_norm": 3.50833637505395,
"learning_rate": 9.078653995690246e-07,
"loss": 0.8387,
"step": 160
},
{
"epoch": 0.6382978723404256,
"grad_norm": 3.3457806997461987,
"learning_rate": 9.018332902348388e-07,
"loss": 0.8633,
"step": 165
},
{
"epoch": 0.6576402321083172,
"grad_norm": 3.804547305635231,
"learning_rate": 8.956313238215823e-07,
"loss": 0.8376,
"step": 170
},
{
"epoch": 0.6769825918762089,
"grad_norm": 3.640636148585517,
"learning_rate": 8.892621219336e-07,
"loss": 0.8438,
"step": 175
},
{
"epoch": 0.6963249516441006,
"grad_norm": 3.4756125888954723,
"learning_rate": 8.827283768665648e-07,
"loss": 0.8607,
"step": 180
},
{
"epoch": 0.7156673114119922,
"grad_norm": 3.3875203995409215,
"learning_rate": 8.760328504694317e-07,
"loss": 0.8301,
"step": 185
},
{
"epoch": 0.7350096711798839,
"grad_norm": 3.372082258308208,
"learning_rate": 8.691783729769873e-07,
"loss": 0.8263,
"step": 190
},
{
"epoch": 0.7543520309477756,
"grad_norm": 3.3557984138015637,
"learning_rate": 8.621678418134963e-07,
"loss": 0.8187,
"step": 195
},
{
"epoch": 0.7736943907156673,
"grad_norm": 3.335888461384703,
"learning_rate": 8.550042203679439e-07,
"loss": 0.8264,
"step": 200
},
{
"epoch": 0.7736943907156673,
"eval_loss": 0.8269398808479309,
"eval_runtime": 127.5697,
"eval_samples_per_second": 57.616,
"eval_steps_per_second": 0.901,
"step": 200
},
{
"epoch": 0.793036750483559,
"grad_norm": 3.423751371323696,
"learning_rate": 8.476905367413957e-07,
"loss": 0.8231,
"step": 205
},
{
"epoch": 0.8123791102514507,
"grad_norm": 3.512146342996446,
"learning_rate": 8.402298824670029e-07,
"loss": 0.8383,
"step": 210
},
{
"epoch": 0.8317214700193424,
"grad_norm": 3.679961917362473,
"learning_rate": 8.326254112031949e-07,
"loss": 0.8352,
"step": 215
},
{
"epoch": 0.851063829787234,
"grad_norm": 3.5282426701829825,
"learning_rate": 8.248803374006113e-07,
"loss": 0.8121,
"step": 220
},
{
"epoch": 0.8704061895551257,
"grad_norm": 3.6512031815177486,
"learning_rate": 8.169979349433358e-07,
"loss": 0.8376,
"step": 225
},
{
"epoch": 0.8897485493230174,
"grad_norm": 3.235414274362894,
"learning_rate": 8.089815357650089e-07,
"loss": 0.8303,
"step": 230
},
{
"epoch": 0.9090909090909091,
"grad_norm": 3.253247851140818,
"learning_rate": 8.008345284404003e-07,
"loss": 0.8302,
"step": 235
},
{
"epoch": 0.9284332688588007,
"grad_norm": 3.398661312301685,
"learning_rate": 7.925603567530418e-07,
"loss": 0.8388,
"step": 240
},
{
"epoch": 0.9477756286266924,
"grad_norm": 3.4458534561793144,
"learning_rate": 7.841625182395206e-07,
"loss": 0.8354,
"step": 245
},
{
"epoch": 0.9671179883945842,
"grad_norm": 3.52003203310209,
"learning_rate": 7.756445627110522e-07,
"loss": 0.825,
"step": 250
},
{
"epoch": 0.9671179883945842,
"eval_loss": 0.8141016960144043,
"eval_runtime": 127.5727,
"eval_samples_per_second": 57.614,
"eval_steps_per_second": 0.901,
"step": 250
},
{
"epoch": 0.9864603481624759,
"grad_norm": 3.5618840305239043,
"learning_rate": 7.670100907529557e-07,
"loss": 0.8222,
"step": 255
},
{
"epoch": 1.0058027079303675,
"grad_norm": 3.8599831213139533,
"learning_rate": 7.582627522026685e-07,
"loss": 0.8146,
"step": 260
},
{
"epoch": 1.0251450676982592,
"grad_norm": 3.541052361485808,
"learning_rate": 7.49406244606939e-07,
"loss": 0.7562,
"step": 265
},
{
"epoch": 1.0444874274661509,
"grad_norm": 3.6633110448636996,
"learning_rate": 7.404443116588547e-07,
"loss": 0.7751,
"step": 270
},
{
"epoch": 1.0638297872340425,
"grad_norm": 3.488605973668302,
"learning_rate": 7.31380741615363e-07,
"loss": 0.7661,
"step": 275
},
{
"epoch": 1.0831721470019342,
"grad_norm": 3.6599674759653698,
"learning_rate": 7.222193656959546e-07,
"loss": 0.7692,
"step": 280
},
{
"epoch": 1.1025145067698259,
"grad_norm": 3.7997115771096177,
"learning_rate": 7.129640564631863e-07,
"loss": 0.7556,
"step": 285
},
{
"epoch": 1.1218568665377175,
"grad_norm": 3.6182500776322213,
"learning_rate": 7.036187261857288e-07,
"loss": 0.7641,
"step": 290
},
{
"epoch": 1.1411992263056092,
"grad_norm": 3.4091769100972584,
"learning_rate": 6.941873251846293e-07,
"loss": 0.7636,
"step": 295
},
{
"epoch": 1.1605415860735009,
"grad_norm": 3.5604176730475974,
"learning_rate": 6.846738401634898e-07,
"loss": 0.7583,
"step": 300
},
{
"epoch": 1.1605415860735009,
"eval_loss": 0.8098444938659668,
"eval_runtime": 127.8067,
"eval_samples_per_second": 57.509,
"eval_steps_per_second": 0.9,
"step": 300
},
{
"epoch": 1.1798839458413926,
"grad_norm": 3.549344381739553,
"learning_rate": 6.750822925232663e-07,
"loss": 0.7756,
"step": 305
},
{
"epoch": 1.1992263056092844,
"grad_norm": 3.925155393010541,
"learning_rate": 6.654167366624008e-07,
"loss": 0.7463,
"step": 310
},
{
"epoch": 1.218568665377176,
"grad_norm": 3.4515420712536202,
"learning_rate": 6.556812582630059e-07,
"loss": 0.7564,
"step": 315
},
{
"epoch": 1.2379110251450678,
"grad_norm": 3.516145369254403,
"learning_rate": 6.458799725638248e-07,
"loss": 0.7619,
"step": 320
},
{
"epoch": 1.2572533849129595,
"grad_norm": 3.588101859460772,
"learning_rate": 6.36017022620698e-07,
"loss": 0.7579,
"step": 325
},
{
"epoch": 1.2765957446808511,
"grad_norm": 3.499006102072433,
"learning_rate": 6.260965775552713e-07,
"loss": 0.7517,
"step": 330
},
{
"epoch": 1.2959381044487428,
"grad_norm": 3.8452645432049355,
"learning_rate": 6.161228307926858e-07,
"loss": 0.7615,
"step": 335
},
{
"epoch": 1.3152804642166345,
"grad_norm": 3.530157481700969,
"learning_rate": 6.060999982889954e-07,
"loss": 0.7349,
"step": 340
},
{
"epoch": 1.3346228239845261,
"grad_norm": 3.511064558056812,
"learning_rate": 5.960323167490588e-07,
"loss": 0.7453,
"step": 345
},
{
"epoch": 1.3539651837524178,
"grad_norm": 3.681202337191524,
"learning_rate": 5.859240418356614e-07,
"loss": 0.7459,
"step": 350
},
{
"epoch": 1.3539651837524178,
"eval_loss": 0.8035895228385925,
"eval_runtime": 127.6066,
"eval_samples_per_second": 57.599,
"eval_steps_per_second": 0.901,
"step": 350
},
{
"epoch": 1.3733075435203095,
"grad_norm": 3.7050410147150594,
"learning_rate": 5.757794463706253e-07,
"loss": 0.7603,
"step": 355
},
{
"epoch": 1.3926499032882012,
"grad_norm": 3.6286211129571107,
"learning_rate": 5.656028185286637e-07,
"loss": 0.7581,
"step": 360
},
{
"epoch": 1.4119922630560928,
"grad_norm": 3.705421837944294,
"learning_rate": 5.553984600247463e-07,
"loss": 0.7422,
"step": 365
},
{
"epoch": 1.4313346228239845,
"grad_norm": 3.7953601373705026,
"learning_rate": 5.451706842957421e-07,
"loss": 0.7502,
"step": 370
},
{
"epoch": 1.4506769825918762,
"grad_norm": 3.8014187386742777,
"learning_rate": 5.349238146771061e-07,
"loss": 0.7483,
"step": 375
},
{
"epoch": 1.4700193423597678,
"grad_norm": 3.504282700491077,
"learning_rate": 5.246621825753827e-07,
"loss": 0.7396,
"step": 380
},
{
"epoch": 1.4893617021276595,
"grad_norm": 3.5095582192022743,
"learning_rate": 5.143901256372967e-07,
"loss": 0.7377,
"step": 385
},
{
"epoch": 1.5087040618955512,
"grad_norm": 3.4475672003161257,
"learning_rate": 5.041119859162068e-07,
"loss": 0.7393,
"step": 390
},
{
"epoch": 1.528046421663443,
"grad_norm": 3.571290931797336,
"learning_rate": 4.938321080366968e-07,
"loss": 0.7338,
"step": 395
},
{
"epoch": 1.5473887814313345,
"grad_norm": 3.6205663721700914,
"learning_rate": 4.835548373580792e-07,
"loss": 0.7534,
"step": 400
},
{
"epoch": 1.5473887814313345,
"eval_loss": 0.7977527379989624,
"eval_runtime": 127.6,
"eval_samples_per_second": 57.602,
"eval_steps_per_second": 0.901,
"step": 400
},
{
"epoch": 1.5667311411992264,
"grad_norm": 3.5535922972652334,
"learning_rate": 4.73284518137589e-07,
"loss": 0.7492,
"step": 405
},
{
"epoch": 1.5860735009671179,
"grad_norm": 3.5273195524585454,
"learning_rate": 4.630254916940423e-07,
"loss": 0.7479,
"step": 410
},
{
"epoch": 1.6054158607350097,
"grad_norm": 3.5266217127055888,
"learning_rate": 4.5278209457273825e-07,
"loss": 0.732,
"step": 415
},
{
"epoch": 1.6247582205029012,
"grad_norm": 3.9206092134824697,
"learning_rate": 4.425586567123779e-07,
"loss": 0.7312,
"step": 420
},
{
"epoch": 1.644100580270793,
"grad_norm": 3.5562647759590305,
"learning_rate": 4.3235949961477627e-07,
"loss": 0.7482,
"step": 425
},
{
"epoch": 1.6634429400386848,
"grad_norm": 3.4662308137356646,
"learning_rate": 4.2218893451814e-07,
"loss": 0.7393,
"step": 430
},
{
"epoch": 1.6827852998065764,
"grad_norm": 3.7128827415530004,
"learning_rate": 4.120512605746842e-07,
"loss": 0.7432,
"step": 435
},
{
"epoch": 1.702127659574468,
"grad_norm": 3.5592695862370074,
"learning_rate": 4.019507630333577e-07,
"loss": 0.7404,
"step": 440
},
{
"epoch": 1.7214700193423598,
"grad_norm": 3.710326956161109,
"learning_rate": 3.9189171142844553e-07,
"loss": 0.7559,
"step": 445
},
{
"epoch": 1.7408123791102514,
"grad_norm": 3.523728094197245,
"learning_rate": 3.8187835777481375e-07,
"loss": 0.7429,
"step": 450
},
{
"epoch": 1.7408123791102514,
"eval_loss": 0.7935256361961365,
"eval_runtime": 127.7706,
"eval_samples_per_second": 57.525,
"eval_steps_per_second": 0.9,
"step": 450
},
{
"epoch": 1.760154738878143,
"grad_norm": 3.524155101820067,
"learning_rate": 3.7191493477056086e-07,
"loss": 0.721,
"step": 455
},
{
"epoch": 1.7794970986460348,
"grad_norm": 3.307755294406217,
"learning_rate": 3.620056540078323e-07,
"loss": 0.7279,
"step": 460
},
{
"epoch": 1.7988394584139265,
"grad_norm": 3.450615128725478,
"learning_rate": 3.5215470419255897e-07,
"loss": 0.7329,
"step": 465
},
{
"epoch": 1.8181818181818183,
"grad_norm": 3.4244105953540442,
"learning_rate": 3.423662493738687e-07,
"loss": 0.7502,
"step": 470
},
{
"epoch": 1.8375241779497098,
"grad_norm": 3.6595088524698576,
"learning_rate": 3.3264442718392014e-07,
"loss": 0.741,
"step": 475
},
{
"epoch": 1.8568665377176017,
"grad_norm": 3.898153864979138,
"learning_rate": 3.229933470889038e-07,
"loss": 0.7573,
"step": 480
},
{
"epoch": 1.8762088974854931,
"grad_norm": 3.446217110493551,
"learning_rate": 3.134170886519486e-07,
"loss": 0.7414,
"step": 485
},
{
"epoch": 1.895551257253385,
"grad_norm": 3.640466931045702,
"learning_rate": 3.039196998086687e-07,
"loss": 0.7201,
"step": 490
},
{
"epoch": 1.9148936170212765,
"grad_norm": 3.8276180248573355,
"learning_rate": 2.9450519515607963e-07,
"loss": 0.7475,
"step": 495
},
{
"epoch": 1.9342359767891684,
"grad_norm": 3.6601723064769893,
"learning_rate": 2.8517755425560663e-07,
"loss": 0.7409,
"step": 500
},
{
"epoch": 1.9342359767891684,
"eval_loss": 0.7896685004234314,
"eval_runtime": 127.5357,
"eval_samples_per_second": 57.631,
"eval_steps_per_second": 0.902,
"step": 500
},
{
"epoch": 1.9535783365570598,
"grad_norm": 3.5497802979146456,
"learning_rate": 2.7594071995090283e-07,
"loss": 0.7451,
"step": 505
},
{
"epoch": 1.9729206963249517,
"grad_norm": 3.402165439915187,
"learning_rate": 2.667985967011878e-07,
"loss": 0.7266,
"step": 510
},
{
"epoch": 1.9922630560928434,
"grad_norm": 3.7514406502780964,
"learning_rate": 2.577550489308123e-07,
"loss": 0.7307,
"step": 515
},
{
"epoch": 2.011605415860735,
"grad_norm": 4.09122639389021,
"learning_rate": 2.488138993957452e-07,
"loss": 0.7063,
"step": 520
},
{
"epoch": 2.0309477756286265,
"grad_norm": 4.141130274306359,
"learning_rate": 2.3997892756767394e-07,
"loss": 0.6951,
"step": 525
},
{
"epoch": 2.0502901353965184,
"grad_norm": 3.67252888168734,
"learning_rate": 2.3125386803640183e-07,
"loss": 0.7084,
"step": 530
},
{
"epoch": 2.0696324951644103,
"grad_norm": 3.5582226093164433,
"learning_rate": 2.226424089312174e-07,
"loss": 0.7051,
"step": 535
},
{
"epoch": 2.0889748549323017,
"grad_norm": 3.696880117962318,
"learning_rate": 2.1414819036190157e-07,
"loss": 0.6955,
"step": 540
},
{
"epoch": 2.1083172147001936,
"grad_norm": 3.6604807985161303,
"learning_rate": 2.057748028800344e-07,
"loss": 0.6965,
"step": 545
},
{
"epoch": 2.127659574468085,
"grad_norm": 3.751745079490738,
"learning_rate": 1.9752578596124952e-07,
"loss": 0.6927,
"step": 550
},
{
"epoch": 2.127659574468085,
"eval_loss": 0.7948961853981018,
"eval_runtime": 127.4872,
"eval_samples_per_second": 57.653,
"eval_steps_per_second": 0.902,
"step": 550
},
{
"epoch": 2.147001934235977,
"grad_norm": 3.9167862522448957,
"learning_rate": 1.8940462650907912e-07,
"loss": 0.6882,
"step": 555
},
{
"epoch": 2.1663442940038684,
"grad_norm": 3.560294152990905,
"learning_rate": 1.8141475738102086e-07,
"loss": 0.6858,
"step": 560
},
{
"epoch": 2.1856866537717603,
"grad_norm": 3.642012709130625,
"learning_rate": 1.735595559374508e-07,
"loss": 0.6763,
"step": 565
},
{
"epoch": 2.2050290135396517,
"grad_norm": 3.8138504169072984,
"learning_rate": 1.6584234261399532e-07,
"loss": 0.703,
"step": 570
},
{
"epoch": 2.2243713733075436,
"grad_norm": 4.103706228250993,
"learning_rate": 1.5826637951796474e-07,
"loss": 0.6956,
"step": 575
},
{
"epoch": 2.243713733075435,
"grad_norm": 3.5059744293726336,
"learning_rate": 1.5083486904944387e-07,
"loss": 0.6892,
"step": 580
},
{
"epoch": 2.263056092843327,
"grad_norm": 4.028201423509813,
"learning_rate": 1.4355095254761974e-07,
"loss": 0.6863,
"step": 585
},
{
"epoch": 2.2823984526112184,
"grad_norm": 3.8119585236671893,
"learning_rate": 1.3641770896292082e-07,
"loss": 0.6946,
"step": 590
},
{
"epoch": 2.3017408123791103,
"grad_norm": 3.9781395096904575,
"learning_rate": 1.2943815355552851e-07,
"loss": 0.6938,
"step": 595
},
{
"epoch": 2.3210831721470018,
"grad_norm": 3.8219447395207364,
"learning_rate": 1.226152366208104e-07,
"loss": 0.6889,
"step": 600
},
{
"epoch": 2.3210831721470018,
"eval_loss": 0.7934096455574036,
"eval_runtime": 127.5736,
"eval_samples_per_second": 57.614,
"eval_steps_per_second": 0.901,
"step": 600
},
{
"epoch": 2.3404255319148937,
"grad_norm": 3.784762679951525,
"learning_rate": 1.1595184224221466e-07,
"loss": 0.6897,
"step": 605
},
{
"epoch": 2.359767891682785,
"grad_norm": 4.023132354100149,
"learning_rate": 1.0945078707215221e-07,
"loss": 0.6873,
"step": 610
},
{
"epoch": 2.379110251450677,
"grad_norm": 3.7520760586749953,
"learning_rate": 1.0311481914138371e-07,
"loss": 0.7062,
"step": 615
},
{
"epoch": 2.398452611218569,
"grad_norm": 3.777209444957414,
"learning_rate": 9.6946616697411e-08,
"loss": 0.6909,
"step": 620
},
{
"epoch": 2.4177949709864603,
"grad_norm": 3.7303835054301593,
"learning_rate": 9.094878707236841e-08,
"loss": 0.6934,
"step": 625
},
{
"epoch": 2.437137330754352,
"grad_norm": 3.5630150859580425,
"learning_rate": 8.512386558088919e-08,
"loss": 0.7,
"step": 630
},
{
"epoch": 2.4564796905222437,
"grad_norm": 3.813667924982544,
"learning_rate": 7.947431444841452e-08,
"loss": 0.6933,
"step": 635
},
{
"epoch": 2.4758220502901356,
"grad_norm": 3.819792577178714,
"learning_rate": 7.400252177039784e-08,
"loss": 0.6949,
"step": 640
},
{
"epoch": 2.495164410058027,
"grad_norm": 3.9105258158809204,
"learning_rate": 6.871080050284394e-08,
"loss": 0.6971,
"step": 645
},
{
"epoch": 2.514506769825919,
"grad_norm": 3.864980953874583,
"learning_rate": 6.360138748461013e-08,
"loss": 0.7015,
"step": 650
},
{
"epoch": 2.514506769825919,
"eval_loss": 0.7927743792533875,
"eval_runtime": 127.5567,
"eval_samples_per_second": 57.621,
"eval_steps_per_second": 0.902,
"step": 650
},
{
"epoch": 2.5338491295938104,
"grad_norm": 3.8434289557296815,
"learning_rate": 5.867644249188247e-08,
"loss": 0.7014,
"step": 655
},
{
"epoch": 2.5531914893617023,
"grad_norm": 3.908654718811004,
"learning_rate": 5.3938047325226944e-08,
"loss": 0.6839,
"step": 660
},
{
"epoch": 2.5725338491295937,
"grad_norm": 3.778555696115051,
"learning_rate": 4.9388204929601326e-08,
"loss": 0.6854,
"step": 665
},
{
"epoch": 2.5918762088974856,
"grad_norm": 3.6881527806646406,
"learning_rate": 4.5028838547699346e-08,
"loss": 0.7061,
"step": 670
},
{
"epoch": 2.611218568665377,
"grad_norm": 3.8412216704216475,
"learning_rate": 4.0861790906985884e-08,
"loss": 0.676,
"step": 675
},
{
"epoch": 2.630560928433269,
"grad_norm": 3.769050419414534,
"learning_rate": 3.6888823440766214e-08,
"loss": 0.7027,
"step": 680
},
{
"epoch": 2.6499032882011604,
"grad_norm": 3.84774623904604,
"learning_rate": 3.311161554361874e-08,
"loss": 0.6912,
"step": 685
},
{
"epoch": 2.6692456479690523,
"grad_norm": 3.9446313135020596,
"learning_rate": 2.9531763861505964e-08,
"loss": 0.6756,
"step": 690
},
{
"epoch": 2.6885880077369437,
"grad_norm": 3.5761684361674004,
"learning_rate": 2.6150781616863794e-08,
"loss": 0.6914,
"step": 695
},
{
"epoch": 2.7079303675048356,
"grad_norm": 4.1393792533061715,
"learning_rate": 2.2970097968953994e-08,
"loss": 0.6837,
"step": 700
},
{
"epoch": 2.7079303675048356,
"eval_loss": 0.7920587658882141,
"eval_runtime": 127.7157,
"eval_samples_per_second": 57.55,
"eval_steps_per_second": 0.9,
"step": 700
},
{
"epoch": 2.7272727272727275,
"grad_norm": 3.9729587208028274,
"learning_rate": 1.9991057409751267e-08,
"loss": 0.6905,
"step": 705
},
{
"epoch": 2.746615087040619,
"grad_norm": 3.7929506007765017,
"learning_rate": 1.7214919195619125e-08,
"loss": 0.6752,
"step": 710
},
{
"epoch": 2.7659574468085104,
"grad_norm": 3.615765661856518,
"learning_rate": 1.4642856815015758e-08,
"loss": 0.6991,
"step": 715
},
{
"epoch": 2.7852998065764023,
"grad_norm": 3.884082014060051,
"learning_rate": 1.2275957492453692e-08,
"loss": 0.6846,
"step": 720
},
{
"epoch": 2.804642166344294,
"grad_norm": 3.590900524493669,
"learning_rate": 1.0115221728924706e-08,
"loss": 0.6801,
"step": 725
},
{
"epoch": 2.8239845261121856,
"grad_norm": 3.8191804987004025,
"learning_rate": 8.161562878982398e-09,
"loss": 0.7049,
"step": 730
},
{
"epoch": 2.843326885880077,
"grad_norm": 4.18976948653072,
"learning_rate": 6.415806764662524e-09,
"loss": 0.6871,
"step": 735
},
{
"epoch": 2.862669245647969,
"grad_norm": 3.7105498242235786,
"learning_rate": 4.8786913264033945e-09,
"loss": 0.6774,
"step": 740
},
{
"epoch": 2.882011605415861,
"grad_norm": 3.655342612843487,
"learning_rate": 3.5508663111147306e-09,
"loss": 0.6913,
"step": 745
},
{
"epoch": 2.9013539651837523,
"grad_norm": 3.64997500663328,
"learning_rate": 2.432892997526026e-09,
"loss": 0.687,
"step": 750
},
{
"epoch": 2.9013539651837523,
"eval_loss": 0.7918885350227356,
"eval_runtime": 127.697,
"eval_samples_per_second": 57.558,
"eval_steps_per_second": 0.901,
"step": 750
},
{
"epoch": 2.920696324951644,
"grad_norm": 3.720532393294838,
"learning_rate": 1.5252439589311107e-09,
"loss": 0.6823,
"step": 755
},
{
"epoch": 2.9400386847195357,
"grad_norm": 3.67654814171945,
"learning_rate": 8.283028634287203e-10,
"loss": 0.6823,
"step": 760
},
{
"epoch": 2.9593810444874276,
"grad_norm": 3.756704932951535,
"learning_rate": 3.4236431174428094e-10,
"loss": 0.6766,
"step": 765
},
{
"epoch": 2.978723404255319,
"grad_norm": 3.716812044061899,
"learning_rate": 6.763371270035457e-11,
"loss": 0.6781,
"step": 770
},
{
"epoch": 2.9941972920696323,
"step": 774,
"total_flos": 4563620855808000.0,
"train_loss": 0.7852395317043137,
"train_runtime": 12070.3191,
"train_samples_per_second": 16.441,
"train_steps_per_second": 0.064
}
],
"logging_steps": 5,
"max_steps": 774,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4563620855808000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}