{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9941972920696323,
"eval_steps": 50,
"global_step": 774,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019342359767891684,
"grad_norm": 17.769769703655175,
"learning_rate": 5e-07,
"loss": 1.7186,
"step": 5
},
{
"epoch": 0.03868471953578337,
"grad_norm": 13.081004196040046,
"learning_rate": 1e-06,
"loss": 1.5925,
"step": 10
},
{
"epoch": 0.058027079303675046,
"grad_norm": 7.089375759826574,
"learning_rate": 9.998943236640677e-07,
"loss": 1.2917,
"step": 15
},
{
"epoch": 0.07736943907156674,
"grad_norm": 4.226052382894484,
"learning_rate": 9.995773393262229e-07,
"loss": 1.152,
"step": 20
},
{
"epoch": 0.09671179883945841,
"grad_norm": 4.275706578437442,
"learning_rate": 9.99049180977439e-07,
"loss": 1.0662,
"step": 25
},
{
"epoch": 0.11605415860735009,
"grad_norm": 3.8773098526703986,
"learning_rate": 9.983100718730718e-07,
"loss": 1.0205,
"step": 30
},
{
"epoch": 0.13539651837524178,
"grad_norm": 3.5740777007667783,
"learning_rate": 9.973603244384906e-07,
"loss": 0.9948,
"step": 35
},
{
"epoch": 0.15473887814313347,
"grad_norm": 3.6003113336465318,
"learning_rate": 9.9620034013701e-07,
"loss": 0.9421,
"step": 40
},
{
"epoch": 0.17408123791102514,
"grad_norm": 3.583333545710018,
"learning_rate": 9.948306093001933e-07,
"loss": 0.9544,
"step": 45
},
{
"epoch": 0.19342359767891681,
"grad_norm": 3.7908364237614416,
"learning_rate": 9.932517109205849e-07,
"loss": 0.9541,
"step": 50
},
{
"epoch": 0.19342359767891681,
"eval_loss": 0.9273383021354675,
"eval_runtime": 127.7915,
"eval_samples_per_second": 57.516,
"eval_steps_per_second": 0.9,
"step": 50
},
{
"epoch": 0.2127659574468085,
"grad_norm": 3.613000346904536,
"learning_rate": 9.914643124069666e-07,
"loss": 0.906,
"step": 55
},
{
"epoch": 0.23210831721470018,
"grad_norm": 3.6167098335205994,
"learning_rate": 9.89469169302242e-07,
"loss": 0.9124,
"step": 60
},
{
"epoch": 0.2514506769825919,
"grad_norm": 3.6118346250444553,
"learning_rate": 9.872671249640626e-07,
"loss": 0.9371,
"step": 65
},
{
"epoch": 0.27079303675048355,
"grad_norm": 3.5717388877832117,
"learning_rate": 9.848591102083375e-07,
"loss": 0.9202,
"step": 70
},
{
"epoch": 0.2901353965183752,
"grad_norm": 3.713276627714593,
"learning_rate": 9.822461429157716e-07,
"loss": 0.9395,
"step": 75
},
{
"epoch": 0.30947775628626695,
"grad_norm": 3.8351121315501007,
"learning_rate": 9.794293276016023e-07,
"loss": 0.9039,
"step": 80
},
{
"epoch": 0.3288201160541586,
"grad_norm": 3.3458211116161043,
"learning_rate": 9.764098549487155e-07,
"loss": 0.8935,
"step": 85
},
{
"epoch": 0.3481624758220503,
"grad_norm": 3.6453614828666105,
"learning_rate": 9.731890013043367e-07,
"loss": 0.8922,
"step": 90
},
{
"epoch": 0.36750483558994196,
"grad_norm": 3.67687317911342,
"learning_rate": 9.697681281405128e-07,
"loss": 0.8675,
"step": 95
},
{
"epoch": 0.38684719535783363,
"grad_norm": 3.4055043252450132,
"learning_rate": 9.6614868147861e-07,
"loss": 0.8621,
"step": 100
},
{
"epoch": 0.38684719535783363,
"eval_loss": 0.8719474077224731,
"eval_runtime": 127.7997,
"eval_samples_per_second": 57.512,
"eval_steps_per_second": 0.9,
"step": 100
},
{
"epoch": 0.40618955512572535,
"grad_norm": 3.4010105290171797,
"learning_rate": 9.623321912780744e-07,
"loss": 0.8956,
"step": 105
},
{
"epoch": 0.425531914893617,
"grad_norm": 3.726255996119051,
"learning_rate": 9.583202707897073e-07,
"loss": 0.8645,
"step": 110
},
{
"epoch": 0.4448742746615087,
"grad_norm": 3.428308495841723,
"learning_rate": 9.54114615873738e-07,
"loss": 0.8526,
"step": 115
},
{
"epoch": 0.46421663442940037,
"grad_norm": 3.4777454179113914,
"learning_rate": 9.497170042829736e-07,
"loss": 0.873,
"step": 120
},
{
"epoch": 0.4835589941972921,
"grad_norm": 3.306069687386359,
"learning_rate": 9.451292949113329e-07,
"loss": 0.8646,
"step": 125
},
{
"epoch": 0.5029013539651838,
"grad_norm": 3.6787006187120315,
"learning_rate": 9.403534270080829e-07,
"loss": 0.8668,
"step": 130
},
{
"epoch": 0.5222437137330754,
"grad_norm": 3.8412614001222387,
"learning_rate": 9.353914193581072e-07,
"loss": 0.8522,
"step": 135
},
{
"epoch": 0.5415860735009671,
"grad_norm": 3.63279379492409,
"learning_rate": 9.302453694285548e-07,
"loss": 0.8552,
"step": 140
},
{
"epoch": 0.5609284332688588,
"grad_norm": 3.557168960609258,
"learning_rate": 9.249174524822305e-07,
"loss": 0.8569,
"step": 145
},
{
"epoch": 0.5802707930367504,
"grad_norm": 3.6248960952352673,
"learning_rate": 9.19409920658098e-07,
"loss": 0.8657,
"step": 150
},
{
"epoch": 0.5802707930367504,
"eval_loss": 0.8459084033966064,
"eval_runtime": 127.5971,
"eval_samples_per_second": 57.603,
"eval_steps_per_second": 0.901,
"step": 150
},
{
"epoch": 0.5996131528046421,
"grad_norm": 3.4411079524968815,
"learning_rate": 9.137251020192907e-07,
"loss": 0.8513,
"step": 155
},
{
"epoch": 0.6189555125725339,
"grad_norm": 3.399830662079635,
"learning_rate": 9.078653995690246e-07,
"loss": 0.8375,
"step": 160
},
{
"epoch": 0.6382978723404256,
"grad_norm": 3.60600371924574,
"learning_rate": 9.018332902348388e-07,
"loss": 0.8505,
"step": 165
},
{
"epoch": 0.6576402321083172,
"grad_norm": 3.331304480330792,
"learning_rate": 8.956313238215823e-07,
"loss": 0.8472,
"step": 170
},
{
"epoch": 0.6769825918762089,
"grad_norm": 3.6483016146418876,
"learning_rate": 8.892621219336e-07,
"loss": 0.8489,
"step": 175
},
{
"epoch": 0.6963249516441006,
"grad_norm": 3.421730346610911,
"learning_rate": 8.827283768665648e-07,
"loss": 0.8463,
"step": 180
},
{
"epoch": 0.7156673114119922,
"grad_norm": 3.7149068640377694,
"learning_rate": 8.760328504694317e-07,
"loss": 0.8303,
"step": 185
},
{
"epoch": 0.7350096711798839,
"grad_norm": 3.5148473544431953,
"learning_rate": 8.691783729769873e-07,
"loss": 0.8285,
"step": 190
},
{
"epoch": 0.7543520309477756,
"grad_norm": 3.8143875519937227,
"learning_rate": 8.621678418134963e-07,
"loss": 0.8267,
"step": 195
},
{
"epoch": 0.7736943907156673,
"grad_norm": 3.665364587167835,
"learning_rate": 8.550042203679439e-07,
"loss": 0.8232,
"step": 200
},
{
"epoch": 0.7736943907156673,
"eval_loss": 0.8286838531494141,
"eval_runtime": 127.6464,
"eval_samples_per_second": 57.581,
"eval_steps_per_second": 0.901,
"step": 200
},
{
"epoch": 0.793036750483559,
"grad_norm": 3.6010898270602216,
"learning_rate": 8.476905367413957e-07,
"loss": 0.8321,
"step": 205
},
{
"epoch": 0.8123791102514507,
"grad_norm": 3.4203770888001173,
"learning_rate": 8.402298824670029e-07,
"loss": 0.8261,
"step": 210
},
{
"epoch": 0.8317214700193424,
"grad_norm": 3.5555049932560707,
"learning_rate": 8.326254112031949e-07,
"loss": 0.83,
"step": 215
},
{
"epoch": 0.851063829787234,
"grad_norm": 3.5446967824907465,
"learning_rate": 8.248803374006113e-07,
"loss": 0.8263,
"step": 220
},
{
"epoch": 0.8704061895551257,
"grad_norm": 3.582387564660827,
"learning_rate": 8.169979349433358e-07,
"loss": 0.8233,
"step": 225
},
{
"epoch": 0.8897485493230174,
"grad_norm": 3.477179043854488,
"learning_rate": 8.089815357650089e-07,
"loss": 0.8224,
"step": 230
},
{
"epoch": 0.9090909090909091,
"grad_norm": 3.5228566797833176,
"learning_rate": 8.008345284404003e-07,
"loss": 0.823,
"step": 235
},
{
"epoch": 0.9284332688588007,
"grad_norm": 3.5831989534603235,
"learning_rate": 7.925603567530418e-07,
"loss": 0.8363,
"step": 240
},
{
"epoch": 0.9477756286266924,
"grad_norm": 3.5406680784685003,
"learning_rate": 7.841625182395206e-07,
"loss": 0.8396,
"step": 245
},
{
"epoch": 0.9671179883945842,
"grad_norm": 3.586661139550379,
"learning_rate": 7.756445627110522e-07,
"loss": 0.8179,
"step": 250
},
{
"epoch": 0.9671179883945842,
"eval_loss": 0.8162312507629395,
"eval_runtime": 127.7269,
"eval_samples_per_second": 57.545,
"eval_steps_per_second": 0.9,
"step": 250
},
{
"epoch": 0.9864603481624759,
"grad_norm": 3.583872815575711,
"learning_rate": 7.670100907529557e-07,
"loss": 0.8267,
"step": 255
},
{
"epoch": 1.0058027079303675,
"grad_norm": 3.4543546378461354,
"learning_rate": 7.582627522026685e-07,
"loss": 0.7992,
"step": 260
},
{
"epoch": 1.0251450676982592,
"grad_norm": 3.4033484910260197,
"learning_rate": 7.49406244606939e-07,
"loss": 0.7297,
"step": 265
},
{
"epoch": 1.0444874274661509,
"grad_norm": 3.4147333856503272,
"learning_rate": 7.404443116588547e-07,
"loss": 0.7652,
"step": 270
},
{
"epoch": 1.0638297872340425,
"grad_norm": 3.4496152121107486,
"learning_rate": 7.31380741615363e-07,
"loss": 0.7412,
"step": 275
},
{
"epoch": 1.0831721470019342,
"grad_norm": 3.327696184514914,
"learning_rate": 7.222193656959546e-07,
"loss": 0.7637,
"step": 280
},
{
"epoch": 1.1025145067698259,
"grad_norm": 3.7061830831988227,
"learning_rate": 7.129640564631863e-07,
"loss": 0.7634,
"step": 285
},
{
"epoch": 1.1218568665377175,
"grad_norm": 4.192820471653476,
"learning_rate": 7.036187261857288e-07,
"loss": 0.7502,
"step": 290
},
{
"epoch": 1.1411992263056092,
"grad_norm": 3.6982147551712887,
"learning_rate": 6.941873251846293e-07,
"loss": 0.7737,
"step": 295
},
{
"epoch": 1.1605415860735009,
"grad_norm": 3.6871631539510807,
"learning_rate": 6.846738401634898e-07,
"loss": 0.7558,
"step": 300
},
{
"epoch": 1.1605415860735009,
"eval_loss": 0.8122316598892212,
"eval_runtime": 127.708,
"eval_samples_per_second": 57.553,
"eval_steps_per_second": 0.9,
"step": 300
},
{
"epoch": 1.1798839458413926,
"grad_norm": 3.6376198365580934,
"learning_rate": 6.750822925232663e-07,
"loss": 0.7701,
"step": 305
},
{
"epoch": 1.1992263056092844,
"grad_norm": 3.5708682961272316,
"learning_rate": 6.654167366624008e-07,
"loss": 0.7641,
"step": 310
},
{
"epoch": 1.218568665377176,
"grad_norm": 3.71858745567317,
"learning_rate": 6.556812582630059e-07,
"loss": 0.7481,
"step": 315
},
{
"epoch": 1.2379110251450678,
"grad_norm": 3.3987189299432217,
"learning_rate": 6.458799725638248e-07,
"loss": 0.7494,
"step": 320
},
{
"epoch": 1.2572533849129595,
"grad_norm": 3.582889984477799,
"learning_rate": 6.36017022620698e-07,
"loss": 0.7291,
"step": 325
},
{
"epoch": 1.2765957446808511,
"grad_norm": 3.7598968173069776,
"learning_rate": 6.260965775552713e-07,
"loss": 0.7643,
"step": 330
},
{
"epoch": 1.2959381044487428,
"grad_norm": 3.489163050088922,
"learning_rate": 6.161228307926858e-07,
"loss": 0.7374,
"step": 335
},
{
"epoch": 1.3152804642166345,
"grad_norm": 3.75863899391321,
"learning_rate": 6.060999982889954e-07,
"loss": 0.7568,
"step": 340
},
{
"epoch": 1.3346228239845261,
"grad_norm": 3.3559127612246233,
"learning_rate": 5.960323167490588e-07,
"loss": 0.7453,
"step": 345
},
{
"epoch": 1.3539651837524178,
"grad_norm": 3.490356836359392,
"learning_rate": 5.859240418356614e-07,
"loss": 0.7602,
"step": 350
},
{
"epoch": 1.3539651837524178,
"eval_loss": 0.8059322834014893,
"eval_runtime": 127.7998,
"eval_samples_per_second": 57.512,
"eval_steps_per_second": 0.9,
"step": 350
},
{
"epoch": 1.3733075435203095,
"grad_norm": 3.6757853156395606,
"learning_rate": 5.757794463706253e-07,
"loss": 0.7523,
"step": 355
},
{
"epoch": 1.3926499032882012,
"grad_norm": 3.5063904211356385,
"learning_rate": 5.656028185286637e-07,
"loss": 0.7496,
"step": 360
},
{
"epoch": 1.4119922630560928,
"grad_norm": 3.4960857334989806,
"learning_rate": 5.553984600247463e-07,
"loss": 0.7647,
"step": 365
},
{
"epoch": 1.4313346228239845,
"grad_norm": 3.6440901222517152,
"learning_rate": 5.451706842957421e-07,
"loss": 0.718,
"step": 370
},
{
"epoch": 1.4506769825918762,
"grad_norm": 3.7711569042281576,
"learning_rate": 5.349238146771061e-07,
"loss": 0.7583,
"step": 375
},
{
"epoch": 1.4700193423597678,
"grad_norm": 3.4413207786627957,
"learning_rate": 5.246621825753827e-07,
"loss": 0.7435,
"step": 380
},
{
"epoch": 1.4893617021276595,
"grad_norm": 3.6324458377021265,
"learning_rate": 5.143901256372967e-07,
"loss": 0.7397,
"step": 385
},
{
"epoch": 1.5087040618955512,
"grad_norm": 3.5785578278016485,
"learning_rate": 5.041119859162068e-07,
"loss": 0.7295,
"step": 390
},
{
"epoch": 1.528046421663443,
"grad_norm": 3.5486429985289116,
"learning_rate": 4.938321080366968e-07,
"loss": 0.7484,
"step": 395
},
{
"epoch": 1.5473887814313345,
"grad_norm": 3.563717839228555,
"learning_rate": 4.835548373580792e-07,
"loss": 0.771,
"step": 400
},
{
"epoch": 1.5473887814313345,
"eval_loss": 0.7998485565185547,
"eval_runtime": 127.8334,
"eval_samples_per_second": 57.497,
"eval_steps_per_second": 0.9,
"step": 400
},
{
"epoch": 1.5667311411992264,
"grad_norm": 3.531231527366412,
"learning_rate": 4.73284518137589e-07,
"loss": 0.7472,
"step": 405
},
{
"epoch": 1.5860735009671179,
"grad_norm": 3.4703134540594354,
"learning_rate": 4.630254916940423e-07,
"loss": 0.7349,
"step": 410
},
{
"epoch": 1.6054158607350097,
"grad_norm": 3.571312117827743,
"learning_rate": 4.5278209457273825e-07,
"loss": 0.7539,
"step": 415
},
{
"epoch": 1.6247582205029012,
"grad_norm": 3.6699695284667304,
"learning_rate": 4.425586567123779e-07,
"loss": 0.7561,
"step": 420
},
{
"epoch": 1.644100580270793,
"grad_norm": 3.444249068608954,
"learning_rate": 4.3235949961477627e-07,
"loss": 0.7456,
"step": 425
},
{
"epoch": 1.6634429400386848,
"grad_norm": 3.42086843430337,
"learning_rate": 4.2218893451814e-07,
"loss": 0.7458,
"step": 430
},
{
"epoch": 1.6827852998065764,
"grad_norm": 3.3865347869200857,
"learning_rate": 4.120512605746842e-07,
"loss": 0.7394,
"step": 435
},
{
"epoch": 1.702127659574468,
"grad_norm": 3.677592972618459,
"learning_rate": 4.019507630333577e-07,
"loss": 0.7365,
"step": 440
},
{
"epoch": 1.7214700193423598,
"grad_norm": 3.6136504230202435,
"learning_rate": 3.9189171142844553e-07,
"loss": 0.7559,
"step": 445
},
{
"epoch": 1.7408123791102514,
"grad_norm": 3.572140075959248,
"learning_rate": 3.8187835777481375e-07,
"loss": 0.753,
"step": 450
},
{
"epoch": 1.7408123791102514,
"eval_loss": 0.7954617738723755,
"eval_runtime": 127.7956,
"eval_samples_per_second": 57.514,
"eval_steps_per_second": 0.9,
"step": 450
},
{
"epoch": 1.760154738878143,
"grad_norm": 3.4557270180643984,
"learning_rate": 3.7191493477056086e-07,
"loss": 0.7416,
"step": 455
},
{
"epoch": 1.7794970986460348,
"grad_norm": 3.5454869396159294,
"learning_rate": 3.620056540078323e-07,
"loss": 0.7465,
"step": 460
},
{
"epoch": 1.7988394584139265,
"grad_norm": 3.5193296679089197,
"learning_rate": 3.5215470419255897e-07,
"loss": 0.7381,
"step": 465
},
{
"epoch": 1.8181818181818183,
"grad_norm": 3.6300180309499055,
"learning_rate": 3.423662493738687e-07,
"loss": 0.7393,
"step": 470
},
{
"epoch": 1.8375241779497098,
"grad_norm": 3.6128465808218673,
"learning_rate": 3.3264442718392014e-07,
"loss": 0.7361,
"step": 475
},
{
"epoch": 1.8568665377176017,
"grad_norm": 3.8823278499726266,
"learning_rate": 3.229933470889038e-07,
"loss": 0.743,
"step": 480
},
{
"epoch": 1.8762088974854931,
"grad_norm": 3.4195861313789444,
"learning_rate": 3.134170886519486e-07,
"loss": 0.722,
"step": 485
},
{
"epoch": 1.895551257253385,
"grad_norm": 3.6240796400929005,
"learning_rate": 3.039196998086687e-07,
"loss": 0.7292,
"step": 490
},
{
"epoch": 1.9148936170212765,
"grad_norm": 3.602124125688223,
"learning_rate": 2.9450519515607963e-07,
"loss": 0.7402,
"step": 495
},
{
"epoch": 1.9342359767891684,
"grad_norm": 3.8410929027045757,
"learning_rate": 2.8517755425560663e-07,
"loss": 0.7479,
"step": 500
},
{
"epoch": 1.9342359767891684,
"eval_loss": 0.7919633984565735,
"eval_runtime": 127.71,
"eval_samples_per_second": 57.552,
"eval_steps_per_second": 0.9,
"step": 500
},
{
"epoch": 1.9535783365570598,
"grad_norm": 3.553013732530746,
"learning_rate": 2.7594071995090283e-07,
"loss": 0.7414,
"step": 505
},
{
"epoch": 1.9729206963249517,
"grad_norm": 3.726389314193614,
"learning_rate": 2.667985967011878e-07,
"loss": 0.7442,
"step": 510
},
{
"epoch": 1.9922630560928434,
"grad_norm": 3.6891205891519196,
"learning_rate": 2.577550489308123e-07,
"loss": 0.7445,
"step": 515
},
{
"epoch": 2.011605415860735,
"grad_norm": 4.183763879873082,
"learning_rate": 2.488138993957452e-07,
"loss": 0.7238,
"step": 520
},
{
"epoch": 2.0309477756286265,
"grad_norm": 3.5335122665404533,
"learning_rate": 2.3997892756767394e-07,
"loss": 0.7148,
"step": 525
},
{
"epoch": 2.0502901353965184,
"grad_norm": 3.877744660356318,
"learning_rate": 2.3125386803640183e-07,
"loss": 0.6955,
"step": 530
},
{
"epoch": 2.0696324951644103,
"grad_norm": 3.7193372520055847,
"learning_rate": 2.226424089312174e-07,
"loss": 0.7016,
"step": 535
},
{
"epoch": 2.0889748549323017,
"grad_norm": 3.6986475178104063,
"learning_rate": 2.1414819036190157e-07,
"loss": 0.7054,
"step": 540
},
{
"epoch": 2.1083172147001936,
"grad_norm": 3.7985188083265258,
"learning_rate": 2.057748028800344e-07,
"loss": 0.6801,
"step": 545
},
{
"epoch": 2.127659574468085,
"grad_norm": 3.812215835510966,
"learning_rate": 1.9752578596124952e-07,
"loss": 0.6735,
"step": 550
},
{
"epoch": 2.127659574468085,
"eval_loss": 0.7972328066825867,
"eval_runtime": 127.877,
"eval_samples_per_second": 57.477,
"eval_steps_per_second": 0.899,
"step": 550
},
{
"epoch": 2.147001934235977,
"grad_norm": 3.556112328434251,
"learning_rate": 1.8940462650907912e-07,
"loss": 0.6887,
"step": 555
},
{
"epoch": 2.1663442940038684,
"grad_norm": 4.031706169414501,
"learning_rate": 1.8141475738102086e-07,
"loss": 0.6829,
"step": 560
},
{
"epoch": 2.1856866537717603,
"grad_norm": 3.9038438081899636,
"learning_rate": 1.735595559374508e-07,
"loss": 0.701,
"step": 565
},
{
"epoch": 2.2050290135396517,
"grad_norm": 3.802389298862028,
"learning_rate": 1.6584234261399532e-07,
"loss": 0.6962,
"step": 570
},
{
"epoch": 2.2243713733075436,
"grad_norm": 3.95265234731058,
"learning_rate": 1.5826637951796474e-07,
"loss": 0.704,
"step": 575
},
{
"epoch": 2.243713733075435,
"grad_norm": 3.920278049666238,
"learning_rate": 1.5083486904944387e-07,
"loss": 0.6866,
"step": 580
},
{
"epoch": 2.263056092843327,
"grad_norm": 3.917007172675932,
"learning_rate": 1.4355095254761974e-07,
"loss": 0.6921,
"step": 585
},
{
"epoch": 2.2823984526112184,
"grad_norm": 3.7991457510336915,
"learning_rate": 1.3641770896292082e-07,
"loss": 0.6813,
"step": 590
},
{
"epoch": 2.3017408123791103,
"grad_norm": 3.687358486996167,
"learning_rate": 1.2943815355552851e-07,
"loss": 0.6928,
"step": 595
},
{
"epoch": 2.3210831721470018,
"grad_norm": 3.6543009032276026,
"learning_rate": 1.226152366208104e-07,
"loss": 0.7116,
"step": 600
},
{
"epoch": 2.3210831721470018,
"eval_loss": 0.7951865196228027,
"eval_runtime": 127.85,
"eval_samples_per_second": 57.489,
"eval_steps_per_second": 0.899,
"step": 600
},
{
"epoch": 2.3404255319148937,
"grad_norm": 3.8730917233152384,
"learning_rate": 1.1595184224221466e-07,
"loss": 0.692,
"step": 605
},
{
"epoch": 2.359767891682785,
"grad_norm": 4.03531309851571,
"learning_rate": 1.0945078707215221e-07,
"loss": 0.6865,
"step": 610
},
{
"epoch": 2.379110251450677,
"grad_norm": 3.872705003238953,
"learning_rate": 1.0311481914138371e-07,
"loss": 0.6985,
"step": 615
},
{
"epoch": 2.398452611218569,
"grad_norm": 3.720403831965154,
"learning_rate": 9.6946616697411e-08,
"loss": 0.6968,
"step": 620
},
{
"epoch": 2.4177949709864603,
"grad_norm": 3.8402045858860214,
"learning_rate": 9.094878707236841e-08,
"loss": 0.7002,
"step": 625
},
{
"epoch": 2.437137330754352,
"grad_norm": 3.870172963176105,
"learning_rate": 8.512386558088919e-08,
"loss": 0.7018,
"step": 630
},
{
"epoch": 2.4564796905222437,
"grad_norm": 3.844232920183782,
"learning_rate": 7.947431444841452e-08,
"loss": 0.6953,
"step": 635
},
{
"epoch": 2.4758220502901356,
"grad_norm": 3.9369438805219357,
"learning_rate": 7.400252177039784e-08,
"loss": 0.7023,
"step": 640
},
{
"epoch": 2.495164410058027,
"grad_norm": 3.7400884688082217,
"learning_rate": 6.871080050284394e-08,
"loss": 0.6822,
"step": 645
},
{
"epoch": 2.514506769825919,
"grad_norm": 3.6809188260562498,
"learning_rate": 6.360138748461013e-08,
"loss": 0.6822,
"step": 650
},
{
"epoch": 2.514506769825919,
"eval_loss": 0.794310986995697,
"eval_runtime": 128.0518,
"eval_samples_per_second": 57.399,
"eval_steps_per_second": 0.898,
"step": 650
},
{
"epoch": 2.5338491295938104,
"grad_norm": 4.16744242512269,
"learning_rate": 5.867644249188247e-08,
"loss": 0.6991,
"step": 655
},
{
"epoch": 2.5531914893617023,
"grad_norm": 3.851977029205454,
"learning_rate": 5.3938047325226944e-08,
"loss": 0.6986,
"step": 660
},
{
"epoch": 2.5725338491295937,
"grad_norm": 4.101916973231146,
"learning_rate": 4.9388204929601326e-08,
"loss": 0.6854,
"step": 665
},
{
"epoch": 2.5918762088974856,
"grad_norm": 3.8486651679301023,
"learning_rate": 4.5028838547699346e-08,
"loss": 0.6924,
"step": 670
},
{
"epoch": 2.611218568665377,
"grad_norm": 3.717552659671349,
"learning_rate": 4.0861790906985884e-08,
"loss": 0.6888,
"step": 675
},
{
"epoch": 2.630560928433269,
"grad_norm": 3.9259495383255314,
"learning_rate": 3.6888823440766214e-08,
"loss": 0.6914,
"step": 680
},
{
"epoch": 2.6499032882011604,
"grad_norm": 3.627999141521453,
"learning_rate": 3.311161554361874e-08,
"loss": 0.6773,
"step": 685
},
{
"epoch": 2.6692456479690523,
"grad_norm": 3.9122944066082934,
"learning_rate": 2.9531763861505964e-08,
"loss": 0.6908,
"step": 690
},
{
"epoch": 2.6885880077369437,
"grad_norm": 3.7957015499021547,
"learning_rate": 2.6150781616863794e-08,
"loss": 0.6638,
"step": 695
},
{
"epoch": 2.7079303675048356,
"grad_norm": 3.9116633525167033,
"learning_rate": 2.2970097968953994e-08,
"loss": 0.6861,
"step": 700
},
{
"epoch": 2.7079303675048356,
"eval_loss": 0.7937687635421753,
"eval_runtime": 127.6414,
"eval_samples_per_second": 57.583,
"eval_steps_per_second": 0.901,
"step": 700
},
{
"epoch": 2.7272727272727275,
"grad_norm": 3.7729712553297987,
"learning_rate": 1.9991057409751267e-08,
"loss": 0.6911,
"step": 705
},
{
"epoch": 2.746615087040619,
"grad_norm": 3.834540719290449,
"learning_rate": 1.7214919195619125e-08,
"loss": 0.6909,
"step": 710
},
{
"epoch": 2.7659574468085104,
"grad_norm": 3.7151903198724,
"learning_rate": 1.4642856815015758e-08,
"loss": 0.6999,
"step": 715
},
{
"epoch": 2.7852998065764023,
"grad_norm": 3.973205161583112,
"learning_rate": 1.2275957492453692e-08,
"loss": 0.6933,
"step": 720
},
{
"epoch": 2.804642166344294,
"grad_norm": 3.8696685699188507,
"learning_rate": 1.0115221728924706e-08,
"loss": 0.6775,
"step": 725
},
{
"epoch": 2.8239845261121856,
"grad_norm": 4.076911148311998,
"learning_rate": 8.161562878982398e-09,
"loss": 0.6748,
"step": 730
},
{
"epoch": 2.843326885880077,
"grad_norm": 3.6416916556998817,
"learning_rate": 6.415806764662524e-09,
"loss": 0.6759,
"step": 735
},
{
"epoch": 2.862669245647969,
"grad_norm": 3.9225986402250754,
"learning_rate": 4.8786913264033945e-09,
"loss": 0.698,
"step": 740
},
{
"epoch": 2.882011605415861,
"grad_norm": 4.048565845552162,
"learning_rate": 3.5508663111147306e-09,
"loss": 0.6856,
"step": 745
},
{
"epoch": 2.9013539651837523,
"grad_norm": 4.082712170990346,
"learning_rate": 2.432892997526026e-09,
"loss": 0.6989,
"step": 750
},
{
"epoch": 2.9013539651837523,
"eval_loss": 0.7935436367988586,
"eval_runtime": 127.7461,
"eval_samples_per_second": 57.536,
"eval_steps_per_second": 0.9,
"step": 750
},
{
"epoch": 2.920696324951644,
"grad_norm": 3.8537422532400827,
"learning_rate": 1.5252439589311107e-09,
"loss": 0.6929,
"step": 755
},
{
"epoch": 2.9400386847195357,
"grad_norm": 3.841072555465488,
"learning_rate": 8.283028634287203e-10,
"loss": 0.6963,
"step": 760
},
{
"epoch": 2.9593810444874276,
"grad_norm": 3.741338861355809,
"learning_rate": 3.4236431174428094e-10,
"loss": 0.683,
"step": 765
},
{
"epoch": 2.978723404255319,
"grad_norm": 3.962628716770498,
"learning_rate": 6.763371270035457e-11,
"loss": 0.684,
"step": 770
},
{
"epoch": 2.9941972920696323,
"step": 774,
"total_flos": 4563620855808000.0,
"train_loss": 0.7856120132968716,
"train_runtime": 12058.8177,
"train_samples_per_second": 16.457,
"train_steps_per_second": 0.064
}
],
"logging_steps": 5,
"max_steps": 774,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4563620855808000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}