{
"best_metric": 1.429347276687622,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.11825572801182557,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005912786400591279,
"grad_norm": 0.9726289510726929,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.4873,
"step": 1
},
{
"epoch": 0.0005912786400591279,
"eval_loss": 1.8040848970413208,
"eval_runtime": 237.2932,
"eval_samples_per_second": 12.006,
"eval_steps_per_second": 6.005,
"step": 1
},
{
"epoch": 0.0011825572801182557,
"grad_norm": 0.9772027134895325,
"learning_rate": 6.666666666666667e-06,
"loss": 1.2319,
"step": 2
},
{
"epoch": 0.0017738359201773836,
"grad_norm": 1.1119064092636108,
"learning_rate": 1e-05,
"loss": 1.1924,
"step": 3
},
{
"epoch": 0.0023651145602365115,
"grad_norm": 1.0203392505645752,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.0968,
"step": 4
},
{
"epoch": 0.0029563932002956393,
"grad_norm": 1.0077756643295288,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.28,
"step": 5
},
{
"epoch": 0.003547671840354767,
"grad_norm": 1.048666000366211,
"learning_rate": 2e-05,
"loss": 1.2347,
"step": 6
},
{
"epoch": 0.004138950480413895,
"grad_norm": 1.0571691989898682,
"learning_rate": 2.3333333333333336e-05,
"loss": 1.3182,
"step": 7
},
{
"epoch": 0.004730229120473023,
"grad_norm": 0.7537508606910706,
"learning_rate": 2.6666666666666667e-05,
"loss": 0.9406,
"step": 8
},
{
"epoch": 0.005321507760532151,
"grad_norm": 0.8735187649726868,
"learning_rate": 3e-05,
"loss": 1.2131,
"step": 9
},
{
"epoch": 0.005912786400591279,
"grad_norm": 0.7849937081336975,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.1487,
"step": 10
},
{
"epoch": 0.0065040650406504065,
"grad_norm": 0.6883115768432617,
"learning_rate": 3.6666666666666666e-05,
"loss": 1.0974,
"step": 11
},
{
"epoch": 0.007095343680709534,
"grad_norm": 0.7022745013237,
"learning_rate": 4e-05,
"loss": 1.1683,
"step": 12
},
{
"epoch": 0.007686622320768662,
"grad_norm": 0.7865653038024902,
"learning_rate": 4.3333333333333334e-05,
"loss": 1.1794,
"step": 13
},
{
"epoch": 0.00827790096082779,
"grad_norm": 0.8761787414550781,
"learning_rate": 4.666666666666667e-05,
"loss": 1.2902,
"step": 14
},
{
"epoch": 0.008869179600886918,
"grad_norm": 0.8201810717582703,
"learning_rate": 5e-05,
"loss": 1.2349,
"step": 15
},
{
"epoch": 0.009460458240946046,
"grad_norm": 0.7110611796379089,
"learning_rate": 5.333333333333333e-05,
"loss": 1.2526,
"step": 16
},
{
"epoch": 0.010051736881005174,
"grad_norm": 0.634364902973175,
"learning_rate": 5.666666666666667e-05,
"loss": 1.2653,
"step": 17
},
{
"epoch": 0.010643015521064302,
"grad_norm": 0.7168411612510681,
"learning_rate": 6e-05,
"loss": 1.2168,
"step": 18
},
{
"epoch": 0.01123429416112343,
"grad_norm": 0.6394386291503906,
"learning_rate": 6.333333333333333e-05,
"loss": 1.28,
"step": 19
},
{
"epoch": 0.011825572801182557,
"grad_norm": 0.6675559878349304,
"learning_rate": 6.666666666666667e-05,
"loss": 1.3341,
"step": 20
},
{
"epoch": 0.012416851441241685,
"grad_norm": 0.7060176134109497,
"learning_rate": 7e-05,
"loss": 1.3246,
"step": 21
},
{
"epoch": 0.013008130081300813,
"grad_norm": 0.8127558827400208,
"learning_rate": 7.333333333333333e-05,
"loss": 1.4198,
"step": 22
},
{
"epoch": 0.01359940872135994,
"grad_norm": 0.7225644588470459,
"learning_rate": 7.666666666666667e-05,
"loss": 1.2954,
"step": 23
},
{
"epoch": 0.014190687361419069,
"grad_norm": 0.6734681725502014,
"learning_rate": 8e-05,
"loss": 1.2682,
"step": 24
},
{
"epoch": 0.014781966001478197,
"grad_norm": 0.815269947052002,
"learning_rate": 8.333333333333334e-05,
"loss": 1.5616,
"step": 25
},
{
"epoch": 0.015373244641537324,
"grad_norm": 0.7292295098304749,
"learning_rate": 8.666666666666667e-05,
"loss": 1.5186,
"step": 26
},
{
"epoch": 0.015964523281596452,
"grad_norm": 0.7140643000602722,
"learning_rate": 9e-05,
"loss": 1.3804,
"step": 27
},
{
"epoch": 0.01655580192165558,
"grad_norm": 0.7823128700256348,
"learning_rate": 9.333333333333334e-05,
"loss": 1.4618,
"step": 28
},
{
"epoch": 0.017147080561714708,
"grad_norm": 0.7901281118392944,
"learning_rate": 9.666666666666667e-05,
"loss": 1.431,
"step": 29
},
{
"epoch": 0.017738359201773836,
"grad_norm": 0.8098665475845337,
"learning_rate": 0.0001,
"loss": 1.4555,
"step": 30
},
{
"epoch": 0.018329637841832964,
"grad_norm": 0.7486283779144287,
"learning_rate": 9.999146252290264e-05,
"loss": 1.4853,
"step": 31
},
{
"epoch": 0.01892091648189209,
"grad_norm": 0.8610166907310486,
"learning_rate": 9.996585300715116e-05,
"loss": 1.606,
"step": 32
},
{
"epoch": 0.01951219512195122,
"grad_norm": 0.7736566066741943,
"learning_rate": 9.99231801983717e-05,
"loss": 1.3497,
"step": 33
},
{
"epoch": 0.020103473762010347,
"grad_norm": 0.8386912941932678,
"learning_rate": 9.986345866928941e-05,
"loss": 1.4455,
"step": 34
},
{
"epoch": 0.020694752402069475,
"grad_norm": 0.9145824909210205,
"learning_rate": 9.978670881475172e-05,
"loss": 1.7299,
"step": 35
},
{
"epoch": 0.021286031042128603,
"grad_norm": 0.9461202025413513,
"learning_rate": 9.96929568447637e-05,
"loss": 1.6947,
"step": 36
},
{
"epoch": 0.02187730968218773,
"grad_norm": 0.9634538888931274,
"learning_rate": 9.958223477553714e-05,
"loss": 1.86,
"step": 37
},
{
"epoch": 0.02246858832224686,
"grad_norm": 0.9322118759155273,
"learning_rate": 9.94545804185573e-05,
"loss": 1.8198,
"step": 38
},
{
"epoch": 0.023059866962305987,
"grad_norm": 0.9747868180274963,
"learning_rate": 9.931003736767013e-05,
"loss": 1.9704,
"step": 39
},
{
"epoch": 0.023651145602365115,
"grad_norm": 0.9679445624351501,
"learning_rate": 9.91486549841951e-05,
"loss": 1.9984,
"step": 40
},
{
"epoch": 0.024242424242424242,
"grad_norm": 1.168147087097168,
"learning_rate": 9.89704883800683e-05,
"loss": 1.7387,
"step": 41
},
{
"epoch": 0.02483370288248337,
"grad_norm": 0.9986356496810913,
"learning_rate": 9.877559839902184e-05,
"loss": 1.8842,
"step": 42
},
{
"epoch": 0.025424981522542498,
"grad_norm": 1.2003281116485596,
"learning_rate": 9.85640515958057e-05,
"loss": 1.9681,
"step": 43
},
{
"epoch": 0.026016260162601626,
"grad_norm": 1.135547399520874,
"learning_rate": 9.833592021345937e-05,
"loss": 2.0643,
"step": 44
},
{
"epoch": 0.026607538802660754,
"grad_norm": 1.413825511932373,
"learning_rate": 9.809128215864097e-05,
"loss": 2.184,
"step": 45
},
{
"epoch": 0.02719881744271988,
"grad_norm": 1.2450569868087769,
"learning_rate": 9.783022097502204e-05,
"loss": 1.9967,
"step": 46
},
{
"epoch": 0.02779009608277901,
"grad_norm": 1.3000105619430542,
"learning_rate": 9.755282581475769e-05,
"loss": 2.0032,
"step": 47
},
{
"epoch": 0.028381374722838137,
"grad_norm": 1.388758659362793,
"learning_rate": 9.725919140804099e-05,
"loss": 2.1693,
"step": 48
},
{
"epoch": 0.028972653362897265,
"grad_norm": 1.5098059177398682,
"learning_rate": 9.694941803075283e-05,
"loss": 2.0612,
"step": 49
},
{
"epoch": 0.029563932002956393,
"grad_norm": 1.8700934648513794,
"learning_rate": 9.662361147021779e-05,
"loss": 1.9784,
"step": 50
},
{
"epoch": 0.029563932002956393,
"eval_loss": 1.555537223815918,
"eval_runtime": 238.9307,
"eval_samples_per_second": 11.924,
"eval_steps_per_second": 5.964,
"step": 50
},
{
"epoch": 0.03015521064301552,
"grad_norm": 1.1767340898513794,
"learning_rate": 9.628188298907782e-05,
"loss": 1.5257,
"step": 51
},
{
"epoch": 0.03074648928307465,
"grad_norm": 0.8743080496788025,
"learning_rate": 9.592434928729616e-05,
"loss": 1.1786,
"step": 52
},
{
"epoch": 0.03133776792313378,
"grad_norm": 0.7236108779907227,
"learning_rate": 9.555113246230442e-05,
"loss": 1.1467,
"step": 53
},
{
"epoch": 0.031929046563192905,
"grad_norm": 0.5204956531524658,
"learning_rate": 9.516235996730645e-05,
"loss": 0.9421,
"step": 54
},
{
"epoch": 0.032520325203252036,
"grad_norm": 0.4584566056728363,
"learning_rate": 9.475816456775313e-05,
"loss": 1.1128,
"step": 55
},
{
"epoch": 0.03311160384331116,
"grad_norm": 0.4526122510433197,
"learning_rate": 9.43386842960031e-05,
"loss": 1.0164,
"step": 56
},
{
"epoch": 0.03370288248337029,
"grad_norm": 0.4665258228778839,
"learning_rate": 9.39040624041849e-05,
"loss": 1.0138,
"step": 57
},
{
"epoch": 0.034294161123429416,
"grad_norm": 0.465076744556427,
"learning_rate": 9.345444731527642e-05,
"loss": 1.0276,
"step": 58
},
{
"epoch": 0.03488543976348855,
"grad_norm": 0.527400553226471,
"learning_rate": 9.298999257241863e-05,
"loss": 1.1934,
"step": 59
},
{
"epoch": 0.03547671840354767,
"grad_norm": 0.49932149052619934,
"learning_rate": 9.251085678648072e-05,
"loss": 1.1936,
"step": 60
},
{
"epoch": 0.0360679970436068,
"grad_norm": 0.4894202947616577,
"learning_rate": 9.201720358189464e-05,
"loss": 1.294,
"step": 61
},
{
"epoch": 0.03665927568366593,
"grad_norm": 0.49566730856895447,
"learning_rate": 9.150920154077754e-05,
"loss": 1.1933,
"step": 62
},
{
"epoch": 0.03725055432372506,
"grad_norm": 0.5004077553749084,
"learning_rate": 9.098702414536107e-05,
"loss": 0.9939,
"step": 63
},
{
"epoch": 0.03784183296378418,
"grad_norm": 0.537868320941925,
"learning_rate": 9.045084971874738e-05,
"loss": 1.196,
"step": 64
},
{
"epoch": 0.038433111603843315,
"grad_norm": 0.49859926104545593,
"learning_rate": 8.9900861364012e-05,
"loss": 1.2272,
"step": 65
},
{
"epoch": 0.03902439024390244,
"grad_norm": 0.5565657019615173,
"learning_rate": 8.933724690167417e-05,
"loss": 1.0484,
"step": 66
},
{
"epoch": 0.03961566888396157,
"grad_norm": 0.49499908089637756,
"learning_rate": 8.876019880555649e-05,
"loss": 1.2345,
"step": 67
},
{
"epoch": 0.040206947524020695,
"grad_norm": 0.5242329835891724,
"learning_rate": 8.816991413705516e-05,
"loss": 1.3664,
"step": 68
},
{
"epoch": 0.040798226164079826,
"grad_norm": 0.5149632692337036,
"learning_rate": 8.756659447784368e-05,
"loss": 1.2817,
"step": 69
},
{
"epoch": 0.04138950480413895,
"grad_norm": 0.51432204246521,
"learning_rate": 8.695044586103296e-05,
"loss": 1.2525,
"step": 70
},
{
"epoch": 0.04198078344419808,
"grad_norm": 0.5460650324821472,
"learning_rate": 8.632167870081121e-05,
"loss": 1.2673,
"step": 71
},
{
"epoch": 0.042572062084257206,
"grad_norm": 0.6104385256767273,
"learning_rate": 8.568050772058762e-05,
"loss": 1.3757,
"step": 72
},
{
"epoch": 0.04316334072431634,
"grad_norm": 0.5687586069107056,
"learning_rate": 8.502715187966455e-05,
"loss": 1.2904,
"step": 73
},
{
"epoch": 0.04375461936437546,
"grad_norm": 0.5552237629890442,
"learning_rate": 8.436183429846313e-05,
"loss": 1.2907,
"step": 74
},
{
"epoch": 0.04434589800443459,
"grad_norm": 0.5990892648696899,
"learning_rate": 8.368478218232787e-05,
"loss": 1.3684,
"step": 75
},
{
"epoch": 0.04493717664449372,
"grad_norm": 0.6610813140869141,
"learning_rate": 8.299622674393614e-05,
"loss": 1.5621,
"step": 76
},
{
"epoch": 0.04552845528455285,
"grad_norm": 0.6220623850822449,
"learning_rate": 8.229640312433937e-05,
"loss": 1.4063,
"step": 77
},
{
"epoch": 0.04611973392461197,
"grad_norm": 0.692918598651886,
"learning_rate": 8.158555031266254e-05,
"loss": 1.4554,
"step": 78
},
{
"epoch": 0.046711012564671105,
"grad_norm": 0.6769721508026123,
"learning_rate": 8.086391106448965e-05,
"loss": 1.5272,
"step": 79
},
{
"epoch": 0.04730229120473023,
"grad_norm": 0.704415500164032,
"learning_rate": 8.013173181896283e-05,
"loss": 1.5395,
"step": 80
},
{
"epoch": 0.04789356984478936,
"grad_norm": 0.6997012495994568,
"learning_rate": 7.938926261462366e-05,
"loss": 1.6002,
"step": 81
},
{
"epoch": 0.048484848484848485,
"grad_norm": 0.7182490825653076,
"learning_rate": 7.863675700402526e-05,
"loss": 1.6381,
"step": 82
},
{
"epoch": 0.049076127124907616,
"grad_norm": 0.7177379727363586,
"learning_rate": 7.787447196714427e-05,
"loss": 1.6261,
"step": 83
},
{
"epoch": 0.04966740576496674,
"grad_norm": 0.8007391095161438,
"learning_rate": 7.710266782362247e-05,
"loss": 1.657,
"step": 84
},
{
"epoch": 0.05025868440502587,
"grad_norm": 0.7926273941993713,
"learning_rate": 7.63216081438678e-05,
"loss": 1.6578,
"step": 85
},
{
"epoch": 0.050849963045084996,
"grad_norm": 0.8020119667053223,
"learning_rate": 7.553155965904535e-05,
"loss": 1.6819,
"step": 86
},
{
"epoch": 0.05144124168514413,
"grad_norm": 0.8183744549751282,
"learning_rate": 7.473279216998895e-05,
"loss": 1.7629,
"step": 87
},
{
"epoch": 0.05203252032520325,
"grad_norm": 0.9529420733451843,
"learning_rate": 7.392557845506432e-05,
"loss": 1.7812,
"step": 88
},
{
"epoch": 0.05262379896526238,
"grad_norm": 0.9576981663703918,
"learning_rate": 7.311019417701566e-05,
"loss": 1.8943,
"step": 89
},
{
"epoch": 0.05321507760532151,
"grad_norm": 0.9187182784080505,
"learning_rate": 7.228691778882693e-05,
"loss": 1.8159,
"step": 90
},
{
"epoch": 0.05380635624538064,
"grad_norm": 0.8962719440460205,
"learning_rate": 7.145603043863045e-05,
"loss": 1.7825,
"step": 91
},
{
"epoch": 0.05439763488543976,
"grad_norm": 1.0772016048431396,
"learning_rate": 7.061781587369519e-05,
"loss": 2.2153,
"step": 92
},
{
"epoch": 0.054988913525498895,
"grad_norm": 0.9609599709510803,
"learning_rate": 6.977256034352712e-05,
"loss": 1.8156,
"step": 93
},
{
"epoch": 0.05558019216555802,
"grad_norm": 0.9851707816123962,
"learning_rate": 6.892055250211552e-05,
"loss": 1.8342,
"step": 94
},
{
"epoch": 0.05617147080561715,
"grad_norm": 1.048850178718567,
"learning_rate": 6.806208330935766e-05,
"loss": 2.1506,
"step": 95
},
{
"epoch": 0.056762749445676275,
"grad_norm": 1.2707563638687134,
"learning_rate": 6.719744593169641e-05,
"loss": 2.2091,
"step": 96
},
{
"epoch": 0.057354028085735406,
"grad_norm": 1.1597068309783936,
"learning_rate": 6.632693564200416e-05,
"loss": 1.9979,
"step": 97
},
{
"epoch": 0.05794530672579453,
"grad_norm": 1.3379724025726318,
"learning_rate": 6.545084971874738e-05,
"loss": 2.2064,
"step": 98
},
{
"epoch": 0.05853658536585366,
"grad_norm": 1.2999430894851685,
"learning_rate": 6.456948734446624e-05,
"loss": 2.262,
"step": 99
},
{
"epoch": 0.059127864005912786,
"grad_norm": 1.5316917896270752,
"learning_rate": 6.368314950360415e-05,
"loss": 2.2301,
"step": 100
},
{
"epoch": 0.059127864005912786,
"eval_loss": 1.5102187395095825,
"eval_runtime": 238.8457,
"eval_samples_per_second": 11.928,
"eval_steps_per_second": 5.966,
"step": 100
},
{
"epoch": 0.05971914264597192,
"grad_norm": 0.6291592717170715,
"learning_rate": 6.279213887972179e-05,
"loss": 1.2329,
"step": 101
},
{
"epoch": 0.06031042128603104,
"grad_norm": 0.6495620012283325,
"learning_rate": 6.189675975213094e-05,
"loss": 1.1084,
"step": 102
},
{
"epoch": 0.06090169992609017,
"grad_norm": 0.6183136701583862,
"learning_rate": 6.099731789198344e-05,
"loss": 1.1053,
"step": 103
},
{
"epoch": 0.0614929785661493,
"grad_norm": 0.49170196056365967,
"learning_rate": 6.009412045785051e-05,
"loss": 1.1741,
"step": 104
},
{
"epoch": 0.06208425720620843,
"grad_norm": 0.4875796139240265,
"learning_rate": 5.918747589082853e-05,
"loss": 1.1359,
"step": 105
},
{
"epoch": 0.06267553584626756,
"grad_norm": 0.42337316274642944,
"learning_rate": 5.82776938092065e-05,
"loss": 0.9936,
"step": 106
},
{
"epoch": 0.06326681448632668,
"grad_norm": 0.4198471009731293,
"learning_rate": 5.736508490273188e-05,
"loss": 1.0373,
"step": 107
},
{
"epoch": 0.06385809312638581,
"grad_norm": 0.3972650468349457,
"learning_rate": 5.644996082651017e-05,
"loss": 1.0443,
"step": 108
},
{
"epoch": 0.06444937176644494,
"grad_norm": 0.4315491318702698,
"learning_rate": 5.553263409457504e-05,
"loss": 1.0413,
"step": 109
},
{
"epoch": 0.06504065040650407,
"grad_norm": 0.4730534553527832,
"learning_rate": 5.4613417973165106e-05,
"loss": 1.1193,
"step": 110
},
{
"epoch": 0.06563192904656319,
"grad_norm": 0.497887521982193,
"learning_rate": 5.3692626373743706e-05,
"loss": 1.2155,
"step": 111
},
{
"epoch": 0.06622320768662232,
"grad_norm": 0.44312167167663574,
"learning_rate": 5.27705737457985e-05,
"loss": 1.1505,
"step": 112
},
{
"epoch": 0.06681448632668145,
"grad_norm": 0.44145482778549194,
"learning_rate": 5.184757496945726e-05,
"loss": 0.9454,
"step": 113
},
{
"epoch": 0.06740576496674058,
"grad_norm": 0.43695953488349915,
"learning_rate": 5.092394524795649e-05,
"loss": 1.1793,
"step": 114
},
{
"epoch": 0.0679970436067997,
"grad_norm": 0.44189295172691345,
"learning_rate": 5e-05,
"loss": 1.1351,
"step": 115
},
{
"epoch": 0.06858832224685883,
"grad_norm": 0.47619709372520447,
"learning_rate": 4.907605475204352e-05,
"loss": 1.0839,
"step": 116
},
{
"epoch": 0.06917960088691796,
"grad_norm": 0.46892696619033813,
"learning_rate": 4.8152425030542766e-05,
"loss": 1.1188,
"step": 117
},
{
"epoch": 0.0697708795269771,
"grad_norm": 0.4708046317100525,
"learning_rate": 4.72294262542015e-05,
"loss": 1.2853,
"step": 118
},
{
"epoch": 0.07036215816703621,
"grad_norm": 0.4775142967700958,
"learning_rate": 4.6307373626256306e-05,
"loss": 1.2427,
"step": 119
},
{
"epoch": 0.07095343680709534,
"grad_norm": 0.4797629117965698,
"learning_rate": 4.5386582026834906e-05,
"loss": 1.2563,
"step": 120
},
{
"epoch": 0.07154471544715447,
"grad_norm": 0.5305618047714233,
"learning_rate": 4.446736590542497e-05,
"loss": 1.2615,
"step": 121
},
{
"epoch": 0.0721359940872136,
"grad_norm": 0.5574907660484314,
"learning_rate": 4.3550039173489845e-05,
"loss": 1.395,
"step": 122
},
{
"epoch": 0.07272727272727272,
"grad_norm": 0.5033929944038391,
"learning_rate": 4.2634915097268115e-05,
"loss": 1.1613,
"step": 123
},
{
"epoch": 0.07331855136733186,
"grad_norm": 0.5360080003738403,
"learning_rate": 4.1722306190793495e-05,
"loss": 1.4785,
"step": 124
},
{
"epoch": 0.07390983000739099,
"grad_norm": 0.5265625715255737,
"learning_rate": 4.0812524109171476e-05,
"loss": 1.2515,
"step": 125
},
{
"epoch": 0.07450110864745012,
"grad_norm": 0.6063189506530762,
"learning_rate": 3.99058795421495e-05,
"loss": 1.5267,
"step": 126
},
{
"epoch": 0.07509238728750924,
"grad_norm": 0.6113285422325134,
"learning_rate": 3.9002682108016585e-05,
"loss": 1.3004,
"step": 127
},
{
"epoch": 0.07568366592756837,
"grad_norm": 0.6226088404655457,
"learning_rate": 3.8103240247869075e-05,
"loss": 1.4766,
"step": 128
},
{
"epoch": 0.0762749445676275,
"grad_norm": 0.6256705522537231,
"learning_rate": 3.720786112027822e-05,
"loss": 1.4295,
"step": 129
},
{
"epoch": 0.07686622320768663,
"grad_norm": 0.6089186072349548,
"learning_rate": 3.631685049639586e-05,
"loss": 1.3509,
"step": 130
},
{
"epoch": 0.07745750184774575,
"grad_norm": 0.6548407077789307,
"learning_rate": 3.543051265553377e-05,
"loss": 1.6872,
"step": 131
},
{
"epoch": 0.07804878048780488,
"grad_norm": 0.7260014414787292,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.7412,
"step": 132
},
{
"epoch": 0.07864005912786401,
"grad_norm": 0.7247932553291321,
"learning_rate": 3.367306435799584e-05,
"loss": 1.6406,
"step": 133
},
{
"epoch": 0.07923133776792314,
"grad_norm": 0.7475252747535706,
"learning_rate": 3.2802554068303596e-05,
"loss": 1.7114,
"step": 134
},
{
"epoch": 0.07982261640798226,
"grad_norm": 0.8387429714202881,
"learning_rate": 3.1937916690642356e-05,
"loss": 1.738,
"step": 135
},
{
"epoch": 0.08041389504804139,
"grad_norm": 0.8532408475875854,
"learning_rate": 3.107944749788449e-05,
"loss": 1.9453,
"step": 136
},
{
"epoch": 0.08100517368810052,
"grad_norm": 0.7722581624984741,
"learning_rate": 3.0227439656472877e-05,
"loss": 1.6583,
"step": 137
},
{
"epoch": 0.08159645232815965,
"grad_norm": 0.7713450193405151,
"learning_rate": 2.9382184126304834e-05,
"loss": 1.7002,
"step": 138
},
{
"epoch": 0.08218773096821877,
"grad_norm": 0.8988615870475769,
"learning_rate": 2.8543969561369556e-05,
"loss": 1.9302,
"step": 139
},
{
"epoch": 0.0827790096082779,
"grad_norm": 1.005490779876709,
"learning_rate": 2.771308221117309e-05,
"loss": 2.0785,
"step": 140
},
{
"epoch": 0.08337028824833703,
"grad_norm": 0.9457467198371887,
"learning_rate": 2.688980582298435e-05,
"loss": 1.8597,
"step": 141
},
{
"epoch": 0.08396156688839616,
"grad_norm": 1.0314273834228516,
"learning_rate": 2.607442154493568e-05,
"loss": 2.0085,
"step": 142
},
{
"epoch": 0.08455284552845528,
"grad_norm": 0.9571870565414429,
"learning_rate": 2.5267207830011068e-05,
"loss": 1.9062,
"step": 143
},
{
"epoch": 0.08514412416851441,
"grad_norm": 1.1800788640975952,
"learning_rate": 2.446844034095466e-05,
"loss": 1.9822,
"step": 144
},
{
"epoch": 0.08573540280857354,
"grad_norm": 1.0346328020095825,
"learning_rate": 2.3678391856132204e-05,
"loss": 1.9708,
"step": 145
},
{
"epoch": 0.08632668144863268,
"grad_norm": 1.2094911336898804,
"learning_rate": 2.2897332176377528e-05,
"loss": 2.021,
"step": 146
},
{
"epoch": 0.08691796008869179,
"grad_norm": 1.429420828819275,
"learning_rate": 2.2125528032855724e-05,
"loss": 2.3484,
"step": 147
},
{
"epoch": 0.08750923872875092,
"grad_norm": 1.514777660369873,
"learning_rate": 2.136324299597474e-05,
"loss": 2.3654,
"step": 148
},
{
"epoch": 0.08810051736881006,
"grad_norm": 1.3983551263809204,
"learning_rate": 2.061073738537635e-05,
"loss": 2.1405,
"step": 149
},
{
"epoch": 0.08869179600886919,
"grad_norm": 1.4536499977111816,
"learning_rate": 1.9868268181037185e-05,
"loss": 2.1021,
"step": 150
},
{
"epoch": 0.08869179600886919,
"eval_loss": 1.445052146911621,
"eval_runtime": 238.6227,
"eval_samples_per_second": 11.939,
"eval_steps_per_second": 5.972,
"step": 150
},
{
"epoch": 0.0892830746489283,
"grad_norm": 0.37140536308288574,
"learning_rate": 1.9136088935510362e-05,
"loss": 1.2319,
"step": 151
},
{
"epoch": 0.08987435328898744,
"grad_norm": 0.437547504901886,
"learning_rate": 1.8414449687337464e-05,
"loss": 1.2127,
"step": 152
},
{
"epoch": 0.09046563192904657,
"grad_norm": 0.4470095634460449,
"learning_rate": 1.7703596875660645e-05,
"loss": 1.101,
"step": 153
},
{
"epoch": 0.0910569105691057,
"grad_norm": 0.43900978565216064,
"learning_rate": 1.700377325606388e-05,
"loss": 1.0225,
"step": 154
},
{
"epoch": 0.09164818920916482,
"grad_norm": 0.4545993208885193,
"learning_rate": 1.631521781767214e-05,
"loss": 1.0986,
"step": 155
},
{
"epoch": 0.09223946784922395,
"grad_norm": 0.48182153701782227,
"learning_rate": 1.5638165701536868e-05,
"loss": 1.1413,
"step": 156
},
{
"epoch": 0.09283074648928308,
"grad_norm": 0.4654633402824402,
"learning_rate": 1.4972848120335453e-05,
"loss": 1.104,
"step": 157
},
{
"epoch": 0.09342202512934221,
"grad_norm": 0.4657576382160187,
"learning_rate": 1.4319492279412388e-05,
"loss": 1.0422,
"step": 158
},
{
"epoch": 0.09401330376940133,
"grad_norm": 0.4549095630645752,
"learning_rate": 1.3678321299188801e-05,
"loss": 1.0239,
"step": 159
},
{
"epoch": 0.09460458240946046,
"grad_norm": 0.45037001371383667,
"learning_rate": 1.3049554138967051e-05,
"loss": 0.974,
"step": 160
},
{
"epoch": 0.09519586104951959,
"grad_norm": 0.47302672266960144,
"learning_rate": 1.2433405522156332e-05,
"loss": 1.026,
"step": 161
},
{
"epoch": 0.09578713968957872,
"grad_norm": 0.42686906456947327,
"learning_rate": 1.183008586294485e-05,
"loss": 1.1564,
"step": 162
},
{
"epoch": 0.09637841832963784,
"grad_norm": 0.4490116834640503,
"learning_rate": 1.1239801194443506e-05,
"loss": 1.1345,
"step": 163
},
{
"epoch": 0.09696969696969697,
"grad_norm": 0.4287410080432892,
"learning_rate": 1.066275309832584e-05,
"loss": 1.1877,
"step": 164
},
{
"epoch": 0.0975609756097561,
"grad_norm": 0.43853017687797546,
"learning_rate": 1.0099138635988026e-05,
"loss": 1.1694,
"step": 165
},
{
"epoch": 0.09815225424981523,
"grad_norm": 0.5354306101799011,
"learning_rate": 9.549150281252633e-06,
"loss": 1.1783,
"step": 166
},
{
"epoch": 0.09874353288987435,
"grad_norm": 0.4395792484283447,
"learning_rate": 9.012975854638949e-06,
"loss": 1.0381,
"step": 167
},
{
"epoch": 0.09933481152993348,
"grad_norm": 0.4514259696006775,
"learning_rate": 8.490798459222476e-06,
"loss": 0.9664,
"step": 168
},
{
"epoch": 0.09992609016999261,
"grad_norm": 0.47987860441207886,
"learning_rate": 7.982796418105371e-06,
"loss": 1.1756,
"step": 169
},
{
"epoch": 0.10051736881005174,
"grad_norm": 0.47819146513938904,
"learning_rate": 7.489143213519301e-06,
"loss": 1.057,
"step": 170
},
{
"epoch": 0.10110864745011086,
"grad_norm": 0.466268926858902,
"learning_rate": 7.010007427581378e-06,
"loss": 1.0946,
"step": 171
},
{
"epoch": 0.10169992609016999,
"grad_norm": 0.5092021226882935,
"learning_rate": 6.5455526847235825e-06,
"loss": 1.2211,
"step": 172
},
{
"epoch": 0.10229120473022912,
"grad_norm": 0.4685361683368683,
"learning_rate": 6.0959375958151045e-06,
"loss": 1.0345,
"step": 173
},
{
"epoch": 0.10288248337028826,
"grad_norm": 0.5377731323242188,
"learning_rate": 5.6613157039969055e-06,
"loss": 1.3137,
"step": 174
},
{
"epoch": 0.10347376201034737,
"grad_norm": 0.5368238687515259,
"learning_rate": 5.241835432246889e-06,
"loss": 1.1479,
"step": 175
},
{
"epoch": 0.1040650406504065,
"grad_norm": 0.6069375276565552,
"learning_rate": 4.837640032693558e-06,
"loss": 1.4047,
"step": 176
},
{
"epoch": 0.10465631929046564,
"grad_norm": 0.5899990797042847,
"learning_rate": 4.448867537695578e-06,
"loss": 1.4995,
"step": 177
},
{
"epoch": 0.10524759793052477,
"grad_norm": 0.5741286873817444,
"learning_rate": 4.075650712703849e-06,
"loss": 1.2832,
"step": 178
},
{
"epoch": 0.10583887657058388,
"grad_norm": 0.6058112382888794,
"learning_rate": 3.71811701092219e-06,
"loss": 1.4864,
"step": 179
},
{
"epoch": 0.10643015521064302,
"grad_norm": 0.6389828324317932,
"learning_rate": 3.376388529782215e-06,
"loss": 1.5509,
"step": 180
},
{
"epoch": 0.10702143385070215,
"grad_norm": 0.7593545913696289,
"learning_rate": 3.0505819692471792e-06,
"loss": 1.6754,
"step": 181
},
{
"epoch": 0.10761271249076128,
"grad_norm": 0.6956416368484497,
"learning_rate": 2.7408085919590264e-06,
"loss": 1.6197,
"step": 182
},
{
"epoch": 0.1082039911308204,
"grad_norm": 0.7472082376480103,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.6558,
"step": 183
},
{
"epoch": 0.10879526977087953,
"grad_norm": 0.7749246954917908,
"learning_rate": 2.1697790249779636e-06,
"loss": 1.8279,
"step": 184
},
{
"epoch": 0.10938654841093866,
"grad_norm": 0.7523097991943359,
"learning_rate": 1.908717841359048e-06,
"loss": 1.455,
"step": 185
},
{
"epoch": 0.10997782705099779,
"grad_norm": 0.7814156413078308,
"learning_rate": 1.6640797865406288e-06,
"loss": 1.634,
"step": 186
},
{
"epoch": 0.11056910569105691,
"grad_norm": 0.8871944546699524,
"learning_rate": 1.4359484041943038e-06,
"loss": 1.7472,
"step": 187
},
{
"epoch": 0.11116038433111604,
"grad_norm": 0.8749101161956787,
"learning_rate": 1.2244016009781701e-06,
"loss": 1.854,
"step": 188
},
{
"epoch": 0.11175166297117517,
"grad_norm": 0.8801904916763306,
"learning_rate": 1.0295116199317057e-06,
"loss": 1.7069,
"step": 189
},
{
"epoch": 0.1123429416112343,
"grad_norm": 0.9642406105995178,
"learning_rate": 8.513450158049108e-07,
"loss": 1.8409,
"step": 190
},
{
"epoch": 0.11293422025129342,
"grad_norm": 0.9762085676193237,
"learning_rate": 6.899626323298713e-07,
"loss": 1.8907,
"step": 191
},
{
"epoch": 0.11352549889135255,
"grad_norm": 1.0283136367797852,
"learning_rate": 5.454195814427021e-07,
"loss": 1.9047,
"step": 192
},
{
"epoch": 0.11411677753141168,
"grad_norm": 1.4411962032318115,
"learning_rate": 4.177652244628627e-07,
"loss": 1.8633,
"step": 193
},
{
"epoch": 0.11470805617147081,
"grad_norm": 1.2451083660125732,
"learning_rate": 3.0704315523631953e-07,
"loss": 2.1482,
"step": 194
},
{
"epoch": 0.11529933481152993,
"grad_norm": 1.1835973262786865,
"learning_rate": 2.1329118524827662e-07,
"loss": 2.0185,
"step": 195
},
{
"epoch": 0.11589061345158906,
"grad_norm": 1.338761329650879,
"learning_rate": 1.3654133071059893e-07,
"loss": 1.7139,
"step": 196
},
{
"epoch": 0.11648189209164819,
"grad_norm": 1.2973557710647583,
"learning_rate": 7.681980162830282e-08,
"loss": 2.0788,
"step": 197
},
{
"epoch": 0.11707317073170732,
"grad_norm": 1.4432735443115234,
"learning_rate": 3.4146992848854695e-08,
"loss": 2.0671,
"step": 198
},
{
"epoch": 0.11766444937176644,
"grad_norm": 1.424852728843689,
"learning_rate": 8.537477097364522e-09,
"loss": 2.191,
"step": 199
},
{
"epoch": 0.11825572801182557,
"grad_norm": 1.7964314222335815,
"learning_rate": 0.0,
"loss": 1.8229,
"step": 200
},
{
"epoch": 0.11825572801182557,
"eval_loss": 1.429347276687622,
"eval_runtime": 238.6143,
"eval_samples_per_second": 11.94,
"eval_steps_per_second": 5.972,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.051020832441631e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}