{
"best_metric": 0.0942394882440567,
"best_model_checkpoint": "/content/train/Qwen2.5-VL-3B-Instruct-unsloth-r8-rslora/checkpoint-320",
"epoch": 2.4218455743879472,
"eval_steps": 10,
"global_step": 320,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007532956685499058,
"grad_norm": 1.6337342262268066,
"learning_rate": 1e-05,
"loss": 2.8339,
"step": 1
},
{
"epoch": 0.015065913370998116,
"grad_norm": 1.7995260953903198,
"learning_rate": 2e-05,
"loss": 3.1612,
"step": 2
},
{
"epoch": 0.022598870056497175,
"grad_norm": 1.8331536054611206,
"learning_rate": 3e-05,
"loss": 2.9529,
"step": 3
},
{
"epoch": 0.030131826741996232,
"grad_norm": 1.8774369955062866,
"learning_rate": 4e-05,
"loss": 3.1371,
"step": 4
},
{
"epoch": 0.03766478342749529,
"grad_norm": 2.1215269565582275,
"learning_rate": 5e-05,
"loss": 3.4216,
"step": 5
},
{
"epoch": 0.04519774011299435,
"grad_norm": 1.7083489894866943,
"learning_rate": 6e-05,
"loss": 2.5018,
"step": 6
},
{
"epoch": 0.05273069679849341,
"grad_norm": 1.9553663730621338,
"learning_rate": 7e-05,
"loss": 2.8832,
"step": 7
},
{
"epoch": 0.060263653483992465,
"grad_norm": 2.085784673690796,
"learning_rate": 8e-05,
"loss": 2.9757,
"step": 8
},
{
"epoch": 0.06779661016949153,
"grad_norm": 1.949724793434143,
"learning_rate": 9e-05,
"loss": 2.7228,
"step": 9
},
{
"epoch": 0.07532956685499058,
"grad_norm": 2.1280415058135986,
"learning_rate": 0.0001,
"loss": 2.9347,
"step": 10
},
{
"epoch": 0.07532956685499058,
"eval_loss": 2.670034646987915,
"eval_runtime": 54.8057,
"eval_samples_per_second": 1.368,
"eval_steps_per_second": 0.693,
"step": 10
},
{
"epoch": 0.08286252354048965,
"grad_norm": 2.418717861175537,
"learning_rate": 9.999834399079165e-05,
"loss": 3.0651,
"step": 11
},
{
"epoch": 0.0903954802259887,
"grad_norm": 1.9917370080947876,
"learning_rate": 9.99933760728612e-05,
"loss": 2.4549,
"step": 12
},
{
"epoch": 0.09792843691148775,
"grad_norm": 1.8027087450027466,
"learning_rate": 9.99850965752854e-05,
"loss": 2.2586,
"step": 13
},
{
"epoch": 0.10546139359698682,
"grad_norm": 1.6262425184249878,
"learning_rate": 9.997350604650123e-05,
"loss": 2.0871,
"step": 14
},
{
"epoch": 0.11299435028248588,
"grad_norm": 1.5385862588882446,
"learning_rate": 9.995860525426954e-05,
"loss": 2.0329,
"step": 15
},
{
"epoch": 0.12052730696798493,
"grad_norm": 1.1733174324035645,
"learning_rate": 9.994039518562432e-05,
"loss": 1.6894,
"step": 16
},
{
"epoch": 0.128060263653484,
"grad_norm": 1.1716901063919067,
"learning_rate": 9.991887704680724e-05,
"loss": 1.8195,
"step": 17
},
{
"epoch": 0.13559322033898305,
"grad_norm": 1.3725332021713257,
"learning_rate": 9.989405226318772e-05,
"loss": 1.9845,
"step": 18
},
{
"epoch": 0.1431261770244821,
"grad_norm": 0.8536051511764526,
"learning_rate": 9.986592247916858e-05,
"loss": 1.5969,
"step": 19
},
{
"epoch": 0.15065913370998116,
"grad_norm": 0.6615162491798401,
"learning_rate": 9.983448955807708e-05,
"loss": 1.6763,
"step": 20
},
{
"epoch": 0.15065913370998116,
"eval_loss": 1.6168410778045654,
"eval_runtime": 43.9067,
"eval_samples_per_second": 1.708,
"eval_steps_per_second": 0.865,
"step": 20
},
{
"epoch": 0.15819209039548024,
"grad_norm": 0.601297914981842,
"learning_rate": 9.979975558204147e-05,
"loss": 1.6742,
"step": 21
},
{
"epoch": 0.1657250470809793,
"grad_norm": 0.4877667725086212,
"learning_rate": 9.976172285185314e-05,
"loss": 1.6194,
"step": 22
},
{
"epoch": 0.17325800376647835,
"grad_norm": 0.4728071391582489,
"learning_rate": 9.972039388681413e-05,
"loss": 1.6243,
"step": 23
},
{
"epoch": 0.1807909604519774,
"grad_norm": 0.37902191281318665,
"learning_rate": 9.967577142457032e-05,
"loss": 1.4569,
"step": 24
},
{
"epoch": 0.18832391713747645,
"grad_norm": 0.45830923318862915,
"learning_rate": 9.962785842093003e-05,
"loss": 1.6214,
"step": 25
},
{
"epoch": 0.1958568738229755,
"grad_norm": 0.39010411500930786,
"learning_rate": 9.957665804966829e-05,
"loss": 1.345,
"step": 26
},
{
"epoch": 0.2033898305084746,
"grad_norm": 0.5287721753120422,
"learning_rate": 9.952217370231653e-05,
"loss": 1.6019,
"step": 27
},
{
"epoch": 0.21092278719397364,
"grad_norm": 0.4444209635257721,
"learning_rate": 9.946440898793801e-05,
"loss": 1.4566,
"step": 28
},
{
"epoch": 0.2184557438794727,
"grad_norm": 0.4856286346912384,
"learning_rate": 9.940336773288865e-05,
"loss": 1.5087,
"step": 29
},
{
"epoch": 0.22598870056497175,
"grad_norm": 0.4327958822250366,
"learning_rate": 9.933905398056372e-05,
"loss": 1.2924,
"step": 30
},
{
"epoch": 0.22598870056497175,
"eval_loss": 1.4212392568588257,
"eval_runtime": 44.0456,
"eval_samples_per_second": 1.703,
"eval_steps_per_second": 0.863,
"step": 30
},
{
"epoch": 0.2335216572504708,
"grad_norm": 0.49079710245132446,
"learning_rate": 9.92714719911298e-05,
"loss": 1.3501,
"step": 31
},
{
"epoch": 0.24105461393596986,
"grad_norm": 0.8166887760162354,
"learning_rate": 9.920062624124282e-05,
"loss": 1.4564,
"step": 32
},
{
"epoch": 0.24858757062146894,
"grad_norm": 0.6213160157203674,
"learning_rate": 9.912652142375132e-05,
"loss": 1.5757,
"step": 33
},
{
"epoch": 0.256120527306968,
"grad_norm": 0.49864110350608826,
"learning_rate": 9.904916244738571e-05,
"loss": 1.3778,
"step": 34
},
{
"epoch": 0.263653483992467,
"grad_norm": 0.5232111215591431,
"learning_rate": 9.896855443643308e-05,
"loss": 1.3466,
"step": 35
},
{
"epoch": 0.2711864406779661,
"grad_norm": 0.4295203983783722,
"learning_rate": 9.888470273039775e-05,
"loss": 1.3928,
"step": 36
},
{
"epoch": 0.2787193973634652,
"grad_norm": 0.3830944895744324,
"learning_rate": 9.879761288364766e-05,
"loss": 1.4017,
"step": 37
},
{
"epoch": 0.2862523540489642,
"grad_norm": 0.3481033146381378,
"learning_rate": 9.870729066504629e-05,
"loss": 1.3344,
"step": 38
},
{
"epoch": 0.2937853107344633,
"grad_norm": 0.28261324763298035,
"learning_rate": 9.861374205757068e-05,
"loss": 1.2005,
"step": 39
},
{
"epoch": 0.3013182674199623,
"grad_norm": 0.31103572249412537,
"learning_rate": 9.851697325791505e-05,
"loss": 1.2026,
"step": 40
},
{
"epoch": 0.3013182674199623,
"eval_loss": 1.2615383863449097,
"eval_runtime": 43.98,
"eval_samples_per_second": 1.705,
"eval_steps_per_second": 0.864,
"step": 40
},
{
"epoch": 0.3088512241054614,
"grad_norm": 0.41986966133117676,
"learning_rate": 9.841699067608033e-05,
"loss": 1.2891,
"step": 41
},
{
"epoch": 0.3163841807909605,
"grad_norm": 0.3304821848869324,
"learning_rate": 9.831380093494957e-05,
"loss": 1.1725,
"step": 42
},
{
"epoch": 0.3239171374764595,
"grad_norm": 0.38536179065704346,
"learning_rate": 9.820741086984924e-05,
"loss": 1.2084,
"step": 43
},
{
"epoch": 0.3314500941619586,
"grad_norm": 0.3935360908508301,
"learning_rate": 9.809782752809644e-05,
"loss": 1.2919,
"step": 44
},
{
"epoch": 0.3389830508474576,
"grad_norm": 0.3400743305683136,
"learning_rate": 9.798505816853208e-05,
"loss": 1.2405,
"step": 45
},
{
"epoch": 0.3465160075329567,
"grad_norm": 0.3844086825847626,
"learning_rate": 9.786911026104007e-05,
"loss": 1.2489,
"step": 46
},
{
"epoch": 0.3540489642184557,
"grad_norm": 0.3005635738372803,
"learning_rate": 9.774999148605251e-05,
"loss": 1.1564,
"step": 47
},
{
"epoch": 0.3615819209039548,
"grad_norm": 0.2923331558704376,
"learning_rate": 9.762770973404094e-05,
"loss": 1.1403,
"step": 48
},
{
"epoch": 0.3691148775894539,
"grad_norm": 0.30060315132141113,
"learning_rate": 9.750227310499366e-05,
"loss": 1.1172,
"step": 49
},
{
"epoch": 0.3766478342749529,
"grad_norm": 0.2783910036087036,
"learning_rate": 9.737368990787916e-05,
"loss": 1.0288,
"step": 50
},
{
"epoch": 0.3766478342749529,
"eval_loss": 1.122483491897583,
"eval_runtime": 43.9499,
"eval_samples_per_second": 1.706,
"eval_steps_per_second": 0.865,
"step": 50
},
{
"epoch": 0.384180790960452,
"grad_norm": 0.2709607779979706,
"learning_rate": 9.72419686600958e-05,
"loss": 1.0827,
"step": 51
},
{
"epoch": 0.391713747645951,
"grad_norm": 0.27541661262512207,
"learning_rate": 9.710711808690754e-05,
"loss": 1.172,
"step": 52
},
{
"epoch": 0.3992467043314501,
"grad_norm": 0.25708749890327454,
"learning_rate": 9.696914712086603e-05,
"loss": 1.058,
"step": 53
},
{
"epoch": 0.4067796610169492,
"grad_norm": 0.29954883456230164,
"learning_rate": 9.682806490121885e-05,
"loss": 1.0781,
"step": 54
},
{
"epoch": 0.4143126177024482,
"grad_norm": 0.28139016032218933,
"learning_rate": 9.668388077330421e-05,
"loss": 1.0746,
"step": 55
},
{
"epoch": 0.4218455743879473,
"grad_norm": 0.2823486328125,
"learning_rate": 9.653660428793188e-05,
"loss": 1.052,
"step": 56
},
{
"epoch": 0.4293785310734463,
"grad_norm": 0.28060609102249146,
"learning_rate": 9.638624520075046e-05,
"loss": 1.0088,
"step": 57
},
{
"epoch": 0.4369114877589454,
"grad_norm": 0.26586028933525085,
"learning_rate": 9.623281347160127e-05,
"loss": 0.9935,
"step": 58
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.2685753405094147,
"learning_rate": 9.607631926385859e-05,
"loss": 1.002,
"step": 59
},
{
"epoch": 0.4519774011299435,
"grad_norm": 0.24896016716957092,
"learning_rate": 9.591677294375636e-05,
"loss": 0.9303,
"step": 60
},
{
"epoch": 0.4519774011299435,
"eval_loss": 0.9904586672782898,
"eval_runtime": 43.9768,
"eval_samples_per_second": 1.705,
"eval_steps_per_second": 0.864,
"step": 60
},
{
"epoch": 0.4595103578154426,
"grad_norm": 0.2723137140274048,
"learning_rate": 9.575418507970161e-05,
"loss": 1.0173,
"step": 61
},
{
"epoch": 0.4670433145009416,
"grad_norm": 0.28442397713661194,
"learning_rate": 9.558856644157432e-05,
"loss": 0.9589,
"step": 62
},
{
"epoch": 0.4745762711864407,
"grad_norm": 0.25184300541877747,
"learning_rate": 9.541992800001409e-05,
"loss": 0.9013,
"step": 63
},
{
"epoch": 0.4821092278719397,
"grad_norm": 0.2699225842952728,
"learning_rate": 9.52482809256934e-05,
"loss": 0.9436,
"step": 64
},
{
"epoch": 0.4896421845574388,
"grad_norm": 0.24603509902954102,
"learning_rate": 9.507363658857768e-05,
"loss": 0.907,
"step": 65
},
{
"epoch": 0.4971751412429379,
"grad_norm": 0.24069125950336456,
"learning_rate": 9.489600655717217e-05,
"loss": 0.9147,
"step": 66
},
{
"epoch": 0.504708097928437,
"grad_norm": 0.2531817555427551,
"learning_rate": 9.471540259775554e-05,
"loss": 0.8429,
"step": 67
},
{
"epoch": 0.512241054613936,
"grad_norm": 0.29864469170570374,
"learning_rate": 9.453183667360062e-05,
"loss": 0.9324,
"step": 68
},
{
"epoch": 0.519774011299435,
"grad_norm": 0.28371143341064453,
"learning_rate": 9.43453209441818e-05,
"loss": 0.8185,
"step": 69
},
{
"epoch": 0.527306967984934,
"grad_norm": 0.3073464035987854,
"learning_rate": 9.415586776436973e-05,
"loss": 0.9043,
"step": 70
},
{
"epoch": 0.527306967984934,
"eval_loss": 0.8400442600250244,
"eval_runtime": 43.8471,
"eval_samples_per_second": 1.71,
"eval_steps_per_second": 0.867,
"step": 70
},
{
"epoch": 0.5348399246704332,
"grad_norm": 0.31865832209587097,
"learning_rate": 9.396348968361281e-05,
"loss": 0.8984,
"step": 71
},
{
"epoch": 0.5423728813559322,
"grad_norm": 0.30962345004081726,
"learning_rate": 9.376819944510598e-05,
"loss": 0.8379,
"step": 72
},
{
"epoch": 0.5499058380414312,
"grad_norm": 0.40249139070510864,
"learning_rate": 9.357000998494656e-05,
"loss": 0.8553,
"step": 73
},
{
"epoch": 0.5574387947269304,
"grad_norm": 0.33023273944854736,
"learning_rate": 9.336893443127738e-05,
"loss": 0.762,
"step": 74
},
{
"epoch": 0.5649717514124294,
"grad_norm": 0.2891211211681366,
"learning_rate": 9.31649861034172e-05,
"loss": 0.7443,
"step": 75
},
{
"epoch": 0.5725047080979284,
"grad_norm": 0.33249810338020325,
"learning_rate": 9.295817851097837e-05,
"loss": 0.789,
"step": 76
},
{
"epoch": 0.5800376647834274,
"grad_norm": 0.32798218727111816,
"learning_rate": 9.274852535297198e-05,
"loss": 0.7544,
"step": 77
},
{
"epoch": 0.5875706214689266,
"grad_norm": 0.38781678676605225,
"learning_rate": 9.253604051690046e-05,
"loss": 0.691,
"step": 78
},
{
"epoch": 0.5951035781544256,
"grad_norm": 0.3911118805408478,
"learning_rate": 9.232073807783759e-05,
"loss": 0.6637,
"step": 79
},
{
"epoch": 0.6026365348399246,
"grad_norm": 0.31271758675575256,
"learning_rate": 9.210263229749626e-05,
"loss": 0.6649,
"step": 80
},
{
"epoch": 0.6026365348399246,
"eval_loss": 0.657122015953064,
"eval_runtime": 43.9006,
"eval_samples_per_second": 1.708,
"eval_steps_per_second": 0.866,
"step": 80
},
{
"epoch": 0.6101694915254238,
"grad_norm": 0.3603556454181671,
"learning_rate": 9.188173762328367e-05,
"loss": 0.6501,
"step": 81
},
{
"epoch": 0.6177024482109228,
"grad_norm": 0.39841267466545105,
"learning_rate": 9.165806868734444e-05,
"loss": 0.6572,
"step": 82
},
{
"epoch": 0.6252354048964218,
"grad_norm": 0.34062060713768005,
"learning_rate": 9.143164030559122e-05,
"loss": 0.6706,
"step": 83
},
{
"epoch": 0.632768361581921,
"grad_norm": 0.31477442383766174,
"learning_rate": 9.120246747672347e-05,
"loss": 0.629,
"step": 84
},
{
"epoch": 0.64030131826742,
"grad_norm": 0.28587597608566284,
"learning_rate": 9.097056538123376e-05,
"loss": 0.5698,
"step": 85
},
{
"epoch": 0.647834274952919,
"grad_norm": 0.2795222997665405,
"learning_rate": 9.073594938040231e-05,
"loss": 0.573,
"step": 86
},
{
"epoch": 0.655367231638418,
"grad_norm": 0.23265816271305084,
"learning_rate": 9.049863501527947e-05,
"loss": 0.626,
"step": 87
},
{
"epoch": 0.6629001883239172,
"grad_norm": 0.26728200912475586,
"learning_rate": 9.025863800565613e-05,
"loss": 0.6103,
"step": 88
},
{
"epoch": 0.6704331450094162,
"grad_norm": 0.24294917285442352,
"learning_rate": 9.001597424902267e-05,
"loss": 0.5109,
"step": 89
},
{
"epoch": 0.6779661016949152,
"grad_norm": 0.27384141087532043,
"learning_rate": 8.977065981951566e-05,
"loss": 0.5266,
"step": 90
},
{
"epoch": 0.6779661016949152,
"eval_loss": 0.5219850540161133,
"eval_runtime": 43.852,
"eval_samples_per_second": 1.71,
"eval_steps_per_second": 0.867,
"step": 90
},
{
"epoch": 0.6854990583804144,
"grad_norm": 0.2532135248184204,
"learning_rate": 8.952271096685332e-05,
"loss": 0.5027,
"step": 91
},
{
"epoch": 0.6930320150659134,
"grad_norm": 0.25869038701057434,
"learning_rate": 8.927214411525895e-05,
"loss": 0.5846,
"step": 92
},
{
"epoch": 0.7005649717514124,
"grad_norm": 0.25413447618484497,
"learning_rate": 8.90189758623731e-05,
"loss": 0.5171,
"step": 93
},
{
"epoch": 0.7080979284369114,
"grad_norm": 0.2756996154785156,
"learning_rate": 8.876322297815405e-05,
"loss": 0.5104,
"step": 94
},
{
"epoch": 0.7156308851224106,
"grad_norm": 0.25177237391471863,
"learning_rate": 8.850490240376711e-05,
"loss": 0.4921,
"step": 95
},
{
"epoch": 0.7231638418079096,
"grad_norm": 0.22342444956302643,
"learning_rate": 8.824403125046225e-05,
"loss": 0.45,
"step": 96
},
{
"epoch": 0.7306967984934086,
"grad_norm": 0.2055438607931137,
"learning_rate": 8.798062679844077e-05,
"loss": 0.4527,
"step": 97
},
{
"epoch": 0.7382297551789078,
"grad_norm": 0.21503853797912598,
"learning_rate": 8.771470649571056e-05,
"loss": 0.5282,
"step": 98
},
{
"epoch": 0.7457627118644068,
"grad_norm": 0.1799071580171585,
"learning_rate": 8.744628795693047e-05,
"loss": 0.5086,
"step": 99
},
{
"epoch": 0.7532956685499058,
"grad_norm": 0.20049640536308289,
"learning_rate": 8.717538896224332e-05,
"loss": 0.5187,
"step": 100
},
{
"epoch": 0.7532956685499058,
"eval_loss": 0.457345575094223,
"eval_runtime": 43.8906,
"eval_samples_per_second": 1.709,
"eval_steps_per_second": 0.866,
"step": 100
},
{
"epoch": 0.7608286252354048,
"grad_norm": 0.19504646956920624,
"learning_rate": 8.690202745609835e-05,
"loss": 0.3967,
"step": 101
},
{
"epoch": 0.768361581920904,
"grad_norm": 0.18335066735744476,
"learning_rate": 8.662622154606237e-05,
"loss": 0.4931,
"step": 102
},
{
"epoch": 0.775894538606403,
"grad_norm": 0.2755972743034363,
"learning_rate": 8.634798950162048e-05,
"loss": 0.5392,
"step": 103
},
{
"epoch": 0.783427495291902,
"grad_norm": 0.18094103038311005,
"learning_rate": 8.606734975296578e-05,
"loss": 0.4579,
"step": 104
},
{
"epoch": 0.7909604519774012,
"grad_norm": 0.20168878138065338,
"learning_rate": 8.578432088977859e-05,
"loss": 0.3906,
"step": 105
},
{
"epoch": 0.7984934086629002,
"grad_norm": 0.20075590908527374,
"learning_rate": 8.549892165999505e-05,
"loss": 0.4453,
"step": 106
},
{
"epoch": 0.8060263653483992,
"grad_norm": 0.19419971108436584,
"learning_rate": 8.521117096856528e-05,
"loss": 0.4881,
"step": 107
},
{
"epoch": 0.8135593220338984,
"grad_norm": 0.25679534673690796,
"learning_rate": 8.492108787620105e-05,
"loss": 0.4528,
"step": 108
},
{
"epoch": 0.8210922787193974,
"grad_norm": 0.17266082763671875,
"learning_rate": 8.462869159811327e-05,
"loss": 0.4072,
"step": 109
},
{
"epoch": 0.8286252354048964,
"grad_norm": 0.20196932554244995,
"learning_rate": 8.433400150273906e-05,
"loss": 0.4506,
"step": 110
},
{
"epoch": 0.8286252354048964,
"eval_loss": 0.4128032326698303,
"eval_runtime": 43.8756,
"eval_samples_per_second": 1.709,
"eval_steps_per_second": 0.866,
"step": 110
},
{
"epoch": 0.8361581920903954,
"grad_norm": 0.18674571812152863,
"learning_rate": 8.403703711045892e-05,
"loss": 0.4425,
"step": 111
},
{
"epoch": 0.8436911487758946,
"grad_norm": 0.19765950739383698,
"learning_rate": 8.373781809230355e-05,
"loss": 0.4913,
"step": 112
},
{
"epoch": 0.8512241054613936,
"grad_norm": 0.157321497797966,
"learning_rate": 8.343636426865096e-05,
"loss": 0.4163,
"step": 113
},
{
"epoch": 0.8587570621468926,
"grad_norm": 0.20251426100730896,
"learning_rate": 8.313269560791342e-05,
"loss": 0.3659,
"step": 114
},
{
"epoch": 0.8662900188323918,
"grad_norm": 0.16248001158237457,
"learning_rate": 8.28268322252149e-05,
"loss": 0.4309,
"step": 115
},
{
"epoch": 0.8738229755178908,
"grad_norm": 0.16724838316440582,
"learning_rate": 8.251879438105854e-05,
"loss": 0.4167,
"step": 116
},
{
"epoch": 0.8813559322033898,
"grad_norm": 0.20615121722221375,
"learning_rate": 8.220860247998456e-05,
"loss": 0.3559,
"step": 117
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.2065141499042511,
"learning_rate": 8.189627706921877e-05,
"loss": 0.3626,
"step": 118
},
{
"epoch": 0.896421845574388,
"grad_norm": 0.17293284833431244,
"learning_rate": 8.15818388373114e-05,
"loss": 0.4325,
"step": 119
},
{
"epoch": 0.903954802259887,
"grad_norm": 0.19183604419231415,
"learning_rate": 8.126530861276677e-05,
"loss": 0.3387,
"step": 120
},
{
"epoch": 0.903954802259887,
"eval_loss": 0.38071778416633606,
"eval_runtime": 43.8754,
"eval_samples_per_second": 1.709,
"eval_steps_per_second": 0.866,
"step": 120
},
{
"epoch": 0.911487758945386,
"grad_norm": 0.1738622784614563,
"learning_rate": 8.094670736266353e-05,
"loss": 0.4149,
"step": 121
},
{
"epoch": 0.9190207156308852,
"grad_norm": 0.23153111338615417,
"learning_rate": 8.062605619126584e-05,
"loss": 0.4434,
"step": 122
},
{
"epoch": 0.9265536723163842,
"grad_norm": 0.26979267597198486,
"learning_rate": 8.030337633862542e-05,
"loss": 0.3639,
"step": 123
},
{
"epoch": 0.9340866290018832,
"grad_norm": 0.19837673008441925,
"learning_rate": 7.997868917917453e-05,
"loss": 0.3336,
"step": 124
},
{
"epoch": 0.9416195856873822,
"grad_norm": 0.18979325890541077,
"learning_rate": 7.965201622031021e-05,
"loss": 0.3715,
"step": 125
},
{
"epoch": 0.9491525423728814,
"grad_norm": 0.20212630927562714,
"learning_rate": 7.932337910096961e-05,
"loss": 0.333,
"step": 126
},
{
"epoch": 0.9566854990583804,
"grad_norm": 0.22705507278442383,
"learning_rate": 7.899279959019654e-05,
"loss": 0.3456,
"step": 127
},
{
"epoch": 0.9642184557438794,
"grad_norm": 0.24515411257743835,
"learning_rate": 7.866029958569956e-05,
"loss": 0.3336,
"step": 128
},
{
"epoch": 0.9717514124293786,
"grad_norm": 0.2214023470878601,
"learning_rate": 7.832590111240145e-05,
"loss": 0.3951,
"step": 129
},
{
"epoch": 0.9792843691148776,
"grad_norm": 0.20626534521579742,
"learning_rate": 7.798962632098024e-05,
"loss": 0.3636,
"step": 130
},
{
"epoch": 0.9792843691148776,
"eval_loss": 0.3497096598148346,
"eval_runtime": 43.8538,
"eval_samples_per_second": 1.71,
"eval_steps_per_second": 0.867,
"step": 130
},
{
"epoch": 0.9868173258003766,
"grad_norm": 0.21280796825885773,
"learning_rate": 7.765149748640197e-05,
"loss": 0.2931,
"step": 131
},
{
"epoch": 0.9943502824858758,
"grad_norm": 0.18561464548110962,
"learning_rate": 7.73115370064452e-05,
"loss": 0.3363,
"step": 132
},
{
"epoch": 1.0075329566854991,
"grad_norm": 0.3493395447731018,
"learning_rate": 7.696976740021733e-05,
"loss": 0.7106,
"step": 133
},
{
"epoch": 1.015065913370998,
"grad_norm": 0.24458995461463928,
"learning_rate": 7.6626211306663e-05,
"loss": 0.2806,
"step": 134
},
{
"epoch": 1.0225988700564972,
"grad_norm": 0.2562405467033386,
"learning_rate": 7.628089148306434e-05,
"loss": 0.3048,
"step": 135
},
{
"epoch": 1.0301318267419963,
"grad_norm": 0.22578994929790497,
"learning_rate": 7.59338308035337e-05,
"loss": 0.3142,
"step": 136
},
{
"epoch": 1.0376647834274952,
"grad_norm": 0.20230619609355927,
"learning_rate": 7.558505225749827e-05,
"loss": 0.3104,
"step": 137
},
{
"epoch": 1.0451977401129944,
"grad_norm": 0.22089356184005737,
"learning_rate": 7.523457894817745e-05,
"loss": 0.2626,
"step": 138
},
{
"epoch": 1.0527306967984935,
"grad_norm": 0.23944571614265442,
"learning_rate": 7.488243409105233e-05,
"loss": 0.2984,
"step": 139
},
{
"epoch": 1.0602636534839924,
"grad_norm": 0.2213144749403,
"learning_rate": 7.452864101232798e-05,
"loss": 0.3714,
"step": 140
},
{
"epoch": 1.0602636534839924,
"eval_loss": 0.31596410274505615,
"eval_runtime": 43.9549,
"eval_samples_per_second": 1.706,
"eval_steps_per_second": 0.865,
"step": 140
},
{
"epoch": 1.0677966101694916,
"grad_norm": 0.2673434019088745,
"learning_rate": 7.417322314738822e-05,
"loss": 0.3455,
"step": 141
},
{
"epoch": 1.0753295668549905,
"grad_norm": 0.25528621673583984,
"learning_rate": 7.381620403924333e-05,
"loss": 0.311,
"step": 142
},
{
"epoch": 1.0828625235404896,
"grad_norm": 0.3240588903427124,
"learning_rate": 7.345760733697055e-05,
"loss": 0.3363,
"step": 143
},
{
"epoch": 1.0903954802259888,
"grad_norm": 0.31120482087135315,
"learning_rate": 7.30974567941475e-05,
"loss": 0.3529,
"step": 144
},
{
"epoch": 1.0979284369114877,
"grad_norm": 0.2683728039264679,
"learning_rate": 7.273577626727884e-05,
"loss": 0.2926,
"step": 145
},
{
"epoch": 1.1054613935969868,
"grad_norm": 0.2490655928850174,
"learning_rate": 7.237258971421587e-05,
"loss": 0.3029,
"step": 146
},
{
"epoch": 1.112994350282486,
"grad_norm": 0.2618696093559265,
"learning_rate": 7.20079211925696e-05,
"loss": 0.278,
"step": 147
},
{
"epoch": 1.1205273069679849,
"grad_norm": 0.25481289625167847,
"learning_rate": 7.164179485811727e-05,
"loss": 0.2936,
"step": 148
},
{
"epoch": 1.128060263653484,
"grad_norm": 0.36896899342536926,
"learning_rate": 7.127423496320212e-05,
"loss": 0.2876,
"step": 149
},
{
"epoch": 1.1355932203389831,
"grad_norm": 0.3275793194770813,
"learning_rate": 7.090526585512696e-05,
"loss": 0.3279,
"step": 150
},
{
"epoch": 1.1355932203389831,
"eval_loss": 0.2823197543621063,
"eval_runtime": 43.9363,
"eval_samples_per_second": 1.707,
"eval_steps_per_second": 0.865,
"step": 150
},
{
"epoch": 1.143126177024482,
"grad_norm": 0.263179749250412,
"learning_rate": 7.053491197454142e-05,
"loss": 0.2342,
"step": 151
},
{
"epoch": 1.1506591337099812,
"grad_norm": 0.2754746377468109,
"learning_rate": 7.016319785382296e-05,
"loss": 0.3234,
"step": 152
},
{
"epoch": 1.1581920903954803,
"grad_norm": 0.29197824001312256,
"learning_rate": 6.979014811545189e-05,
"loss": 0.2458,
"step": 153
},
{
"epoch": 1.1657250470809792,
"grad_norm": 0.45912623405456543,
"learning_rate": 6.941578747038023e-05,
"loss": 0.2357,
"step": 154
},
{
"epoch": 1.1732580037664784,
"grad_norm": 0.36572179198265076,
"learning_rate": 6.904014071639503e-05,
"loss": 0.283,
"step": 155
},
{
"epoch": 1.1807909604519775,
"grad_norm": 0.29460588097572327,
"learning_rate": 6.866323273647563e-05,
"loss": 0.2653,
"step": 156
},
{
"epoch": 1.1883239171374764,
"grad_norm": 0.2520604729652405,
"learning_rate": 6.828508849714546e-05,
"loss": 0.3155,
"step": 157
},
{
"epoch": 1.1958568738229756,
"grad_norm": 0.2615698575973511,
"learning_rate": 6.79057330468182e-05,
"loss": 0.1852,
"step": 158
},
{
"epoch": 1.2033898305084745,
"grad_norm": 0.30008143186569214,
"learning_rate": 6.752519151413861e-05,
"loss": 0.2543,
"step": 159
},
{
"epoch": 1.2109227871939736,
"grad_norm": 0.3347761929035187,
"learning_rate": 6.7143489106318e-05,
"loss": 0.3602,
"step": 160
},
{
"epoch": 1.2109227871939736,
"eval_loss": 0.25164613127708435,
"eval_runtime": 44.1497,
"eval_samples_per_second": 1.699,
"eval_steps_per_second": 0.861,
"step": 160
},
{
"epoch": 1.2184557438794728,
"grad_norm": 0.3353792726993561,
"learning_rate": 6.676065110746444e-05,
"loss": 0.2489,
"step": 161
},
{
"epoch": 1.2259887005649717,
"grad_norm": 0.32569241523742676,
"learning_rate": 6.637670287690799e-05,
"loss": 0.194,
"step": 162
},
{
"epoch": 1.2335216572504708,
"grad_norm": 0.336117684841156,
"learning_rate": 6.599166984752087e-05,
"loss": 0.1816,
"step": 163
},
{
"epoch": 1.24105461393597,
"grad_norm": 0.37371379137039185,
"learning_rate": 6.560557752403277e-05,
"loss": 0.2807,
"step": 164
},
{
"epoch": 1.2485875706214689,
"grad_norm": 0.2796350419521332,
"learning_rate": 6.52184514813414e-05,
"loss": 0.2149,
"step": 165
},
{
"epoch": 1.256120527306968,
"grad_norm": 0.4764145314693451,
"learning_rate": 6.483031736281843e-05,
"loss": 0.1903,
"step": 166
},
{
"epoch": 1.2636534839924671,
"grad_norm": 0.36564409732818604,
"learning_rate": 6.444120087861081e-05,
"loss": 0.2638,
"step": 167
},
{
"epoch": 1.271186440677966,
"grad_norm": 0.34570133686065674,
"learning_rate": 6.40511278039378e-05,
"loss": 0.2568,
"step": 168
},
{
"epoch": 1.2787193973634652,
"grad_norm": 0.3726440966129303,
"learning_rate": 6.366012397738355e-05,
"loss": 0.2396,
"step": 169
},
{
"epoch": 1.286252354048964,
"grad_norm": 0.22987572848796844,
"learning_rate": 6.326821529918553e-05,
"loss": 0.2168,
"step": 170
},
{
"epoch": 1.286252354048964,
"eval_loss": 0.22047367691993713,
"eval_runtime": 43.9518,
"eval_samples_per_second": 1.706,
"eval_steps_per_second": 0.865,
"step": 170
},
{
"epoch": 1.2937853107344632,
"grad_norm": 0.38589489459991455,
"learning_rate": 6.287542772951897e-05,
"loss": 0.2671,
"step": 171
},
{
"epoch": 1.3013182674199624,
"grad_norm": 0.26084208488464355,
"learning_rate": 6.248178728677711e-05,
"loss": 0.2628,
"step": 172
},
{
"epoch": 1.3088512241054615,
"grad_norm": 0.3167526423931122,
"learning_rate": 6.208732004584791e-05,
"loss": 0.2128,
"step": 173
},
{
"epoch": 1.3163841807909604,
"grad_norm": 0.3979116976261139,
"learning_rate": 6.16920521363867e-05,
"loss": 0.217,
"step": 174
},
{
"epoch": 1.3239171374764596,
"grad_norm": 0.3168756663799286,
"learning_rate": 6.129600974108538e-05,
"loss": 0.2284,
"step": 175
},
{
"epoch": 1.3314500941619585,
"grad_norm": 0.30765026807785034,
"learning_rate": 6.089921909393812e-05,
"loss": 0.2698,
"step": 176
},
{
"epoch": 1.3389830508474576,
"grad_norm": 0.3305312991142273,
"learning_rate": 6.050170647850351e-05,
"loss": 0.257,
"step": 177
},
{
"epoch": 1.3465160075329567,
"grad_norm": 0.6542951464653015,
"learning_rate": 6.0103498226163603e-05,
"loss": 0.2666,
"step": 178
},
{
"epoch": 1.3540489642184557,
"grad_norm": 0.3485718071460724,
"learning_rate": 5.970462071437973e-05,
"loss": 0.1579,
"step": 179
},
{
"epoch": 1.3615819209039548,
"grad_norm": 0.4073808789253235,
"learning_rate": 5.93051003649452e-05,
"loss": 0.1643,
"step": 180
},
{
"epoch": 1.3615819209039548,
"eval_loss": 0.19270579516887665,
"eval_runtime": 44.023,
"eval_samples_per_second": 1.704,
"eval_steps_per_second": 0.863,
"step": 180
},
{
"epoch": 1.369114877589454,
"grad_norm": 0.37884190678596497,
"learning_rate": 5.890496364223509e-05,
"loss": 0.2015,
"step": 181
},
{
"epoch": 1.3766478342749529,
"grad_norm": 0.2823837101459503,
"learning_rate": 5.850423705145334e-05,
"loss": 0.1774,
"step": 182
},
{
"epoch": 1.384180790960452,
"grad_norm": 0.33302009105682373,
"learning_rate": 5.8102947136876876e-05,
"loss": 0.2202,
"step": 183
},
{
"epoch": 1.3917137476459511,
"grad_norm": 0.30890604853630066,
"learning_rate": 5.770112048009747e-05,
"loss": 0.1372,
"step": 184
},
{
"epoch": 1.39924670433145,
"grad_norm": 0.3033400774002075,
"learning_rate": 5.7298783698260874e-05,
"loss": 0.1857,
"step": 185
},
{
"epoch": 1.4067796610169492,
"grad_norm": 0.2779131829738617,
"learning_rate": 5.68959634423037e-05,
"loss": 0.187,
"step": 186
},
{
"epoch": 1.414312617702448,
"grad_norm": 0.485245943069458,
"learning_rate": 5.64926863951881e-05,
"loss": 0.2437,
"step": 187
},
{
"epoch": 1.4218455743879472,
"grad_norm": 0.37200334668159485,
"learning_rate": 5.60889792701342e-05,
"loss": 0.1923,
"step": 188
},
{
"epoch": 1.4293785310734464,
"grad_norm": 0.32306909561157227,
"learning_rate": 5.568486880885068e-05,
"loss": 0.1713,
"step": 189
},
{
"epoch": 1.4369114877589455,
"grad_norm": 0.3483230471611023,
"learning_rate": 5.52803817797633e-05,
"loss": 0.1336,
"step": 190
},
{
"epoch": 1.4369114877589455,
"eval_loss": 0.17033681273460388,
"eval_runtime": 43.9231,
"eval_samples_per_second": 1.708,
"eval_steps_per_second": 0.865,
"step": 190
},
{
"epoch": 1.4444444444444444,
"grad_norm": 0.3841436505317688,
"learning_rate": 5.487554497624189e-05,
"loss": 0.1797,
"step": 191
},
{
"epoch": 1.4519774011299436,
"grad_norm": 0.3441992998123169,
"learning_rate": 5.4470385214825416e-05,
"loss": 0.2176,
"step": 192
},
{
"epoch": 1.4595103578154425,
"grad_norm": 0.5087612867355347,
"learning_rate": 5.406492933344571e-05,
"loss": 0.2663,
"step": 193
},
{
"epoch": 1.4670433145009416,
"grad_norm": 0.3248097598552704,
"learning_rate": 5.365920418964973e-05,
"loss": 0.1739,
"step": 194
},
{
"epoch": 1.4745762711864407,
"grad_norm": 0.4247044622898102,
"learning_rate": 5.3253236658820396e-05,
"loss": 0.1454,
"step": 195
},
{
"epoch": 1.4821092278719397,
"grad_norm": 0.2898694574832916,
"learning_rate": 5.28470536323965e-05,
"loss": 0.1609,
"step": 196
},
{
"epoch": 1.4896421845574388,
"grad_norm": 0.41251513361930847,
"learning_rate": 5.244068201609133e-05,
"loss": 0.143,
"step": 197
},
{
"epoch": 1.497175141242938,
"grad_norm": 0.525326132774353,
"learning_rate": 5.2034148728110424e-05,
"loss": 0.1938,
"step": 198
},
{
"epoch": 1.5047080979284368,
"grad_norm": 0.36119720339775085,
"learning_rate": 5.162748069736851e-05,
"loss": 0.1518,
"step": 199
},
{
"epoch": 1.512241054613936,
"grad_norm": 0.38599205017089844,
"learning_rate": 5.1220704861705774e-05,
"loss": 0.1198,
"step": 200
},
{
"epoch": 1.512241054613936,
"eval_loss": 0.15222826600074768,
"eval_runtime": 44.0589,
"eval_samples_per_second": 1.702,
"eval_steps_per_second": 0.862,
"step": 200
},
{
"epoch": 1.5197740112994351,
"grad_norm": 0.36092710494995117,
"learning_rate": 5.081384816610336e-05,
"loss": 0.1485,
"step": 201
},
{
"epoch": 1.527306967984934,
"grad_norm": 0.3418121039867401,
"learning_rate": 5.0406937560898646e-05,
"loss": 0.1368,
"step": 202
},
{
"epoch": 1.5348399246704332,
"grad_norm": 0.39115968346595764,
"learning_rate": 5e-05,
"loss": 0.1843,
"step": 203
},
{
"epoch": 1.542372881355932,
"grad_norm": 0.4022577106952667,
"learning_rate": 4.9593062439101365e-05,
"loss": 0.128,
"step": 204
},
{
"epoch": 1.5499058380414312,
"grad_norm": 0.3265657126903534,
"learning_rate": 4.918615183389665e-05,
"loss": 0.1365,
"step": 205
},
{
"epoch": 1.5574387947269304,
"grad_norm": 0.32065853476524353,
"learning_rate": 4.877929513829424e-05,
"loss": 0.1495,
"step": 206
},
{
"epoch": 1.5649717514124295,
"grad_norm": 0.2755489647388458,
"learning_rate": 4.8372519302631486e-05,
"loss": 0.1105,
"step": 207
},
{
"epoch": 1.5725047080979284,
"grad_norm": 0.5003211498260498,
"learning_rate": 4.796585127188958e-05,
"loss": 0.1284,
"step": 208
},
{
"epoch": 1.5800376647834273,
"grad_norm": 0.43929675221443176,
"learning_rate": 4.755931798390867e-05,
"loss": 0.1889,
"step": 209
},
{
"epoch": 1.5875706214689265,
"grad_norm": 0.2825930118560791,
"learning_rate": 4.715294636760352e-05,
"loss": 0.1364,
"step": 210
},
{
"epoch": 1.5875706214689265,
"eval_loss": 0.13791552186012268,
"eval_runtime": 43.9041,
"eval_samples_per_second": 1.708,
"eval_steps_per_second": 0.866,
"step": 210
},
{
"epoch": 1.5951035781544256,
"grad_norm": 0.3584269881248474,
"learning_rate": 4.674676334117962e-05,
"loss": 0.1261,
"step": 211
},
{
"epoch": 1.6026365348399247,
"grad_norm": 0.3463616967201233,
"learning_rate": 4.634079581035029e-05,
"loss": 0.185,
"step": 212
},
{
"epoch": 1.6101694915254239,
"grad_norm": 0.3442526161670685,
"learning_rate": 4.59350706665543e-05,
"loss": 0.104,
"step": 213
},
{
"epoch": 1.6177024482109228,
"grad_norm": 0.3831660747528076,
"learning_rate": 4.55296147851746e-05,
"loss": 0.1294,
"step": 214
},
{
"epoch": 1.6252354048964217,
"grad_norm": 0.3194776475429535,
"learning_rate": 4.512445502375813e-05,
"loss": 0.1068,
"step": 215
},
{
"epoch": 1.6327683615819208,
"grad_norm": 0.3435104787349701,
"learning_rate": 4.471961822023671e-05,
"loss": 0.0864,
"step": 216
},
{
"epoch": 1.64030131826742,
"grad_norm": 0.2775423526763916,
"learning_rate": 4.431513119114934e-05,
"loss": 0.1858,
"step": 217
},
{
"epoch": 1.6478342749529191,
"grad_norm": 0.3337323069572449,
"learning_rate": 4.391102072986581e-05,
"loss": 0.1946,
"step": 218
},
{
"epoch": 1.655367231638418,
"grad_norm": 0.32516106963157654,
"learning_rate": 4.350731360481191e-05,
"loss": 0.1014,
"step": 219
},
{
"epoch": 1.6629001883239172,
"grad_norm": 0.2920524775981903,
"learning_rate": 4.3104036557696295e-05,
"loss": 0.2689,
"step": 220
},
{
"epoch": 1.6629001883239172,
"eval_loss": 0.12488219887018204,
"eval_runtime": 43.9386,
"eval_samples_per_second": 1.707,
"eval_steps_per_second": 0.865,
"step": 220
},
{
"epoch": 1.670433145009416,
"grad_norm": 0.4062795042991638,
"learning_rate": 4.270121630173913e-05,
"loss": 0.1495,
"step": 221
},
{
"epoch": 1.6779661016949152,
"grad_norm": 0.30497950315475464,
"learning_rate": 4.229887951990255e-05,
"loss": 0.1369,
"step": 222
},
{
"epoch": 1.6854990583804144,
"grad_norm": 0.26471954584121704,
"learning_rate": 4.189705286312314e-05,
"loss": 0.0776,
"step": 223
},
{
"epoch": 1.6930320150659135,
"grad_norm": 0.22838452458381653,
"learning_rate": 4.149576294854668e-05,
"loss": 0.0942,
"step": 224
},
{
"epoch": 1.7005649717514124,
"grad_norm": 0.2828744649887085,
"learning_rate": 4.1095036357764915e-05,
"loss": 0.0787,
"step": 225
},
{
"epoch": 1.7080979284369113,
"grad_norm": 0.3124982714653015,
"learning_rate": 4.069489963505482e-05,
"loss": 0.13,
"step": 226
},
{
"epoch": 1.7156308851224105,
"grad_norm": 0.3018610179424286,
"learning_rate": 4.029537928562028e-05,
"loss": 0.1263,
"step": 227
},
{
"epoch": 1.7231638418079096,
"grad_norm": 0.28386422991752625,
"learning_rate": 3.98965017738364e-05,
"loss": 0.089,
"step": 228
},
{
"epoch": 1.7306967984934087,
"grad_norm": 0.24454158544540405,
"learning_rate": 3.9498293521496503e-05,
"loss": 0.1525,
"step": 229
},
{
"epoch": 1.7382297551789079,
"grad_norm": 0.29519349336624146,
"learning_rate": 3.9100780906061896e-05,
"loss": 0.1383,
"step": 230
},
{
"epoch": 1.7382297551789079,
"eval_loss": 0.11655016243457794,
"eval_runtime": 43.924,
"eval_samples_per_second": 1.707,
"eval_steps_per_second": 0.865,
"step": 230
},
{
"epoch": 1.7457627118644068,
"grad_norm": 0.2894473075866699,
"learning_rate": 3.8703990258914614e-05,
"loss": 0.1709,
"step": 231
},
{
"epoch": 1.7532956685499057,
"grad_norm": 0.31395775079727173,
"learning_rate": 3.83079478636133e-05,
"loss": 0.0944,
"step": 232
},
{
"epoch": 1.7608286252354048,
"grad_norm": 0.2754004895687103,
"learning_rate": 3.791267995415208e-05,
"loss": 0.1075,
"step": 233
},
{
"epoch": 1.768361581920904,
"grad_norm": 0.27213406562805176,
"learning_rate": 3.7518212713222906e-05,
"loss": 0.1264,
"step": 234
},
{
"epoch": 1.7758945386064031,
"grad_norm": 0.44370901584625244,
"learning_rate": 3.7124572270481056e-05,
"loss": 0.1066,
"step": 235
},
{
"epoch": 1.783427495291902,
"grad_norm": 0.27574431896209717,
"learning_rate": 3.673178470081448e-05,
"loss": 0.1138,
"step": 236
},
{
"epoch": 1.7909604519774012,
"grad_norm": 0.25503936409950256,
"learning_rate": 3.633987602261647e-05,
"loss": 0.0913,
"step": 237
},
{
"epoch": 1.7984934086629,
"grad_norm": 0.33877211809158325,
"learning_rate": 3.594887219606221e-05,
"loss": 0.1084,
"step": 238
},
{
"epoch": 1.8060263653483992,
"grad_norm": 0.3899424374103546,
"learning_rate": 3.55587991213892e-05,
"loss": 0.203,
"step": 239
},
{
"epoch": 1.8135593220338984,
"grad_norm": 0.3390370011329651,
"learning_rate": 3.516968263718159e-05,
"loss": 0.1145,
"step": 240
},
{
"epoch": 1.8135593220338984,
"eval_loss": 0.11021808534860611,
"eval_runtime": 43.9451,
"eval_samples_per_second": 1.707,
"eval_steps_per_second": 0.865,
"step": 240
},
{
"epoch": 1.8210922787193975,
"grad_norm": 0.33010998368263245,
"learning_rate": 3.47815485186586e-05,
"loss": 0.0726,
"step": 241
},
{
"epoch": 1.8286252354048964,
"grad_norm": 0.280018150806427,
"learning_rate": 3.439442247596724e-05,
"loss": 0.1107,
"step": 242
},
{
"epoch": 1.8361581920903953,
"grad_norm": 0.2712264657020569,
"learning_rate": 3.400833015247913e-05,
"loss": 0.0737,
"step": 243
},
{
"epoch": 1.8436911487758945,
"grad_norm": 0.2368086278438568,
"learning_rate": 3.3623297123092006e-05,
"loss": 0.1474,
"step": 244
},
{
"epoch": 1.8512241054613936,
"grad_norm": 0.29197973012924194,
"learning_rate": 3.323934889253556e-05,
"loss": 0.078,
"step": 245
},
{
"epoch": 1.8587570621468927,
"grad_norm": 0.2464105784893036,
"learning_rate": 3.285651089368202e-05,
"loss": 0.1029,
"step": 246
},
{
"epoch": 1.8662900188323919,
"grad_norm": 0.24727390706539154,
"learning_rate": 3.2474808485861397e-05,
"loss": 0.0552,
"step": 247
},
{
"epoch": 1.8738229755178908,
"grad_norm": 0.23096492886543274,
"learning_rate": 3.209426695318182e-05,
"loss": 0.0818,
"step": 248
},
{
"epoch": 1.8813559322033897,
"grad_norm": 0.23165109753608704,
"learning_rate": 3.171491150285456e-05,
"loss": 0.1493,
"step": 249
},
{
"epoch": 1.8888888888888888,
"grad_norm": 0.3371273875236511,
"learning_rate": 3.133676726352438e-05,
"loss": 0.1037,
"step": 250
},
{
"epoch": 1.8888888888888888,
"eval_loss": 0.10490331053733826,
"eval_runtime": 43.9307,
"eval_samples_per_second": 1.707,
"eval_steps_per_second": 0.865,
"step": 250
},
{
"epoch": 1.896421845574388,
"grad_norm": 0.2399485558271408,
"learning_rate": 3.0959859283604984e-05,
"loss": 0.0929,
"step": 251
},
{
"epoch": 1.9039548022598871,
"grad_norm": 0.26773276925086975,
"learning_rate": 3.0584212529619775e-05,
"loss": 0.0637,
"step": 252
},
{
"epoch": 1.911487758945386,
"grad_norm": 0.41889140009880066,
"learning_rate": 3.0209851884548117e-05,
"loss": 0.1263,
"step": 253
},
{
"epoch": 1.9190207156308852,
"grad_norm": 0.2959991991519928,
"learning_rate": 2.9836802146177034e-05,
"loss": 0.1339,
"step": 254
},
{
"epoch": 1.926553672316384,
"grad_norm": 0.24563738703727722,
"learning_rate": 2.9465088025458586e-05,
"loss": 0.1496,
"step": 255
},
{
"epoch": 1.9340866290018832,
"grad_norm": 0.24098043143749237,
"learning_rate": 2.9094734144873036e-05,
"loss": 0.0539,
"step": 256
},
{
"epoch": 1.9416195856873824,
"grad_norm": 0.3055378198623657,
"learning_rate": 2.8725765036797892e-05,
"loss": 0.1087,
"step": 257
},
{
"epoch": 1.9491525423728815,
"grad_norm": 0.27689605951309204,
"learning_rate": 2.835820514188273e-05,
"loss": 0.1004,
"step": 258
},
{
"epoch": 1.9566854990583804,
"grad_norm": 0.30273178219795227,
"learning_rate": 2.7992078807430422e-05,
"loss": 0.1261,
"step": 259
},
{
"epoch": 1.9642184557438793,
"grad_norm": 0.3399432897567749,
"learning_rate": 2.7627410285784163e-05,
"loss": 0.1318,
"step": 260
},
{
"epoch": 1.9642184557438793,
"eval_loss": 0.10169798880815506,
"eval_runtime": 43.9845,
"eval_samples_per_second": 1.705,
"eval_steps_per_second": 0.864,
"step": 260
},
{
"epoch": 1.9717514124293785,
"grad_norm": 0.2374039739370346,
"learning_rate": 2.7264223732721167e-05,
"loss": 0.0795,
"step": 261
},
{
"epoch": 1.9792843691148776,
"grad_norm": 0.2347286343574524,
"learning_rate": 2.6902543205852492e-05,
"loss": 0.1129,
"step": 262
},
{
"epoch": 1.9868173258003767,
"grad_norm": 0.2325945943593979,
"learning_rate": 2.6542392663029463e-05,
"loss": 0.117,
"step": 263
},
{
"epoch": 1.9943502824858759,
"grad_norm": 0.2451542764902115,
"learning_rate": 2.618379596075668e-05,
"loss": 0.0742,
"step": 264
},
{
"epoch": 2.007532956685499,
"grad_norm": 0.4711216390132904,
"learning_rate": 2.582677685261179e-05,
"loss": 0.2505,
"step": 265
},
{
"epoch": 2.0150659133709983,
"grad_norm": 0.25324326753616333,
"learning_rate": 2.5471358987672017e-05,
"loss": 0.0925,
"step": 266
},
{
"epoch": 2.022598870056497,
"grad_norm": 0.24556219577789307,
"learning_rate": 2.511756590894765e-05,
"loss": 0.069,
"step": 267
},
{
"epoch": 2.030131826741996,
"grad_norm": 0.20775027573108673,
"learning_rate": 2.476542105182254e-05,
"loss": 0.0687,
"step": 268
},
{
"epoch": 2.0376647834274952,
"grad_norm": 0.2542414367198944,
"learning_rate": 2.4414947742501744e-05,
"loss": 0.0769,
"step": 269
},
{
"epoch": 2.0451977401129944,
"grad_norm": 0.26574474573135376,
"learning_rate": 2.4066169196466326e-05,
"loss": 0.0834,
"step": 270
},
{
"epoch": 2.0451977401129944,
"eval_loss": 0.0993381068110466,
"eval_runtime": 43.9044,
"eval_samples_per_second": 1.708,
"eval_steps_per_second": 0.866,
"step": 270
},
{
"epoch": 2.0527306967984935,
"grad_norm": 0.3231644034385681,
"learning_rate": 2.3719108516935683e-05,
"loss": 0.0774,
"step": 271
},
{
"epoch": 2.0602636534839927,
"grad_norm": 0.22078053653240204,
"learning_rate": 2.3373788693337024e-05,
"loss": 0.0884,
"step": 272
},
{
"epoch": 2.0677966101694913,
"grad_norm": 0.22590897977352142,
"learning_rate": 2.303023259978267e-05,
"loss": 0.0781,
"step": 273
},
{
"epoch": 2.0753295668549905,
"grad_norm": 0.23885948956012726,
"learning_rate": 2.268846299355481e-05,
"loss": 0.0543,
"step": 274
},
{
"epoch": 2.0828625235404896,
"grad_norm": 0.2120169848203659,
"learning_rate": 2.2348502513598035e-05,
"loss": 0.0925,
"step": 275
},
{
"epoch": 2.0903954802259888,
"grad_norm": 0.2760743498802185,
"learning_rate": 2.2010373679019776e-05,
"loss": 0.1427,
"step": 276
},
{
"epoch": 2.097928436911488,
"grad_norm": 0.3039568066596985,
"learning_rate": 2.167409888759856e-05,
"loss": 0.1766,
"step": 277
},
{
"epoch": 2.105461393596987,
"grad_norm": 0.2834893763065338,
"learning_rate": 2.133970041430044e-05,
"loss": 0.0816,
"step": 278
},
{
"epoch": 2.1129943502824857,
"grad_norm": 0.2928575277328491,
"learning_rate": 2.1007200409803462e-05,
"loss": 0.1076,
"step": 279
},
{
"epoch": 2.120527306967985,
"grad_norm": 0.2669938802719116,
"learning_rate": 2.067662089903039e-05,
"loss": 0.0889,
"step": 280
},
{
"epoch": 2.120527306967985,
"eval_loss": 0.09731351584196091,
"eval_runtime": 44.078,
"eval_samples_per_second": 1.702,
"eval_steps_per_second": 0.862,
"step": 280
},
{
"epoch": 2.128060263653484,
"grad_norm": 0.28592410683631897,
"learning_rate": 2.03479837796898e-05,
"loss": 0.0685,
"step": 281
},
{
"epoch": 2.135593220338983,
"grad_norm": 0.32021915912628174,
"learning_rate": 2.002131082082549e-05,
"loss": 0.0927,
"step": 282
},
{
"epoch": 2.1431261770244823,
"grad_norm": 0.2118028998374939,
"learning_rate": 1.9696623661374618e-05,
"loss": 0.0634,
"step": 283
},
{
"epoch": 2.150659133709981,
"grad_norm": 0.22075195610523224,
"learning_rate": 1.937394380873418e-05,
"loss": 0.1464,
"step": 284
},
{
"epoch": 2.15819209039548,
"grad_norm": 0.24222980439662933,
"learning_rate": 1.905329263733649e-05,
"loss": 0.1251,
"step": 285
},
{
"epoch": 2.1657250470809792,
"grad_norm": 0.28131377696990967,
"learning_rate": 1.873469138723325e-05,
"loss": 0.1184,
"step": 286
},
{
"epoch": 2.1732580037664784,
"grad_norm": 0.22016428411006927,
"learning_rate": 1.8418161162688615e-05,
"loss": 0.0399,
"step": 287
},
{
"epoch": 2.1807909604519775,
"grad_norm": 0.19806838035583496,
"learning_rate": 1.8103722930781247e-05,
"loss": 0.0756,
"step": 288
},
{
"epoch": 2.1883239171374766,
"grad_norm": 0.332441121339798,
"learning_rate": 1.779139752001545e-05,
"loss": 0.0938,
"step": 289
},
{
"epoch": 2.1958568738229753,
"grad_norm": 0.19058367609977722,
"learning_rate": 1.748120561894147e-05,
"loss": 0.0643,
"step": 290
},
{
"epoch": 2.1958568738229753,
"eval_loss": 0.09646200388669968,
"eval_runtime": 43.99,
"eval_samples_per_second": 1.705,
"eval_steps_per_second": 0.864,
"step": 290
},
{
"epoch": 2.2033898305084745,
"grad_norm": 0.168031245470047,
"learning_rate": 1.7173167774785092e-05,
"loss": 0.0524,
"step": 291
},
{
"epoch": 2.2109227871939736,
"grad_norm": 0.23299571871757507,
"learning_rate": 1.6867304392086575e-05,
"loss": 0.0775,
"step": 292
},
{
"epoch": 2.2184557438794728,
"grad_norm": 0.258078008890152,
"learning_rate": 1.6563635731349057e-05,
"loss": 0.0662,
"step": 293
},
{
"epoch": 2.225988700564972,
"grad_norm": 0.21041317284107208,
"learning_rate": 1.6262181907696454e-05,
"loss": 0.1229,
"step": 294
},
{
"epoch": 2.2335216572504706,
"grad_norm": 0.1758544147014618,
"learning_rate": 1.5962962889541105e-05,
"loss": 0.0487,
"step": 295
},
{
"epoch": 2.2410546139359697,
"grad_norm": 0.24289867281913757,
"learning_rate": 1.5665998497260958e-05,
"loss": 0.103,
"step": 296
},
{
"epoch": 2.248587570621469,
"grad_norm": 0.41178449988365173,
"learning_rate": 1.5371308401886757e-05,
"loss": 0.1671,
"step": 297
},
{
"epoch": 2.256120527306968,
"grad_norm": 0.22324170172214508,
"learning_rate": 1.5078912123798961e-05,
"loss": 0.089,
"step": 298
},
{
"epoch": 2.263653483992467,
"grad_norm": 0.2477371245622635,
"learning_rate": 1.4788829031434732e-05,
"loss": 0.0397,
"step": 299
},
{
"epoch": 2.2711864406779663,
"grad_norm": 0.21119807660579681,
"learning_rate": 1.4501078340004953e-05,
"loss": 0.0677,
"step": 300
},
{
"epoch": 2.2711864406779663,
"eval_loss": 0.09540043771266937,
"eval_runtime": 43.9302,
"eval_samples_per_second": 1.707,
"eval_steps_per_second": 0.865,
"step": 300
},
{
"epoch": 2.2787193973634654,
"grad_norm": 0.24356690049171448,
"learning_rate": 1.4215679110221413e-05,
"loss": 0.1493,
"step": 301
},
{
"epoch": 2.286252354048964,
"grad_norm": 0.180653914809227,
"learning_rate": 1.3932650247034218e-05,
"loss": 0.0703,
"step": 302
},
{
"epoch": 2.2937853107344632,
"grad_norm": 0.3206869959831238,
"learning_rate": 1.3652010498379519e-05,
"loss": 0.1167,
"step": 303
},
{
"epoch": 2.3013182674199624,
"grad_norm": 0.21196141839027405,
"learning_rate": 1.337377845393763e-05,
"loss": 0.0721,
"step": 304
},
{
"epoch": 2.3088512241054615,
"grad_norm": 0.34639954566955566,
"learning_rate": 1.309797254390167e-05,
"loss": 0.1114,
"step": 305
},
{
"epoch": 2.3163841807909606,
"grad_norm": 0.28393760323524475,
"learning_rate": 1.2824611037756684e-05,
"loss": 0.0527,
"step": 306
},
{
"epoch": 2.3239171374764593,
"grad_norm": 0.2436118721961975,
"learning_rate": 1.255371204306956e-05,
"loss": 0.0825,
"step": 307
},
{
"epoch": 2.3314500941619585,
"grad_norm": 0.2877277135848999,
"learning_rate": 1.2285293504289447e-05,
"loss": 0.0721,
"step": 308
},
{
"epoch": 2.3389830508474576,
"grad_norm": 0.18935468792915344,
"learning_rate": 1.2019373201559247e-05,
"loss": 0.0826,
"step": 309
},
{
"epoch": 2.3465160075329567,
"grad_norm": 0.3491658568382263,
"learning_rate": 1.1755968749537754e-05,
"loss": 0.1784,
"step": 310
},
{
"epoch": 2.3465160075329567,
"eval_loss": 0.09475459903478622,
"eval_runtime": 43.9621,
"eval_samples_per_second": 1.706,
"eval_steps_per_second": 0.864,
"step": 310
},
{
"epoch": 2.354048964218456,
"grad_norm": 0.31381261348724365,
"learning_rate": 1.1495097596232901e-05,
"loss": 0.1266,
"step": 311
},
{
"epoch": 2.361581920903955,
"grad_norm": 0.23978573083877563,
"learning_rate": 1.1236777021845956e-05,
"loss": 0.1047,
"step": 312
},
{
"epoch": 2.3691148775894537,
"grad_norm": 0.24144862592220306,
"learning_rate": 1.0981024137626922e-05,
"loss": 0.0572,
"step": 313
},
{
"epoch": 2.376647834274953,
"grad_norm": 0.21653713285923004,
"learning_rate": 1.0727855884741056e-05,
"loss": 0.0672,
"step": 314
},
{
"epoch": 2.384180790960452,
"grad_norm": 0.22251500189304352,
"learning_rate": 1.0477289033146675e-05,
"loss": 0.0598,
"step": 315
},
{
"epoch": 2.391713747645951,
"grad_norm": 0.297451913356781,
"learning_rate": 1.022934018048432e-05,
"loss": 0.0518,
"step": 316
},
{
"epoch": 2.3992467043314503,
"grad_norm": 0.29483604431152344,
"learning_rate": 9.984025750977339e-06,
"loss": 0.0875,
"step": 317
},
{
"epoch": 2.406779661016949,
"grad_norm": 0.3049258887767792,
"learning_rate": 9.741361994343867e-06,
"loss": 0.0843,
"step": 318
},
{
"epoch": 2.414312617702448,
"grad_norm": 0.21589063107967377,
"learning_rate": 9.501364984720557e-06,
"loss": 0.0588,
"step": 319
},
{
"epoch": 2.4218455743879472,
"grad_norm": 0.2357209175825119,
"learning_rate": 9.264050619597697e-06,
"loss": 0.105,
"step": 320
},
{
"epoch": 2.4218455743879472,
"eval_loss": 0.0942394882440567,
"eval_runtime": 43.9875,
"eval_samples_per_second": 1.705,
"eval_steps_per_second": 0.864,
"step": 320
},
{
"epoch": 2.4218455743879472,
"step": 320,
"total_flos": 4.419854408240333e+16,
"train_loss": 0.537436925212387,
"train_runtime": 3691.1633,
"train_samples_per_second": 0.863,
"train_steps_per_second": 0.107
}
],
"logging_steps": 1,
"max_steps": 396,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 4,
"early_stopping_threshold": 0.0015
},
"attributes": {
"early_stopping_patience_counter": 4
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.419854408240333e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}