{
"best_metric": 0.39052605628967285,
"best_model_checkpoint": "limb_classification_person_crop_seq/t4_2heads_2layers_1e-4lr/checkpoint-1332",
"epoch": 20.0,
"eval_steps": 500,
"global_step": 2960,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16891891891891891,
"grad_norm": 475014.78125,
"learning_rate": 1e-05,
"loss": 1.3888,
"step": 25
},
{
"epoch": 0.33783783783783783,
"grad_norm": 306009.0625,
"learning_rate": 2e-05,
"loss": 1.3279,
"step": 50
},
{
"epoch": 0.5067567567567568,
"grad_norm": 281433.40625,
"learning_rate": 3e-05,
"loss": 1.213,
"step": 75
},
{
"epoch": 0.6756756756756757,
"grad_norm": 455867.53125,
"learning_rate": 4e-05,
"loss": 1.0331,
"step": 100
},
{
"epoch": 0.8445945945945946,
"grad_norm": 353824.03125,
"learning_rate": 5e-05,
"loss": 0.728,
"step": 125
},
{
"epoch": 1.0,
"eval_accuracy": 0.8934131736526946,
"eval_loss": 0.46921050548553467,
"eval_runtime": 58.2623,
"eval_samples_per_second": 14.332,
"eval_steps_per_second": 0.463,
"step": 148
},
{
"epoch": 1.0135135135135136,
"grad_norm": 270820.4375,
"learning_rate": 6e-05,
"loss": 0.5057,
"step": 150
},
{
"epoch": 1.1824324324324325,
"grad_norm": 217310.390625,
"learning_rate": 7e-05,
"loss": 0.4943,
"step": 175
},
{
"epoch": 1.3513513513513513,
"grad_norm": 200080.90625,
"learning_rate": 8e-05,
"loss": 0.4913,
"step": 200
},
{
"epoch": 1.5202702702702702,
"grad_norm": 132091.671875,
"learning_rate": 9e-05,
"loss": 0.4412,
"step": 225
},
{
"epoch": 1.689189189189189,
"grad_norm": 172156.890625,
"learning_rate": 0.0001,
"loss": 0.4462,
"step": 250
},
{
"epoch": 1.8581081081081081,
"grad_norm": 174347.53125,
"learning_rate": 9.907749077490776e-05,
"loss": 0.4398,
"step": 275
},
{
"epoch": 2.0,
"eval_accuracy": 0.9173652694610779,
"eval_loss": 0.4154480993747711,
"eval_runtime": 58.5262,
"eval_samples_per_second": 14.267,
"eval_steps_per_second": 0.461,
"step": 296
},
{
"epoch": 2.027027027027027,
"grad_norm": 360909.90625,
"learning_rate": 9.81549815498155e-05,
"loss": 0.4759,
"step": 300
},
{
"epoch": 2.195945945945946,
"grad_norm": 121115.9375,
"learning_rate": 9.723247232472326e-05,
"loss": 0.4261,
"step": 325
},
{
"epoch": 2.364864864864865,
"grad_norm": 104373.140625,
"learning_rate": 9.6309963099631e-05,
"loss": 0.4587,
"step": 350
},
{
"epoch": 2.5337837837837838,
"grad_norm": 182047.796875,
"learning_rate": 9.538745387453874e-05,
"loss": 0.4301,
"step": 375
},
{
"epoch": 2.7027027027027026,
"grad_norm": 161166.515625,
"learning_rate": 9.44649446494465e-05,
"loss": 0.4534,
"step": 400
},
{
"epoch": 2.8716216216216215,
"grad_norm": 210905.515625,
"learning_rate": 9.354243542435425e-05,
"loss": 0.4817,
"step": 425
},
{
"epoch": 3.0,
"eval_accuracy": 0.911377245508982,
"eval_loss": 0.4182611405849457,
"eval_runtime": 58.1688,
"eval_samples_per_second": 14.355,
"eval_steps_per_second": 0.464,
"step": 444
},
{
"epoch": 3.0405405405405403,
"grad_norm": 153966.765625,
"learning_rate": 9.2619926199262e-05,
"loss": 0.4223,
"step": 450
},
{
"epoch": 3.2094594594594597,
"grad_norm": 138052.3125,
"learning_rate": 9.169741697416975e-05,
"loss": 0.4506,
"step": 475
},
{
"epoch": 3.3783783783783785,
"grad_norm": 201322.265625,
"learning_rate": 9.077490774907749e-05,
"loss": 0.4234,
"step": 500
},
{
"epoch": 3.5472972972972974,
"grad_norm": 77149.171875,
"learning_rate": 8.985239852398525e-05,
"loss": 0.4334,
"step": 525
},
{
"epoch": 3.7162162162162162,
"grad_norm": 102979.2890625,
"learning_rate": 8.892988929889299e-05,
"loss": 0.4486,
"step": 550
},
{
"epoch": 3.885135135135135,
"grad_norm": 121940.96875,
"learning_rate": 8.800738007380073e-05,
"loss": 0.4336,
"step": 575
},
{
"epoch": 4.0,
"eval_accuracy": 0.9005988023952096,
"eval_loss": 0.436523973941803,
"eval_runtime": 57.8806,
"eval_samples_per_second": 14.426,
"eval_steps_per_second": 0.466,
"step": 592
},
{
"epoch": 4.054054054054054,
"grad_norm": 151653.296875,
"learning_rate": 8.708487084870849e-05,
"loss": 0.4831,
"step": 600
},
{
"epoch": 4.222972972972973,
"grad_norm": 41036.046875,
"learning_rate": 8.616236162361624e-05,
"loss": 0.4013,
"step": 625
},
{
"epoch": 4.391891891891892,
"grad_norm": 128231.6796875,
"learning_rate": 8.523985239852399e-05,
"loss": 0.4179,
"step": 650
},
{
"epoch": 4.5608108108108105,
"grad_norm": 127975.6015625,
"learning_rate": 8.431734317343174e-05,
"loss": 0.4437,
"step": 675
},
{
"epoch": 4.72972972972973,
"grad_norm": 189921.671875,
"learning_rate": 8.339483394833948e-05,
"loss": 0.4366,
"step": 700
},
{
"epoch": 4.898648648648649,
"grad_norm": 113625.5859375,
"learning_rate": 8.247232472324724e-05,
"loss": 0.4266,
"step": 725
},
{
"epoch": 5.0,
"eval_accuracy": 0.918562874251497,
"eval_loss": 0.40009012818336487,
"eval_runtime": 57.8362,
"eval_samples_per_second": 14.437,
"eval_steps_per_second": 0.467,
"step": 740
},
{
"epoch": 5.0675675675675675,
"grad_norm": 191172.703125,
"learning_rate": 8.154981549815498e-05,
"loss": 0.3962,
"step": 750
},
{
"epoch": 5.236486486486487,
"grad_norm": 215017.75,
"learning_rate": 8.062730627306274e-05,
"loss": 0.3952,
"step": 775
},
{
"epoch": 5.405405405405405,
"grad_norm": 129469.015625,
"learning_rate": 7.970479704797048e-05,
"loss": 0.4021,
"step": 800
},
{
"epoch": 5.574324324324325,
"grad_norm": 217217.0625,
"learning_rate": 7.878228782287823e-05,
"loss": 0.4164,
"step": 825
},
{
"epoch": 5.743243243243243,
"grad_norm": 134499.53125,
"learning_rate": 7.785977859778598e-05,
"loss": 0.4276,
"step": 850
},
{
"epoch": 5.912162162162162,
"grad_norm": 131273.484375,
"learning_rate": 7.693726937269373e-05,
"loss": 0.4375,
"step": 875
},
{
"epoch": 6.0,
"eval_accuracy": 0.9089820359281438,
"eval_loss": 0.4101567566394806,
"eval_runtime": 57.8044,
"eval_samples_per_second": 14.445,
"eval_steps_per_second": 0.467,
"step": 888
},
{
"epoch": 6.081081081081081,
"grad_norm": 131542.328125,
"learning_rate": 7.601476014760149e-05,
"loss": 0.4422,
"step": 900
},
{
"epoch": 6.25,
"grad_norm": 156622.28125,
"learning_rate": 7.509225092250923e-05,
"loss": 0.4222,
"step": 925
},
{
"epoch": 6.418918918918919,
"grad_norm": 112665.125,
"learning_rate": 7.416974169741697e-05,
"loss": 0.4075,
"step": 950
},
{
"epoch": 6.587837837837838,
"grad_norm": 204795.84375,
"learning_rate": 7.324723247232473e-05,
"loss": 0.4173,
"step": 975
},
{
"epoch": 6.756756756756757,
"grad_norm": 99678.734375,
"learning_rate": 7.232472324723247e-05,
"loss": 0.3997,
"step": 1000
},
{
"epoch": 6.925675675675675,
"grad_norm": 63922.734375,
"learning_rate": 7.140221402214023e-05,
"loss": 0.3893,
"step": 1025
},
{
"epoch": 7.0,
"eval_accuracy": 0.9077844311377246,
"eval_loss": 0.4296913743019104,
"eval_runtime": 58.125,
"eval_samples_per_second": 14.366,
"eval_steps_per_second": 0.465,
"step": 1036
},
{
"epoch": 7.094594594594595,
"grad_norm": 142340.53125,
"learning_rate": 7.047970479704797e-05,
"loss": 0.4311,
"step": 1050
},
{
"epoch": 7.263513513513513,
"grad_norm": 129666.21875,
"learning_rate": 6.955719557195572e-05,
"loss": 0.4136,
"step": 1075
},
{
"epoch": 7.4324324324324325,
"grad_norm": 138277.4375,
"learning_rate": 6.863468634686348e-05,
"loss": 0.4285,
"step": 1100
},
{
"epoch": 7.601351351351351,
"grad_norm": 101775.4609375,
"learning_rate": 6.771217712177122e-05,
"loss": 0.3836,
"step": 1125
},
{
"epoch": 7.77027027027027,
"grad_norm": 118281.390625,
"learning_rate": 6.678966789667896e-05,
"loss": 0.3961,
"step": 1150
},
{
"epoch": 7.9391891891891895,
"grad_norm": 71398.46875,
"learning_rate": 6.586715867158672e-05,
"loss": 0.4142,
"step": 1175
},
{
"epoch": 8.0,
"eval_accuracy": 0.9221556886227545,
"eval_loss": 0.3987300992012024,
"eval_runtime": 58.9585,
"eval_samples_per_second": 14.162,
"eval_steps_per_second": 0.458,
"step": 1184
},
{
"epoch": 8.108108108108109,
"grad_norm": 133750.8125,
"learning_rate": 6.494464944649446e-05,
"loss": 0.3884,
"step": 1200
},
{
"epoch": 8.277027027027026,
"grad_norm": 63878.5078125,
"learning_rate": 6.402214022140222e-05,
"loss": 0.3709,
"step": 1225
},
{
"epoch": 8.445945945945946,
"grad_norm": 80597.9921875,
"learning_rate": 6.309963099630997e-05,
"loss": 0.4209,
"step": 1250
},
{
"epoch": 8.614864864864865,
"grad_norm": 96441.8125,
"learning_rate": 6.217712177121771e-05,
"loss": 0.3643,
"step": 1275
},
{
"epoch": 8.783783783783784,
"grad_norm": 84658.171875,
"learning_rate": 6.125461254612547e-05,
"loss": 0.428,
"step": 1300
},
{
"epoch": 8.952702702702704,
"grad_norm": 196019.765625,
"learning_rate": 6.033210332103322e-05,
"loss": 0.4409,
"step": 1325
},
{
"epoch": 9.0,
"eval_accuracy": 0.9197604790419162,
"eval_loss": 0.39052605628967285,
"eval_runtime": 58.0247,
"eval_samples_per_second": 14.39,
"eval_steps_per_second": 0.465,
"step": 1332
},
{
"epoch": 9.121621621621621,
"grad_norm": 94252.234375,
"learning_rate": 5.940959409594096e-05,
"loss": 0.3959,
"step": 1350
},
{
"epoch": 9.29054054054054,
"grad_norm": 131315.4375,
"learning_rate": 5.848708487084871e-05,
"loss": 0.3938,
"step": 1375
},
{
"epoch": 9.45945945945946,
"grad_norm": 136357.03125,
"learning_rate": 5.756457564575646e-05,
"loss": 0.4022,
"step": 1400
},
{
"epoch": 9.628378378378379,
"grad_norm": 68101.3046875,
"learning_rate": 5.664206642066421e-05,
"loss": 0.3818,
"step": 1425
},
{
"epoch": 9.797297297297296,
"grad_norm": 99187.109375,
"learning_rate": 5.5719557195571956e-05,
"loss": 0.4237,
"step": 1450
},
{
"epoch": 9.966216216216216,
"grad_norm": 141858.5625,
"learning_rate": 5.479704797047971e-05,
"loss": 0.3839,
"step": 1475
},
{
"epoch": 10.0,
"eval_accuracy": 0.9125748502994012,
"eval_loss": 0.3984138071537018,
"eval_runtime": 58.3717,
"eval_samples_per_second": 14.305,
"eval_steps_per_second": 0.463,
"step": 1480
},
{
"epoch": 10.135135135135135,
"grad_norm": 46070.453125,
"learning_rate": 5.387453874538746e-05,
"loss": 0.4018,
"step": 1500
},
{
"epoch": 10.304054054054054,
"grad_norm": 142353.78125,
"learning_rate": 5.295202952029521e-05,
"loss": 0.3919,
"step": 1525
},
{
"epoch": 10.472972972972974,
"grad_norm": 126190.671875,
"learning_rate": 5.202952029520295e-05,
"loss": 0.3608,
"step": 1550
},
{
"epoch": 10.641891891891891,
"grad_norm": 67269.96875,
"learning_rate": 5.11070110701107e-05,
"loss": 0.3706,
"step": 1575
},
{
"epoch": 10.81081081081081,
"grad_norm": 168949.78125,
"learning_rate": 5.018450184501845e-05,
"loss": 0.4158,
"step": 1600
},
{
"epoch": 10.97972972972973,
"grad_norm": 88223.5078125,
"learning_rate": 4.92619926199262e-05,
"loss": 0.3798,
"step": 1625
},
{
"epoch": 11.0,
"eval_accuracy": 0.9137724550898204,
"eval_loss": 0.4074867069721222,
"eval_runtime": 58.8609,
"eval_samples_per_second": 14.186,
"eval_steps_per_second": 0.459,
"step": 1628
},
{
"epoch": 11.14864864864865,
"grad_norm": 125577.375,
"learning_rate": 4.833948339483395e-05,
"loss": 0.4159,
"step": 1650
},
{
"epoch": 11.317567567567568,
"grad_norm": 68579.34375,
"learning_rate": 4.74169741697417e-05,
"loss": 0.3827,
"step": 1675
},
{
"epoch": 11.486486486486486,
"grad_norm": 219718.03125,
"learning_rate": 4.6494464944649444e-05,
"loss": 0.3599,
"step": 1700
},
{
"epoch": 11.655405405405405,
"grad_norm": 137213.984375,
"learning_rate": 4.55719557195572e-05,
"loss": 0.4054,
"step": 1725
},
{
"epoch": 11.824324324324325,
"grad_norm": 172968.296875,
"learning_rate": 4.464944649446495e-05,
"loss": 0.3661,
"step": 1750
},
{
"epoch": 11.993243243243244,
"grad_norm": 73256.3515625,
"learning_rate": 4.37269372693727e-05,
"loss": 0.4059,
"step": 1775
},
{
"epoch": 12.0,
"eval_accuracy": 0.911377245508982,
"eval_loss": 0.3997330963611603,
"eval_runtime": 58.6188,
"eval_samples_per_second": 14.245,
"eval_steps_per_second": 0.461,
"step": 1776
},
{
"epoch": 12.162162162162161,
"grad_norm": 144390.390625,
"learning_rate": 4.280442804428044e-05,
"loss": 0.3767,
"step": 1800
},
{
"epoch": 12.33108108108108,
"grad_norm": 255976.765625,
"learning_rate": 4.1881918819188195e-05,
"loss": 0.3966,
"step": 1825
},
{
"epoch": 12.5,
"grad_norm": 67650.4609375,
"learning_rate": 4.0959409594095944e-05,
"loss": 0.3884,
"step": 1850
},
{
"epoch": 12.66891891891892,
"grad_norm": 93633.8203125,
"learning_rate": 4.003690036900369e-05,
"loss": 0.4177,
"step": 1875
},
{
"epoch": 12.837837837837839,
"grad_norm": 58474.86328125,
"learning_rate": 3.911439114391144e-05,
"loss": 0.3785,
"step": 1900
},
{
"epoch": 13.0,
"eval_accuracy": 0.911377245508982,
"eval_loss": 0.41688984632492065,
"eval_runtime": 58.2464,
"eval_samples_per_second": 14.336,
"eval_steps_per_second": 0.464,
"step": 1924
},
{
"epoch": 13.006756756756756,
"grad_norm": 155545.9375,
"learning_rate": 3.819188191881919e-05,
"loss": 0.3397,
"step": 1925
},
{
"epoch": 13.175675675675675,
"grad_norm": 169663.40625,
"learning_rate": 3.726937269372694e-05,
"loss": 0.3886,
"step": 1950
},
{
"epoch": 13.344594594594595,
"grad_norm": 127929.265625,
"learning_rate": 3.634686346863469e-05,
"loss": 0.35,
"step": 1975
},
{
"epoch": 13.513513513513514,
"grad_norm": 86534.453125,
"learning_rate": 3.542435424354244e-05,
"loss": 0.3688,
"step": 2000
},
{
"epoch": 13.682432432432432,
"grad_norm": 121822.265625,
"learning_rate": 3.4501845018450186e-05,
"loss": 0.3588,
"step": 2025
},
{
"epoch": 13.85135135135135,
"grad_norm": 99066.6875,
"learning_rate": 3.3579335793357934e-05,
"loss": 0.3823,
"step": 2050
},
{
"epoch": 14.0,
"eval_accuracy": 0.9101796407185628,
"eval_loss": 0.42681849002838135,
"eval_runtime": 58.4378,
"eval_samples_per_second": 14.289,
"eval_steps_per_second": 0.462,
"step": 2072
},
{
"epoch": 14.02027027027027,
"grad_norm": 103248.96875,
"learning_rate": 3.265682656826568e-05,
"loss": 0.3704,
"step": 2075
},
{
"epoch": 14.18918918918919,
"grad_norm": 34235.70703125,
"learning_rate": 3.173431734317343e-05,
"loss": 0.3512,
"step": 2100
},
{
"epoch": 14.358108108108109,
"grad_norm": 107891.5,
"learning_rate": 3.081180811808118e-05,
"loss": 0.37,
"step": 2125
},
{
"epoch": 14.527027027027026,
"grad_norm": 87044.3125,
"learning_rate": 2.9889298892988933e-05,
"loss": 0.3614,
"step": 2150
},
{
"epoch": 14.695945945945946,
"grad_norm": 136785.125,
"learning_rate": 2.8966789667896682e-05,
"loss": 0.3745,
"step": 2175
},
{
"epoch": 14.864864864864865,
"grad_norm": 35941.01953125,
"learning_rate": 2.8044280442804427e-05,
"loss": 0.3601,
"step": 2200
},
{
"epoch": 15.0,
"eval_accuracy": 0.9149700598802395,
"eval_loss": 0.411451518535614,
"eval_runtime": 58.4512,
"eval_samples_per_second": 14.285,
"eval_steps_per_second": 0.462,
"step": 2220
},
{
"epoch": 15.033783783783784,
"grad_norm": 139225.703125,
"learning_rate": 2.7121771217712176e-05,
"loss": 0.3677,
"step": 2225
},
{
"epoch": 15.202702702702704,
"grad_norm": 182318.40625,
"learning_rate": 2.619926199261993e-05,
"loss": 0.3788,
"step": 2250
},
{
"epoch": 15.371621621621621,
"grad_norm": 77662.546875,
"learning_rate": 2.5276752767527677e-05,
"loss": 0.3452,
"step": 2275
},
{
"epoch": 15.54054054054054,
"grad_norm": 132939.1875,
"learning_rate": 2.4354243542435426e-05,
"loss": 0.3516,
"step": 2300
},
{
"epoch": 15.70945945945946,
"grad_norm": 142370.671875,
"learning_rate": 2.3431734317343175e-05,
"loss": 0.3403,
"step": 2325
},
{
"epoch": 15.878378378378379,
"grad_norm": 112973.25,
"learning_rate": 2.2509225092250924e-05,
"loss": 0.3725,
"step": 2350
},
{
"epoch": 16.0,
"eval_accuracy": 0.9245508982035928,
"eval_loss": 0.395658403635025,
"eval_runtime": 58.3339,
"eval_samples_per_second": 14.314,
"eval_steps_per_second": 0.463,
"step": 2368
},
{
"epoch": 16.0472972972973,
"grad_norm": 118476.015625,
"learning_rate": 2.1586715867158673e-05,
"loss": 0.3619,
"step": 2375
},
{
"epoch": 16.216216216216218,
"grad_norm": 110621.640625,
"learning_rate": 2.066420664206642e-05,
"loss": 0.3349,
"step": 2400
},
{
"epoch": 16.385135135135137,
"grad_norm": 56486.99609375,
"learning_rate": 1.974169741697417e-05,
"loss": 0.3547,
"step": 2425
},
{
"epoch": 16.554054054054053,
"grad_norm": 126080.7265625,
"learning_rate": 1.881918819188192e-05,
"loss": 0.3613,
"step": 2450
},
{
"epoch": 16.722972972972972,
"grad_norm": 90989.0703125,
"learning_rate": 1.7896678966789668e-05,
"loss": 0.3489,
"step": 2475
},
{
"epoch": 16.89189189189189,
"grad_norm": 95969.421875,
"learning_rate": 1.6974169741697417e-05,
"loss": 0.3321,
"step": 2500
},
{
"epoch": 17.0,
"eval_accuracy": 0.9137724550898204,
"eval_loss": 0.40843331813812256,
"eval_runtime": 57.8018,
"eval_samples_per_second": 14.446,
"eval_steps_per_second": 0.467,
"step": 2516
},
{
"epoch": 17.06081081081081,
"grad_norm": 120454.2734375,
"learning_rate": 1.6051660516605166e-05,
"loss": 0.3636,
"step": 2525
},
{
"epoch": 17.22972972972973,
"grad_norm": 62468.0234375,
"learning_rate": 1.5129151291512916e-05,
"loss": 0.3329,
"step": 2550
},
{
"epoch": 17.39864864864865,
"grad_norm": 50807.1484375,
"learning_rate": 1.4206642066420663e-05,
"loss": 0.3558,
"step": 2575
},
{
"epoch": 17.56756756756757,
"grad_norm": 103581.3984375,
"learning_rate": 1.3284132841328414e-05,
"loss": 0.3357,
"step": 2600
},
{
"epoch": 17.736486486486488,
"grad_norm": 152761.625,
"learning_rate": 1.2361623616236164e-05,
"loss": 0.3171,
"step": 2625
},
{
"epoch": 17.905405405405407,
"grad_norm": 65535.70703125,
"learning_rate": 1.1439114391143913e-05,
"loss": 0.3718,
"step": 2650
},
{
"epoch": 18.0,
"eval_accuracy": 0.9101796407185628,
"eval_loss": 0.4134296774864197,
"eval_runtime": 58.1204,
"eval_samples_per_second": 14.367,
"eval_steps_per_second": 0.465,
"step": 2664
},
{
"epoch": 18.074324324324323,
"grad_norm": 110142.5234375,
"learning_rate": 1.0516605166051662e-05,
"loss": 0.3579,
"step": 2675
},
{
"epoch": 18.243243243243242,
"grad_norm": 58428.9140625,
"learning_rate": 9.59409594095941e-06,
"loss": 0.3102,
"step": 2700
},
{
"epoch": 18.41216216216216,
"grad_norm": 65891.8671875,
"learning_rate": 8.67158671586716e-06,
"loss": 0.3486,
"step": 2725
},
{
"epoch": 18.58108108108108,
"grad_norm": 65948.9296875,
"learning_rate": 7.749077490774908e-06,
"loss": 0.3299,
"step": 2750
},
{
"epoch": 18.75,
"grad_norm": 178761.265625,
"learning_rate": 6.826568265682657e-06,
"loss": 0.3262,
"step": 2775
},
{
"epoch": 18.91891891891892,
"grad_norm": 114625.7265625,
"learning_rate": 5.904059040590406e-06,
"loss": 0.3605,
"step": 2800
},
{
"epoch": 19.0,
"eval_accuracy": 0.9149700598802395,
"eval_loss": 0.4040866792201996,
"eval_runtime": 58.6317,
"eval_samples_per_second": 14.241,
"eval_steps_per_second": 0.461,
"step": 2812
},
{
"epoch": 19.08783783783784,
"grad_norm": 74821.609375,
"learning_rate": 4.981549815498155e-06,
"loss": 0.355,
"step": 2825
},
{
"epoch": 19.256756756756758,
"grad_norm": 59311.046875,
"learning_rate": 4.059040590405904e-06,
"loss": 0.3298,
"step": 2850
},
{
"epoch": 19.425675675675677,
"grad_norm": 140943.78125,
"learning_rate": 3.136531365313653e-06,
"loss": 0.3437,
"step": 2875
},
{
"epoch": 19.594594594594593,
"grad_norm": 130853.2578125,
"learning_rate": 2.2140221402214023e-06,
"loss": 0.317,
"step": 2900
},
{
"epoch": 19.763513513513512,
"grad_norm": 153757.859375,
"learning_rate": 1.2915129151291513e-06,
"loss": 0.3381,
"step": 2925
},
{
"epoch": 19.93243243243243,
"grad_norm": 91314.546875,
"learning_rate": 3.690036900369004e-07,
"loss": 0.3167,
"step": 2950
},
{
"epoch": 20.0,
"eval_accuracy": 0.918562874251497,
"eval_loss": 0.40735724568367004,
"eval_runtime": 58.6299,
"eval_samples_per_second": 14.242,
"eval_steps_per_second": 0.461,
"step": 2960
},
{
"epoch": 20.0,
"step": 2960,
"total_flos": 0.0,
"train_loss": 0.42357230726126077,
"train_runtime": 10904.2025,
"train_samples_per_second": 8.674,
"train_steps_per_second": 0.271
}
],
"logging_steps": 25,
"max_steps": 2960,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}