{
"best_metric": 0.3927362263202667,
"best_model_checkpoint": "limb_classification_person_crop_seq/t4_8heads_1layers_1e-4lr/checkpoint-1776",
"epoch": 20.0,
"eval_steps": 500,
"global_step": 2960,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16891891891891891,
"grad_norm": 358243.25,
"learning_rate": 1e-05,
"loss": 1.3942,
"step": 25
},
{
"epoch": 0.33783783783783783,
"grad_norm": 251778.359375,
"learning_rate": 2e-05,
"loss": 1.3603,
"step": 50
},
{
"epoch": 0.5067567567567568,
"grad_norm": 132885.078125,
"learning_rate": 3e-05,
"loss": 1.2986,
"step": 75
},
{
"epoch": 0.6756756756756757,
"grad_norm": 586557.4375,
"learning_rate": 4e-05,
"loss": 1.1933,
"step": 100
},
{
"epoch": 0.8445945945945946,
"grad_norm": 172983.421875,
"learning_rate": 5e-05,
"loss": 1.0563,
"step": 125
},
{
"epoch": 1.0,
"eval_accuracy": 0.8754491017964072,
"eval_loss": 0.5863036513328552,
"eval_runtime": 60.375,
"eval_samples_per_second": 13.83,
"eval_steps_per_second": 0.447,
"step": 148
},
{
"epoch": 1.0135135135135136,
"grad_norm": 176986.609375,
"learning_rate": 6e-05,
"loss": 0.7536,
"step": 150
},
{
"epoch": 1.1824324324324325,
"grad_norm": 141175.546875,
"learning_rate": 7e-05,
"loss": 0.5917,
"step": 175
},
{
"epoch": 1.3513513513513513,
"grad_norm": 171763.8125,
"learning_rate": 8e-05,
"loss": 0.5229,
"step": 200
},
{
"epoch": 1.5202702702702702,
"grad_norm": 235346.625,
"learning_rate": 9e-05,
"loss": 0.4497,
"step": 225
},
{
"epoch": 1.689189189189189,
"grad_norm": 91142.6953125,
"learning_rate": 0.0001,
"loss": 0.443,
"step": 250
},
{
"epoch": 1.8581081081081081,
"grad_norm": 174055.0,
"learning_rate": 9.907749077490776e-05,
"loss": 0.4235,
"step": 275
},
{
"epoch": 2.0,
"eval_accuracy": 0.8982035928143712,
"eval_loss": 0.4248776435852051,
"eval_runtime": 60.0285,
"eval_samples_per_second": 13.91,
"eval_steps_per_second": 0.45,
"step": 296
},
{
"epoch": 2.027027027027027,
"grad_norm": 457668.3125,
"learning_rate": 9.81549815498155e-05,
"loss": 0.4702,
"step": 300
},
{
"epoch": 2.195945945945946,
"grad_norm": 132193.171875,
"learning_rate": 9.723247232472326e-05,
"loss": 0.4457,
"step": 325
},
{
"epoch": 2.364864864864865,
"grad_norm": 96091.71875,
"learning_rate": 9.6309963099631e-05,
"loss": 0.4403,
"step": 350
},
{
"epoch": 2.5337837837837838,
"grad_norm": 161375.140625,
"learning_rate": 9.538745387453874e-05,
"loss": 0.423,
"step": 375
},
{
"epoch": 2.7027027027027026,
"grad_norm": 130612.0078125,
"learning_rate": 9.44649446494465e-05,
"loss": 0.43,
"step": 400
},
{
"epoch": 2.8716216216216215,
"grad_norm": 238536.953125,
"learning_rate": 9.354243542435425e-05,
"loss": 0.4494,
"step": 425
},
{
"epoch": 3.0,
"eval_accuracy": 0.9089820359281438,
"eval_loss": 0.42482611536979675,
"eval_runtime": 59.7234,
"eval_samples_per_second": 13.981,
"eval_steps_per_second": 0.452,
"step": 444
},
{
"epoch": 3.0405405405405403,
"grad_norm": 60061.19921875,
"learning_rate": 9.2619926199262e-05,
"loss": 0.4362,
"step": 450
},
{
"epoch": 3.2094594594594597,
"grad_norm": 96977.171875,
"learning_rate": 9.169741697416975e-05,
"loss": 0.4302,
"step": 475
},
{
"epoch": 3.3783783783783785,
"grad_norm": 98531.53125,
"learning_rate": 9.077490774907749e-05,
"loss": 0.409,
"step": 500
},
{
"epoch": 3.5472972972972974,
"grad_norm": 101922.609375,
"learning_rate": 8.985239852398525e-05,
"loss": 0.3979,
"step": 525
},
{
"epoch": 3.7162162162162162,
"grad_norm": 108008.8671875,
"learning_rate": 8.892988929889299e-05,
"loss": 0.4353,
"step": 550
},
{
"epoch": 3.885135135135135,
"grad_norm": 93226.5390625,
"learning_rate": 8.800738007380073e-05,
"loss": 0.4183,
"step": 575
},
{
"epoch": 4.0,
"eval_accuracy": 0.8934131736526946,
"eval_loss": 0.43285271525382996,
"eval_runtime": 60.1768,
"eval_samples_per_second": 13.876,
"eval_steps_per_second": 0.449,
"step": 592
},
{
"epoch": 4.054054054054054,
"grad_norm": 104521.6328125,
"learning_rate": 8.708487084870849e-05,
"loss": 0.4753,
"step": 600
},
{
"epoch": 4.222972972972973,
"grad_norm": 67537.2265625,
"learning_rate": 8.616236162361624e-05,
"loss": 0.3917,
"step": 625
},
{
"epoch": 4.391891891891892,
"grad_norm": 150912.1875,
"learning_rate": 8.523985239852399e-05,
"loss": 0.4119,
"step": 650
},
{
"epoch": 4.5608108108108105,
"grad_norm": 73587.5078125,
"learning_rate": 8.431734317343174e-05,
"loss": 0.4247,
"step": 675
},
{
"epoch": 4.72972972972973,
"grad_norm": 115034.75,
"learning_rate": 8.339483394833948e-05,
"loss": 0.4285,
"step": 700
},
{
"epoch": 4.898648648648649,
"grad_norm": 99327.8828125,
"learning_rate": 8.247232472324724e-05,
"loss": 0.4203,
"step": 725
},
{
"epoch": 5.0,
"eval_accuracy": 0.918562874251497,
"eval_loss": 0.39791449904441833,
"eval_runtime": 60.3518,
"eval_samples_per_second": 13.836,
"eval_steps_per_second": 0.447,
"step": 740
},
{
"epoch": 5.0675675675675675,
"grad_norm": 198293.546875,
"learning_rate": 8.154981549815498e-05,
"loss": 0.4155,
"step": 750
},
{
"epoch": 5.236486486486487,
"grad_norm": 82787.34375,
"learning_rate": 8.062730627306274e-05,
"loss": 0.3911,
"step": 775
},
{
"epoch": 5.405405405405405,
"grad_norm": 114961.9765625,
"learning_rate": 7.970479704797048e-05,
"loss": 0.4102,
"step": 800
},
{
"epoch": 5.574324324324325,
"grad_norm": 153899.546875,
"learning_rate": 7.878228782287823e-05,
"loss": 0.419,
"step": 825
},
{
"epoch": 5.743243243243243,
"grad_norm": 108072.7109375,
"learning_rate": 7.785977859778598e-05,
"loss": 0.4064,
"step": 850
},
{
"epoch": 5.912162162162162,
"grad_norm": 73436.2421875,
"learning_rate": 7.693726937269373e-05,
"loss": 0.4073,
"step": 875
},
{
"epoch": 6.0,
"eval_accuracy": 0.9149700598802395,
"eval_loss": 0.39525091648101807,
"eval_runtime": 59.9209,
"eval_samples_per_second": 13.935,
"eval_steps_per_second": 0.451,
"step": 888
},
{
"epoch": 6.081081081081081,
"grad_norm": 109643.8359375,
"learning_rate": 7.601476014760149e-05,
"loss": 0.4227,
"step": 900
},
{
"epoch": 6.25,
"grad_norm": 103467.765625,
"learning_rate": 7.509225092250923e-05,
"loss": 0.3968,
"step": 925
},
{
"epoch": 6.418918918918919,
"grad_norm": 114479.0859375,
"learning_rate": 7.416974169741697e-05,
"loss": 0.4118,
"step": 950
},
{
"epoch": 6.587837837837838,
"grad_norm": 171495.734375,
"learning_rate": 7.324723247232473e-05,
"loss": 0.4134,
"step": 975
},
{
"epoch": 6.756756756756757,
"grad_norm": 166568.453125,
"learning_rate": 7.232472324723247e-05,
"loss": 0.3839,
"step": 1000
},
{
"epoch": 6.925675675675675,
"grad_norm": 68976.8828125,
"learning_rate": 7.140221402214023e-05,
"loss": 0.3927,
"step": 1025
},
{
"epoch": 7.0,
"eval_accuracy": 0.9209580838323354,
"eval_loss": 0.39880236983299255,
"eval_runtime": 60.3856,
"eval_samples_per_second": 13.828,
"eval_steps_per_second": 0.447,
"step": 1036
},
{
"epoch": 7.094594594594595,
"grad_norm": 101431.9921875,
"learning_rate": 7.047970479704797e-05,
"loss": 0.4122,
"step": 1050
},
{
"epoch": 7.263513513513513,
"grad_norm": 44332.38671875,
"learning_rate": 6.955719557195572e-05,
"loss": 0.4072,
"step": 1075
},
{
"epoch": 7.4324324324324325,
"grad_norm": 120717.484375,
"learning_rate": 6.863468634686348e-05,
"loss": 0.4207,
"step": 1100
},
{
"epoch": 7.601351351351351,
"grad_norm": 141817.53125,
"learning_rate": 6.771217712177122e-05,
"loss": 0.3748,
"step": 1125
},
{
"epoch": 7.77027027027027,
"grad_norm": 108992.875,
"learning_rate": 6.678966789667896e-05,
"loss": 0.3973,
"step": 1150
},
{
"epoch": 7.9391891891891895,
"grad_norm": 125266.5390625,
"learning_rate": 6.586715867158672e-05,
"loss": 0.3931,
"step": 1175
},
{
"epoch": 8.0,
"eval_accuracy": 0.9173652694610779,
"eval_loss": 0.4065897464752197,
"eval_runtime": 59.0702,
"eval_samples_per_second": 14.136,
"eval_steps_per_second": 0.457,
"step": 1184
},
{
"epoch": 8.108108108108109,
"grad_norm": 79784.984375,
"learning_rate": 6.494464944649446e-05,
"loss": 0.3869,
"step": 1200
},
{
"epoch": 8.277027027027026,
"grad_norm": 58690.61328125,
"learning_rate": 6.402214022140222e-05,
"loss": 0.3796,
"step": 1225
},
{
"epoch": 8.445945945945946,
"grad_norm": 79699.40625,
"learning_rate": 6.309963099630997e-05,
"loss": 0.4066,
"step": 1250
},
{
"epoch": 8.614864864864865,
"grad_norm": 116672.609375,
"learning_rate": 6.217712177121771e-05,
"loss": 0.3723,
"step": 1275
},
{
"epoch": 8.783783783783784,
"grad_norm": 84205.125,
"learning_rate": 6.125461254612547e-05,
"loss": 0.419,
"step": 1300
},
{
"epoch": 8.952702702702704,
"grad_norm": 194455.796875,
"learning_rate": 6.033210332103322e-05,
"loss": 0.4225,
"step": 1325
},
{
"epoch": 9.0,
"eval_accuracy": 0.9221556886227545,
"eval_loss": 0.39390280842781067,
"eval_runtime": 59.2665,
"eval_samples_per_second": 14.089,
"eval_steps_per_second": 0.456,
"step": 1332
},
{
"epoch": 9.121621621621621,
"grad_norm": 124840.765625,
"learning_rate": 5.940959409594096e-05,
"loss": 0.3988,
"step": 1350
},
{
"epoch": 9.29054054054054,
"grad_norm": 166515.8125,
"learning_rate": 5.848708487084871e-05,
"loss": 0.4045,
"step": 1375
},
{
"epoch": 9.45945945945946,
"grad_norm": 69126.875,
"learning_rate": 5.756457564575646e-05,
"loss": 0.3878,
"step": 1400
},
{
"epoch": 9.628378378378379,
"grad_norm": 93407.296875,
"learning_rate": 5.664206642066421e-05,
"loss": 0.3846,
"step": 1425
},
{
"epoch": 9.797297297297296,
"grad_norm": 70069.578125,
"learning_rate": 5.5719557195571956e-05,
"loss": 0.4031,
"step": 1450
},
{
"epoch": 9.966216216216216,
"grad_norm": 89726.5859375,
"learning_rate": 5.479704797047971e-05,
"loss": 0.3768,
"step": 1475
},
{
"epoch": 10.0,
"eval_accuracy": 0.911377245508982,
"eval_loss": 0.404273122549057,
"eval_runtime": 59.2046,
"eval_samples_per_second": 14.104,
"eval_steps_per_second": 0.456,
"step": 1480
},
{
"epoch": 10.135135135135135,
"grad_norm": 50900.421875,
"learning_rate": 5.387453874538746e-05,
"loss": 0.3971,
"step": 1500
},
{
"epoch": 10.304054054054054,
"grad_norm": 195171.375,
"learning_rate": 5.295202952029521e-05,
"loss": 0.4006,
"step": 1525
},
{
"epoch": 10.472972972972974,
"grad_norm": 133812.78125,
"learning_rate": 5.202952029520295e-05,
"loss": 0.3527,
"step": 1550
},
{
"epoch": 10.641891891891891,
"grad_norm": 77933.1328125,
"learning_rate": 5.11070110701107e-05,
"loss": 0.377,
"step": 1575
},
{
"epoch": 10.81081081081081,
"grad_norm": 141872.90625,
"learning_rate": 5.018450184501845e-05,
"loss": 0.4182,
"step": 1600
},
{
"epoch": 10.97972972972973,
"grad_norm": 105366.984375,
"learning_rate": 4.92619926199262e-05,
"loss": 0.3918,
"step": 1625
},
{
"epoch": 11.0,
"eval_accuracy": 0.9101796407185628,
"eval_loss": 0.4043692350387573,
"eval_runtime": 58.8135,
"eval_samples_per_second": 14.197,
"eval_steps_per_second": 0.459,
"step": 1628
},
{
"epoch": 11.14864864864865,
"grad_norm": 102680.625,
"learning_rate": 4.833948339483395e-05,
"loss": 0.4068,
"step": 1650
},
{
"epoch": 11.317567567567568,
"grad_norm": 54038.0625,
"learning_rate": 4.74169741697417e-05,
"loss": 0.3948,
"step": 1675
},
{
"epoch": 11.486486486486486,
"grad_norm": 104827.5078125,
"learning_rate": 4.6494464944649444e-05,
"loss": 0.3606,
"step": 1700
},
{
"epoch": 11.655405405405405,
"grad_norm": 120384.34375,
"learning_rate": 4.55719557195572e-05,
"loss": 0.3815,
"step": 1725
},
{
"epoch": 11.824324324324325,
"grad_norm": 236821.96875,
"learning_rate": 4.464944649446495e-05,
"loss": 0.3613,
"step": 1750
},
{
"epoch": 11.993243243243244,
"grad_norm": 83007.3359375,
"learning_rate": 4.37269372693727e-05,
"loss": 0.398,
"step": 1775
},
{
"epoch": 12.0,
"eval_accuracy": 0.9173652694610779,
"eval_loss": 0.3927362263202667,
"eval_runtime": 59.8526,
"eval_samples_per_second": 13.951,
"eval_steps_per_second": 0.451,
"step": 1776
},
{
"epoch": 12.162162162162161,
"grad_norm": 114086.703125,
"learning_rate": 4.280442804428044e-05,
"loss": 0.3695,
"step": 1800
},
{
"epoch": 12.33108108108108,
"grad_norm": 189440.21875,
"learning_rate": 4.1881918819188195e-05,
"loss": 0.3917,
"step": 1825
},
{
"epoch": 12.5,
"grad_norm": 77842.9921875,
"learning_rate": 4.0959409594095944e-05,
"loss": 0.3908,
"step": 1850
},
{
"epoch": 12.66891891891892,
"grad_norm": 106204.4609375,
"learning_rate": 4.003690036900369e-05,
"loss": 0.4054,
"step": 1875
},
{
"epoch": 12.837837837837839,
"grad_norm": 79211.3125,
"learning_rate": 3.911439114391144e-05,
"loss": 0.381,
"step": 1900
},
{
"epoch": 13.0,
"eval_accuracy": 0.9173652694610779,
"eval_loss": 0.39785751700401306,
"eval_runtime": 59.3643,
"eval_samples_per_second": 14.066,
"eval_steps_per_second": 0.455,
"step": 1924
},
{
"epoch": 13.006756756756756,
"grad_norm": 96323.5703125,
"learning_rate": 3.819188191881919e-05,
"loss": 0.3419,
"step": 1925
},
{
"epoch": 13.175675675675675,
"grad_norm": 179315.421875,
"learning_rate": 3.726937269372694e-05,
"loss": 0.3987,
"step": 1950
},
{
"epoch": 13.344594594594595,
"grad_norm": 99669.9609375,
"learning_rate": 3.634686346863469e-05,
"loss": 0.3626,
"step": 1975
},
{
"epoch": 13.513513513513514,
"grad_norm": 90959.078125,
"learning_rate": 3.542435424354244e-05,
"loss": 0.3744,
"step": 2000
},
{
"epoch": 13.682432432432432,
"grad_norm": 123356.265625,
"learning_rate": 3.4501845018450186e-05,
"loss": 0.3613,
"step": 2025
},
{
"epoch": 13.85135135135135,
"grad_norm": 62648.0234375,
"learning_rate": 3.3579335793357934e-05,
"loss": 0.3817,
"step": 2050
},
{
"epoch": 14.0,
"eval_accuracy": 0.9137724550898204,
"eval_loss": 0.4134121239185333,
"eval_runtime": 60.0106,
"eval_samples_per_second": 13.914,
"eval_steps_per_second": 0.45,
"step": 2072
},
{
"epoch": 14.02027027027027,
"grad_norm": 104165.796875,
"learning_rate": 3.265682656826568e-05,
"loss": 0.3751,
"step": 2075
},
{
"epoch": 14.18918918918919,
"grad_norm": 69535.90625,
"learning_rate": 3.173431734317343e-05,
"loss": 0.3675,
"step": 2100
},
{
"epoch": 14.358108108108109,
"grad_norm": 103158.5,
"learning_rate": 3.081180811808118e-05,
"loss": 0.39,
"step": 2125
},
{
"epoch": 14.527027027027026,
"grad_norm": 68135.5859375,
"learning_rate": 2.9889298892988933e-05,
"loss": 0.3658,
"step": 2150
},
{
"epoch": 14.695945945945946,
"grad_norm": 167864.78125,
"learning_rate": 2.8966789667896682e-05,
"loss": 0.3857,
"step": 2175
},
{
"epoch": 14.864864864864865,
"grad_norm": 98201.515625,
"learning_rate": 2.8044280442804427e-05,
"loss": 0.3815,
"step": 2200
},
{
"epoch": 15.0,
"eval_accuracy": 0.9173652694610779,
"eval_loss": 0.40676751732826233,
"eval_runtime": 58.4707,
"eval_samples_per_second": 14.281,
"eval_steps_per_second": 0.462,
"step": 2220
},
{
"epoch": 15.033783783783784,
"grad_norm": 186007.90625,
"learning_rate": 2.7121771217712176e-05,
"loss": 0.3717,
"step": 2225
},
{
"epoch": 15.202702702702704,
"grad_norm": 157017.15625,
"learning_rate": 2.619926199261993e-05,
"loss": 0.3717,
"step": 2250
},
{
"epoch": 15.371621621621621,
"grad_norm": 74874.7109375,
"learning_rate": 2.5276752767527677e-05,
"loss": 0.3537,
"step": 2275
},
{
"epoch": 15.54054054054054,
"grad_norm": 92439.0078125,
"learning_rate": 2.4354243542435426e-05,
"loss": 0.3535,
"step": 2300
},
{
"epoch": 15.70945945945946,
"grad_norm": 164555.34375,
"learning_rate": 2.3431734317343175e-05,
"loss": 0.3645,
"step": 2325
},
{
"epoch": 15.878378378378379,
"grad_norm": 91601.59375,
"learning_rate": 2.2509225092250924e-05,
"loss": 0.3774,
"step": 2350
},
{
"epoch": 16.0,
"eval_accuracy": 0.9233532934131736,
"eval_loss": 0.39439886808395386,
"eval_runtime": 58.9596,
"eval_samples_per_second": 14.162,
"eval_steps_per_second": 0.458,
"step": 2368
},
{
"epoch": 16.0472972972973,
"grad_norm": 82947.125,
"learning_rate": 2.1586715867158673e-05,
"loss": 0.3644,
"step": 2375
},
{
"epoch": 16.216216216216218,
"grad_norm": 77509.625,
"learning_rate": 2.066420664206642e-05,
"loss": 0.3418,
"step": 2400
},
{
"epoch": 16.385135135135137,
"grad_norm": 63176.84765625,
"learning_rate": 1.974169741697417e-05,
"loss": 0.3502,
"step": 2425
},
{
"epoch": 16.554054054054053,
"grad_norm": 194086.15625,
"learning_rate": 1.881918819188192e-05,
"loss": 0.3788,
"step": 2450
},
{
"epoch": 16.722972972972972,
"grad_norm": 81391.1171875,
"learning_rate": 1.7896678966789668e-05,
"loss": 0.347,
"step": 2475
},
{
"epoch": 16.89189189189189,
"grad_norm": 34088.36328125,
"learning_rate": 1.6974169741697417e-05,
"loss": 0.3499,
"step": 2500
},
{
"epoch": 17.0,
"eval_accuracy": 0.9209580838323354,
"eval_loss": 0.3971590995788574,
"eval_runtime": 59.2527,
"eval_samples_per_second": 14.092,
"eval_steps_per_second": 0.456,
"step": 2516
},
{
"epoch": 17.06081081081081,
"grad_norm": 151931.296875,
"learning_rate": 1.6051660516605166e-05,
"loss": 0.3927,
"step": 2525
},
{
"epoch": 17.22972972972973,
"grad_norm": 76409.84375,
"learning_rate": 1.5129151291512916e-05,
"loss": 0.3497,
"step": 2550
},
{
"epoch": 17.39864864864865,
"grad_norm": 65936.53125,
"learning_rate": 1.4206642066420663e-05,
"loss": 0.363,
"step": 2575
},
{
"epoch": 17.56756756756757,
"grad_norm": 50398.3203125,
"learning_rate": 1.3284132841328414e-05,
"loss": 0.3499,
"step": 2600
},
{
"epoch": 17.736486486486488,
"grad_norm": 109634.046875,
"learning_rate": 1.2361623616236164e-05,
"loss": 0.337,
"step": 2625
},
{
"epoch": 17.905405405405407,
"grad_norm": 40496.69921875,
"learning_rate": 1.1439114391143913e-05,
"loss": 0.3825,
"step": 2650
},
{
"epoch": 18.0,
"eval_accuracy": 0.9197604790419162,
"eval_loss": 0.4014655351638794,
"eval_runtime": 59.3898,
"eval_samples_per_second": 14.06,
"eval_steps_per_second": 0.455,
"step": 2664
},
{
"epoch": 18.074324324324323,
"grad_norm": 117493.03125,
"learning_rate": 1.0516605166051662e-05,
"loss": 0.3519,
"step": 2675
},
{
"epoch": 18.243243243243242,
"grad_norm": 46973.1875,
"learning_rate": 9.59409594095941e-06,
"loss": 0.3237,
"step": 2700
},
{
"epoch": 18.41216216216216,
"grad_norm": 114745.4609375,
"learning_rate": 8.67158671586716e-06,
"loss": 0.3714,
"step": 2725
},
{
"epoch": 18.58108108108108,
"grad_norm": 38971.9296875,
"learning_rate": 7.749077490774908e-06,
"loss": 0.3231,
"step": 2750
},
{
"epoch": 18.75,
"grad_norm": 156186.84375,
"learning_rate": 6.826568265682657e-06,
"loss": 0.3416,
"step": 2775
},
{
"epoch": 18.91891891891892,
"grad_norm": 76312.9921875,
"learning_rate": 5.904059040590406e-06,
"loss": 0.3731,
"step": 2800
},
{
"epoch": 19.0,
"eval_accuracy": 0.918562874251497,
"eval_loss": 0.39593929052352905,
"eval_runtime": 58.8099,
"eval_samples_per_second": 14.198,
"eval_steps_per_second": 0.459,
"step": 2812
},
{
"epoch": 19.08783783783784,
"grad_norm": 91183.2109375,
"learning_rate": 4.981549815498155e-06,
"loss": 0.3723,
"step": 2825
},
{
"epoch": 19.256756756756758,
"grad_norm": 80640.0234375,
"learning_rate": 4.059040590405904e-06,
"loss": 0.3349,
"step": 2850
},
{
"epoch": 19.425675675675677,
"grad_norm": 89848.125,
"learning_rate": 3.136531365313653e-06,
"loss": 0.3679,
"step": 2875
},
{
"epoch": 19.594594594594593,
"grad_norm": 107155.53125,
"learning_rate": 2.2140221402214023e-06,
"loss": 0.3463,
"step": 2900
},
{
"epoch": 19.763513513513512,
"grad_norm": 128204.28125,
"learning_rate": 1.2915129151291513e-06,
"loss": 0.3549,
"step": 2925
},
{
"epoch": 19.93243243243243,
"grad_norm": 91720.7734375,
"learning_rate": 3.690036900369004e-07,
"loss": 0.323,
"step": 2950
},
{
"epoch": 20.0,
"eval_accuracy": 0.9209580838323354,
"eval_loss": 0.3986169397830963,
"eval_runtime": 58.2229,
"eval_samples_per_second": 14.341,
"eval_steps_per_second": 0.464,
"step": 2960
},
{
"epoch": 20.0,
"step": 2960,
"total_flos": 0.0,
"train_loss": 0.43191270973231344,
"train_runtime": 11059.7547,
"train_samples_per_second": 8.552,
"train_steps_per_second": 0.268
}
],
"logging_steps": 25,
"max_steps": 2960,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}