{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.998790810157194,
"eval_steps": 414,
"global_step": 5787,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006049606775559589,
"eval_loss": 1.4577645063400269,
"eval_runtime": 63.3298,
"eval_samples_per_second": 3.0,
"eval_steps_per_second": 3.0,
"step": 1
},
{
"epoch": 0.006049606775559589,
"grad_norm": 1.2785438299179077,
"learning_rate": 3e-06,
"loss": 1.374,
"step": 10
},
{
"epoch": 0.012099213551119177,
"grad_norm": 0.7358320355415344,
"learning_rate": 6e-06,
"loss": 1.2219,
"step": 20
},
{
"epoch": 0.018148820326678767,
"grad_norm": 3.1359198093414307,
"learning_rate": 9e-06,
"loss": 1.06,
"step": 30
},
{
"epoch": 0.024198427102238355,
"grad_norm": 1.704594612121582,
"learning_rate": 1.2e-05,
"loss": 1.4456,
"step": 40
},
{
"epoch": 0.030248033877797943,
"grad_norm": 1.833866834640503,
"learning_rate": 1.5e-05,
"loss": 1.1583,
"step": 50
},
{
"epoch": 0.036297640653357534,
"grad_norm": 1.3696911334991455,
"learning_rate": 1.8e-05,
"loss": 1.3531,
"step": 60
},
{
"epoch": 0.04234724742891712,
"grad_norm": 2.034799575805664,
"learning_rate": 2.1e-05,
"loss": 1.3092,
"step": 70
},
{
"epoch": 0.04839685420447671,
"grad_norm": 1.2183700799942017,
"learning_rate": 2.4e-05,
"loss": 1.3778,
"step": 80
},
{
"epoch": 0.0544464609800363,
"grad_norm": 2.252204418182373,
"learning_rate": 2.7000000000000002e-05,
"loss": 1.354,
"step": 90
},
{
"epoch": 0.060496067755595885,
"grad_norm": 2.8017024993896484,
"learning_rate": 3e-05,
"loss": 1.0258,
"step": 100
},
{
"epoch": 0.06654567453115548,
"grad_norm": 1.4654804468154907,
"learning_rate": 2.9999957023765772e-05,
"loss": 1.0931,
"step": 110
},
{
"epoch": 0.07259528130671507,
"grad_norm": 1.917343258857727,
"learning_rate": 2.999982809530935e-05,
"loss": 1.2745,
"step": 120
},
{
"epoch": 0.07864488808227466,
"grad_norm": 1.5257304906845093,
"learning_rate": 2.9999613215369516e-05,
"loss": 0.9593,
"step": 130
},
{
"epoch": 0.08469449485783424,
"grad_norm": 0.9834230542182922,
"learning_rate": 2.9999312385177566e-05,
"loss": 0.9462,
"step": 140
},
{
"epoch": 0.09074410163339383,
"grad_norm": 1.3462891578674316,
"learning_rate": 2.9998925606457308e-05,
"loss": 0.9853,
"step": 150
},
{
"epoch": 0.09679370840895342,
"grad_norm": 1.9642091989517212,
"learning_rate": 2.999845288142505e-05,
"loss": 1.1006,
"step": 160
},
{
"epoch": 0.102843315184513,
"grad_norm": 1.897597312927246,
"learning_rate": 2.9997894212789577e-05,
"loss": 1.1229,
"step": 170
},
{
"epoch": 0.1088929219600726,
"grad_norm": 1.2907172441482544,
"learning_rate": 2.9997249603752158e-05,
"loss": 0.9851,
"step": 180
},
{
"epoch": 0.11494252873563218,
"grad_norm": 1.7979787588119507,
"learning_rate": 2.9996519058006505e-05,
"loss": 1.0547,
"step": 190
},
{
"epoch": 0.12099213551119177,
"grad_norm": 2.189631462097168,
"learning_rate": 2.999570257973877e-05,
"loss": 1.1659,
"step": 200
},
{
"epoch": 0.12704174228675136,
"grad_norm": 1.6810208559036255,
"learning_rate": 2.9994800173627507e-05,
"loss": 0.8911,
"step": 210
},
{
"epoch": 0.13309134906231096,
"grad_norm": 1.385323405265808,
"learning_rate": 2.9993811844843646e-05,
"loss": 1.0701,
"step": 220
},
{
"epoch": 0.13914095583787053,
"grad_norm": 1.4298688173294067,
"learning_rate": 2.999273759905048e-05,
"loss": 1.0507,
"step": 230
},
{
"epoch": 0.14519056261343014,
"grad_norm": 1.5514591932296753,
"learning_rate": 2.9991577442403607e-05,
"loss": 1.1145,
"step": 240
},
{
"epoch": 0.1512401693889897,
"grad_norm": 1.8914713859558105,
"learning_rate": 2.999033138155093e-05,
"loss": 0.7448,
"step": 250
},
{
"epoch": 0.1572897761645493,
"grad_norm": 1.1297534704208374,
"learning_rate": 2.9988999423632562e-05,
"loss": 1.0863,
"step": 260
},
{
"epoch": 0.16333938294010888,
"grad_norm": 2.141075849533081,
"learning_rate": 2.998758157628086e-05,
"loss": 1.0313,
"step": 270
},
{
"epoch": 0.1693889897156685,
"grad_norm": 1.38814115524292,
"learning_rate": 2.998607784762031e-05,
"loss": 1.1764,
"step": 280
},
{
"epoch": 0.17543859649122806,
"grad_norm": 1.4910017251968384,
"learning_rate": 2.9984488246267533e-05,
"loss": 1.2147,
"step": 290
},
{
"epoch": 0.18148820326678766,
"grad_norm": 2.273433208465576,
"learning_rate": 2.99828127813312e-05,
"loss": 1.1703,
"step": 300
},
{
"epoch": 0.18753781004234724,
"grad_norm": 2.22914981842041,
"learning_rate": 2.9981051462412002e-05,
"loss": 1.1059,
"step": 310
},
{
"epoch": 0.19358741681790684,
"grad_norm": 1.5342557430267334,
"learning_rate": 2.997920429960259e-05,
"loss": 1.0767,
"step": 320
},
{
"epoch": 0.1996370235934664,
"grad_norm": 1.8331257104873657,
"learning_rate": 2.9977271303487513e-05,
"loss": 1.2202,
"step": 330
},
{
"epoch": 0.205686630369026,
"grad_norm": 3.037759304046631,
"learning_rate": 2.9975252485143145e-05,
"loss": 0.9495,
"step": 340
},
{
"epoch": 0.21173623714458562,
"grad_norm": 1.4192079305648804,
"learning_rate": 2.9973147856137658e-05,
"loss": 1.1555,
"step": 350
},
{
"epoch": 0.2177858439201452,
"grad_norm": 2.3369932174682617,
"learning_rate": 2.997095742853092e-05,
"loss": 1.073,
"step": 360
},
{
"epoch": 0.2238354506957048,
"grad_norm": 2.341386556625366,
"learning_rate": 2.9968681214874442e-05,
"loss": 1.2284,
"step": 370
},
{
"epoch": 0.22988505747126436,
"grad_norm": 1.4051792621612549,
"learning_rate": 2.9966319228211296e-05,
"loss": 1.2316,
"step": 380
},
{
"epoch": 0.23593466424682397,
"grad_norm": 2.0274314880371094,
"learning_rate": 2.996387148207607e-05,
"loss": 1.1257,
"step": 390
},
{
"epoch": 0.24198427102238354,
"grad_norm": 1.4432406425476074,
"learning_rate": 2.9961337990494733e-05,
"loss": 1.0545,
"step": 400
},
{
"epoch": 0.24803387779794314,
"grad_norm": 1.7953746318817139,
"learning_rate": 2.995871876798462e-05,
"loss": 0.8797,
"step": 410
},
{
"epoch": 0.25045372050816694,
"eval_loss": 1.0715683698654175,
"eval_runtime": 64.6596,
"eval_samples_per_second": 2.938,
"eval_steps_per_second": 2.938,
"step": 414
},
{
"epoch": 0.2540834845735027,
"grad_norm": 1.0973708629608154,
"learning_rate": 2.995601382955431e-05,
"loss": 0.9573,
"step": 420
},
{
"epoch": 0.2601330913490623,
"grad_norm": 1.390816569328308,
"learning_rate": 2.9953223190703535e-05,
"loss": 1.1024,
"step": 430
},
{
"epoch": 0.2661826981246219,
"grad_norm": 1.5752558708190918,
"learning_rate": 2.9950346867423124e-05,
"loss": 1.2828,
"step": 440
},
{
"epoch": 0.27223230490018147,
"grad_norm": 3.318897247314453,
"learning_rate": 2.9947384876194876e-05,
"loss": 0.9254,
"step": 450
},
{
"epoch": 0.27828191167574107,
"grad_norm": 2.932630777359009,
"learning_rate": 2.994433723399149e-05,
"loss": 1.3966,
"step": 460
},
{
"epoch": 0.28433151845130067,
"grad_norm": 1.24905264377594,
"learning_rate": 2.9941203958276463e-05,
"loss": 1.0706,
"step": 470
},
{
"epoch": 0.29038112522686027,
"grad_norm": 1.6373075246810913,
"learning_rate": 2.993798506700397e-05,
"loss": 1.0972,
"step": 480
},
{
"epoch": 0.2964307320024198,
"grad_norm": 1.8339027166366577,
"learning_rate": 2.9934680578618796e-05,
"loss": 1.2781,
"step": 490
},
{
"epoch": 0.3024803387779794,
"grad_norm": 1.5762040615081787,
"learning_rate": 2.9931290512056206e-05,
"loss": 0.9826,
"step": 500
},
{
"epoch": 0.308529945553539,
"grad_norm": 1.3458601236343384,
"learning_rate": 2.9927814886741828e-05,
"loss": 1.128,
"step": 510
},
{
"epoch": 0.3145795523290986,
"grad_norm": 2.5347025394439697,
"learning_rate": 2.9924253722591575e-05,
"loss": 1.0372,
"step": 520
},
{
"epoch": 0.3206291591046582,
"grad_norm": 1.329092025756836,
"learning_rate": 2.99206070400115e-05,
"loss": 0.9917,
"step": 530
},
{
"epoch": 0.32667876588021777,
"grad_norm": 1.4007670879364014,
"learning_rate": 2.9916874859897704e-05,
"loss": 0.9245,
"step": 540
},
{
"epoch": 0.33272837265577737,
"grad_norm": 1.8608225584030151,
"learning_rate": 2.9913057203636176e-05,
"loss": 1.0985,
"step": 550
},
{
"epoch": 0.338777979431337,
"grad_norm": 1.2260061502456665,
"learning_rate": 2.990915409310273e-05,
"loss": 1.1108,
"step": 560
},
{
"epoch": 0.3448275862068966,
"grad_norm": 4.154330730438232,
"learning_rate": 2.990516555066282e-05,
"loss": 0.9983,
"step": 570
},
{
"epoch": 0.3508771929824561,
"grad_norm": 1.5533702373504639,
"learning_rate": 2.990109159917146e-05,
"loss": 0.9014,
"step": 580
},
{
"epoch": 0.3569267997580157,
"grad_norm": 1.5335776805877686,
"learning_rate": 2.9896932261973056e-05,
"loss": 1.0543,
"step": 590
},
{
"epoch": 0.3629764065335753,
"grad_norm": 5.594316482543945,
"learning_rate": 2.98926875629013e-05,
"loss": 1.0405,
"step": 600
},
{
"epoch": 0.3690260133091349,
"grad_norm": 1.4797818660736084,
"learning_rate": 2.9888357526279008e-05,
"loss": 0.9019,
"step": 610
},
{
"epoch": 0.37507562008469447,
"grad_norm": 0.40816277265548706,
"learning_rate": 2.9883942176918015e-05,
"loss": 0.8362,
"step": 620
},
{
"epoch": 0.3811252268602541,
"grad_norm": 1.1441352367401123,
"learning_rate": 2.9879441540118988e-05,
"loss": 1.0765,
"step": 630
},
{
"epoch": 0.3871748336358137,
"grad_norm": 1.28082275390625,
"learning_rate": 2.9874855641671325e-05,
"loss": 0.8888,
"step": 640
},
{
"epoch": 0.3932244404113733,
"grad_norm": 1.6251568794250488,
"learning_rate": 2.987018450785297e-05,
"loss": 1.2763,
"step": 650
},
{
"epoch": 0.3992740471869328,
"grad_norm": 3.480515956878662,
"learning_rate": 2.9865428165430292e-05,
"loss": 1.191,
"step": 660
},
{
"epoch": 0.4053236539624924,
"grad_norm": 1.7192935943603516,
"learning_rate": 2.9860586641657922e-05,
"loss": 1.1641,
"step": 670
},
{
"epoch": 0.411373260738052,
"grad_norm": 3.2280263900756836,
"learning_rate": 2.9855659964278584e-05,
"loss": 1.108,
"step": 680
},
{
"epoch": 0.41742286751361163,
"grad_norm": 3.256509304046631,
"learning_rate": 2.9850648161522945e-05,
"loss": 1.0619,
"step": 690
},
{
"epoch": 0.42347247428917123,
"grad_norm": 0.8531071543693542,
"learning_rate": 2.984555126210946e-05,
"loss": 0.9536,
"step": 700
},
{
"epoch": 0.4295220810647308,
"grad_norm": 1.469575047492981,
"learning_rate": 2.984036929524421e-05,
"loss": 1.0315,
"step": 710
},
{
"epoch": 0.4355716878402904,
"grad_norm": 1.530128836631775,
"learning_rate": 2.983510229062071e-05,
"loss": 0.9523,
"step": 720
},
{
"epoch": 0.44162129461585,
"grad_norm": 1.4602179527282715,
"learning_rate": 2.982975027841977e-05,
"loss": 0.9123,
"step": 730
},
{
"epoch": 0.4476709013914096,
"grad_norm": 1.4414767026901245,
"learning_rate": 2.9824313289309294e-05,
"loss": 0.8937,
"step": 740
},
{
"epoch": 0.4537205081669691,
"grad_norm": 1.5764180421829224,
"learning_rate": 2.9818791354444127e-05,
"loss": 0.8378,
"step": 750
},
{
"epoch": 0.45977011494252873,
"grad_norm": 1.798797369003296,
"learning_rate": 2.981318450546587e-05,
"loss": 1.1008,
"step": 760
},
{
"epoch": 0.46581972171808833,
"grad_norm": 2.5538880825042725,
"learning_rate": 2.9807492774502675e-05,
"loss": 1.0576,
"step": 770
},
{
"epoch": 0.47186932849364793,
"grad_norm": 1.5585602521896362,
"learning_rate": 2.9801716194169113e-05,
"loss": 1.0796,
"step": 780
},
{
"epoch": 0.4779189352692075,
"grad_norm": 1.1259243488311768,
"learning_rate": 2.9795854797565933e-05,
"loss": 0.8746,
"step": 790
},
{
"epoch": 0.4839685420447671,
"grad_norm": 2.3184139728546143,
"learning_rate": 2.97899086182799e-05,
"loss": 0.9604,
"step": 800
},
{
"epoch": 0.4900181488203267,
"grad_norm": 2.184471845626831,
"learning_rate": 2.9783877690383607e-05,
"loss": 1.1267,
"step": 810
},
{
"epoch": 0.4960677555958863,
"grad_norm": 1.2299617528915405,
"learning_rate": 2.9777762048435256e-05,
"loss": 1.1073,
"step": 820
},
{
"epoch": 0.5009074410163339,
"eval_loss": 1.0542737245559692,
"eval_runtime": 64.7633,
"eval_samples_per_second": 2.934,
"eval_steps_per_second": 2.934,
"step": 828
},
{
"epoch": 0.5021173623714459,
"grad_norm": 1.2418574094772339,
"learning_rate": 2.9771561727478487e-05,
"loss": 0.9079,
"step": 830
},
{
"epoch": 0.5081669691470054,
"grad_norm": 1.4546465873718262,
"learning_rate": 2.976527676304216e-05,
"loss": 0.9962,
"step": 840
},
{
"epoch": 0.514216575922565,
"grad_norm": 1.7091883420944214,
"learning_rate": 2.9758907191140147e-05,
"loss": 1.0415,
"step": 850
},
{
"epoch": 0.5202661826981246,
"grad_norm": 1.6179261207580566,
"learning_rate": 2.9752453048271152e-05,
"loss": 1.008,
"step": 860
},
{
"epoch": 0.5263157894736842,
"grad_norm": 1.8634966611862183,
"learning_rate": 2.974591437141847e-05,
"loss": 1.1791,
"step": 870
},
{
"epoch": 0.5323653962492438,
"grad_norm": 1.787575125694275,
"learning_rate": 2.9739291198049802e-05,
"loss": 1.1416,
"step": 880
},
{
"epoch": 0.5384150030248034,
"grad_norm": 0.6841387152671814,
"learning_rate": 2.9732583566117016e-05,
"loss": 1.0713,
"step": 890
},
{
"epoch": 0.5444646098003629,
"grad_norm": 1.1443063020706177,
"learning_rate": 2.972579151405595e-05,
"loss": 0.9909,
"step": 900
},
{
"epoch": 0.5505142165759226,
"grad_norm": 1.3013931512832642,
"learning_rate": 2.9718915080786173e-05,
"loss": 1.035,
"step": 910
},
{
"epoch": 0.5565638233514821,
"grad_norm": 3.635468006134033,
"learning_rate": 2.971195430571079e-05,
"loss": 1.1064,
"step": 920
},
{
"epoch": 0.5626134301270418,
"grad_norm": 2.095000743865967,
"learning_rate": 2.970490922871618e-05,
"loss": 1.0936,
"step": 930
},
{
"epoch": 0.5686630369026013,
"grad_norm": 1.4885168075561523,
"learning_rate": 2.9697779890171798e-05,
"loss": 1.0447,
"step": 940
},
{
"epoch": 0.5747126436781609,
"grad_norm": 1.1731853485107422,
"learning_rate": 2.969056633092992e-05,
"loss": 1.0816,
"step": 950
},
{
"epoch": 0.5807622504537205,
"grad_norm": 1.6034612655639648,
"learning_rate": 2.9683268592325437e-05,
"loss": 1.1776,
"step": 960
},
{
"epoch": 0.5868118572292801,
"grad_norm": 1.821656346321106,
"learning_rate": 2.967588671617559e-05,
"loss": 0.8754,
"step": 970
},
{
"epoch": 0.5928614640048396,
"grad_norm": 0.9973627328872681,
"learning_rate": 2.9668420744779733e-05,
"loss": 1.0242,
"step": 980
},
{
"epoch": 0.5989110707803993,
"grad_norm": 1.5308775901794434,
"learning_rate": 2.9660870720919127e-05,
"loss": 1.0948,
"step": 990
},
{
"epoch": 0.6049606775559588,
"grad_norm": 1.8341882228851318,
"learning_rate": 2.965323668785664e-05,
"loss": 1.0485,
"step": 1000
},
{
"epoch": 0.6110102843315185,
"grad_norm": 1.383766531944275,
"learning_rate": 2.9645518689336543e-05,
"loss": 0.9551,
"step": 1010
},
{
"epoch": 0.617059891107078,
"grad_norm": 1.5288898944854736,
"learning_rate": 2.9637716769584235e-05,
"loss": 0.7649,
"step": 1020
},
{
"epoch": 0.6231094978826376,
"grad_norm": 1.0173107385635376,
"learning_rate": 2.9629830973306e-05,
"loss": 1.2116,
"step": 1030
},
{
"epoch": 0.6291591046581972,
"grad_norm": 19.645925521850586,
"learning_rate": 2.9621861345688754e-05,
"loss": 1.0067,
"step": 1040
},
{
"epoch": 0.6352087114337568,
"grad_norm": 1.1569609642028809,
"learning_rate": 2.9613807932399767e-05,
"loss": 0.9409,
"step": 1050
},
{
"epoch": 0.6412583182093164,
"grad_norm": 4.655813694000244,
"learning_rate": 2.9605670779586426e-05,
"loss": 1.0169,
"step": 1060
},
{
"epoch": 0.647307924984876,
"grad_norm": 1.9381617307662964,
"learning_rate": 2.9597449933875955e-05,
"loss": 1.1012,
"step": 1070
},
{
"epoch": 0.6533575317604355,
"grad_norm": 1.3556163311004639,
"learning_rate": 2.958914544237515e-05,
"loss": 0.8116,
"step": 1080
},
{
"epoch": 0.6594071385359952,
"grad_norm": 1.453025460243225,
"learning_rate": 2.9580757352670125e-05,
"loss": 1.0269,
"step": 1090
},
{
"epoch": 0.6654567453115547,
"grad_norm": 1.458400011062622,
"learning_rate": 2.9572285712825998e-05,
"loss": 1.0251,
"step": 1100
},
{
"epoch": 0.6715063520871143,
"grad_norm": 2.5511817932128906,
"learning_rate": 2.9563730571386676e-05,
"loss": 1.0311,
"step": 1110
},
{
"epoch": 0.677555958862674,
"grad_norm": 1.1598625183105469,
"learning_rate": 2.9555091977374512e-05,
"loss": 0.9025,
"step": 1120
},
{
"epoch": 0.6836055656382335,
"grad_norm": 1.7414718866348267,
"learning_rate": 2.9546369980290082e-05,
"loss": 0.9769,
"step": 1130
},
{
"epoch": 0.6896551724137931,
"grad_norm": 1.4089117050170898,
"learning_rate": 2.953756463011186e-05,
"loss": 0.9785,
"step": 1140
},
{
"epoch": 0.6957047791893527,
"grad_norm": 1.2085853815078735,
"learning_rate": 2.9528675977295954e-05,
"loss": 1.071,
"step": 1150
},
{
"epoch": 0.7017543859649122,
"grad_norm": 1.2800265550613403,
"learning_rate": 2.9519704072775806e-05,
"loss": 1.0059,
"step": 1160
},
{
"epoch": 0.7078039927404719,
"grad_norm": 1.5352321863174438,
"learning_rate": 2.9510648967961903e-05,
"loss": 1.0462,
"step": 1170
},
{
"epoch": 0.7138535995160314,
"grad_norm": 1.59364652633667,
"learning_rate": 2.9501510714741494e-05,
"loss": 1.0539,
"step": 1180
},
{
"epoch": 0.719903206291591,
"grad_norm": 0.7905394434928894,
"learning_rate": 2.9492289365478263e-05,
"loss": 1.085,
"step": 1190
},
{
"epoch": 0.7259528130671506,
"grad_norm": 1.8049817085266113,
"learning_rate": 2.9482984973012064e-05,
"loss": 0.9335,
"step": 1200
},
{
"epoch": 0.7320024198427102,
"grad_norm": 1.6577391624450684,
"learning_rate": 2.9473597590658595e-05,
"loss": 1.062,
"step": 1210
},
{
"epoch": 0.7380520266182699,
"grad_norm": 2.00862717628479,
"learning_rate": 2.9464127272209107e-05,
"loss": 1.265,
"step": 1220
},
{
"epoch": 0.7441016333938294,
"grad_norm": 1.7227792739868164,
"learning_rate": 2.9454574071930075e-05,
"loss": 1.198,
"step": 1230
},
{
"epoch": 0.7501512401693889,
"grad_norm": 2.7444136142730713,
"learning_rate": 2.9444938044562916e-05,
"loss": 0.9352,
"step": 1240
},
{
"epoch": 0.7513611615245009,
"eval_loss": 1.0343616008758545,
"eval_runtime": 64.7966,
"eval_samples_per_second": 2.932,
"eval_steps_per_second": 2.932,
"step": 1242
},
{
"epoch": 0.7562008469449486,
"grad_norm": 2.819312572479248,
"learning_rate": 2.9435219245323653e-05,
"loss": 1.0105,
"step": 1250
},
{
"epoch": 0.7622504537205081,
"grad_norm": 1.6756153106689453,
"learning_rate": 2.9425417729902596e-05,
"loss": 0.9951,
"step": 1260
},
{
"epoch": 0.7683000604960678,
"grad_norm": 2.266688823699951,
"learning_rate": 2.941553355446405e-05,
"loss": 1.2856,
"step": 1270
},
{
"epoch": 0.7743496672716274,
"grad_norm": 1.9197466373443604,
"learning_rate": 2.940556677564596e-05,
"loss": 1.025,
"step": 1280
},
{
"epoch": 0.7803992740471869,
"grad_norm": 1.2381973266601562,
"learning_rate": 2.939551745055962e-05,
"loss": 0.9614,
"step": 1290
},
{
"epoch": 0.7864488808227466,
"grad_norm": 1.3409382104873657,
"learning_rate": 2.9385385636789308e-05,
"loss": 1.0783,
"step": 1300
},
{
"epoch": 0.7924984875983061,
"grad_norm": 1.3915735483169556,
"learning_rate": 2.937517139239198e-05,
"loss": 0.9862,
"step": 1310
},
{
"epoch": 0.7985480943738656,
"grad_norm": 3.1286187171936035,
"learning_rate": 2.936487477589695e-05,
"loss": 1.0044,
"step": 1320
},
{
"epoch": 0.8045977011494253,
"grad_norm": 2.222550868988037,
"learning_rate": 2.9354495846305513e-05,
"loss": 0.9816,
"step": 1330
},
{
"epoch": 0.8106473079249849,
"grad_norm": 1.239647626876831,
"learning_rate": 2.934403466309065e-05,
"loss": 1.1207,
"step": 1340
},
{
"epoch": 0.8166969147005445,
"grad_norm": 1.5290794372558594,
"learning_rate": 2.933349128619666e-05,
"loss": 1.2575,
"step": 1350
},
{
"epoch": 0.822746521476104,
"grad_norm": 1.5801492929458618,
"learning_rate": 2.932286577603883e-05,
"loss": 0.9342,
"step": 1360
},
{
"epoch": 0.8287961282516636,
"grad_norm": 2.460664749145508,
"learning_rate": 2.9312158193503076e-05,
"loss": 0.9571,
"step": 1370
},
{
"epoch": 0.8348457350272233,
"grad_norm": 1.5209215879440308,
"learning_rate": 2.9301368599945616e-05,
"loss": 1.1786,
"step": 1380
},
{
"epoch": 0.8408953418027828,
"grad_norm": 1.3077425956726074,
"learning_rate": 2.929049705719259e-05,
"loss": 0.8198,
"step": 1390
},
{
"epoch": 0.8469449485783425,
"grad_norm": 1.7702195644378662,
"learning_rate": 2.927954362753973e-05,
"loss": 0.978,
"step": 1400
},
{
"epoch": 0.852994555353902,
"grad_norm": 2.0915708541870117,
"learning_rate": 2.926850837375199e-05,
"loss": 1.0862,
"step": 1410
},
{
"epoch": 0.8590441621294616,
"grad_norm": 1.815843105316162,
"learning_rate": 2.9257391359063196e-05,
"loss": 1.1928,
"step": 1420
},
{
"epoch": 0.8650937689050212,
"grad_norm": 2.303849697113037,
"learning_rate": 2.924619264717566e-05,
"loss": 1.0549,
"step": 1430
},
{
"epoch": 0.8711433756805808,
"grad_norm": 1.3767309188842773,
"learning_rate": 2.9234912302259855e-05,
"loss": 0.7791,
"step": 1440
},
{
"epoch": 0.8771929824561403,
"grad_norm": 1.6787207126617432,
"learning_rate": 2.9223550388954005e-05,
"loss": 1.0006,
"step": 1450
},
{
"epoch": 0.8832425892317,
"grad_norm": 1.1419081687927246,
"learning_rate": 2.9212106972363748e-05,
"loss": 1.1581,
"step": 1460
},
{
"epoch": 0.8892921960072595,
"grad_norm": 1.349205732345581,
"learning_rate": 2.9200582118061743e-05,
"loss": 0.9921,
"step": 1470
},
{
"epoch": 0.8953418027828192,
"grad_norm": 1.474130392074585,
"learning_rate": 2.9188975892087303e-05,
"loss": 0.8283,
"step": 1480
},
{
"epoch": 0.9013914095583787,
"grad_norm": 1.0713589191436768,
"learning_rate": 2.917728836094601e-05,
"loss": 1.0369,
"step": 1490
},
{
"epoch": 0.9074410163339383,
"grad_norm": 1.2351691722869873,
"learning_rate": 2.9165519591609345e-05,
"loss": 0.9484,
"step": 1500
},
{
"epoch": 0.9134906231094979,
"grad_norm": 0.9272758364677429,
"learning_rate": 2.9153669651514287e-05,
"loss": 0.9043,
"step": 1510
},
{
"epoch": 0.9195402298850575,
"grad_norm": 2.782837390899658,
"learning_rate": 2.9141738608562947e-05,
"loss": 1.0876,
"step": 1520
},
{
"epoch": 0.925589836660617,
"grad_norm": 1.687536358833313,
"learning_rate": 2.9129726531122165e-05,
"loss": 0.9161,
"step": 1530
},
{
"epoch": 0.9316394434361767,
"grad_norm": 2.1093292236328125,
"learning_rate": 2.9117633488023116e-05,
"loss": 0.9388,
"step": 1540
},
{
"epoch": 0.9376890502117362,
"grad_norm": 1.5271527767181396,
"learning_rate": 2.9105459548560938e-05,
"loss": 0.9626,
"step": 1550
},
{
"epoch": 0.9437386569872959,
"grad_norm": 1.899366021156311,
"learning_rate": 2.9093204782494297e-05,
"loss": 0.9612,
"step": 1560
},
{
"epoch": 0.9497882637628554,
"grad_norm": 2.3809289932250977,
"learning_rate": 2.908086926004502e-05,
"loss": 1.1344,
"step": 1570
},
{
"epoch": 0.955837870538415,
"grad_norm": 2.101499557495117,
"learning_rate": 2.9068453051897688e-05,
"loss": 0.9209,
"step": 1580
},
{
"epoch": 0.9618874773139746,
"grad_norm": 1.0858937501907349,
"learning_rate": 2.905595622919921e-05,
"loss": 0.9232,
"step": 1590
},
{
"epoch": 0.9679370840895342,
"grad_norm": 1.7263002395629883,
"learning_rate": 2.9043378863558442e-05,
"loss": 1.0186,
"step": 1600
},
{
"epoch": 0.9739866908650938,
"grad_norm": 1.759886622428894,
"learning_rate": 2.9030721027045752e-05,
"loss": 0.9193,
"step": 1610
},
{
"epoch": 0.9800362976406534,
"grad_norm": 2.344141960144043,
"learning_rate": 2.9017982792192636e-05,
"loss": 1.0178,
"step": 1620
},
{
"epoch": 0.9860859044162129,
"grad_norm": 2.1581854820251465,
"learning_rate": 2.9005164231991265e-05,
"loss": 0.9483,
"step": 1630
},
{
"epoch": 0.9921355111917726,
"grad_norm": 1.9486889839172363,
"learning_rate": 2.8992265419894105e-05,
"loss": 1.0735,
"step": 1640
},
{
"epoch": 0.9981851179673321,
"grad_norm": 1.6225651502609253,
"learning_rate": 2.8979286429813473e-05,
"loss": 1.0419,
"step": 1650
},
{
"epoch": 2.0024183796856105,
"eval_loss": 1.0315272808074951,
"eval_runtime": 61.5654,
"eval_samples_per_second": 3.086,
"eval_steps_per_second": 1.543,
"step": 1656
},
{
"epoch": 2.007255139056832,
"grad_norm": 0.9425010085105896,
"learning_rate": 2.5953521207704197e-05,
"loss": 1.029,
"step": 1660
},
{
"epoch": 2.019347037484885,
"grad_norm": 0.900601863861084,
"learning_rate": 2.5903985145302143e-05,
"loss": 0.8985,
"step": 1670
},
{
"epoch": 2.0314389359129383,
"grad_norm": 1.3217060565948486,
"learning_rate": 2.5854195615561937e-05,
"loss": 0.8201,
"step": 1680
},
{
"epoch": 2.0435308343409915,
"grad_norm": 1.2372468709945679,
"learning_rate": 2.5804153775860385e-05,
"loss": 0.9249,
"step": 1690
},
{
"epoch": 2.0556227327690446,
"grad_norm": 1.5520892143249512,
"learning_rate": 2.5753860789439343e-05,
"loss": 0.9418,
"step": 1700
},
{
"epoch": 2.0677146311970978,
"grad_norm": 1.664322853088379,
"learning_rate": 2.5703317825378677e-05,
"loss": 0.8835,
"step": 1710
},
{
"epoch": 2.0798065296251513,
"grad_norm": 1.084291934967041,
"learning_rate": 2.5652526058569055e-05,
"loss": 0.9336,
"step": 1720
},
{
"epoch": 2.0918984280532045,
"grad_norm": 1.3585351705551147,
"learning_rate": 2.5601486669684686e-05,
"loss": 0.9808,
"step": 1730
},
{
"epoch": 2.1039903264812576,
"grad_norm": 1.5963308811187744,
"learning_rate": 2.5550200845155842e-05,
"loss": 1.0655,
"step": 1740
},
{
"epoch": 2.1160822249093107,
"grad_norm": 1.2812840938568115,
"learning_rate": 2.549866977714128e-05,
"loss": 1.0266,
"step": 1750
},
{
"epoch": 2.128174123337364,
"grad_norm": 1.5718134641647339,
"learning_rate": 2.5446894663500544e-05,
"loss": 0.7775,
"step": 1760
},
{
"epoch": 2.140266021765417,
"grad_norm": 1.943947434425354,
"learning_rate": 2.5394876707766105e-05,
"loss": 0.9393,
"step": 1770
},
{
"epoch": 2.15235792019347,
"grad_norm": 1.304726004600525,
"learning_rate": 2.5342617119115404e-05,
"loss": 0.8872,
"step": 1780
},
{
"epoch": 2.1644498186215237,
"grad_norm": 1.2231858968734741,
"learning_rate": 2.529011711234272e-05,
"loss": 0.9708,
"step": 1790
},
{
"epoch": 2.176541717049577,
"grad_norm": 1.0328049659729004,
"learning_rate": 2.5237377907830947e-05,
"loss": 0.8888,
"step": 1800
},
{
"epoch": 2.18863361547763,
"grad_norm": 1.471614122390747,
"learning_rate": 2.5184400731523233e-05,
"loss": 0.8714,
"step": 1810
},
{
"epoch": 2.200725513905683,
"grad_norm": 1.3779953718185425,
"learning_rate": 2.5131186814894456e-05,
"loss": 0.9406,
"step": 1820
},
{
"epoch": 2.2128174123337363,
"grad_norm": 1.3803290128707886,
"learning_rate": 2.5077737394922627e-05,
"loss": 0.8063,
"step": 1830
},
{
"epoch": 2.2249093107617894,
"grad_norm": 1.749267578125,
"learning_rate": 2.5024053714060116e-05,
"loss": 0.9785,
"step": 1840
},
{
"epoch": 2.2370012091898426,
"grad_norm": 2.1147618293762207,
"learning_rate": 2.4970137020204773e-05,
"loss": 0.9254,
"step": 1850
},
{
"epoch": 2.249093107617896,
"grad_norm": 1.076285719871521,
"learning_rate": 2.4915988566670944e-05,
"loss": 0.9499,
"step": 1860
},
{
"epoch": 2.2611850060459493,
"grad_norm": 1.9593374729156494,
"learning_rate": 2.4861609612160297e-05,
"loss": 0.9589,
"step": 1870
},
{
"epoch": 2.2732769044740024,
"grad_norm": 1.759669542312622,
"learning_rate": 2.4807001420732592e-05,
"loss": 0.9817,
"step": 1880
},
{
"epoch": 2.2853688029020556,
"grad_norm": 1.2852345705032349,
"learning_rate": 2.475216526177629e-05,
"loss": 1.1129,
"step": 1890
},
{
"epoch": 2.2974607013301087,
"grad_norm": 1.3759500980377197,
"learning_rate": 2.4697102409979054e-05,
"loss": 0.7167,
"step": 1900
},
{
"epoch": 2.309552599758162,
"grad_norm": 1.2431224584579468,
"learning_rate": 2.464181414529809e-05,
"loss": 0.8352,
"step": 1910
},
{
"epoch": 2.321644498186215,
"grad_norm": 1.6261695623397827,
"learning_rate": 2.458630175293043e-05,
"loss": 0.9874,
"step": 1920
},
{
"epoch": 2.3337363966142686,
"grad_norm": 1.4598480463027954,
"learning_rate": 2.4530566523283032e-05,
"loss": 0.9489,
"step": 1930
},
{
"epoch": 2.3458282950423217,
"grad_norm": 1.9565013647079468,
"learning_rate": 2.4474609751942798e-05,
"loss": 1.0428,
"step": 1940
},
{
"epoch": 2.357920193470375,
"grad_norm": 1.8509588241577148,
"learning_rate": 2.4418432739646443e-05,
"loss": 0.9298,
"step": 1950
},
{
"epoch": 2.370012091898428,
"grad_norm": 1.634786605834961,
"learning_rate": 2.4362036792250275e-05,
"loss": 0.9312,
"step": 1960
},
{
"epoch": 2.382103990326481,
"grad_norm": 1.9874886274337769,
"learning_rate": 2.430542322069984e-05,
"loss": 1.0114,
"step": 1970
},
{
"epoch": 2.3941958887545347,
"grad_norm": 1.8052939176559448,
"learning_rate": 2.4248593340999423e-05,
"loss": 0.8945,
"step": 1980
},
{
"epoch": 2.406287787182588,
"grad_norm": 1.6896426677703857,
"learning_rate": 2.4191548474181494e-05,
"loss": 0.9642,
"step": 1990
},
{
"epoch": 2.418379685610641,
"grad_norm": 1.6607788801193237,
"learning_rate": 2.4134289946275963e-05,
"loss": 0.9253,
"step": 2000
},
{
"epoch": 2.430471584038694,
"grad_norm": 2.010333299636841,
"learning_rate": 2.4076819088279395e-05,
"loss": 0.9328,
"step": 2010
},
{
"epoch": 2.4425634824667473,
"grad_norm": 1.9691773653030396,
"learning_rate": 2.4019137236124027e-05,
"loss": 1.0397,
"step": 2020
},
{
"epoch": 2.4546553808948004,
"grad_norm": 1.8054171800613403,
"learning_rate": 2.3961245730646758e-05,
"loss": 0.8893,
"step": 2030
},
{
"epoch": 2.4667472793228535,
"grad_norm": 1.0990610122680664,
"learning_rate": 2.3903145917557945e-05,
"loss": 0.9129,
"step": 2040
},
{
"epoch": 2.478839177750907,
"grad_norm": 1.6305997371673584,
"learning_rate": 2.3844839147410146e-05,
"loss": 0.9494,
"step": 2050
},
{
"epoch": 2.4909310761789603,
"grad_norm": 2.0180811882019043,
"learning_rate": 2.3786326775566705e-05,
"loss": 0.8741,
"step": 2060
},
{
"epoch": 2.5030229746070134,
"grad_norm": 1.390211820602417,
"learning_rate": 2.3727610162170262e-05,
"loss": 0.9242,
"step": 2070
},
{
"epoch": 2.5030229746070134,
"eval_loss": 1.027010202407837,
"eval_runtime": 62.1115,
"eval_samples_per_second": 3.059,
"eval_steps_per_second": 1.53,
"step": 2070
},
{
"epoch": 2.5151148730350665,
"grad_norm": 2.0877339839935303,
"learning_rate": 2.3668690672111135e-05,
"loss": 0.9463,
"step": 2080
},
{
"epoch": 2.5272067714631197,
"grad_norm": 1.8376761674880981,
"learning_rate": 2.3609569674995575e-05,
"loss": 0.9761,
"step": 2090
},
{
"epoch": 2.539298669891173,
"grad_norm": 1.2929617166519165,
"learning_rate": 2.355024854511396e-05,
"loss": 0.9552,
"step": 2100
},
{
"epoch": 2.551390568319226,
"grad_norm": 1.428189992904663,
"learning_rate": 2.3490728661408823e-05,
"loss": 0.9305,
"step": 2110
},
{
"epoch": 2.5634824667472795,
"grad_norm": 1.873153567314148,
"learning_rate": 2.3431011407442793e-05,
"loss": 0.9285,
"step": 2120
},
{
"epoch": 2.5755743651753327,
"grad_norm": 1.3220646381378174,
"learning_rate": 2.3371098171366473e-05,
"loss": 0.9198,
"step": 2130
},
{
"epoch": 2.587666263603386,
"grad_norm": 1.2938573360443115,
"learning_rate": 2.3310990345886124e-05,
"loss": 0.7408,
"step": 2140
},
{
"epoch": 2.599758162031439,
"grad_norm": 1.5691282749176025,
"learning_rate": 2.325068932823132e-05,
"loss": 1.085,
"step": 2150
},
{
"epoch": 2.611850060459492,
"grad_norm": 1.8494837284088135,
"learning_rate": 2.3190196520122468e-05,
"loss": 0.9864,
"step": 2160
},
{
"epoch": 2.6239419588875452,
"grad_norm": 2.0735583305358887,
"learning_rate": 2.312951332773821e-05,
"loss": 0.9551,
"step": 2170
},
{
"epoch": 2.6360338573155984,
"grad_norm": 2.0026185512542725,
"learning_rate": 2.3068641161682747e-05,
"loss": 0.8341,
"step": 2180
},
{
"epoch": 2.648125755743652,
"grad_norm": 1.0748159885406494,
"learning_rate": 2.3007581436953043e-05,
"loss": 0.8707,
"step": 2190
},
{
"epoch": 2.660217654171705,
"grad_norm": 1.2758897542953491,
"learning_rate": 2.294633557290594e-05,
"loss": 0.9761,
"step": 2200
},
{
"epoch": 2.672309552599758,
"grad_norm": 1.5994787216186523,
"learning_rate": 2.288490499322515e-05,
"loss": 0.9284,
"step": 2210
},
{
"epoch": 2.6844014510278114,
"grad_norm": 1.913944125175476,
"learning_rate": 2.282329112588819e-05,
"loss": 0.9475,
"step": 2220
},
{
"epoch": 2.6964933494558645,
"grad_norm": 1.6219775676727295,
"learning_rate": 2.2761495403133156e-05,
"loss": 0.8979,
"step": 2230
},
{
"epoch": 2.7085852478839176,
"grad_norm": 2.0113205909729004,
"learning_rate": 2.2699519261425437e-05,
"loss": 1.0181,
"step": 2240
},
{
"epoch": 2.7206771463119708,
"grad_norm": 1.56183922290802,
"learning_rate": 2.2637364141424347e-05,
"loss": 1.027,
"step": 2250
},
{
"epoch": 2.7327690447400244,
"grad_norm": 1.4773386716842651,
"learning_rate": 2.2575031487949607e-05,
"loss": 0.9299,
"step": 2260
},
{
"epoch": 2.7448609431680775,
"grad_norm": 1.8613166809082031,
"learning_rate": 2.251252274994778e-05,
"loss": 0.9851,
"step": 2270
},
{
"epoch": 2.7569528415961306,
"grad_norm": 1.6664507389068604,
"learning_rate": 2.244983938045858e-05,
"loss": 0.8041,
"step": 2280
},
{
"epoch": 2.7690447400241838,
"grad_norm": 1.3423569202423096,
"learning_rate": 2.2386982836581084e-05,
"loss": 0.8549,
"step": 2290
},
{
"epoch": 2.781136638452237,
"grad_norm": 1.7735357284545898,
"learning_rate": 2.232395457943989e-05,
"loss": 0.997,
"step": 2300
},
{
"epoch": 2.79322853688029,
"grad_norm": 2.512533664703369,
"learning_rate": 2.226075607415114e-05,
"loss": 0.8865,
"step": 2310
},
{
"epoch": 2.805320435308343,
"grad_norm": 2.101529121398926,
"learning_rate": 2.2197388789788444e-05,
"loss": 0.9071,
"step": 2320
},
{
"epoch": 2.8174123337363968,
"grad_norm": 1.0863012075424194,
"learning_rate": 2.213385419934876e-05,
"loss": 0.7935,
"step": 2330
},
{
"epoch": 2.82950423216445,
"grad_norm": 1.9452869892120361,
"learning_rate": 2.2070153779718127e-05,
"loss": 0.8955,
"step": 2340
},
{
"epoch": 2.841596130592503,
"grad_norm": 2.226400136947632,
"learning_rate": 2.200628901163737e-05,
"loss": 0.8089,
"step": 2350
},
{
"epoch": 2.853688029020556,
"grad_norm": 1.274936318397522,
"learning_rate": 2.1942261379667626e-05,
"loss": 0.9588,
"step": 2360
},
{
"epoch": 2.8657799274486093,
"grad_norm": 1.3687050342559814,
"learning_rate": 2.18780723721559e-05,
"loss": 0.9048,
"step": 2370
},
{
"epoch": 2.877871825876663,
"grad_norm": 1.7625335454940796,
"learning_rate": 2.1813723481200394e-05,
"loss": 1.0745,
"step": 2380
},
{
"epoch": 2.8899637243047156,
"grad_norm": 1.4267995357513428,
"learning_rate": 2.1749216202615907e-05,
"loss": 1.0377,
"step": 2390
},
{
"epoch": 2.902055622732769,
"grad_norm": 1.885882019996643,
"learning_rate": 2.1684552035898982e-05,
"loss": 0.8572,
"step": 2400
},
{
"epoch": 2.9141475211608223,
"grad_norm": 1.9667267799377441,
"learning_rate": 2.1619732484193113e-05,
"loss": 0.8767,
"step": 2410
},
{
"epoch": 2.9262394195888755,
"grad_norm": 1.962418556213379,
"learning_rate": 2.1554759054253758e-05,
"loss": 0.9886,
"step": 2420
},
{
"epoch": 2.9383313180169286,
"grad_norm": 2.5986828804016113,
"learning_rate": 2.148963325641335e-05,
"loss": 0.9386,
"step": 2430
},
{
"epoch": 2.9504232164449817,
"grad_norm": 2.477590560913086,
"learning_rate": 2.142435660454616e-05,
"loss": 0.9533,
"step": 2440
},
{
"epoch": 2.9625151148730353,
"grad_norm": 1.5716705322265625,
"learning_rate": 2.135893061603313e-05,
"loss": 0.9436,
"step": 2450
},
{
"epoch": 2.974607013301088,
"grad_norm": 2.0462253093719482,
"learning_rate": 2.129335681172658e-05,
"loss": 0.8393,
"step": 2460
},
{
"epoch": 2.9866989117291416,
"grad_norm": 1.807516098022461,
"learning_rate": 2.122763671591487e-05,
"loss": 0.8802,
"step": 2470
},
{
"epoch": 2.9987908101571947,
"grad_norm": 1.7970890998840332,
"learning_rate": 2.1161771856286964e-05,
"loss": 0.8121,
"step": 2480
},
{
"epoch": 3.0024183796856105,
"eval_loss": 1.025052547454834,
"eval_runtime": 62.1601,
"eval_samples_per_second": 3.057,
"eval_steps_per_second": 1.528,
"step": 2484
},
{
"epoch": 3.0096735187424426,
"grad_norm": 1.9102685451507568,
"learning_rate": 2.1095763763896915e-05,
"loss": 1.0081,
"step": 2490
},
{
"epoch": 3.0217654171704957,
"grad_norm": 1.673042893409729,
"learning_rate": 2.1029613973128274e-05,
"loss": 0.9355,
"step": 2500
},
{
"epoch": 3.033857315598549,
"grad_norm": 3.155738115310669,
"learning_rate": 2.0963324021658435e-05,
"loss": 0.7849,
"step": 2510
},
{
"epoch": 3.045949214026602,
"grad_norm": 1.792328953742981,
"learning_rate": 2.089689545042286e-05,
"loss": 0.8839,
"step": 2520
},
{
"epoch": 3.0580411124546556,
"grad_norm": 2.182460069656372,
"learning_rate": 2.0830329803579312e-05,
"loss": 0.8556,
"step": 2530
},
{
"epoch": 3.0701330108827087,
"grad_norm": 1.5327653884887695,
"learning_rate": 2.07636286284719e-05,
"loss": 0.9156,
"step": 2540
},
{
"epoch": 3.082224909310762,
"grad_norm": 1.9906131029129028,
"learning_rate": 2.0696793475595162e-05,
"loss": 0.78,
"step": 2550
},
{
"epoch": 3.094316807738815,
"grad_norm": 2.0401036739349365,
"learning_rate": 2.0629825898557984e-05,
"loss": 0.9573,
"step": 2560
},
{
"epoch": 3.106408706166868,
"grad_norm": 2.0492191314697266,
"learning_rate": 2.056272745404751e-05,
"loss": 0.851,
"step": 2570
},
{
"epoch": 3.1185006045949213,
"grad_norm": 2.2973692417144775,
"learning_rate": 2.0495499701792954e-05,
"loss": 0.8719,
"step": 2580
},
{
"epoch": 3.1305925030229744,
"grad_norm": 2.114962577819824,
"learning_rate": 2.0428144204529338e-05,
"loss": 0.8664,
"step": 2590
},
{
"epoch": 3.142684401451028,
"grad_norm": 1.581209659576416,
"learning_rate": 2.0360662527961144e-05,
"loss": 0.8698,
"step": 2600
},
{
"epoch": 3.154776299879081,
"grad_norm": 2.2733969688415527,
"learning_rate": 2.0293056240725978e-05,
"loss": 0.8688,
"step": 2610
},
{
"epoch": 3.1668681983071343,
"grad_norm": 1.9230784177780151,
"learning_rate": 2.022532691435803e-05,
"loss": 0.9029,
"step": 2620
},
{
"epoch": 3.1789600967351874,
"grad_norm": 1.8609906435012817,
"learning_rate": 2.0157476123251618e-05,
"loss": 0.8416,
"step": 2630
},
{
"epoch": 3.1910519951632406,
"grad_norm": 3.587183713912964,
"learning_rate": 2.0089505444624523e-05,
"loss": 0.8078,
"step": 2640
},
{
"epoch": 3.2031438935912937,
"grad_norm": 1.5747348070144653,
"learning_rate": 2.0021416458481382e-05,
"loss": 0.8362,
"step": 2650
},
{
"epoch": 3.215235792019347,
"grad_norm": 2.3190407752990723,
"learning_rate": 1.9953210747576925e-05,
"loss": 0.8644,
"step": 2660
},
{
"epoch": 3.2273276904474004,
"grad_norm": 2.1347427368164062,
"learning_rate": 1.988488989737919e-05,
"loss": 0.8559,
"step": 2670
},
{
"epoch": 3.2394195888754536,
"grad_norm": 1.6797113418579102,
"learning_rate": 1.9816455496032678e-05,
"loss": 0.6892,
"step": 2680
},
{
"epoch": 3.2515114873035067,
"grad_norm": 2.676992416381836,
"learning_rate": 1.9747909134321433e-05,
"loss": 0.9424,
"step": 2690
},
{
"epoch": 3.26360338573156,
"grad_norm": 2.5450267791748047,
"learning_rate": 1.9679252405632056e-05,
"loss": 1.0132,
"step": 2700
},
{
"epoch": 3.275695284159613,
"grad_norm": 1.7200360298156738,
"learning_rate": 1.9610486905916672e-05,
"loss": 0.7815,
"step": 2710
},
{
"epoch": 3.287787182587666,
"grad_norm": 1.8882542848587036,
"learning_rate": 1.9541614233655832e-05,
"loss": 0.7036,
"step": 2720
},
{
"epoch": 3.2998790810157193,
"grad_norm": 2.0537140369415283,
"learning_rate": 1.9472635989821346e-05,
"loss": 0.8662,
"step": 2730
},
{
"epoch": 3.311970979443773,
"grad_norm": 2.3131861686706543,
"learning_rate": 1.9403553777839097e-05,
"loss": 0.8448,
"step": 2740
},
{
"epoch": 3.324062877871826,
"grad_norm": 2.300368309020996,
"learning_rate": 1.9334369203551722e-05,
"loss": 0.8958,
"step": 2750
},
{
"epoch": 3.336154776299879,
"grad_norm": 2.186077117919922,
"learning_rate": 1.926508387518133e-05,
"loss": 0.8127,
"step": 2760
},
{
"epoch": 3.3482466747279322,
"grad_norm": 2.2586021423339844,
"learning_rate": 1.9195699403292072e-05,
"loss": 0.8263,
"step": 2770
},
{
"epoch": 3.3603385731559854,
"grad_norm": 2.485656499862671,
"learning_rate": 1.912621740075276e-05,
"loss": 0.9142,
"step": 2780
},
{
"epoch": 3.3724304715840385,
"grad_norm": 2.753103494644165,
"learning_rate": 1.9056639482699323e-05,
"loss": 0.8142,
"step": 2790
},
{
"epoch": 3.3845223700120917,
"grad_norm": 2.5374977588653564,
"learning_rate": 1.8986967266497293e-05,
"loss": 0.7762,
"step": 2800
},
{
"epoch": 3.3966142684401452,
"grad_norm": 2.3932859897613525,
"learning_rate": 1.8917202371704184e-05,
"loss": 0.8581,
"step": 2810
},
{
"epoch": 3.4087061668681984,
"grad_norm": 2.2613675594329834,
"learning_rate": 1.8847346420031876e-05,
"loss": 0.9741,
"step": 2820
},
{
"epoch": 3.4207980652962515,
"grad_norm": 1.7016990184783936,
"learning_rate": 1.877740103530889e-05,
"loss": 0.8193,
"step": 2830
},
{
"epoch": 3.4328899637243047,
"grad_norm": 2.1311845779418945,
"learning_rate": 1.8707367843442672e-05,
"loss": 0.7398,
"step": 2840
},
{
"epoch": 3.444981862152358,
"grad_norm": 2.0460548400878906,
"learning_rate": 1.8637248472381743e-05,
"loss": 0.7596,
"step": 2850
},
{
"epoch": 3.457073760580411,
"grad_norm": 2.456862688064575,
"learning_rate": 1.8567044552077932e-05,
"loss": 0.9296,
"step": 2860
},
{
"epoch": 3.4691656590084645,
"grad_norm": 1.7966513633728027,
"learning_rate": 1.8496757714448413e-05,
"loss": 0.8596,
"step": 2870
},
{
"epoch": 3.4812575574365177,
"grad_norm": 2.129179000854492,
"learning_rate": 1.8426389593337842e-05,
"loss": 0.8115,
"step": 2880
},
{
"epoch": 3.493349455864571,
"grad_norm": 1.8818175792694092,
"learning_rate": 1.835594182448031e-05,
"loss": 0.7811,
"step": 2890
},
{
"epoch": 3.5030229746070134,
"eval_loss": 1.0463489294052124,
"eval_runtime": 62.151,
"eval_samples_per_second": 3.057,
"eval_steps_per_second": 1.529,
"step": 2898
},
{
"epoch": 3.505441354292624,
"grad_norm": 2.0948948860168457,
"learning_rate": 1.828541604546136e-05,
"loss": 0.8436,
"step": 2900
},
{
"epoch": 3.517533252720677,
"grad_norm": 1.82291579246521,
"learning_rate": 1.8214813895679916e-05,
"loss": 0.7953,
"step": 2910
},
{
"epoch": 3.52962515114873,
"grad_norm": 2.061556100845337,
"learning_rate": 1.8144137016310164e-05,
"loss": 0.8741,
"step": 2920
},
{
"epoch": 3.541717049576784,
"grad_norm": 3.4671363830566406,
"learning_rate": 1.8073387050263416e-05,
"loss": 0.9057,
"step": 2930
},
{
"epoch": 3.5538089480048365,
"grad_norm": 3.073554515838623,
"learning_rate": 1.80025656421499e-05,
"loss": 0.9202,
"step": 2940
},
{
"epoch": 3.56590084643289,
"grad_norm": 2.2745471000671387,
"learning_rate": 1.7931674438240554e-05,
"loss": 0.869,
"step": 2950
},
{
"epoch": 3.577992744860943,
"grad_norm": 2.1967475414276123,
"learning_rate": 1.786071508642874e-05,
"loss": 0.8464,
"step": 2960
},
{
"epoch": 3.5900846432889963,
"grad_norm": 3.3642377853393555,
"learning_rate": 1.7789689236191942e-05,
"loss": 0.7493,
"step": 2970
},
{
"epoch": 3.6021765417170495,
"grad_norm": 2.1377601623535156,
"learning_rate": 1.7718598538553436e-05,
"loss": 0.8941,
"step": 2980
},
{
"epoch": 3.6142684401451026,
"grad_norm": 2.482862710952759,
"learning_rate": 1.7647444646043887e-05,
"loss": 0.9424,
"step": 2990
},
{
"epoch": 3.626360338573156,
"grad_norm": 2.38171648979187,
"learning_rate": 1.7576229212662955e-05,
"loss": 0.7499,
"step": 3000
},
{
"epoch": 3.6384522370012093,
"grad_norm": 2.8328399658203125,
"learning_rate": 1.750495389384085e-05,
"loss": 0.8494,
"step": 3010
},
{
"epoch": 3.6505441354292625,
"grad_norm": 2.6801421642303467,
"learning_rate": 1.7433620346399833e-05,
"loss": 0.8353,
"step": 3020
},
{
"epoch": 3.6626360338573156,
"grad_norm": 2.9922213554382324,
"learning_rate": 1.7362230228515706e-05,
"loss": 0.7786,
"step": 3030
},
{
"epoch": 3.6747279322853688,
"grad_norm": 1.9845452308654785,
"learning_rate": 1.729078519967929e-05,
"loss": 0.8261,
"step": 3040
},
{
"epoch": 3.686819830713422,
"grad_norm": 2.896380662918091,
"learning_rate": 1.721928692065781e-05,
"loss": 0.8031,
"step": 3050
},
{
"epoch": 3.698911729141475,
"grad_norm": 2.3239529132843018,
"learning_rate": 1.714773705345633e-05,
"loss": 0.8602,
"step": 3060
},
{
"epoch": 3.7110036275695286,
"grad_norm": 1.6386851072311401,
"learning_rate": 1.7076137261279105e-05,
"loss": 0.7999,
"step": 3070
},
{
"epoch": 3.7230955259975818,
"grad_norm": 2.6418874263763428,
"learning_rate": 1.700448920849089e-05,
"loss": 0.8791,
"step": 3080
},
{
"epoch": 3.735187424425635,
"grad_norm": 2.601285696029663,
"learning_rate": 1.6932794560578302e-05,
"loss": 0.8067,
"step": 3090
},
{
"epoch": 3.747279322853688,
"grad_norm": 2.0848560333251953,
"learning_rate": 1.6861054984111065e-05,
"loss": 0.7878,
"step": 3100
},
{
"epoch": 3.759371221281741,
"grad_norm": 2.478421449661255,
"learning_rate": 1.6789272146703296e-05,
"loss": 0.6949,
"step": 3110
},
{
"epoch": 3.7714631197097943,
"grad_norm": 2.1585545539855957,
"learning_rate": 1.6717447716974713e-05,
"loss": 0.9221,
"step": 3120
},
{
"epoch": 3.7835550181378474,
"grad_norm": 1.5959584712982178,
"learning_rate": 1.664558336451188e-05,
"loss": 0.774,
"step": 3130
},
{
"epoch": 3.795646916565901,
"grad_norm": 2.674715757369995,
"learning_rate": 1.657368075982937e-05,
"loss": 0.842,
"step": 3140
},
{
"epoch": 3.807738814993954,
"grad_norm": 2.4131863117218018,
"learning_rate": 1.6501741574330948e-05,
"loss": 0.8084,
"step": 3150
},
{
"epoch": 3.8198307134220073,
"grad_norm": 2.0096678733825684,
"learning_rate": 1.6429767480270713e-05,
"loss": 0.8851,
"step": 3160
},
{
"epoch": 3.8319226118500604,
"grad_norm": 2.3412530422210693,
"learning_rate": 1.635776015071423e-05,
"loss": 0.8997,
"step": 3170
},
{
"epoch": 3.8440145102781136,
"grad_norm": 2.2029662132263184,
"learning_rate": 1.628572125949963e-05,
"loss": 0.8425,
"step": 3180
},
{
"epoch": 3.8561064087061667,
"grad_norm": 2.2945916652679443,
"learning_rate": 1.6213652481198715e-05,
"loss": 0.8195,
"step": 3190
},
{
"epoch": 3.86819830713422,
"grad_norm": 3.0929417610168457,
"learning_rate": 1.6141555491078012e-05,
"loss": 0.78,
"step": 3200
},
{
"epoch": 3.8802902055622734,
"grad_norm": 3.166088581085205,
"learning_rate": 1.6069431965059854e-05,
"loss": 1.0325,
"step": 3210
},
{
"epoch": 3.8923821039903266,
"grad_norm": 2.1898717880249023,
"learning_rate": 1.5997283579683403e-05,
"loss": 0.7925,
"step": 3220
},
{
"epoch": 3.9044740024183797,
"grad_norm": 2.5372607707977295,
"learning_rate": 1.5925112012065697e-05,
"loss": 0.8772,
"step": 3230
},
{
"epoch": 3.916565900846433,
"grad_norm": 1.7813209295272827,
"learning_rate": 1.5852918939862637e-05,
"loss": 0.8521,
"step": 3240
},
{
"epoch": 3.928657799274486,
"grad_norm": 4.24637508392334,
"learning_rate": 1.578070604123003e-05,
"loss": 0.872,
"step": 3250
},
{
"epoch": 3.940749697702539,
"grad_norm": 2.9286880493164062,
"learning_rate": 1.5708474994784525e-05,
"loss": 0.9763,
"step": 3260
},
{
"epoch": 3.9528415961305923,
"grad_norm": 2.25107479095459,
"learning_rate": 1.563622747956466e-05,
"loss": 0.7783,
"step": 3270
},
{
"epoch": 3.964933494558646,
"grad_norm": 3.030318021774292,
"learning_rate": 1.5563965174991762e-05,
"loss": 0.9837,
"step": 3280
},
{
"epoch": 3.977025392986699,
"grad_norm": 2.7458813190460205,
"learning_rate": 1.5491689760830966e-05,
"loss": 0.7787,
"step": 3290
},
{
"epoch": 3.989117291414752,
"grad_norm": 2.2329845428466797,
"learning_rate": 1.5419402917152133e-05,
"loss": 0.9339,
"step": 3300
},
{
"epoch": 4.0024183796856105,
"grad_norm": 1.8471683263778687,
"learning_rate": 1.5347106324290817e-05,
"loss": 0.8205,
"step": 3310
},
{
"epoch": 4.004836759371221,
"eval_loss": 1.0431386232376099,
"eval_runtime": 62.1184,
"eval_samples_per_second": 3.059,
"eval_steps_per_second": 1.529,
"step": 3312
},
{
"epoch": 4.014510278113664,
"grad_norm": 1.8121156692504883,
"learning_rate": 1.5274801662809178e-05,
"loss": 0.7915,
"step": 3320
},
{
"epoch": 4.026602176541717,
"grad_norm": 2.4977829456329346,
"learning_rate": 1.5202490613456963e-05,
"loss": 0.6828,
"step": 3330
},
{
"epoch": 4.03869407496977,
"grad_norm": 2.696383237838745,
"learning_rate": 1.5130174857132367e-05,
"loss": 0.7595,
"step": 3340
},
{
"epoch": 4.050785973397823,
"grad_norm": 2.9162650108337402,
"learning_rate": 1.5057856074843042e-05,
"loss": 0.8107,
"step": 3350
},
{
"epoch": 4.062877871825877,
"grad_norm": 2.7059521675109863,
"learning_rate": 1.4985535947666946e-05,
"loss": 0.8078,
"step": 3360
},
{
"epoch": 4.07496977025393,
"grad_norm": 2.8959505558013916,
"learning_rate": 1.4913216156713333e-05,
"loss": 0.8511,
"step": 3370
},
{
"epoch": 4.087061668681983,
"grad_norm": 2.2846384048461914,
"learning_rate": 1.4840898383083603e-05,
"loss": 0.7364,
"step": 3380
},
{
"epoch": 4.0991535671100365,
"grad_norm": 3.3403897285461426,
"learning_rate": 1.47685843078323e-05,
"loss": 0.9103,
"step": 3390
},
{
"epoch": 4.111245465538089,
"grad_norm": 1.9266448020935059,
"learning_rate": 1.4696275611927962e-05,
"loss": 0.6803,
"step": 3400
},
{
"epoch": 4.123337363966143,
"grad_norm": 2.2451562881469727,
"learning_rate": 1.4623973976214117e-05,
"loss": 0.7053,
"step": 3410
},
{
"epoch": 4.1354292623941955,
"grad_norm": 3.214365005493164,
"learning_rate": 1.4551681081370149e-05,
"loss": 0.7481,
"step": 3420
},
{
"epoch": 4.147521160822249,
"grad_norm": 2.8689422607421875,
"learning_rate": 1.447939860787227e-05,
"loss": 0.8524,
"step": 3430
},
{
"epoch": 4.159613059250303,
"grad_norm": 2.400942087173462,
"learning_rate": 1.440712823595444e-05,
"loss": 0.8388,
"step": 3440
},
{
"epoch": 4.171704957678355,
"grad_norm": 2.9148495197296143,
"learning_rate": 1.433487164556932e-05,
"loss": 0.7776,
"step": 3450
},
{
"epoch": 4.183796856106409,
"grad_norm": 3.156839370727539,
"learning_rate": 1.42626305163492e-05,
"loss": 0.7788,
"step": 3460
},
{
"epoch": 4.195888754534462,
"grad_norm": 3.3853509426116943,
"learning_rate": 1.4190406527566984e-05,
"loss": 0.768,
"step": 3470
},
{
"epoch": 4.207980652962515,
"grad_norm": 2.504469871520996,
"learning_rate": 1.4118201358097127e-05,
"loss": 0.8062,
"step": 3480
},
{
"epoch": 4.220072551390568,
"grad_norm": 3.1411614418029785,
"learning_rate": 1.4046016686376636e-05,
"loss": 0.7413,
"step": 3490
},
{
"epoch": 4.2321644498186215,
"grad_norm": 3.1822478771209717,
"learning_rate": 1.3973854190366023e-05,
"loss": 0.7172,
"step": 3500
},
{
"epoch": 4.244256348246675,
"grad_norm": 2.4940245151519775,
"learning_rate": 1.3901715547510332e-05,
"loss": 0.7663,
"step": 3510
},
{
"epoch": 4.256348246674728,
"grad_norm": 3.3821640014648438,
"learning_rate": 1.3829602434700128e-05,
"loss": 0.7442,
"step": 3520
},
{
"epoch": 4.268440145102781,
"grad_norm": 2.120893955230713,
"learning_rate": 1.375751652823251e-05,
"loss": 0.7644,
"step": 3530
},
{
"epoch": 4.280532043530834,
"grad_norm": 3.397862672805786,
"learning_rate": 1.368545950377217e-05,
"loss": 0.7631,
"step": 3540
},
{
"epoch": 4.292623941958888,
"grad_norm": 3.241807222366333,
"learning_rate": 1.3613433036312415e-05,
"loss": 0.8655,
"step": 3550
},
{
"epoch": 4.30471584038694,
"grad_norm": 2.9539334774017334,
"learning_rate": 1.3541438800136251e-05,
"loss": 0.7683,
"step": 3560
},
{
"epoch": 4.316807738814994,
"grad_norm": 3.136444568634033,
"learning_rate": 1.346947846877744e-05,
"loss": 0.8393,
"step": 3570
},
{
"epoch": 4.3288996372430475,
"grad_norm": 3.4799044132232666,
"learning_rate": 1.3397553714981646e-05,
"loss": 0.8738,
"step": 3580
},
{
"epoch": 4.3409915356711,
"grad_norm": 3.240767240524292,
"learning_rate": 1.3325666210667476e-05,
"loss": 0.9027,
"step": 3590
},
{
"epoch": 4.353083434099154,
"grad_norm": 3.1625399589538574,
"learning_rate": 1.3253817626887698e-05,
"loss": 0.7002,
"step": 3600
},
{
"epoch": 4.3651753325272065,
"grad_norm": 1.863317608833313,
"learning_rate": 1.318200963379032e-05,
"loss": 0.7383,
"step": 3610
},
{
"epoch": 4.37726723095526,
"grad_norm": 3.0041935443878174,
"learning_rate": 1.3110243900579846e-05,
"loss": 0.7579,
"step": 3620
},
{
"epoch": 4.389359129383314,
"grad_norm": 2.631523370742798,
"learning_rate": 1.303852209547839e-05,
"loss": 0.8294,
"step": 3630
},
{
"epoch": 4.401451027811366,
"grad_norm": 2.0372538566589355,
"learning_rate": 1.2966845885686976e-05,
"loss": 0.6767,
"step": 3640
},
{
"epoch": 4.41354292623942,
"grad_norm": 3.182499647140503,
"learning_rate": 1.2895216937346716e-05,
"loss": 0.7367,
"step": 3650
},
{
"epoch": 4.425634824667473,
"grad_norm": 3.0209364891052246,
"learning_rate": 1.2823636915500135e-05,
"loss": 0.7486,
"step": 3660
},
{
"epoch": 4.437726723095526,
"grad_norm": 3.3213560581207275,
"learning_rate": 1.275210748405244e-05,
"loss": 0.7267,
"step": 3670
},
{
"epoch": 4.449818621523579,
"grad_norm": 3.588071346282959,
"learning_rate": 1.2680630305732831e-05,
"loss": 0.7972,
"step": 3680
},
{
"epoch": 4.4619105199516325,
"grad_norm": 3.1862246990203857,
"learning_rate": 1.2609207042055879e-05,
"loss": 0.7063,
"step": 3690
},
{
"epoch": 4.474002418379685,
"grad_norm": 4.018402099609375,
"learning_rate": 1.2537839353282872e-05,
"loss": 0.8669,
"step": 3700
},
{
"epoch": 4.486094316807739,
"grad_norm": 4.134286403656006,
"learning_rate": 1.2466528898383259e-05,
"loss": 0.8608,
"step": 3710
},
{
"epoch": 4.498186215235792,
"grad_norm": 2.790292978286743,
"learning_rate": 1.2395277334996045e-05,
"loss": 0.7505,
"step": 3720
},
{
"epoch": 4.505441354292624,
"eval_loss": 1.0653315782546997,
"eval_runtime": 62.244,
"eval_samples_per_second": 3.053,
"eval_steps_per_second": 1.526,
"step": 3726
},
{
"epoch": 4.510278113663845,
"grad_norm": 5.061868667602539,
"learning_rate": 1.2324086319391297e-05,
"loss": 0.6894,
"step": 3730
},
{
"epoch": 4.522370012091899,
"grad_norm": 3.3010201454162598,
"learning_rate": 1.2252957506431606e-05,
"loss": 0.7144,
"step": 3740
},
{
"epoch": 4.534461910519951,
"grad_norm": 3.672531843185425,
"learning_rate": 1.2181892549533657e-05,
"loss": 0.7517,
"step": 3750
},
{
"epoch": 4.546553808948005,
"grad_norm": 2.89050030708313,
"learning_rate": 1.2110893100629763e-05,
"loss": 0.7112,
"step": 3760
},
{
"epoch": 4.5586457073760585,
"grad_norm": 3.2547507286071777,
"learning_rate": 1.2039960810129491e-05,
"loss": 0.8639,
"step": 3770
},
{
"epoch": 4.570737605804111,
"grad_norm": 3.547781229019165,
"learning_rate": 1.1969097326881272e-05,
"loss": 0.7602,
"step": 3780
},
{
"epoch": 4.582829504232165,
"grad_norm": 3.2739574909210205,
"learning_rate": 1.189830429813409e-05,
"loss": 0.6657,
"step": 3790
},
{
"epoch": 4.594921402660217,
"grad_norm": 4.094822883605957,
"learning_rate": 1.1827583369499199e-05,
"loss": 0.8112,
"step": 3800
},
{
"epoch": 4.607013301088271,
"grad_norm": 3.00482177734375,
"learning_rate": 1.1756936184911834e-05,
"loss": 0.7148,
"step": 3810
},
{
"epoch": 4.619105199516324,
"grad_norm": 3.754075288772583,
"learning_rate": 1.1686364386593047e-05,
"loss": 0.7634,
"step": 3820
},
{
"epoch": 4.631197097944377,
"grad_norm": 1.9571914672851562,
"learning_rate": 1.1615869615011487e-05,
"loss": 0.8019,
"step": 3830
},
{
"epoch": 4.64328899637243,
"grad_norm": 4.519535064697266,
"learning_rate": 1.1545453508845303e-05,
"loss": 0.7501,
"step": 3840
},
{
"epoch": 4.655380894800484,
"grad_norm": 3.0004842281341553,
"learning_rate": 1.147511770494402e-05,
"loss": 0.9003,
"step": 3850
},
{
"epoch": 4.667472793228537,
"grad_norm": 3.110866069793701,
"learning_rate": 1.1404863838290519e-05,
"loss": 0.7781,
"step": 3860
},
{
"epoch": 4.67956469165659,
"grad_norm": 3.056727647781372,
"learning_rate": 1.1334693541963009e-05,
"loss": 0.782,
"step": 3870
},
{
"epoch": 4.691656590084643,
"grad_norm": 5.728208065032959,
"learning_rate": 1.1264608447097082e-05,
"loss": 0.7219,
"step": 3880
},
{
"epoch": 4.703748488512696,
"grad_norm": 4.19158935546875,
"learning_rate": 1.1194610182847785e-05,
"loss": 0.7901,
"step": 3890
},
{
"epoch": 4.71584038694075,
"grad_norm": 4.694254398345947,
"learning_rate": 1.1124700376351755e-05,
"loss": 0.799,
"step": 3900
},
{
"epoch": 4.727932285368803,
"grad_norm": 2.9105007648468018,
"learning_rate": 1.105488065268939e-05,
"loss": 0.7363,
"step": 3910
},
{
"epoch": 4.740024183796856,
"grad_norm": 2.6782641410827637,
"learning_rate": 1.0985152634847087e-05,
"loss": 0.749,
"step": 3920
},
{
"epoch": 4.7521160822249096,
"grad_norm": 4.835862159729004,
"learning_rate": 1.0915517943679495e-05,
"loss": 0.7918,
"step": 3930
},
{
"epoch": 4.764207980652962,
"grad_norm": 3.066210985183716,
"learning_rate": 1.084597819787186e-05,
"loss": 0.76,
"step": 3940
},
{
"epoch": 4.776299879081016,
"grad_norm": 3.978726863861084,
"learning_rate": 1.0776535013902382e-05,
"loss": 0.7956,
"step": 3950
},
{
"epoch": 4.788391777509069,
"grad_norm": 3.4121222496032715,
"learning_rate": 1.0707190006004634e-05,
"loss": 0.772,
"step": 3960
},
{
"epoch": 4.800483675937122,
"grad_norm": 4.126001358032227,
"learning_rate": 1.0637944786130064e-05,
"loss": 0.7623,
"step": 3970
},
{
"epoch": 4.812575574365176,
"grad_norm": 3.501007556915283,
"learning_rate": 1.0568800963910495e-05,
"loss": 0.7658,
"step": 3980
},
{
"epoch": 4.824667472793228,
"grad_norm": 3.7841763496398926,
"learning_rate": 1.0499760146620745e-05,
"loss": 0.8207,
"step": 3990
},
{
"epoch": 4.836759371221282,
"grad_norm": 2.9045231342315674,
"learning_rate": 1.0430823939141204e-05,
"loss": 0.8224,
"step": 4000
},
{
"epoch": 4.848851269649335,
"grad_norm": 2.3933026790618896,
"learning_rate": 1.036199394392061e-05,
"loss": 0.894,
"step": 4010
},
{
"epoch": 4.860943168077388,
"grad_norm": 3.316516876220703,
"learning_rate": 1.0293271760938719e-05,
"loss": 0.7822,
"step": 4020
},
{
"epoch": 4.873035066505441,
"grad_norm": 3.8151934146881104,
"learning_rate": 1.0224658987669182e-05,
"loss": 0.8381,
"step": 4030
},
{
"epoch": 4.8851269649334945,
"grad_norm": 3.1684491634368896,
"learning_rate": 1.0156157219042351e-05,
"loss": 0.7151,
"step": 4040
},
{
"epoch": 4.897218863361548,
"grad_norm": 2.5580480098724365,
"learning_rate": 1.0087768047408257e-05,
"loss": 0.7844,
"step": 4050
},
{
"epoch": 4.909310761789601,
"grad_norm": 3.3300466537475586,
"learning_rate": 1.0019493062499541e-05,
"loss": 0.8607,
"step": 4060
},
{
"epoch": 4.921402660217654,
"grad_norm": 3.0954511165618896,
"learning_rate": 9.951333851394563e-06,
"loss": 0.7207,
"step": 4070
},
{
"epoch": 4.933494558645707,
"grad_norm": 2.838153123855591,
"learning_rate": 9.883291998480459e-06,
"loss": 0.7848,
"step": 4080
},
{
"epoch": 4.945586457073761,
"grad_norm": 2.8606677055358887,
"learning_rate": 9.815369085416321e-06,
"loss": 0.7297,
"step": 4090
},
{
"epoch": 4.957678355501814,
"grad_norm": 2.995903491973877,
"learning_rate": 9.747566691096455e-06,
"loss": 0.7815,
"step": 4100
},
{
"epoch": 4.969770253929867,
"grad_norm": 2.5223402976989746,
"learning_rate": 9.679886391613649e-06,
"loss": 0.764,
"step": 4110
},
{
"epoch": 4.9818621523579205,
"grad_norm": 2.362399101257324,
"learning_rate": 9.612329760222562e-06,
"loss": 0.6716,
"step": 4120
},
{
"epoch": 4.993954050785973,
"grad_norm": 4.546547889709473,
"learning_rate": 9.544898367303126e-06,
"loss": 0.9023,
"step": 4130
},
{
"epoch": 5.008464328899637,
"grad_norm": 2.1150896549224854,
"learning_rate": 9.47759378032407e-06,
"loss": 0.6997,
"step": 4140
},
{
"epoch": 5.008464328899637,
"eval_loss": 1.070144534111023,
"eval_runtime": 62.259,
"eval_samples_per_second": 3.052,
"eval_steps_per_second": 1.526,
"step": 4140
},
{
"epoch": 5.02055622732769,
"grad_norm": 2.231945514678955,
"learning_rate": 9.410417563806457e-06,
"loss": 0.7199,
"step": 4150
},
{
"epoch": 5.032648125755744,
"grad_norm": 2.773264169692993,
"learning_rate": 9.343371279287346e-06,
"loss": 0.7082,
"step": 4160
},
{
"epoch": 5.044740024183797,
"grad_norm": 4.111907482147217,
"learning_rate": 9.276456485283454e-06,
"loss": 0.7083,
"step": 4170
},
{
"epoch": 5.05683192261185,
"grad_norm": 4.149149417877197,
"learning_rate": 9.209674737254977e-06,
"loss": 0.6057,
"step": 4180
},
{
"epoch": 5.0689238210399035,
"grad_norm": 3.5627682209014893,
"learning_rate": 9.143027587569384e-06,
"loss": 0.7398,
"step": 4190
},
{
"epoch": 5.081015719467956,
"grad_norm": 2.7134649753570557,
"learning_rate": 9.076516585465364e-06,
"loss": 0.8352,
"step": 4200
},
{
"epoch": 5.09310761789601,
"grad_norm": 3.4274790287017822,
"learning_rate": 9.010143277016797e-06,
"loss": 0.789,
"step": 4210
},
{
"epoch": 5.105199516324063,
"grad_norm": 2.505985736846924,
"learning_rate": 8.943909205096825e-06,
"loss": 0.7339,
"step": 4220
},
{
"epoch": 5.117291414752116,
"grad_norm": 2.784248113632202,
"learning_rate": 8.877815909341995e-06,
"loss": 0.6168,
"step": 4230
},
{
"epoch": 5.12938331318017,
"grad_norm": 3.0708258152008057,
"learning_rate": 8.811864926116423e-06,
"loss": 0.681,
"step": 4240
},
{
"epoch": 5.141475211608222,
"grad_norm": 2.7348103523254395,
"learning_rate": 8.746057788476165e-06,
"loss": 0.6212,
"step": 4250
},
{
"epoch": 5.153567110036276,
"grad_norm": 3.076500177383423,
"learning_rate": 8.680396026133482e-06,
"loss": 0.6856,
"step": 4260
},
{
"epoch": 5.165659008464329,
"grad_norm": 5.6594085693359375,
"learning_rate": 8.61488116542136e-06,
"loss": 0.8085,
"step": 4270
},
{
"epoch": 5.177750906892382,
"grad_norm": 5.323330402374268,
"learning_rate": 8.549514729257991e-06,
"loss": 0.6922,
"step": 4280
},
{
"epoch": 5.189842805320436,
"grad_norm": 3.687112331390381,
"learning_rate": 8.484298237111392e-06,
"loss": 0.8099,
"step": 4290
},
{
"epoch": 5.201934703748488,
"grad_norm": 3.3872568607330322,
"learning_rate": 8.419233204964046e-06,
"loss": 0.682,
"step": 4300
},
{
"epoch": 5.214026602176542,
"grad_norm": 5.219661235809326,
"learning_rate": 8.354321145277717e-06,
"loss": 0.8211,
"step": 4310
},
{
"epoch": 5.226118500604595,
"grad_norm": 2.866403341293335,
"learning_rate": 8.289563566958256e-06,
"loss": 0.6943,
"step": 4320
},
{
"epoch": 5.238210399032648,
"grad_norm": 5.183192729949951,
"learning_rate": 8.224961975320539e-06,
"loss": 0.5939,
"step": 4330
},
{
"epoch": 5.250302297460701,
"grad_norm": 3.7528531551361084,
"learning_rate": 8.16051787205345e-06,
"loss": 0.7301,
"step": 4340
},
{
"epoch": 5.262394195888755,
"grad_norm": 3.930178642272949,
"learning_rate": 8.096232755185033e-06,
"loss": 0.7815,
"step": 4350
},
{
"epoch": 5.274486094316808,
"grad_norm": 2.9726829528808594,
"learning_rate": 8.03210811904761e-06,
"loss": 0.6982,
"step": 4360
},
{
"epoch": 5.286577992744861,
"grad_norm": 5.79806661605835,
"learning_rate": 7.968145454243052e-06,
"loss": 0.7827,
"step": 4370
},
{
"epoch": 5.298669891172914,
"grad_norm": 4.123749256134033,
"learning_rate": 7.904346247608172e-06,
"loss": 0.6476,
"step": 4380
},
{
"epoch": 5.310761789600967,
"grad_norm": 2.9389238357543945,
"learning_rate": 7.840711982180118e-06,
"loss": 0.7254,
"step": 4390
},
{
"epoch": 5.322853688029021,
"grad_norm": 3.596727132797241,
"learning_rate": 7.77724413716193e-06,
"loss": 0.716,
"step": 4400
},
{
"epoch": 5.334945586457073,
"grad_norm": 2.72917103767395,
"learning_rate": 7.713944187888116e-06,
"loss": 0.6552,
"step": 4410
},
{
"epoch": 5.347037484885127,
"grad_norm": 3.597003221511841,
"learning_rate": 7.650813605790409e-06,
"loss": 0.7794,
"step": 4420
},
{
"epoch": 5.359129383313181,
"grad_norm": 3.1855111122131348,
"learning_rate": 7.587853858363524e-06,
"loss": 0.716,
"step": 4430
},
{
"epoch": 5.371221281741233,
"grad_norm": 4.469402313232422,
"learning_rate": 7.525066409131065e-06,
"loss": 0.746,
"step": 4440
},
{
"epoch": 5.383313180169287,
"grad_norm": 2.5034403800964355,
"learning_rate": 7.462452717611488e-06,
"loss": 0.8737,
"step": 4450
},
{
"epoch": 5.3954050785973395,
"grad_norm": 4.369159698486328,
"learning_rate": 7.40001423928419e-06,
"loss": 0.7226,
"step": 4460
},
{
"epoch": 5.407496977025393,
"grad_norm": 3.4538373947143555,
"learning_rate": 7.337752425555673e-06,
"loss": 0.7042,
"step": 4470
},
{
"epoch": 5.419588875453446,
"grad_norm": 3.6851110458374023,
"learning_rate": 7.275668723725802e-06,
"loss": 0.7007,
"step": 4480
},
{
"epoch": 5.431680773881499,
"grad_norm": 3.3171651363372803,
"learning_rate": 7.213764576954148e-06,
"loss": 0.6816,
"step": 4490
},
{
"epoch": 5.443772672309553,
"grad_norm": 3.6243672370910645,
"learning_rate": 7.152041424226469e-06,
"loss": 0.5568,
"step": 4500
},
{
"epoch": 5.455864570737606,
"grad_norm": 3.998467206954956,
"learning_rate": 7.09050070032126e-06,
"loss": 0.673,
"step": 4510
},
{
"epoch": 5.467956469165659,
"grad_norm": 3.8511223793029785,
"learning_rate": 7.029143835776353e-06,
"loss": 0.6581,
"step": 4520
},
{
"epoch": 5.480048367593712,
"grad_norm": 3.5216002464294434,
"learning_rate": 6.967972256855732e-06,
"loss": 0.6585,
"step": 4530
},
{
"epoch": 5.4921402660217655,
"grad_norm": 3.133366346359253,
"learning_rate": 6.906987385516311e-06,
"loss": 0.6854,
"step": 4540
},
{
"epoch": 5.504232164449819,
"grad_norm": 3.453603506088257,
"learning_rate": 6.846190639374957e-06,
"loss": 0.78,
"step": 4550
},
{
"epoch": 5.50906892382104,
"eval_loss": 1.0946567058563232,
"eval_runtime": 62.2311,
"eval_samples_per_second": 3.053,
"eval_steps_per_second": 1.527,
"step": 4554
},
{
"epoch": 5.516324062877872,
"grad_norm": 3.6046810150146484,
"learning_rate": 6.785583431675454e-06,
"loss": 0.7816,
"step": 4560
},
{
"epoch": 5.528415961305925,
"grad_norm": 4.014889240264893,
"learning_rate": 6.725167171255721e-06,
"loss": 0.7841,
"step": 4570
},
{
"epoch": 5.540507859733978,
"grad_norm": 2.989539384841919,
"learning_rate": 6.664943262515004e-06,
"loss": 0.6477,
"step": 4580
},
{
"epoch": 5.552599758162032,
"grad_norm": 3.522326707839966,
"learning_rate": 6.604913105381298e-06,
"loss": 0.7645,
"step": 4590
},
{
"epoch": 5.564691656590084,
"grad_norm": 3.36456561088562,
"learning_rate": 6.545078095278736e-06,
"loss": 0.6827,
"step": 4600
},
{
"epoch": 5.576783555018138,
"grad_norm": 3.4892289638519287,
"learning_rate": 6.485439623095195e-06,
"loss": 0.6528,
"step": 4610
},
{
"epoch": 5.588875453446191,
"grad_norm": 2.6480884552001953,
"learning_rate": 6.425999075149952e-06,
"loss": 0.7137,
"step": 4620
},
{
"epoch": 5.600967351874244,
"grad_norm": 2.971666097640991,
"learning_rate": 6.366757833161464e-06,
"loss": 0.7081,
"step": 4630
},
{
"epoch": 5.613059250302298,
"grad_norm": 2.607954263687134,
"learning_rate": 6.30771727421523e-06,
"loss": 0.6816,
"step": 4640
},
{
"epoch": 5.6251511487303505,
"grad_norm": 3.269277811050415,
"learning_rate": 6.248878770731804e-06,
"loss": 0.7183,
"step": 4650
},
{
"epoch": 5.637243047158404,
"grad_norm": 4.291733741760254,
"learning_rate": 6.190243690434882e-06,
"loss": 0.7328,
"step": 4660
},
{
"epoch": 5.649334945586457,
"grad_norm": 4.875344276428223,
"learning_rate": 6.131813396319514e-06,
"loss": 0.7915,
"step": 4670
},
{
"epoch": 5.66142684401451,
"grad_norm": 2.9836928844451904,
"learning_rate": 6.073589246620411e-06,
"loss": 0.728,
"step": 4680
},
{
"epoch": 5.673518742442564,
"grad_norm": 3.9223244190216064,
"learning_rate": 6.015572594780369e-06,
"loss": 0.7083,
"step": 4690
},
{
"epoch": 5.685610640870617,
"grad_norm": 4.437389850616455,
"learning_rate": 5.957764789418833e-06,
"loss": 0.7338,
"step": 4700
},
{
"epoch": 5.69770253929867,
"grad_norm": 3.038198709487915,
"learning_rate": 5.900167174300521e-06,
"loss": 0.6842,
"step": 4710
},
{
"epoch": 5.709794437726723,
"grad_norm": 3.954749345779419,
"learning_rate": 5.842781088304205e-06,
"loss": 0.7737,
"step": 4720
},
{
"epoch": 5.7218863361547765,
"grad_norm": 2.9807608127593994,
"learning_rate": 5.785607865391567e-06,
"loss": 0.6652,
"step": 4730
},
{
"epoch": 5.733978234582829,
"grad_norm": 3.8414597511291504,
"learning_rate": 5.728648834576219e-06,
"loss": 0.8281,
"step": 4740
},
{
"epoch": 5.746070133010883,
"grad_norm": 4.835539817810059,
"learning_rate": 5.671905319892789e-06,
"loss": 0.6251,
"step": 4750
},
{
"epoch": 5.7581620314389355,
"grad_norm": 4.228201389312744,
"learning_rate": 5.6153786403661555e-06,
"loss": 0.684,
"step": 4760
},
{
"epoch": 5.770253929866989,
"grad_norm": 5.972825050354004,
"learning_rate": 5.55907010998076e-06,
"loss": 0.6215,
"step": 4770
},
{
"epoch": 5.782345828295043,
"grad_norm": 5.1888322830200195,
"learning_rate": 5.502981037650113e-06,
"loss": 0.671,
"step": 4780
},
{
"epoch": 5.794437726723095,
"grad_norm": 2.67165470123291,
"learning_rate": 5.4471127271863235e-06,
"loss": 0.7918,
"step": 4790
},
{
"epoch": 5.806529625151149,
"grad_norm": 3.7494444847106934,
"learning_rate": 5.3914664772697955e-06,
"loss": 0.7763,
"step": 4800
},
{
"epoch": 5.818621523579202,
"grad_norm": 2.512258768081665,
"learning_rate": 5.336043581419064e-06,
"loss": 0.6537,
"step": 4810
},
{
"epoch": 5.830713422007255,
"grad_norm": 4.046555042266846,
"learning_rate": 5.280845327960708e-06,
"loss": 0.713,
"step": 4820
},
{
"epoch": 5.842805320435309,
"grad_norm": 3.8291842937469482,
"learning_rate": 5.2258729999994135e-06,
"loss": 0.6669,
"step": 4830
},
{
"epoch": 5.8548972188633615,
"grad_norm": 4.035644054412842,
"learning_rate": 5.171127875388126e-06,
"loss": 0.7758,
"step": 4840
},
{
"epoch": 5.866989117291415,
"grad_norm": 4.429537296295166,
"learning_rate": 5.116611226698376e-06,
"loss": 0.645,
"step": 4850
},
{
"epoch": 5.879081015719468,
"grad_norm": 4.3502888679504395,
"learning_rate": 5.0623243211906775e-06,
"loss": 0.786,
"step": 4860
},
{
"epoch": 5.891172914147521,
"grad_norm": 3.3320226669311523,
"learning_rate": 5.008268420785084e-06,
"loss": 0.7135,
"step": 4870
},
{
"epoch": 5.903264812575574,
"grad_norm": 4.4856414794921875,
"learning_rate": 4.954444782031824e-06,
"loss": 0.7089,
"step": 4880
},
{
"epoch": 5.915356711003628,
"grad_norm": 3.6500167846679688,
"learning_rate": 4.900854656082139e-06,
"loss": 0.834,
"step": 4890
},
{
"epoch": 5.92744860943168,
"grad_norm": 3.233738899230957,
"learning_rate": 4.847499288659163e-06,
"loss": 0.6637,
"step": 4900
},
{
"epoch": 5.939540507859734,
"grad_norm": 5.101102352142334,
"learning_rate": 4.7943799200289865e-06,
"loss": 0.6458,
"step": 4910
},
{
"epoch": 5.9516324062877874,
"grad_norm": 3.624640941619873,
"learning_rate": 4.7414977849718e-06,
"loss": 0.72,
"step": 4920
},
{
"epoch": 5.96372430471584,
"grad_norm": 4.193915843963623,
"learning_rate": 4.688854112753217e-06,
"loss": 0.81,
"step": 4930
},
{
"epoch": 5.975816203143894,
"grad_norm": 3.672713279724121,
"learning_rate": 4.636450127095709e-06,
"loss": 0.7575,
"step": 4940
},
{
"epoch": 5.987908101571946,
"grad_norm": 4.2874627113342285,
"learning_rate": 4.5842870461501104e-06,
"loss": 0.7376,
"step": 4950
},
{
"epoch": 6.0,
"grad_norm": 3.7344837188720703,
"learning_rate": 4.532366082467356e-06,
"loss": 0.6445,
"step": 4960
},
{
"epoch": 6.008464328899637,
"eval_loss": 1.1057277917861938,
"eval_runtime": 62.2006,
"eval_samples_per_second": 3.055,
"eval_steps_per_second": 1.527,
"step": 4968
},
{
"epoch": 6.010882708585248,
"grad_norm": 3.557671546936035,
"learning_rate": 4.480688442970244e-06,
"loss": 0.7159,
"step": 4970
},
{
"epoch": 6.0229746070133015,
"grad_norm": 3.6378707885742188,
"learning_rate": 4.429255328925449e-06,
"loss": 0.6371,
"step": 4980
},
{
"epoch": 6.035066505441354,
"grad_norm": 5.355988502502441,
"learning_rate": 4.378067935915519e-06,
"loss": 0.6781,
"step": 4990
},
{
"epoch": 6.047158403869408,
"grad_norm": 3.7519631385803223,
"learning_rate": 4.327127453811147e-06,
"loss": 0.7401,
"step": 5000
},
{
"epoch": 6.05925030229746,
"grad_norm": 4.124637126922607,
"learning_rate": 4.2764350667434605e-06,
"loss": 0.7264,
"step": 5010
},
{
"epoch": 6.071342200725514,
"grad_norm": 3.4475553035736084,
"learning_rate": 4.225991953076558e-06,
"loss": 0.6487,
"step": 5020
},
{
"epoch": 6.083434099153567,
"grad_norm": 4.422087669372559,
"learning_rate": 4.1757992853800516e-06,
"loss": 0.7784,
"step": 5030
},
{
"epoch": 6.09552599758162,
"grad_norm": 4.515448570251465,
"learning_rate": 4.125858230401853e-06,
"loss": 0.6513,
"step": 5040
},
{
"epoch": 6.107617896009674,
"grad_norm": 3.828434944152832,
"learning_rate": 4.076169949041037e-06,
"loss": 0.6473,
"step": 5050
},
{
"epoch": 6.119709794437727,
"grad_norm": 3.175715684890747,
"learning_rate": 4.02673559632086e-06,
"loss": 0.6962,
"step": 5060
},
{
"epoch": 6.13180169286578,
"grad_norm": 3.969517946243286,
"learning_rate": 3.977556321361913e-06,
"loss": 0.6401,
"step": 5070
},
{
"epoch": 6.143893591293833,
"grad_norm": 4.145517349243164,
"learning_rate": 3.928633267355388e-06,
"loss": 0.6098,
"step": 5080
},
{
"epoch": 6.155985489721886,
"grad_norm": 3.487616539001465,
"learning_rate": 3.879967571536543e-06,
"loss": 0.6851,
"step": 5090
},
{
"epoch": 6.168077388149939,
"grad_norm": 4.306605815887451,
"learning_rate": 3.831560365158237e-06,
"loss": 0.5277,
"step": 5100
},
{
"epoch": 6.180169286577993,
"grad_norm": 4.123151779174805,
"learning_rate": 3.783412773464647e-06,
"loss": 0.6657,
"step": 5110
},
{
"epoch": 6.192261185006046,
"grad_norm": 4.117605209350586,
"learning_rate": 3.735525915665094e-06,
"loss": 0.7622,
"step": 5120
},
{
"epoch": 6.204353083434099,
"grad_norm": 3.730079174041748,
"learning_rate": 3.687900904908053e-06,
"loss": 0.6035,
"step": 5130
},
{
"epoch": 6.2164449818621526,
"grad_norm": 4.194562911987305,
"learning_rate": 3.6405388482552564e-06,
"loss": 0.6141,
"step": 5140
},
{
"epoch": 6.228536880290205,
"grad_norm": 4.338625431060791,
"learning_rate": 3.593440846655976e-06,
"loss": 0.6479,
"step": 5150
},
{
"epoch": 6.240628778718259,
"grad_norm": 3.5001437664031982,
"learning_rate": 3.5466079949214046e-06,
"loss": 0.6212,
"step": 5160
},
{
"epoch": 6.252720677146312,
"grad_norm": 5.18671989440918,
"learning_rate": 3.50004138169924e-06,
"loss": 0.6163,
"step": 5170
},
{
"epoch": 6.264812575574365,
"grad_norm": 3.782536029815674,
"learning_rate": 3.4537420894483555e-06,
"loss": 0.6026,
"step": 5180
},
{
"epoch": 6.276904474002419,
"grad_norm": 3.839702844619751,
"learning_rate": 3.407711194413655e-06,
"loss": 0.6264,
"step": 5190
},
{
"epoch": 6.288996372430471,
"grad_norm": 3.8614888191223145,
"learning_rate": 3.3619497666010256e-06,
"loss": 0.6775,
"step": 5200
},
{
"epoch": 6.301088270858525,
"grad_norm": 2.7845358848571777,
"learning_rate": 3.3164588697525084e-06,
"loss": 0.5879,
"step": 5210
},
{
"epoch": 6.313180169286578,
"grad_norm": 3.5601367950439453,
"learning_rate": 3.271239561321538e-06,
"loss": 0.6745,
"step": 5220
},
{
"epoch": 6.325272067714631,
"grad_norm": 4.521182537078857,
"learning_rate": 3.2262928924483625e-06,
"loss": 0.7594,
"step": 5230
},
{
"epoch": 6.337363966142684,
"grad_norm": 3.8560290336608887,
"learning_rate": 3.18161990793563e-06,
"loss": 0.6327,
"step": 5240
},
{
"epoch": 6.3494558645707375,
"grad_norm": 4.2319207191467285,
"learning_rate": 3.1372216462240887e-06,
"loss": 0.7454,
"step": 5250
},
{
"epoch": 6.361547762998791,
"grad_norm": 4.699012279510498,
"learning_rate": 3.0930991393684505e-06,
"loss": 0.688,
"step": 5260
},
{
"epoch": 6.373639661426844,
"grad_norm": 5.492476940155029,
"learning_rate": 3.0492534130133874e-06,
"loss": 0.6861,
"step": 5270
},
{
"epoch": 6.385731559854897,
"grad_norm": 4.874731063842773,
"learning_rate": 3.0056854863697175e-06,
"loss": 0.5878,
"step": 5280
},
{
"epoch": 6.39782345828295,
"grad_norm": 4.2611775398254395,
"learning_rate": 2.962396372190688e-06,
"loss": 0.7257,
"step": 5290
},
{
"epoch": 6.409915356711004,
"grad_norm": 3.473050594329834,
"learning_rate": 2.9193870767484502e-06,
"loss": 0.6894,
"step": 5300
},
{
"epoch": 6.422007255139057,
"grad_norm": 3.451538562774658,
"learning_rate": 2.876658599810651e-06,
"loss": 0.662,
"step": 5310
},
{
"epoch": 6.43409915356711,
"grad_norm": 7.277347087860107,
"learning_rate": 2.8342119346172102e-06,
"loss": 0.7509,
"step": 5320
},
{
"epoch": 6.4461910519951635,
"grad_norm": 3.73040509223938,
"learning_rate": 2.7920480678572237e-06,
"loss": 0.6465,
"step": 5330
},
{
"epoch": 6.458282950423216,
"grad_norm": 3.874638319015503,
"learning_rate": 2.7501679796460332e-06,
"loss": 0.7057,
"step": 5340
},
{
"epoch": 6.47037484885127,
"grad_norm": 4.644408226013184,
"learning_rate": 2.7085726435024267e-06,
"loss": 0.7528,
"step": 5350
},
{
"epoch": 6.4824667472793225,
"grad_norm": 4.328802585601807,
"learning_rate": 2.667263026326024e-06,
"loss": 0.6572,
"step": 5360
},
{
"epoch": 6.494558645707376,
"grad_norm": 5.580451488494873,
"learning_rate": 2.626240088374817e-06,
"loss": 0.7067,
"step": 5370
},
{
"epoch": 6.50665054413543,
"grad_norm": 4.495114803314209,
"learning_rate": 2.5855047832427988e-06,
"loss": 0.6848,
"step": 5380
},
{
"epoch": 6.50906892382104,
"eval_loss": 1.1272510290145874,
"eval_runtime": 62.2526,
"eval_samples_per_second": 3.052,
"eval_steps_per_second": 1.526,
"step": 5382
},
{
"epoch": 6.518742442563482,
"grad_norm": 5.930830478668213,
"learning_rate": 2.5450580578378484e-06,
"loss": 0.6359,
"step": 5390
},
{
"epoch": 6.530834340991536,
"grad_norm": 4.322328567504883,
"learning_rate": 2.5049008523596778e-06,
"loss": 0.6232,
"step": 5400
},
{
"epoch": 6.542926239419589,
"grad_norm": 3.9086663722991943,
"learning_rate": 2.4650341002780264e-06,
"loss": 0.6725,
"step": 5410
},
{
"epoch": 6.555018137847642,
"grad_norm": 3.7998921871185303,
"learning_rate": 2.4254587283109014e-06,
"loss": 0.6814,
"step": 5420
},
{
"epoch": 6.567110036275695,
"grad_norm": 3.609553575515747,
"learning_rate": 2.386175656403085e-06,
"loss": 0.599,
"step": 5430
},
{
"epoch": 6.5792019347037485,
"grad_norm": 4.122236251831055,
"learning_rate": 2.347185797704726e-06,
"loss": 0.6335,
"step": 5440
},
{
"epoch": 6.591293833131802,
"grad_norm": 3.8587522506713867,
"learning_rate": 2.3084900585501273e-06,
"loss": 0.6883,
"step": 5450
},
{
"epoch": 6.603385731559855,
"grad_norm": 6.455262184143066,
"learning_rate": 2.2700893384366516e-06,
"loss": 0.6836,
"step": 5460
},
{
"epoch": 6.615477629987908,
"grad_norm": 4.4655442237854,
"learning_rate": 2.231984530003845e-06,
"loss": 0.6687,
"step": 5470
},
{
"epoch": 6.627569528415961,
"grad_norm": 5.083980560302734,
"learning_rate": 2.1941765190126718e-06,
"loss": 0.6486,
"step": 5480
},
{
"epoch": 6.639661426844015,
"grad_norm": 3.8917980194091797,
"learning_rate": 2.1566661843249203e-06,
"loss": 0.6143,
"step": 5490
},
{
"epoch": 6.651753325272068,
"grad_norm": 5.429650783538818,
"learning_rate": 2.119454397882784e-06,
"loss": 0.7289,
"step": 5500
},
{
"epoch": 6.663845223700121,
"grad_norm": 5.140111446380615,
"learning_rate": 2.082542024688578e-06,
"loss": 0.8017,
"step": 5510
},
{
"epoch": 6.6759371221281745,
"grad_norm": 5.339375972747803,
"learning_rate": 2.045929922784649e-06,
"loss": 0.7102,
"step": 5520
},
{
"epoch": 6.688029020556227,
"grad_norm": 3.4869794845581055,
"learning_rate": 2.0096189432334194e-06,
"loss": 0.6714,
"step": 5530
},
{
"epoch": 6.700120918984281,
"grad_norm": 4.189328670501709,
"learning_rate": 1.9736099300976124e-06,
"loss": 0.7404,
"step": 5540
},
{
"epoch": 6.712212817412333,
"grad_norm": 4.462172031402588,
"learning_rate": 1.937903720420609e-06,
"loss": 0.6742,
"step": 5550
},
{
"epoch": 6.724304715840387,
"grad_norm": 5.784437656402588,
"learning_rate": 1.9025011442070245e-06,
"loss": 0.6307,
"step": 5560
},
{
"epoch": 6.73639661426844,
"grad_norm": 4.614073276519775,
"learning_rate": 1.8674030244033896e-06,
"loss": 0.5992,
"step": 5570
},
{
"epoch": 6.748488512696493,
"grad_norm": 3.2273013591766357,
"learning_rate": 1.8326101768790331e-06,
"loss": 0.6459,
"step": 5580
},
{
"epoch": 6.760580411124547,
"grad_norm": 3.989841938018799,
"learning_rate": 1.798123410407097e-06,
"loss": 0.6755,
"step": 5590
},
{
"epoch": 6.7726723095526,
"grad_norm": 5.584028720855713,
"learning_rate": 1.7639435266457759e-06,
"loss": 0.6293,
"step": 5600
},
{
"epoch": 6.784764207980653,
"grad_norm": 5.334152698516846,
"learning_rate": 1.7300713201196344e-06,
"loss": 0.7763,
"step": 5610
},
{
"epoch": 6.796856106408706,
"grad_norm": 3.9883923530578613,
"learning_rate": 1.6965075782011796e-06,
"loss": 0.7426,
"step": 5620
},
{
"epoch": 6.808948004836759,
"grad_norm": 5.658714771270752,
"learning_rate": 1.6632530810925178e-06,
"loss": 0.7003,
"step": 5630
},
{
"epoch": 6.821039903264813,
"grad_norm": 4.874862194061279,
"learning_rate": 1.6303086018072654e-06,
"loss": 0.6416,
"step": 5640
},
{
"epoch": 6.833131801692866,
"grad_norm": 5.249368190765381,
"learning_rate": 1.5976749061525436e-06,
"loss": 0.695,
"step": 5650
},
{
"epoch": 6.845223700120919,
"grad_norm": 3.46927809715271,
"learning_rate": 1.5653527527111822e-06,
"loss": 0.6439,
"step": 5660
},
{
"epoch": 6.857315598548972,
"grad_norm": 4.2240309715271,
"learning_rate": 1.5333428928240988e-06,
"loss": 0.6616,
"step": 5670
},
{
"epoch": 6.869407496977026,
"grad_norm": 6.947134494781494,
"learning_rate": 1.5016460705728285e-06,
"loss": 0.7057,
"step": 5680
},
{
"epoch": 6.881499395405078,
"grad_norm": 6.7013654708862305,
"learning_rate": 1.4702630227622249e-06,
"loss": 0.6777,
"step": 5690
},
{
"epoch": 6.893591293833132,
"grad_norm": 3.3105950355529785,
"learning_rate": 1.4391944789033278e-06,
"loss": 0.6382,
"step": 5700
},
{
"epoch": 6.9056831922611845,
"grad_norm": 4.079921245574951,
"learning_rate": 1.4084411611964187e-06,
"loss": 0.6842,
"step": 5710
},
{
"epoch": 6.917775090689238,
"grad_norm": 4.0758209228515625,
"learning_rate": 1.3780037845142218e-06,
"loss": 0.6355,
"step": 5720
},
{
"epoch": 6.929866989117292,
"grad_norm": 4.716675758361816,
"learning_rate": 1.3478830563852962e-06,
"loss": 0.5975,
"step": 5730
},
{
"epoch": 6.941958887545344,
"grad_norm": 3.8192057609558105,
"learning_rate": 1.318079676977572e-06,
"loss": 0.6477,
"step": 5740
},
{
"epoch": 6.954050785973398,
"grad_norm": 3.5390541553497314,
"learning_rate": 1.2885943390820959e-06,
"loss": 0.6842,
"step": 5750
},
{
"epoch": 6.966142684401451,
"grad_norm": 2.6589369773864746,
"learning_rate": 1.2594277280969124e-06,
"loss": 0.7192,
"step": 5760
},
{
"epoch": 6.978234582829504,
"grad_norm": 4.020471096038818,
"learning_rate": 1.23058052201114e-06,
"loss": 0.7203,
"step": 5770
},
{
"epoch": 6.990326481257558,
"grad_norm": 3.9986772537231445,
"learning_rate": 1.2020533913892056e-06,
"loss": 0.6348,
"step": 5780
}
],
"logging_steps": 10,
"max_steps": 6616,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.585532162215772e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}