{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9930394431554523,
"eval_steps": 50,
"global_step": 645,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02320185614849188,
"grad_norm": 26.745355095184312,
"learning_rate": 5e-07,
"loss": 1.6569,
"step": 5
},
{
"epoch": 0.04640371229698376,
"grad_norm": 13.080188660738868,
"learning_rate": 1e-06,
"loss": 1.5099,
"step": 10
},
{
"epoch": 0.06960556844547564,
"grad_norm": 7.775505635686351,
"learning_rate": 9.998470286265414e-07,
"loss": 1.2782,
"step": 15
},
{
"epoch": 0.09280742459396751,
"grad_norm": 6.492859712313467,
"learning_rate": 9.993882081071305e-07,
"loss": 1.2359,
"step": 20
},
{
"epoch": 0.11600928074245939,
"grad_norm": 5.355551984540261,
"learning_rate": 9.986238191873872e-07,
"loss": 1.204,
"step": 25
},
{
"epoch": 0.13921113689095127,
"grad_norm": 5.125393165293572,
"learning_rate": 9.975543295858033e-07,
"loss": 1.1627,
"step": 30
},
{
"epoch": 0.16241299303944315,
"grad_norm": 4.710071299991241,
"learning_rate": 9.961803937075514e-07,
"loss": 1.1463,
"step": 35
},
{
"epoch": 0.18561484918793503,
"grad_norm": 5.029376799191572,
"learning_rate": 9.945028522440653e-07,
"loss": 1.1394,
"step": 40
},
{
"epoch": 0.2088167053364269,
"grad_norm": 4.763291242870039,
"learning_rate": 9.925227316586314e-07,
"loss": 1.1371,
"step": 45
},
{
"epoch": 0.23201856148491878,
"grad_norm": 4.903033982523367,
"learning_rate": 9.902412435583125e-07,
"loss": 1.1181,
"step": 50
},
{
"epoch": 0.23201856148491878,
"eval_loss": 1.1214605569839478,
"eval_runtime": 105.8329,
"eval_samples_per_second": 57.912,
"eval_steps_per_second": 0.907,
"step": 50
},
{
"epoch": 0.2552204176334107,
"grad_norm": 4.785575700738186,
"learning_rate": 9.876597839525813e-07,
"loss": 1.1163,
"step": 55
},
{
"epoch": 0.27842227378190254,
"grad_norm": 4.600727000401806,
"learning_rate": 9.847799323991233e-07,
"loss": 1.1232,
"step": 60
},
{
"epoch": 0.30162412993039445,
"grad_norm": 4.8200416840356315,
"learning_rate": 9.816034510373285e-07,
"loss": 1.125,
"step": 65
},
{
"epoch": 0.3248259860788863,
"grad_norm": 4.958997518359378,
"learning_rate": 9.781322835100637e-07,
"loss": 1.108,
"step": 70
},
{
"epoch": 0.3480278422273782,
"grad_norm": 4.883541365508776,
"learning_rate": 9.743685537743856e-07,
"loss": 1.106,
"step": 75
},
{
"epoch": 0.37122969837587005,
"grad_norm": 4.973507458353338,
"learning_rate": 9.70314564801922e-07,
"loss": 1.0973,
"step": 80
},
{
"epoch": 0.39443155452436196,
"grad_norm": 4.704415990191669,
"learning_rate": 9.659727971697173e-07,
"loss": 1.0964,
"step": 85
},
{
"epoch": 0.4176334106728538,
"grad_norm": 4.759885977268913,
"learning_rate": 9.613459075424033e-07,
"loss": 1.0956,
"step": 90
},
{
"epoch": 0.4408352668213457,
"grad_norm": 4.868535908803129,
"learning_rate": 9.564367270466245e-07,
"loss": 1.0787,
"step": 95
},
{
"epoch": 0.46403712296983757,
"grad_norm": 5.180286116736628,
"learning_rate": 9.51248259538713e-07,
"loss": 1.0765,
"step": 100
},
{
"epoch": 0.46403712296983757,
"eval_loss": 1.0775035619735718,
"eval_runtime": 105.5293,
"eval_samples_per_second": 58.079,
"eval_steps_per_second": 0.91,
"step": 100
},
{
"epoch": 0.4872389791183295,
"grad_norm": 5.290465762761348,
"learning_rate": 9.457836797666721e-07,
"loss": 1.0903,
"step": 105
},
{
"epoch": 0.5104408352668214,
"grad_norm": 4.81291157554945,
"learning_rate": 9.400463314275941e-07,
"loss": 1.0697,
"step": 110
},
{
"epoch": 0.5336426914153132,
"grad_norm": 4.914554202012043,
"learning_rate": 9.340397251217008e-07,
"loss": 1.0668,
"step": 115
},
{
"epoch": 0.5568445475638051,
"grad_norm": 5.240457841494325,
"learning_rate": 9.27767536204258e-07,
"loss": 1.0676,
"step": 120
},
{
"epoch": 0.580046403712297,
"grad_norm": 4.957459385263701,
"learning_rate": 9.212336025366787e-07,
"loss": 1.0746,
"step": 125
},
{
"epoch": 0.6032482598607889,
"grad_norm": 5.29032668711839,
"learning_rate": 9.144419221381918e-07,
"loss": 1.0724,
"step": 130
},
{
"epoch": 0.6264501160092807,
"grad_norm": 4.908560953587426,
"learning_rate": 9.073966507395121e-07,
"loss": 1.0745,
"step": 135
},
{
"epoch": 0.6496519721577726,
"grad_norm": 4.912842113728852,
"learning_rate": 9.001020992400085e-07,
"loss": 1.0559,
"step": 140
},
{
"epoch": 0.6728538283062645,
"grad_norm": 5.088585906783296,
"learning_rate": 8.925627310699274e-07,
"loss": 1.0705,
"step": 145
},
{
"epoch": 0.6960556844547564,
"grad_norm": 5.140684832177941,
"learning_rate": 8.84783159459285e-07,
"loss": 1.0639,
"step": 150
},
{
"epoch": 0.6960556844547564,
"eval_loss": 1.0501643419265747,
"eval_runtime": 105.4561,
"eval_samples_per_second": 58.119,
"eval_steps_per_second": 0.91,
"step": 150
},
{
"epoch": 0.7192575406032483,
"grad_norm": 5.311257433234373,
"learning_rate": 8.767681446150976e-07,
"loss": 1.0472,
"step": 155
},
{
"epoch": 0.7424593967517401,
"grad_norm": 5.091539509688025,
"learning_rate": 8.68522590808682e-07,
"loss": 1.0645,
"step": 160
},
{
"epoch": 0.765661252900232,
"grad_norm": 5.132013982763288,
"learning_rate": 8.600515433748001e-07,
"loss": 1.0416,
"step": 165
},
{
"epoch": 0.7888631090487239,
"grad_norm": 4.753354098230195,
"learning_rate": 8.51360185624495e-07,
"loss": 1.0478,
"step": 170
},
{
"epoch": 0.8120649651972158,
"grad_norm": 5.029473978539478,
"learning_rate": 8.424538356734956e-07,
"loss": 1.0383,
"step": 175
},
{
"epoch": 0.8352668213457076,
"grad_norm": 4.9588553004593345,
"learning_rate": 8.333379431881397e-07,
"loss": 1.0342,
"step": 180
},
{
"epoch": 0.8584686774941995,
"grad_norm": 5.234591483099779,
"learning_rate": 8.240180860508026e-07,
"loss": 1.0413,
"step": 185
},
{
"epoch": 0.8816705336426914,
"grad_norm": 5.121566469508508,
"learning_rate": 8.144999669468713e-07,
"loss": 1.0264,
"step": 190
},
{
"epoch": 0.9048723897911833,
"grad_norm": 5.0479045768726305,
"learning_rate": 8.047894098753539e-07,
"loss": 1.028,
"step": 195
},
{
"epoch": 0.9280742459396751,
"grad_norm": 5.0838098259091185,
"learning_rate": 7.948923565852597e-07,
"loss": 1.0308,
"step": 200
},
{
"epoch": 0.9280742459396751,
"eval_loss": 1.0281875133514404,
"eval_runtime": 105.8568,
"eval_samples_per_second": 57.899,
"eval_steps_per_second": 0.907,
"step": 200
},
{
"epoch": 0.951276102088167,
"grad_norm": 5.3244675969022826,
"learning_rate": 7.848148629399285e-07,
"loss": 1.0262,
"step": 205
},
{
"epoch": 0.974477958236659,
"grad_norm": 4.9307215762355305,
"learning_rate": 7.745630952115363e-07,
"loss": 1.0349,
"step": 210
},
{
"epoch": 0.9976798143851509,
"grad_norm": 4.994203203030838,
"learning_rate": 7.641433263080418e-07,
"loss": 1.0216,
"step": 215
},
{
"epoch": 1.0208816705336428,
"grad_norm": 5.112958880673586,
"learning_rate": 7.535619319348865e-07,
"loss": 0.9241,
"step": 220
},
{
"epoch": 1.0440835266821347,
"grad_norm": 5.264187445397404,
"learning_rate": 7.428253866937918e-07,
"loss": 0.9001,
"step": 225
},
{
"epoch": 1.0672853828306264,
"grad_norm": 5.645584402922182,
"learning_rate": 7.319402601210447e-07,
"loss": 0.8916,
"step": 230
},
{
"epoch": 1.0904872389791183,
"grad_norm": 5.655360994963379,
"learning_rate": 7.209132126676933e-07,
"loss": 0.8876,
"step": 235
},
{
"epoch": 1.1136890951276102,
"grad_norm": 5.3773890810778795,
"learning_rate": 7.097509916241145e-07,
"loss": 0.8931,
"step": 240
},
{
"epoch": 1.136890951276102,
"grad_norm": 5.658881203794,
"learning_rate": 6.984604269914436e-07,
"loss": 0.905,
"step": 245
},
{
"epoch": 1.160092807424594,
"grad_norm": 5.966282577193694,
"learning_rate": 6.870484273023967e-07,
"loss": 0.9038,
"step": 250
},
{
"epoch": 1.160092807424594,
"eval_loss": 1.0220295190811157,
"eval_runtime": 105.8362,
"eval_samples_per_second": 57.91,
"eval_steps_per_second": 0.907,
"step": 250
},
{
"epoch": 1.1832946635730859,
"grad_norm": 5.794176185315156,
"learning_rate": 6.755219753940388e-07,
"loss": 0.8964,
"step": 255
},
{
"epoch": 1.2064965197215778,
"grad_norm": 6.603391500331007,
"learning_rate": 6.638881241350883e-07,
"loss": 0.8898,
"step": 260
},
{
"epoch": 1.2296983758700697,
"grad_norm": 5.5914639272443205,
"learning_rate": 6.52153992110368e-07,
"loss": 0.8951,
"step": 265
},
{
"epoch": 1.2529002320185616,
"grad_norm": 5.339661007608592,
"learning_rate": 6.403267592650466e-07,
"loss": 0.8961,
"step": 270
},
{
"epoch": 1.2761020881670533,
"grad_norm": 5.448280965038798,
"learning_rate": 6.28413662511334e-07,
"loss": 0.8919,
"step": 275
},
{
"epoch": 1.2993039443155452,
"grad_norm": 5.476822697700394,
"learning_rate": 6.164219913003207e-07,
"loss": 0.8931,
"step": 280
},
{
"epoch": 1.322505800464037,
"grad_norm": 5.783548079343189,
"learning_rate": 6.043590831616676e-07,
"loss": 0.8792,
"step": 285
},
{
"epoch": 1.345707656612529,
"grad_norm": 5.59782698134665,
"learning_rate": 5.92232319213878e-07,
"loss": 0.8768,
"step": 290
},
{
"epoch": 1.368909512761021,
"grad_norm": 5.193853086769952,
"learning_rate": 5.800491196478988e-07,
"loss": 0.8788,
"step": 295
},
{
"epoch": 1.3921113689095128,
"grad_norm": 5.539347488257,
"learning_rate": 5.678169391868127e-07,
"loss": 0.8973,
"step": 300
},
{
"epoch": 1.3921113689095128,
"eval_loss": 1.0114275217056274,
"eval_runtime": 106.216,
"eval_samples_per_second": 57.703,
"eval_steps_per_second": 0.904,
"step": 300
},
{
"epoch": 1.4153132250580047,
"grad_norm": 5.567338787725618,
"learning_rate": 5.555432625244023e-07,
"loss": 0.8831,
"step": 305
},
{
"epoch": 1.4385150812064964,
"grad_norm": 5.412598997121907,
"learning_rate": 5.432355997453728e-07,
"loss": 0.8848,
"step": 310
},
{
"epoch": 1.4617169373549883,
"grad_norm": 5.4356510240439775,
"learning_rate": 5.309014817300421e-07,
"loss": 0.8999,
"step": 315
},
{
"epoch": 1.4849187935034802,
"grad_norm": 5.525607667062919,
"learning_rate": 5.185484555463026e-07,
"loss": 0.8901,
"step": 320
},
{
"epoch": 1.5081206496519721,
"grad_norm": 5.583006624663847,
"learning_rate": 5.061840798316814e-07,
"loss": 0.8909,
"step": 325
},
{
"epoch": 1.531322505800464,
"grad_norm": 5.822776934487761,
"learning_rate": 4.938159201683186e-07,
"loss": 0.8829,
"step": 330
},
{
"epoch": 1.554524361948956,
"grad_norm": 5.427885443572571,
"learning_rate": 4.814515444536974e-07,
"loss": 0.8867,
"step": 335
},
{
"epoch": 1.5777262180974478,
"grad_norm": 5.513594905050496,
"learning_rate": 4.69098518269958e-07,
"loss": 0.892,
"step": 340
},
{
"epoch": 1.6009280742459397,
"grad_norm": 5.785273130658459,
"learning_rate": 4.5676440025462726e-07,
"loss": 0.8775,
"step": 345
},
{
"epoch": 1.6241299303944317,
"grad_norm": 5.494906178164733,
"learning_rate": 4.444567374755977e-07,
"loss": 0.8747,
"step": 350
},
{
"epoch": 1.6241299303944317,
"eval_loss": 1.0039345026016235,
"eval_runtime": 105.8025,
"eval_samples_per_second": 57.929,
"eval_steps_per_second": 0.907,
"step": 350
},
{
"epoch": 1.6473317865429236,
"grad_norm": 5.49251321009188,
"learning_rate": 4.3218306081318713e-07,
"loss": 0.884,
"step": 355
},
{
"epoch": 1.6705336426914155,
"grad_norm": 5.618196571147986,
"learning_rate": 4.199508803521012e-07,
"loss": 0.8945,
"step": 360
},
{
"epoch": 1.6937354988399071,
"grad_norm": 5.908619967180135,
"learning_rate": 4.0776768078612207e-07,
"loss": 0.8793,
"step": 365
},
{
"epoch": 1.716937354988399,
"grad_norm": 5.822870470090775,
"learning_rate": 3.9564091683833244e-07,
"loss": 0.8785,
"step": 370
},
{
"epoch": 1.740139211136891,
"grad_norm": 5.685661727934108,
"learning_rate": 3.835780086996793e-07,
"loss": 0.8772,
"step": 375
},
{
"epoch": 1.7633410672853829,
"grad_norm": 5.692617224399981,
"learning_rate": 3.7158633748866607e-07,
"loss": 0.8701,
"step": 380
},
{
"epoch": 1.7865429234338746,
"grad_norm": 5.6393286368292355,
"learning_rate": 3.596732407349536e-07,
"loss": 0.871,
"step": 385
},
{
"epoch": 1.8097447795823665,
"grad_norm": 5.672110777773612,
"learning_rate": 3.4784600788963193e-07,
"loss": 0.8751,
"step": 390
},
{
"epoch": 1.8329466357308584,
"grad_norm": 5.835389029793195,
"learning_rate": 3.3611187586491157e-07,
"loss": 0.8687,
"step": 395
},
{
"epoch": 1.8561484918793503,
"grad_norm": 5.386053225084359,
"learning_rate": 3.244780246059612e-07,
"loss": 0.8818,
"step": 400
},
{
"epoch": 1.8561484918793503,
"eval_loss": 0.996471107006073,
"eval_runtime": 105.8008,
"eval_samples_per_second": 57.93,
"eval_steps_per_second": 0.907,
"step": 400
},
{
"epoch": 1.8793503480278422,
"grad_norm": 5.480011457003802,
"learning_rate": 3.129515726976034e-07,
"loss": 0.8753,
"step": 405
},
{
"epoch": 1.902552204176334,
"grad_norm": 5.823910567007799,
"learning_rate": 3.015395730085565e-07,
"loss": 0.8731,
"step": 410
},
{
"epoch": 1.925754060324826,
"grad_norm": 5.629571730016235,
"learning_rate": 2.902490083758856e-07,
"loss": 0.8752,
"step": 415
},
{
"epoch": 1.948955916473318,
"grad_norm": 5.503089045781313,
"learning_rate": 2.790867873323067e-07,
"loss": 0.8879,
"step": 420
},
{
"epoch": 1.9721577726218098,
"grad_norm": 5.385767782543798,
"learning_rate": 2.680597398789554e-07,
"loss": 0.8728,
"step": 425
},
{
"epoch": 1.9953596287703017,
"grad_norm": 5.747520951511272,
"learning_rate": 2.5717461330620815e-07,
"loss": 0.8786,
"step": 430
},
{
"epoch": 2.0185614849187936,
"grad_norm": 6.233985626421451,
"learning_rate": 2.464380680651134e-07,
"loss": 0.8171,
"step": 435
},
{
"epoch": 2.0417633410672855,
"grad_norm": 6.389834673239529,
"learning_rate": 2.358566736919581e-07,
"loss": 0.7862,
"step": 440
},
{
"epoch": 2.0649651972157774,
"grad_norm": 5.666671581642326,
"learning_rate": 2.2543690478846388e-07,
"loss": 0.789,
"step": 445
},
{
"epoch": 2.0881670533642693,
"grad_norm": 5.935817576452604,
"learning_rate": 2.1518513706007152e-07,
"loss": 0.7851,
"step": 450
},
{
"epoch": 2.0881670533642693,
"eval_loss": 1.0085411071777344,
"eval_runtime": 105.7628,
"eval_samples_per_second": 57.95,
"eval_steps_per_second": 0.908,
"step": 450
},
{
"epoch": 2.111368909512761,
"grad_norm": 6.090381517467198,
"learning_rate": 2.051076434147403e-07,
"loss": 0.8027,
"step": 455
},
{
"epoch": 2.1345707656612527,
"grad_norm": 6.054221465181829,
"learning_rate": 1.9521059012464607e-07,
"loss": 0.7961,
"step": 460
},
{
"epoch": 2.1577726218097446,
"grad_norm": 6.13881987352279,
"learning_rate": 1.855000330531289e-07,
"loss": 0.7881,
"step": 465
},
{
"epoch": 2.1809744779582365,
"grad_norm": 5.9538509573599105,
"learning_rate": 1.7598191394919737e-07,
"loss": 0.794,
"step": 470
},
{
"epoch": 2.2041763341067284,
"grad_norm": 6.097297386772382,
"learning_rate": 1.666620568118603e-07,
"loss": 0.7766,
"step": 475
},
{
"epoch": 2.2273781902552203,
"grad_norm": 6.054056527354383,
"learning_rate": 1.5754616432650443e-07,
"loss": 0.775,
"step": 480
},
{
"epoch": 2.2505800464037122,
"grad_norm": 6.615286727024051,
"learning_rate": 1.4863981437550498e-07,
"loss": 0.784,
"step": 485
},
{
"epoch": 2.273781902552204,
"grad_norm": 6.027054222640892,
"learning_rate": 1.3994845662519983e-07,
"loss": 0.7858,
"step": 490
},
{
"epoch": 2.296983758700696,
"grad_norm": 6.07483919257801,
"learning_rate": 1.3147740919131812e-07,
"loss": 0.7676,
"step": 495
},
{
"epoch": 2.320185614849188,
"grad_norm": 5.967675794639106,
"learning_rate": 1.2323185538490228e-07,
"loss": 0.7784,
"step": 500
},
{
"epoch": 2.320185614849188,
"eval_loss": 1.0106791257858276,
"eval_runtime": 105.5124,
"eval_samples_per_second": 58.088,
"eval_steps_per_second": 0.91,
"step": 500
},
{
"epoch": 2.34338747099768,
"grad_norm": 6.054810430525525,
"learning_rate": 1.1521684054071523e-07,
"loss": 0.7911,
"step": 505
},
{
"epoch": 2.3665893271461718,
"grad_norm": 6.138500387539917,
"learning_rate": 1.0743726893007254e-07,
"loss": 0.7845,
"step": 510
},
{
"epoch": 2.3897911832946637,
"grad_norm": 6.348927557753904,
"learning_rate": 9.989790075999144e-08,
"loss": 0.7839,
"step": 515
},
{
"epoch": 2.4129930394431556,
"grad_norm": 6.417849773781747,
"learning_rate": 9.260334926048785e-08,
"loss": 0.7797,
"step": 520
},
{
"epoch": 2.4361948955916475,
"grad_norm": 5.884674838799749,
"learning_rate": 8.555807786180813e-08,
"loss": 0.7886,
"step": 525
},
{
"epoch": 2.4593967517401394,
"grad_norm": 5.804544655789366,
"learning_rate": 7.876639746332131e-08,
"loss": 0.7798,
"step": 530
},
{
"epoch": 2.4825986078886313,
"grad_norm": 5.958627298561463,
"learning_rate": 7.223246379574205e-08,
"loss": 0.779,
"step": 535
},
{
"epoch": 2.505800464037123,
"grad_norm": 6.388817649378131,
"learning_rate": 6.596027487829913e-08,
"loss": 0.7865,
"step": 540
},
{
"epoch": 2.529002320185615,
"grad_norm": 6.003273390035336,
"learning_rate": 5.995366857240591e-08,
"loss": 0.7677,
"step": 545
},
{
"epoch": 2.5522041763341066,
"grad_norm": 5.825668962659247,
"learning_rate": 5.421632023332778e-08,
"loss": 0.7773,
"step": 550
},
{
"epoch": 2.5522041763341066,
"eval_loss": 1.0086498260498047,
"eval_runtime": 105.6712,
"eval_samples_per_second": 58.001,
"eval_steps_per_second": 0.908,
"step": 550
},
{
"epoch": 2.5754060324825985,
"grad_norm": 5.802127812425504,
"learning_rate": 4.8751740461286826e-08,
"loss": 0.7898,
"step": 555
},
{
"epoch": 2.5986078886310904,
"grad_norm": 6.325532075096703,
"learning_rate": 4.356327295337542e-08,
"loss": 0.789,
"step": 560
},
{
"epoch": 2.6218097447795823,
"grad_norm": 5.941260110564892,
"learning_rate": 3.865409245759671e-08,
"loss": 0.7859,
"step": 565
},
{
"epoch": 2.645011600928074,
"grad_norm": 6.37567880053289,
"learning_rate": 3.402720283028277e-08,
"loss": 0.7909,
"step": 570
},
{
"epoch": 2.668213457076566,
"grad_norm": 6.239205173519981,
"learning_rate": 2.968543519807809e-08,
"loss": 0.7861,
"step": 575
},
{
"epoch": 2.691415313225058,
"grad_norm": 5.933276204870584,
"learning_rate": 2.5631446225614527e-08,
"loss": 0.782,
"step": 580
},
{
"epoch": 2.71461716937355,
"grad_norm": 5.950522022327479,
"learning_rate": 2.1867716489936294e-08,
"loss": 0.7801,
"step": 585
},
{
"epoch": 2.737819025522042,
"grad_norm": 5.995755253270831,
"learning_rate": 1.8396548962671454e-08,
"loss": 0.7855,
"step": 590
},
{
"epoch": 2.7610208816705337,
"grad_norm": 5.805329755980229,
"learning_rate": 1.5220067600876684e-08,
"loss": 0.7837,
"step": 595
},
{
"epoch": 2.7842227378190256,
"grad_norm": 5.774699503150204,
"learning_rate": 1.2340216047418694e-08,
"loss": 0.784,
"step": 600
},
{
"epoch": 2.7842227378190256,
"eval_loss": 1.0076881647109985,
"eval_runtime": 105.502,
"eval_samples_per_second": 58.094,
"eval_steps_per_second": 0.91,
"step": 600
},
{
"epoch": 2.8074245939675175,
"grad_norm": 5.826083780753947,
"learning_rate": 9.758756441687332e-09,
"loss": 0.789,
"step": 605
},
{
"epoch": 2.8306264501160094,
"grad_norm": 6.2351189691440165,
"learning_rate": 7.477268341368359e-09,
"loss": 0.7827,
"step": 610
},
{
"epoch": 2.853828306264501,
"grad_norm": 6.240042624521366,
"learning_rate": 5.497147755934628e-09,
"loss": 0.7729,
"step": 615
},
{
"epoch": 2.877030162412993,
"grad_norm": 6.055045568235345,
"learning_rate": 3.819606292448541e-09,
"loss": 0.7741,
"step": 620
},
{
"epoch": 2.9002320185614847,
"grad_norm": 6.022458929092381,
"learning_rate": 2.4456704141967433e-09,
"loss": 0.7799,
"step": 625
},
{
"epoch": 2.9234338747099766,
"grad_norm": 5.799562136804793,
"learning_rate": 1.3761808126126483e-09,
"loss": 0.7855,
"step": 630
},
{
"epoch": 2.9466357308584685,
"grad_norm": 5.88567918164354,
"learning_rate": 6.117918928693622e-10,
"loss": 0.7758,
"step": 635
},
{
"epoch": 2.9698375870069604,
"grad_norm": 6.126328207666765,
"learning_rate": 1.529713734584326e-10,
"loss": 0.7781,
"step": 640
},
{
"epoch": 2.9930394431554523,
"grad_norm": 5.625571183866048,
"learning_rate": 0.0,
"loss": 0.782,
"step": 645
},
{
"epoch": 2.9930394431554523,
"step": 645,
"total_flos": 3802978389590016.0,
"train_loss": 0.9273426562316658,
"train_runtime": 9552.8802,
"train_samples_per_second": 17.321,
"train_steps_per_second": 0.068
}
],
"logging_steps": 5,
"max_steps": 645,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3802978389590016.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}