{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9932279909706545,
"eval_steps": 50,
"global_step": 663,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022573363431151242,
"grad_norm": 18.10373149143315,
"learning_rate": 5e-07,
"loss": 1.7281,
"step": 5
},
{
"epoch": 0.045146726862302484,
"grad_norm": 12.098369998439862,
"learning_rate": 1e-06,
"loss": 1.5872,
"step": 10
},
{
"epoch": 0.06772009029345373,
"grad_norm": 7.799480392523437,
"learning_rate": 9.998553453302385e-07,
"loss": 1.2742,
"step": 15
},
{
"epoch": 0.09029345372460497,
"grad_norm": 4.3243576184049015,
"learning_rate": 9.99421465020848e-07,
"loss": 1.1293,
"step": 20
},
{
"epoch": 0.11286681715575621,
"grad_norm": 3.8123521250599035,
"learning_rate": 9.9869861012308e-07,
"loss": 1.0773,
"step": 25
},
{
"epoch": 0.13544018058690746,
"grad_norm": 3.7302202580496955,
"learning_rate": 9.976871988942804e-07,
"loss": 1.0241,
"step": 30
},
{
"epoch": 0.1580135440180587,
"grad_norm": 3.6817019638097315,
"learning_rate": 9.963878165558785e-07,
"loss": 0.9907,
"step": 35
},
{
"epoch": 0.18058690744920994,
"grad_norm": 3.6570665491053798,
"learning_rate": 9.948012149547666e-07,
"loss": 0.9535,
"step": 40
},
{
"epoch": 0.20316027088036118,
"grad_norm": 3.5815310725056895,
"learning_rate": 9.929283121282675e-07,
"loss": 0.9562,
"step": 45
},
{
"epoch": 0.22573363431151242,
"grad_norm": 3.5857342727356256,
"learning_rate": 9.9077019177294e-07,
"loss": 0.9196,
"step": 50
},
{
"epoch": 0.22573363431151242,
"eval_loss": 0.9316988587379456,
"eval_runtime": 110.0444,
"eval_samples_per_second": 57.25,
"eval_steps_per_second": 0.9,
"step": 50
},
{
"epoch": 0.24830699774266365,
"grad_norm": 3.8011407294134836,
"learning_rate": 9.88328102617534e-07,
"loss": 0.9367,
"step": 55
},
{
"epoch": 0.2708803611738149,
"grad_norm": 3.4590500642130086,
"learning_rate": 9.856034577004504e-07,
"loss": 0.9275,
"step": 60
},
{
"epoch": 0.29345372460496616,
"grad_norm": 3.576144528772592,
"learning_rate": 9.82597833552132e-07,
"loss": 0.919,
"step": 65
},
{
"epoch": 0.3160270880361174,
"grad_norm": 3.324174107907928,
"learning_rate": 9.793129692828533e-07,
"loss": 0.9057,
"step": 70
},
{
"epoch": 0.33860045146726864,
"grad_norm": 3.8855820879993144,
"learning_rate": 9.757507655764384e-07,
"loss": 0.8901,
"step": 75
},
{
"epoch": 0.3611738148984199,
"grad_norm": 3.469991558293526,
"learning_rate": 9.719132835904906e-07,
"loss": 0.9147,
"step": 80
},
{
"epoch": 0.3837471783295711,
"grad_norm": 3.514613408390947,
"learning_rate": 9.678027437637677e-07,
"loss": 0.8819,
"step": 85
},
{
"epoch": 0.40632054176072235,
"grad_norm": 3.648475056062095,
"learning_rate": 9.634215245313939e-07,
"loss": 0.8867,
"step": 90
},
{
"epoch": 0.4288939051918736,
"grad_norm": 3.5559364392174695,
"learning_rate": 9.587721609486543e-07,
"loss": 0.8809,
"step": 95
},
{
"epoch": 0.45146726862302483,
"grad_norm": 3.4130223122860843,
"learning_rate": 9.538573432241637e-07,
"loss": 0.8737,
"step": 100
},
{
"epoch": 0.45146726862302483,
"eval_loss": 0.8765010833740234,
"eval_runtime": 109.5348,
"eval_samples_per_second": 57.516,
"eval_steps_per_second": 0.904,
"step": 100
},
{
"epoch": 0.47404063205417607,
"grad_norm": 3.787400905783148,
"learning_rate": 9.486799151632612e-07,
"loss": 0.885,
"step": 105
},
{
"epoch": 0.4966139954853273,
"grad_norm": 3.535608347392897,
"learning_rate": 9.432428725225326e-07,
"loss": 0.8814,
"step": 110
},
{
"epoch": 0.5191873589164786,
"grad_norm": 3.6894303777250927,
"learning_rate": 9.375493612764085e-07,
"loss": 0.881,
"step": 115
},
{
"epoch": 0.5417607223476298,
"grad_norm": 3.577445458553245,
"learning_rate": 9.316026757968454e-07,
"loss": 0.8572,
"step": 120
},
{
"epoch": 0.5643340857787811,
"grad_norm": 3.5992375922791284,
"learning_rate": 9.2540625694714e-07,
"loss": 0.8654,
"step": 125
},
{
"epoch": 0.5869074492099323,
"grad_norm": 3.4939966180593824,
"learning_rate": 9.189636900909817e-07,
"loss": 0.8603,
"step": 130
},
{
"epoch": 0.6094808126410836,
"grad_norm": 3.815700262637612,
"learning_rate": 9.122787030178949e-07,
"loss": 0.8511,
"step": 135
},
{
"epoch": 0.6320541760722348,
"grad_norm": 3.7360996720156314,
"learning_rate": 9.053551637862692e-07,
"loss": 0.8622,
"step": 140
},
{
"epoch": 0.654627539503386,
"grad_norm": 3.7059514280124,
"learning_rate": 8.98197078485229e-07,
"loss": 0.8371,
"step": 145
},
{
"epoch": 0.6772009029345373,
"grad_norm": 3.4566197799411778,
"learning_rate": 8.908085889166357e-07,
"loss": 0.8557,
"step": 150
},
{
"epoch": 0.6772009029345373,
"eval_loss": 0.8529915809631348,
"eval_runtime": 109.5763,
"eval_samples_per_second": 57.494,
"eval_steps_per_second": 0.903,
"step": 150
},
{
"epoch": 0.6997742663656885,
"grad_norm": 4.108966235237678,
"learning_rate": 8.831939701985636e-07,
"loss": 0.8493,
"step": 155
},
{
"epoch": 0.7223476297968398,
"grad_norm": 3.500706471908252,
"learning_rate": 8.75357628291637e-07,
"loss": 0.8561,
"step": 160
},
{
"epoch": 0.744920993227991,
"grad_norm": 3.465064110849232,
"learning_rate": 8.673040974496584e-07,
"loss": 0.8382,
"step": 165
},
{
"epoch": 0.7674943566591422,
"grad_norm": 3.4468913764280824,
"learning_rate": 8.590380375960053e-07,
"loss": 0.8347,
"step": 170
},
{
"epoch": 0.7900677200902935,
"grad_norm": 3.689915124175266,
"learning_rate": 8.505642316273111e-07,
"loss": 0.8421,
"step": 175
},
{
"epoch": 0.8126410835214447,
"grad_norm": 3.380084999051841,
"learning_rate": 8.418875826459919e-07,
"loss": 0.8317,
"step": 180
},
{
"epoch": 0.835214446952596,
"grad_norm": 3.6696844138559808,
"learning_rate": 8.330131111232201e-07,
"loss": 0.8407,
"step": 185
},
{
"epoch": 0.8577878103837472,
"grad_norm": 3.4940832177035293,
"learning_rate": 8.239459519939851e-07,
"loss": 0.8539,
"step": 190
},
{
"epoch": 0.8803611738148984,
"grad_norm": 3.214918919448116,
"learning_rate": 8.14691351685925e-07,
"loss": 0.8347,
"step": 195
},
{
"epoch": 0.9029345372460497,
"grad_norm": 3.427510592132526,
"learning_rate": 8.052546650836453e-07,
"loss": 0.8242,
"step": 200
},
{
"epoch": 0.9029345372460497,
"eval_loss": 0.8354331254959106,
"eval_runtime": 109.4188,
"eval_samples_per_second": 57.577,
"eval_steps_per_second": 0.905,
"step": 200
},
{
"epoch": 0.9255079006772009,
"grad_norm": 3.6927117939398886,
"learning_rate": 7.956413524302823e-07,
"loss": 0.8204,
"step": 205
},
{
"epoch": 0.9480812641083521,
"grad_norm": 3.6666105985738606,
"learning_rate": 7.858569761681047e-07,
"loss": 0.8545,
"step": 210
},
{
"epoch": 0.9706546275395034,
"grad_norm": 3.875619463407146,
"learning_rate": 7.759071977199806e-07,
"loss": 0.8431,
"step": 215
},
{
"epoch": 0.9932279909706546,
"grad_norm": 3.2790864439936875,
"learning_rate": 7.657977742135725e-07,
"loss": 0.8245,
"step": 220
},
{
"epoch": 1.0158013544018059,
"grad_norm": 3.3063820393602876,
"learning_rate": 7.555345551501557e-07,
"loss": 0.7956,
"step": 225
},
{
"epoch": 1.0383747178329572,
"grad_norm": 3.707635627262872,
"learning_rate": 7.451234790199871e-07,
"loss": 0.7706,
"step": 230
},
{
"epoch": 1.0609480812641083,
"grad_norm": 3.6504981451650096,
"learning_rate": 7.345705698661852e-07,
"loss": 0.7521,
"step": 235
},
{
"epoch": 1.0835214446952597,
"grad_norm": 3.4617956642188097,
"learning_rate": 7.23881933799104e-07,
"loss": 0.7465,
"step": 240
},
{
"epoch": 1.1060948081264108,
"grad_norm": 3.6172380022954935,
"learning_rate": 7.130637554632257e-07,
"loss": 0.7727,
"step": 245
},
{
"epoch": 1.1286681715575622,
"grad_norm": 3.793915550967959,
"learning_rate": 7.021222944586088e-07,
"loss": 0.7715,
"step": 250
},
{
"epoch": 1.1286681715575622,
"eval_loss": 0.8277800679206848,
"eval_runtime": 109.5657,
"eval_samples_per_second": 57.5,
"eval_steps_per_second": 0.904,
"step": 250
},
{
"epoch": 1.1512415349887133,
"grad_norm": 3.4955286501011735,
"learning_rate": 6.910638817189664e-07,
"loss": 0.7589,
"step": 255
},
{
"epoch": 1.1738148984198646,
"grad_norm": 3.5834856426118615,
"learning_rate": 6.798949158484705e-07,
"loss": 0.7784,
"step": 260
},
{
"epoch": 1.1963882618510158,
"grad_norm": 3.655680306693504,
"learning_rate": 6.686218594193993e-07,
"loss": 0.7402,
"step": 265
},
{
"epoch": 1.2189616252821671,
"grad_norm": 3.86229701873481,
"learning_rate": 6.572512352327726e-07,
"loss": 0.7759,
"step": 270
},
{
"epoch": 1.2415349887133182,
"grad_norm": 3.5211869951686983,
"learning_rate": 6.457896225441371e-07,
"loss": 0.7549,
"step": 275
},
{
"epoch": 1.2641083521444696,
"grad_norm": 3.5889293223377208,
"learning_rate": 6.342436532566865e-07,
"loss": 0.7564,
"step": 280
},
{
"epoch": 1.2866817155756207,
"grad_norm": 3.607920044891375,
"learning_rate": 6.226200080839182e-07,
"loss": 0.7629,
"step": 285
},
{
"epoch": 1.309255079006772,
"grad_norm": 3.553446162126762,
"learning_rate": 6.109254126840479e-07,
"loss": 0.7531,
"step": 290
},
{
"epoch": 1.3318284424379232,
"grad_norm": 3.5301798790372207,
"learning_rate": 5.991666337684176e-07,
"loss": 0.7572,
"step": 295
},
{
"epoch": 1.3544018058690745,
"grad_norm": 3.627738643773189,
"learning_rate": 5.873504751861507e-07,
"loss": 0.7752,
"step": 300
},
{
"epoch": 1.3544018058690745,
"eval_loss": 0.819725513458252,
"eval_runtime": 109.6357,
"eval_samples_per_second": 57.463,
"eval_steps_per_second": 0.903,
"step": 300
},
{
"epoch": 1.3769751693002257,
"grad_norm": 3.609521496977633,
"learning_rate": 5.754837739873178e-07,
"loss": 0.7622,
"step": 305
},
{
"epoch": 1.399548532731377,
"grad_norm": 3.556720507344035,
"learning_rate": 5.635733964668909e-07,
"loss": 0.7487,
"step": 310
},
{
"epoch": 1.4221218961625282,
"grad_norm": 3.8469023502440756,
"learning_rate": 5.516262341917778e-07,
"loss": 0.7568,
"step": 315
},
{
"epoch": 1.4446952595936795,
"grad_norm": 3.553420846727805,
"learning_rate": 5.396492000132325e-07,
"loss": 0.7584,
"step": 320
},
{
"epoch": 1.4672686230248306,
"grad_norm": 3.671248637273475,
"learning_rate": 5.276492240669503e-07,
"loss": 0.7708,
"step": 325
},
{
"epoch": 1.489841986455982,
"grad_norm": 3.4883623820641922,
"learning_rate": 5.156332497631621e-07,
"loss": 0.742,
"step": 330
},
{
"epoch": 1.5124153498871333,
"grad_norm": 3.5733319503202776,
"learning_rate": 5.036082297690464e-07,
"loss": 0.7578,
"step": 335
},
{
"epoch": 1.5349887133182845,
"grad_norm": 3.453860123250934,
"learning_rate": 4.915811219857882e-07,
"loss": 0.7542,
"step": 340
},
{
"epoch": 1.5575620767494356,
"grad_norm": 3.6891903258202206,
"learning_rate": 4.795588855226055e-07,
"loss": 0.7589,
"step": 345
},
{
"epoch": 1.580135440180587,
"grad_norm": 3.647875532185229,
"learning_rate": 4.6754847667008004e-07,
"loss": 0.7359,
"step": 350
},
{
"epoch": 1.580135440180587,
"eval_loss": 0.8135123252868652,
"eval_runtime": 109.5806,
"eval_samples_per_second": 57.492,
"eval_steps_per_second": 0.903,
"step": 350
},
{
"epoch": 1.6027088036117383,
"grad_norm": 3.5680267072205427,
"learning_rate": 4.5555684487511693e-07,
"loss": 0.7674,
"step": 355
},
{
"epoch": 1.6252821670428894,
"grad_norm": 3.281054570962508,
"learning_rate": 4.435909287198646e-07,
"loss": 0.7565,
"step": 360
},
{
"epoch": 1.6478555304740405,
"grad_norm": 3.3765578443383224,
"learning_rate": 4.316576519069226e-07,
"loss": 0.7578,
"step": 365
},
{
"epoch": 1.670428893905192,
"grad_norm": 3.536076248505112,
"learning_rate": 4.197639192531573e-07,
"loss": 0.7533,
"step": 370
},
{
"epoch": 1.6930022573363432,
"grad_norm": 3.9641281540021023,
"learning_rate": 4.079166126944453e-07,
"loss": 0.7674,
"step": 375
},
{
"epoch": 1.7155756207674944,
"grad_norm": 3.777058403651495,
"learning_rate": 3.9612258730365823e-07,
"loss": 0.7355,
"step": 380
},
{
"epoch": 1.7381489841986455,
"grad_norm": 3.7311845269819783,
"learning_rate": 3.843886673241883e-07,
"loss": 0.7512,
"step": 385
},
{
"epoch": 1.7607223476297968,
"grad_norm": 3.5531260457665073,
"learning_rate": 3.7272164222131387e-07,
"loss": 0.7544,
"step": 390
},
{
"epoch": 1.7832957110609482,
"grad_norm": 3.815873779507455,
"learning_rate": 3.611282627536887e-07,
"loss": 0.7572,
"step": 395
},
{
"epoch": 1.8058690744920993,
"grad_norm": 3.5694044492417034,
"learning_rate": 3.496152370672255e-07,
"loss": 0.7467,
"step": 400
},
{
"epoch": 1.8058690744920993,
"eval_loss": 0.8077942132949829,
"eval_runtime": 109.5421,
"eval_samples_per_second": 57.512,
"eval_steps_per_second": 0.904,
"step": 400
},
{
"epoch": 1.8284424379232505,
"grad_norm": 3.5715817669773866,
"learning_rate": 3.381892268136392e-07,
"loss": 0.7387,
"step": 405
},
{
"epoch": 1.8510158013544018,
"grad_norm": 3.632625798422972,
"learning_rate": 3.2685684329588956e-07,
"loss": 0.7459,
"step": 410
},
{
"epoch": 1.8735891647855532,
"grad_norm": 3.4083610886198614,
"learning_rate": 3.1562464364275774e-07,
"loss": 0.755,
"step": 415
},
{
"epoch": 1.8961625282167043,
"grad_norm": 3.5098768134992255,
"learning_rate": 3.044991270147699e-07,
"loss": 0.7485,
"step": 420
},
{
"epoch": 1.9187358916478554,
"grad_norm": 3.570449170370788,
"learning_rate": 2.934867308436613e-07,
"loss": 0.7457,
"step": 425
},
{
"epoch": 1.9413092550790068,
"grad_norm": 3.5437186541935386,
"learning_rate": 2.825938271075572e-07,
"loss": 0.7456,
"step": 430
},
{
"epoch": 1.963882618510158,
"grad_norm": 3.6156752309028244,
"learning_rate": 2.7182671864402856e-07,
"loss": 0.7367,
"step": 435
},
{
"epoch": 1.9864559819413092,
"grad_norm": 3.541183052041991,
"learning_rate": 2.6119163550315194e-07,
"loss": 0.7605,
"step": 440
},
{
"epoch": 2.0090293453724604,
"grad_norm": 3.668354373946963,
"learning_rate": 2.506947313426854e-07,
"loss": 0.7443,
"step": 445
},
{
"epoch": 2.0316027088036117,
"grad_norm": 3.6174990546812342,
"learning_rate": 2.4034207986744847e-07,
"loss": 0.7024,
"step": 450
},
{
"epoch": 2.0316027088036117,
"eval_loss": 0.8078460097312927,
"eval_runtime": 109.5409,
"eval_samples_per_second": 57.513,
"eval_steps_per_second": 0.904,
"step": 450
},
{
"epoch": 2.054176072234763,
"grad_norm": 3.6197344117961983,
"learning_rate": 2.301396713149627e-07,
"loss": 0.7042,
"step": 455
},
{
"epoch": 2.0767494356659144,
"grad_norm": 3.6219776063121687,
"learning_rate": 2.2009340898938738e-07,
"loss": 0.6819,
"step": 460
},
{
"epoch": 2.0993227990970653,
"grad_norm": 3.749685991397984,
"learning_rate": 2.1020910584575891e-07,
"loss": 0.7138,
"step": 465
},
{
"epoch": 2.1218961625282167,
"grad_norm": 3.686378460444093,
"learning_rate": 2.0049248112650563e-07,
"loss": 0.6967,
"step": 470
},
{
"epoch": 2.144469525959368,
"grad_norm": 3.8202046159252903,
"learning_rate": 1.9094915705218711e-07,
"loss": 0.6911,
"step": 475
},
{
"epoch": 2.1670428893905194,
"grad_norm": 4.093386454062259,
"learning_rate": 1.8158465556837304e-07,
"loss": 0.6825,
"step": 480
},
{
"epoch": 2.1896162528216703,
"grad_norm": 3.6669210094430875,
"learning_rate": 1.7240439515054218e-07,
"loss": 0.6957,
"step": 485
},
{
"epoch": 2.2121896162528216,
"grad_norm": 3.7920265300049887,
"learning_rate": 1.634136876688504e-07,
"loss": 0.7123,
"step": 490
},
{
"epoch": 2.234762979683973,
"grad_norm": 3.8406082616032484,
"learning_rate": 1.5461773531458455e-07,
"loss": 0.7014,
"step": 495
},
{
"epoch": 2.2573363431151243,
"grad_norm": 3.7396677582411995,
"learning_rate": 1.460216275900769e-07,
"loss": 0.6984,
"step": 500
},
{
"epoch": 2.2573363431151243,
"eval_loss": 0.8086156249046326,
"eval_runtime": 109.6842,
"eval_samples_per_second": 57.438,
"eval_steps_per_second": 0.903,
"step": 500
},
{
"epoch": 2.2799097065462752,
"grad_norm": 3.810172890487716,
"learning_rate": 1.3763033836382392e-07,
"loss": 0.6953,
"step": 505
},
{
"epoch": 2.3024830699774266,
"grad_norm": 3.7650154233741318,
"learning_rate": 1.294487229925132e-07,
"loss": 0.7049,
"step": 510
},
{
"epoch": 2.325056433408578,
"grad_norm": 3.7977324434068622,
"learning_rate": 1.2148151551162345e-07,
"loss": 0.6996,
"step": 515
},
{
"epoch": 2.3476297968397293,
"grad_norm": 3.7271690299752636,
"learning_rate": 1.137333258962227e-07,
"loss": 0.7046,
"step": 520
},
{
"epoch": 2.37020316027088,
"grad_norm": 3.7696874152910693,
"learning_rate": 1.0620863739355135e-07,
"loss": 0.7038,
"step": 525
},
{
"epoch": 2.3927765237020315,
"grad_norm": 3.6784969120555937,
"learning_rate": 9.891180392893117e-08,
"loss": 0.684,
"step": 530
},
{
"epoch": 2.415349887133183,
"grad_norm": 3.923781431171795,
"learning_rate": 9.184704758650241e-08,
"loss": 0.7056,
"step": 535
},
{
"epoch": 2.4379232505643342,
"grad_norm": 3.8228025983942624,
"learning_rate": 8.501845616624798e-08,
"loss": 0.6876,
"step": 540
},
{
"epoch": 2.460496613995485,
"grad_norm": 3.9688517448973624,
"learning_rate": 7.842998081871493e-08,
"loss": 0.7139,
"step": 545
},
{
"epoch": 2.4830699774266365,
"grad_norm": 3.909192582651566,
"learning_rate": 7.208543375880594e-08,
"loss": 0.7063,
"step": 550
},
{
"epoch": 2.4830699774266365,
"eval_loss": 0.8080305457115173,
"eval_runtime": 109.6111,
"eval_samples_per_second": 57.476,
"eval_steps_per_second": 0.903,
"step": 550
},
{
"epoch": 2.505643340857788,
"grad_norm": 3.6883752732106125,
"learning_rate": 6.598848605996004e-08,
"loss": 0.6922,
"step": 555
},
{
"epoch": 2.528216704288939,
"grad_norm": 3.8586414745206814,
"learning_rate": 6.014266553000074e-08,
"loss": 0.7121,
"step": 560
},
{
"epoch": 2.55079006772009,
"grad_norm": 3.6987869873044725,
"learning_rate": 5.4551354669881145e-08,
"loss": 0.6944,
"step": 565
},
{
"epoch": 2.5733634311512414,
"grad_norm": 3.7083356878972222,
"learning_rate": 4.921778871650539e-08,
"loss": 0.703,
"step": 570
},
{
"epoch": 2.595936794582393,
"grad_norm": 3.7810271982778305,
"learning_rate": 4.414505377075978e-08,
"loss": 0.692,
"step": 575
},
{
"epoch": 2.618510158013544,
"grad_norm": 4.00249924161769,
"learning_rate": 3.933608501183788e-08,
"loss": 0.6901,
"step": 580
},
{
"epoch": 2.6410835214446955,
"grad_norm": 3.790359559860126,
"learning_rate": 3.479366499889058e-08,
"loss": 0.6997,
"step": 585
},
{
"epoch": 2.6636568848758464,
"grad_norm": 3.588096321178088,
"learning_rate": 3.052042206098537e-08,
"loss": 0.6826,
"step": 590
},
{
"epoch": 2.6862302483069977,
"grad_norm": 3.7054037608925148,
"learning_rate": 2.6518828776306347e-08,
"loss": 0.6909,
"step": 595
},
{
"epoch": 2.708803611738149,
"grad_norm": 3.703171492172673,
"learning_rate": 2.279120054147393e-08,
"loss": 0.6873,
"step": 600
},
{
"epoch": 2.708803611738149,
"eval_loss": 0.80707848072052,
"eval_runtime": 109.463,
"eval_samples_per_second": 57.554,
"eval_steps_per_second": 0.904,
"step": 600
},
{
"epoch": 2.7313769751693,
"grad_norm": 3.9293753380698884,
"learning_rate": 1.9339694231813252e-08,
"loss": 0.6997,
"step": 605
},
{
"epoch": 2.7539503386004514,
"grad_norm": 4.137942412644628,
"learning_rate": 1.616630695334592e-08,
"loss": 0.7078,
"step": 610
},
{
"epoch": 2.7765237020316027,
"grad_norm": 3.9534669915381877,
"learning_rate": 1.3272874887227281e-08,
"loss": 0.6966,
"step": 615
},
{
"epoch": 2.799097065462754,
"grad_norm": 3.7231816025762168,
"learning_rate": 1.066107222729712e-08,
"loss": 0.7013,
"step": 620
},
{
"epoch": 2.8216704288939054,
"grad_norm": 3.58992452351805,
"learning_rate": 8.332410211360608e-09,
"loss": 0.7107,
"step": 625
},
{
"epoch": 2.8442437923250563,
"grad_norm": 3.744424909335921,
"learning_rate": 6.288236246757284e-09,
"loss": 0.6907,
"step": 630
},
{
"epoch": 2.8668171557562077,
"grad_norm": 3.9524208371805956,
"learning_rate": 4.529733130726299e-09,
"loss": 0.7125,
"step": 635
},
{
"epoch": 2.889390519187359,
"grad_norm": 3.7901367668148027,
"learning_rate": 3.0579183660177086e-09,
"loss": 0.6974,
"step": 640
},
{
"epoch": 2.91196388261851,
"grad_norm": 3.6006642909643714,
"learning_rate": 1.8736435721465326e-09,
"loss": 0.695,
"step": 645
},
{
"epoch": 2.9345372460496613,
"grad_norm": 3.671968035371273,
"learning_rate": 9.775939926296439e-10,
"loss": 0.7067,
"step": 650
},
{
"epoch": 2.9345372460496613,
"eval_loss": 0.8069904446601868,
"eval_runtime": 109.6344,
"eval_samples_per_second": 57.464,
"eval_steps_per_second": 0.903,
"step": 650
},
{
"epoch": 2.9571106094808126,
"grad_norm": 3.6036456106402284,
"learning_rate": 3.7028809849098954e-10,
"loss": 0.705,
"step": 655
},
{
"epoch": 2.979683972911964,
"grad_norm": 3.7354799118910087,
"learning_rate": 5.2077288264951166e-11,
"loss": 0.6905,
"step": 660
},
{
"epoch": 2.9932279909706545,
"step": 663,
"total_flos": 3909114547666944.0,
"train_loss": 0.7958762821928228,
"train_runtime": 10129.4465,
"train_samples_per_second": 16.793,
"train_steps_per_second": 0.065
}
],
"logging_steps": 5,
"max_steps": 663,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3909114547666944.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}