{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.3921113689095128,
"eval_steps": 50,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02320185614849188,
"grad_norm": 26.745355095184312,
"learning_rate": 5e-07,
"loss": 1.6569,
"step": 5
},
{
"epoch": 0.04640371229698376,
"grad_norm": 13.080188660738868,
"learning_rate": 1e-06,
"loss": 1.5099,
"step": 10
},
{
"epoch": 0.06960556844547564,
"grad_norm": 7.775505635686351,
"learning_rate": 9.998470286265414e-07,
"loss": 1.2782,
"step": 15
},
{
"epoch": 0.09280742459396751,
"grad_norm": 6.492859712313467,
"learning_rate": 9.993882081071305e-07,
"loss": 1.2359,
"step": 20
},
{
"epoch": 0.11600928074245939,
"grad_norm": 5.355551984540261,
"learning_rate": 9.986238191873872e-07,
"loss": 1.204,
"step": 25
},
{
"epoch": 0.13921113689095127,
"grad_norm": 5.125393165293572,
"learning_rate": 9.975543295858033e-07,
"loss": 1.1627,
"step": 30
},
{
"epoch": 0.16241299303944315,
"grad_norm": 4.710071299991241,
"learning_rate": 9.961803937075514e-07,
"loss": 1.1463,
"step": 35
},
{
"epoch": 0.18561484918793503,
"grad_norm": 5.029376799191572,
"learning_rate": 9.945028522440653e-07,
"loss": 1.1394,
"step": 40
},
{
"epoch": 0.2088167053364269,
"grad_norm": 4.763291242870039,
"learning_rate": 9.925227316586314e-07,
"loss": 1.1371,
"step": 45
},
{
"epoch": 0.23201856148491878,
"grad_norm": 4.903033982523367,
"learning_rate": 9.902412435583125e-07,
"loss": 1.1181,
"step": 50
},
{
"epoch": 0.23201856148491878,
"eval_loss": 1.1214605569839478,
"eval_runtime": 105.8329,
"eval_samples_per_second": 57.912,
"eval_steps_per_second": 0.907,
"step": 50
},
{
"epoch": 0.2552204176334107,
"grad_norm": 4.785575700738186,
"learning_rate": 9.876597839525813e-07,
"loss": 1.1163,
"step": 55
},
{
"epoch": 0.27842227378190254,
"grad_norm": 4.600727000401806,
"learning_rate": 9.847799323991233e-07,
"loss": 1.1232,
"step": 60
},
{
"epoch": 0.30162412993039445,
"grad_norm": 4.8200416840356315,
"learning_rate": 9.816034510373285e-07,
"loss": 1.125,
"step": 65
},
{
"epoch": 0.3248259860788863,
"grad_norm": 4.958997518359378,
"learning_rate": 9.781322835100637e-07,
"loss": 1.108,
"step": 70
},
{
"epoch": 0.3480278422273782,
"grad_norm": 4.883541365508776,
"learning_rate": 9.743685537743856e-07,
"loss": 1.106,
"step": 75
},
{
"epoch": 0.37122969837587005,
"grad_norm": 4.973507458353338,
"learning_rate": 9.70314564801922e-07,
"loss": 1.0973,
"step": 80
},
{
"epoch": 0.39443155452436196,
"grad_norm": 4.704415990191669,
"learning_rate": 9.659727971697173e-07,
"loss": 1.0964,
"step": 85
},
{
"epoch": 0.4176334106728538,
"grad_norm": 4.759885977268913,
"learning_rate": 9.613459075424033e-07,
"loss": 1.0956,
"step": 90
},
{
"epoch": 0.4408352668213457,
"grad_norm": 4.868535908803129,
"learning_rate": 9.564367270466245e-07,
"loss": 1.0787,
"step": 95
},
{
"epoch": 0.46403712296983757,
"grad_norm": 5.180286116736628,
"learning_rate": 9.51248259538713e-07,
"loss": 1.0765,
"step": 100
},
{
"epoch": 0.46403712296983757,
"eval_loss": 1.0775035619735718,
"eval_runtime": 105.5293,
"eval_samples_per_second": 58.079,
"eval_steps_per_second": 0.91,
"step": 100
},
{
"epoch": 0.4872389791183295,
"grad_norm": 5.290465762761348,
"learning_rate": 9.457836797666721e-07,
"loss": 1.0903,
"step": 105
},
{
"epoch": 0.5104408352668214,
"grad_norm": 4.81291157554945,
"learning_rate": 9.400463314275941e-07,
"loss": 1.0697,
"step": 110
},
{
"epoch": 0.5336426914153132,
"grad_norm": 4.914554202012043,
"learning_rate": 9.340397251217008e-07,
"loss": 1.0668,
"step": 115
},
{
"epoch": 0.5568445475638051,
"grad_norm": 5.240457841494325,
"learning_rate": 9.27767536204258e-07,
"loss": 1.0676,
"step": 120
},
{
"epoch": 0.580046403712297,
"grad_norm": 4.957459385263701,
"learning_rate": 9.212336025366787e-07,
"loss": 1.0746,
"step": 125
},
{
"epoch": 0.6032482598607889,
"grad_norm": 5.29032668711839,
"learning_rate": 9.144419221381918e-07,
"loss": 1.0724,
"step": 130
},
{
"epoch": 0.6264501160092807,
"grad_norm": 4.908560953587426,
"learning_rate": 9.073966507395121e-07,
"loss": 1.0745,
"step": 135
},
{
"epoch": 0.6496519721577726,
"grad_norm": 4.912842113728852,
"learning_rate": 9.001020992400085e-07,
"loss": 1.0559,
"step": 140
},
{
"epoch": 0.6728538283062645,
"grad_norm": 5.088585906783296,
"learning_rate": 8.925627310699274e-07,
"loss": 1.0705,
"step": 145
},
{
"epoch": 0.6960556844547564,
"grad_norm": 5.140684832177941,
"learning_rate": 8.84783159459285e-07,
"loss": 1.0639,
"step": 150
},
{
"epoch": 0.6960556844547564,
"eval_loss": 1.0501643419265747,
"eval_runtime": 105.4561,
"eval_samples_per_second": 58.119,
"eval_steps_per_second": 0.91,
"step": 150
},
{
"epoch": 0.7192575406032483,
"grad_norm": 5.311257433234373,
"learning_rate": 8.767681446150976e-07,
"loss": 1.0472,
"step": 155
},
{
"epoch": 0.7424593967517401,
"grad_norm": 5.091539509688025,
"learning_rate": 8.68522590808682e-07,
"loss": 1.0645,
"step": 160
},
{
"epoch": 0.765661252900232,
"grad_norm": 5.132013982763288,
"learning_rate": 8.600515433748001e-07,
"loss": 1.0416,
"step": 165
},
{
"epoch": 0.7888631090487239,
"grad_norm": 4.753354098230195,
"learning_rate": 8.51360185624495e-07,
"loss": 1.0478,
"step": 170
},
{
"epoch": 0.8120649651972158,
"grad_norm": 5.029473978539478,
"learning_rate": 8.424538356734956e-07,
"loss": 1.0383,
"step": 175
},
{
"epoch": 0.8352668213457076,
"grad_norm": 4.9588553004593345,
"learning_rate": 8.333379431881397e-07,
"loss": 1.0342,
"step": 180
},
{
"epoch": 0.8584686774941995,
"grad_norm": 5.234591483099779,
"learning_rate": 8.240180860508026e-07,
"loss": 1.0413,
"step": 185
},
{
"epoch": 0.8816705336426914,
"grad_norm": 5.121566469508508,
"learning_rate": 8.144999669468713e-07,
"loss": 1.0264,
"step": 190
},
{
"epoch": 0.9048723897911833,
"grad_norm": 5.0479045768726305,
"learning_rate": 8.047894098753539e-07,
"loss": 1.028,
"step": 195
},
{
"epoch": 0.9280742459396751,
"grad_norm": 5.0838098259091185,
"learning_rate": 7.948923565852597e-07,
"loss": 1.0308,
"step": 200
},
{
"epoch": 0.9280742459396751,
"eval_loss": 1.0281875133514404,
"eval_runtime": 105.8568,
"eval_samples_per_second": 57.899,
"eval_steps_per_second": 0.907,
"step": 200
},
{
"epoch": 0.951276102088167,
"grad_norm": 5.3244675969022826,
"learning_rate": 7.848148629399285e-07,
"loss": 1.0262,
"step": 205
},
{
"epoch": 0.974477958236659,
"grad_norm": 4.9307215762355305,
"learning_rate": 7.745630952115363e-07,
"loss": 1.0349,
"step": 210
},
{
"epoch": 0.9976798143851509,
"grad_norm": 4.994203203030838,
"learning_rate": 7.641433263080418e-07,
"loss": 1.0216,
"step": 215
},
{
"epoch": 1.0208816705336428,
"grad_norm": 5.112958880673586,
"learning_rate": 7.535619319348865e-07,
"loss": 0.9241,
"step": 220
},
{
"epoch": 1.0440835266821347,
"grad_norm": 5.264187445397404,
"learning_rate": 7.428253866937918e-07,
"loss": 0.9001,
"step": 225
},
{
"epoch": 1.0672853828306264,
"grad_norm": 5.645584402922182,
"learning_rate": 7.319402601210447e-07,
"loss": 0.8916,
"step": 230
},
{
"epoch": 1.0904872389791183,
"grad_norm": 5.655360994963379,
"learning_rate": 7.209132126676933e-07,
"loss": 0.8876,
"step": 235
},
{
"epoch": 1.1136890951276102,
"grad_norm": 5.3773890810778795,
"learning_rate": 7.097509916241145e-07,
"loss": 0.8931,
"step": 240
},
{
"epoch": 1.136890951276102,
"grad_norm": 5.658881203794,
"learning_rate": 6.984604269914436e-07,
"loss": 0.905,
"step": 245
},
{
"epoch": 1.160092807424594,
"grad_norm": 5.966282577193694,
"learning_rate": 6.870484273023967e-07,
"loss": 0.9038,
"step": 250
},
{
"epoch": 1.160092807424594,
"eval_loss": 1.0220295190811157,
"eval_runtime": 105.8362,
"eval_samples_per_second": 57.91,
"eval_steps_per_second": 0.907,
"step": 250
},
{
"epoch": 1.1832946635730859,
"grad_norm": 5.794176185315156,
"learning_rate": 6.755219753940388e-07,
"loss": 0.8964,
"step": 255
},
{
"epoch": 1.2064965197215778,
"grad_norm": 6.603391500331007,
"learning_rate": 6.638881241350883e-07,
"loss": 0.8898,
"step": 260
},
{
"epoch": 1.2296983758700697,
"grad_norm": 5.5914639272443205,
"learning_rate": 6.52153992110368e-07,
"loss": 0.8951,
"step": 265
},
{
"epoch": 1.2529002320185616,
"grad_norm": 5.339661007608592,
"learning_rate": 6.403267592650466e-07,
"loss": 0.8961,
"step": 270
},
{
"epoch": 1.2761020881670533,
"grad_norm": 5.448280965038798,
"learning_rate": 6.28413662511334e-07,
"loss": 0.8919,
"step": 275
},
{
"epoch": 1.2993039443155452,
"grad_norm": 5.476822697700394,
"learning_rate": 6.164219913003207e-07,
"loss": 0.8931,
"step": 280
},
{
"epoch": 1.322505800464037,
"grad_norm": 5.783548079343189,
"learning_rate": 6.043590831616676e-07,
"loss": 0.8792,
"step": 285
},
{
"epoch": 1.345707656612529,
"grad_norm": 5.59782698134665,
"learning_rate": 5.92232319213878e-07,
"loss": 0.8768,
"step": 290
},
{
"epoch": 1.368909512761021,
"grad_norm": 5.193853086769952,
"learning_rate": 5.800491196478988e-07,
"loss": 0.8788,
"step": 295
},
{
"epoch": 1.3921113689095128,
"grad_norm": 5.539347488257,
"learning_rate": 5.678169391868127e-07,
"loss": 0.8973,
"step": 300
},
{
"epoch": 1.3921113689095128,
"eval_loss": 1.0114275217056274,
"eval_runtime": 106.216,
"eval_samples_per_second": 57.703,
"eval_steps_per_second": 0.904,
"step": 300
}
],
"logging_steps": 5,
"max_steps": 645,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1768702026448896.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}