trpylate-25500 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5112474437627812,
"eval_steps": 500,
"global_step": 25500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01002445968162316,
"grad_norm": 0.27810075879096985,
"learning_rate": 2.969926620955131e-05,
"loss": 0.0283,
"step": 500
},
{
"epoch": 0.02004891936324632,
"grad_norm": 0.32781505584716797,
"learning_rate": 2.939853241910261e-05,
"loss": 0.0244,
"step": 1000
},
{
"epoch": 0.03007337904486948,
"grad_norm": 0.21168646216392517,
"learning_rate": 2.9097798628653917e-05,
"loss": 0.0235,
"step": 1500
},
{
"epoch": 0.04009783872649264,
"grad_norm": 0.15510809421539307,
"learning_rate": 2.879706483820522e-05,
"loss": 0.0225,
"step": 2000
},
{
"epoch": 0.0501222984081158,
"grad_norm": 0.2416733056306839,
"learning_rate": 2.849633104775653e-05,
"loss": 0.0221,
"step": 2500
},
{
"epoch": 0.06014675808973896,
"grad_norm": 0.19131343066692352,
"learning_rate": 2.819559725730783e-05,
"loss": 0.021,
"step": 3000
},
{
"epoch": 0.07017121777136212,
"grad_norm": 0.14675654470920563,
"learning_rate": 2.7894863466859138e-05,
"loss": 0.0209,
"step": 3500
},
{
"epoch": 0.08019567745298528,
"grad_norm": 0.1758842021226883,
"learning_rate": 2.7594129676410442e-05,
"loss": 0.0205,
"step": 4000
},
{
"epoch": 0.09022013713460844,
"grad_norm": 0.14844126999378204,
"learning_rate": 2.729339588596175e-05,
"loss": 0.0203,
"step": 4500
},
{
"epoch": 0.1002445968162316,
"grad_norm": 0.25966688990592957,
"learning_rate": 2.699266209551305e-05,
"loss": 0.02,
"step": 5000
},
{
"epoch": 0.11026905649785476,
"grad_norm": 0.15259282290935516,
"learning_rate": 2.6691928305064358e-05,
"loss": 0.0201,
"step": 5500
},
{
"epoch": 0.12029351617947792,
"grad_norm": 0.2339366376399994,
"learning_rate": 2.6391194514615663e-05,
"loss": 0.0197,
"step": 6000
},
{
"epoch": 0.13031797586110108,
"grad_norm": 0.19134366512298584,
"learning_rate": 2.609046072416697e-05,
"loss": 0.0195,
"step": 6500
},
{
"epoch": 0.14034243554272424,
"grad_norm": 0.18384341895580292,
"learning_rate": 2.578972693371827e-05,
"loss": 0.0195,
"step": 7000
},
{
"epoch": 0.1503668952243474,
"grad_norm": 0.12643477320671082,
"learning_rate": 2.548899314326958e-05,
"loss": 0.0189,
"step": 7500
},
{
"epoch": 0.16039135490597056,
"grad_norm": 0.1328970193862915,
"learning_rate": 2.5188259352820883e-05,
"loss": 0.0189,
"step": 8000
},
{
"epoch": 0.17041581458759372,
"grad_norm": 0.11257112771272659,
"learning_rate": 2.488752556237219e-05,
"loss": 0.0188,
"step": 8500
},
{
"epoch": 0.18044027426921688,
"grad_norm": 0.1610465794801712,
"learning_rate": 2.4586791771923492e-05,
"loss": 0.0185,
"step": 9000
},
{
"epoch": 0.19046473395084004,
"grad_norm": 0.19272467494010925,
"learning_rate": 2.42860579814748e-05,
"loss": 0.0186,
"step": 9500
},
{
"epoch": 0.2004891936324632,
"grad_norm": 0.14881977438926697,
"learning_rate": 2.3985324191026104e-05,
"loss": 0.0185,
"step": 10000
},
{
"epoch": 0.21051365331408636,
"grad_norm": 0.1241874098777771,
"learning_rate": 2.368459040057741e-05,
"loss": 0.0183,
"step": 10500
},
{
"epoch": 0.22053811299570952,
"grad_norm": 0.13214462995529175,
"learning_rate": 2.3383856610128712e-05,
"loss": 0.0178,
"step": 11000
},
{
"epoch": 0.23056257267733268,
"grad_norm": 0.1579483300447464,
"learning_rate": 2.308312281968002e-05,
"loss": 0.018,
"step": 11500
},
{
"epoch": 0.24058703235895584,
"grad_norm": 0.15338820219039917,
"learning_rate": 2.2782389029231324e-05,
"loss": 0.0179,
"step": 12000
},
{
"epoch": 0.25061149204057903,
"grad_norm": 0.25933489203453064,
"learning_rate": 2.2481655238782632e-05,
"loss": 0.018,
"step": 12500
},
{
"epoch": 0.26063595172220216,
"grad_norm": 0.14008085429668427,
"learning_rate": 2.2180921448333933e-05,
"loss": 0.0178,
"step": 13000
},
{
"epoch": 0.27066041140382535,
"grad_norm": 0.17158617079257965,
"learning_rate": 2.188018765788524e-05,
"loss": 0.0176,
"step": 13500
},
{
"epoch": 0.2806848710854485,
"grad_norm": 0.1249169260263443,
"learning_rate": 2.1579453867436545e-05,
"loss": 0.0176,
"step": 14000
},
{
"epoch": 0.29070933076707167,
"grad_norm": 0.15336212515830994,
"learning_rate": 2.1278720076987853e-05,
"loss": 0.0174,
"step": 14500
},
{
"epoch": 0.3007337904486948,
"grad_norm": 0.17169423401355743,
"learning_rate": 2.0977986286539154e-05,
"loss": 0.0171,
"step": 15000
},
{
"epoch": 0.310758250130318,
"grad_norm": 0.12056533992290497,
"learning_rate": 2.067725249609046e-05,
"loss": 0.0175,
"step": 15500
},
{
"epoch": 0.3207827098119411,
"grad_norm": 0.15232908725738525,
"learning_rate": 2.0376518705641766e-05,
"loss": 0.017,
"step": 16000
},
{
"epoch": 0.3308071694935643,
"grad_norm": 0.12425834685564041,
"learning_rate": 2.0075784915193073e-05,
"loss": 0.0173,
"step": 16500
},
{
"epoch": 0.34083162917518744,
"grad_norm": 0.09441632032394409,
"learning_rate": 1.9775051124744374e-05,
"loss": 0.0171,
"step": 17000
},
{
"epoch": 0.35085608885681063,
"grad_norm": 0.11303837597370148,
"learning_rate": 1.9474317334295682e-05,
"loss": 0.017,
"step": 17500
},
{
"epoch": 0.36088054853843377,
"grad_norm": 0.13934484124183655,
"learning_rate": 1.9173583543846986e-05,
"loss": 0.0166,
"step": 18000
},
{
"epoch": 0.37090500822005695,
"grad_norm": 0.17438557744026184,
"learning_rate": 1.8872849753398294e-05,
"loss": 0.0168,
"step": 18500
},
{
"epoch": 0.3809294679016801,
"grad_norm": 0.10365904122591019,
"learning_rate": 1.8572115962949595e-05,
"loss": 0.0166,
"step": 19000
},
{
"epoch": 0.3909539275833033,
"grad_norm": 0.10702642053365707,
"learning_rate": 1.8271382172500902e-05,
"loss": 0.0165,
"step": 19500
},
{
"epoch": 0.4009783872649264,
"grad_norm": 0.08877725899219513,
"learning_rate": 1.7970648382052207e-05,
"loss": 0.0165,
"step": 20000
},
{
"epoch": 0.4110028469465496,
"grad_norm": 0.1188698559999466,
"learning_rate": 1.7669914591603514e-05,
"loss": 0.0168,
"step": 20500
},
{
"epoch": 0.4210273066281727,
"grad_norm": 0.08468133956193924,
"learning_rate": 1.7369180801154815e-05,
"loss": 0.0164,
"step": 21000
},
{
"epoch": 0.4310517663097959,
"grad_norm": 0.09056028723716736,
"learning_rate": 1.7068447010706123e-05,
"loss": 0.0163,
"step": 21500
},
{
"epoch": 0.44107622599141905,
"grad_norm": 0.14430883526802063,
"learning_rate": 1.6767713220257427e-05,
"loss": 0.0166,
"step": 22000
},
{
"epoch": 0.45110068567304223,
"grad_norm": 0.0935540571808815,
"learning_rate": 1.6466979429808735e-05,
"loss": 0.0163,
"step": 22500
},
{
"epoch": 0.46112514535466537,
"grad_norm": 0.08878663927316666,
"learning_rate": 1.616624563936004e-05,
"loss": 0.0163,
"step": 23000
},
{
"epoch": 0.47114960503628855,
"grad_norm": 0.11340079456567764,
"learning_rate": 1.5865511848911344e-05,
"loss": 0.016,
"step": 23500
},
{
"epoch": 0.4811740647179117,
"grad_norm": 0.1878892481327057,
"learning_rate": 1.556477805846265e-05,
"loss": 0.0163,
"step": 24000
},
{
"epoch": 0.4911985243995349,
"grad_norm": 0.06906645745038986,
"learning_rate": 1.5264044268013956e-05,
"loss": 0.0164,
"step": 24500
},
{
"epoch": 0.5012229840811581,
"grad_norm": 0.11366972327232361,
"learning_rate": 1.496331047756526e-05,
"loss": 0.0162,
"step": 25000
},
{
"epoch": 0.5112474437627812,
"grad_norm": 0.10021985322237015,
"learning_rate": 1.4662576687116564e-05,
"loss": 0.016,
"step": 25500
}
],
"logging_steps": 500,
"max_steps": 49878,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
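
For reference, a minimal sketch of how this trainer state can be inspected offline. The log_history array above records one entry per 500 optimizer steps (logging_steps), so the training loss and learning-rate schedule can be recovered directly from it. The relative file path and the optional matplotlib plot are illustrative assumptions, not part of the checkpoint.

import json

# Load the trainer state saved alongside the checkpoint
# (path assumed, e.g. trpylate-25500/trainer_state.json).
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history record holds step, epoch, loss, grad_norm and learning_rate.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]
lrs = [entry["learning_rate"] for entry in state["log_history"]]

print(f"progress: step {state['global_step']} of {state['max_steps']}"
      f" (epoch {state['epoch']:.4f})")
print(f"latest logged loss: {losses[-1]:.4f}, latest lr: {lrs[-1]:.3e}")

# Optional quick look at the curves (requires matplotlib).
try:
    import matplotlib.pyplot as plt

    fig, loss_ax = plt.subplots()
    loss_ax.plot(steps, losses, label="train loss")
    loss_ax.set_xlabel("step")
    loss_ax.set_ylabel("loss")

    lr_ax = loss_ax.twinx()  # second y-axis for the decaying learning rate
    lr_ax.plot(steps, lrs, color="tab:orange", label="learning rate")
    lr_ax.set_ylabel("learning rate")

    fig.tight_layout()
    fig.savefig("trainer_state_curves.png")
except ImportError:
    pass

Run against the state above, this would report step 25500 of 49878 at epoch 0.5112, with the logged loss easing from 0.0283 at step 500 to 0.0160 at step 25500 while the learning rate decays linearly from roughly 3e-05.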