{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2806848710854485,
  "eval_steps": 500,
  "global_step": 14000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01002445968162316,
      "grad_norm": 0.1908789426088333,
      "learning_rate": 2.969926620955131e-05,
      "loss": 0.0305,
      "step": 500
    },
    {
      "epoch": 0.02004891936324632,
      "grad_norm": 0.2333366870880127,
      "learning_rate": 2.939853241910261e-05,
      "loss": 0.027,
      "step": 1000
    },
    {
      "epoch": 0.03007337904486948,
      "grad_norm": 0.18207737803459167,
      "learning_rate": 2.9097798628653917e-05,
      "loss": 0.026,
      "step": 1500
    },
    {
      "epoch": 0.04009783872649264,
      "grad_norm": 0.13153837621212006,
      "learning_rate": 2.879706483820522e-05,
      "loss": 0.0253,
      "step": 2000
    },
    {
      "epoch": 0.0501222984081158,
      "grad_norm": 0.20580655336380005,
      "learning_rate": 2.849633104775653e-05,
      "loss": 0.0249,
      "step": 2500
    },
    {
      "epoch": 0.06014675808973896,
      "grad_norm": 0.16466622054576874,
      "learning_rate": 2.8196198724888727e-05,
      "loss": 0.0239,
      "step": 3000
    },
    {
      "epoch": 0.07017121777136212,
      "grad_norm": 0.1590748131275177,
      "learning_rate": 2.7895464934440035e-05,
      "loss": 0.0239,
      "step": 3500
    },
    {
      "epoch": 0.08019567745298528,
      "grad_norm": 0.14150911569595337,
      "learning_rate": 2.759473114399134e-05,
      "loss": 0.0236,
      "step": 4000
    },
    {
      "epoch": 0.09022013713460844,
      "grad_norm": 0.09914161264896393,
      "learning_rate": 2.7293997353542647e-05,
      "loss": 0.0236,
      "step": 4500
    },
    {
      "epoch": 0.1002445968162316,
      "grad_norm": 0.16175945103168488,
      "learning_rate": 2.6993263563093947e-05,
      "loss": 0.0232,
      "step": 5000
    },
    {
      "epoch": 0.11026905649785476,
      "grad_norm": 0.14557033777236938,
      "learning_rate": 2.6692529772645255e-05,
      "loss": 0.0229,
      "step": 5500
    },
    {
      "epoch": 0.12029351617947792,
      "grad_norm": 0.14236459136009216,
      "learning_rate": 2.639179598219656e-05,
      "loss": 0.0228,
      "step": 6000
    },
    {
      "epoch": 0.13031797586110108,
      "grad_norm": 0.1859973669052124,
      "learning_rate": 2.6091062191747867e-05,
      "loss": 0.0226,
      "step": 6500
    },
    {
      "epoch": 0.14034243554272424,
      "grad_norm": 0.21533679962158203,
      "learning_rate": 2.5790929868880065e-05,
      "loss": 0.0226,
      "step": 7000
    },
    {
      "epoch": 0.1503668952243474,
      "grad_norm": 0.13067375123500824,
      "learning_rate": 2.5490196078431373e-05,
      "loss": 0.0222,
      "step": 7500
    },
    {
      "epoch": 0.16039135490597056,
      "grad_norm": 0.15966835618019104,
      "learning_rate": 2.518946228798268e-05,
      "loss": 0.0223,
      "step": 8000
    },
    {
      "epoch": 0.17041581458759372,
      "grad_norm": 0.16358782351016998,
      "learning_rate": 2.4888728497533985e-05,
      "loss": 0.0218,
      "step": 8500
    },
    {
      "epoch": 0.18044027426921688,
      "grad_norm": 0.2502037286758423,
      "learning_rate": 2.458799470708529e-05,
      "loss": 0.0219,
      "step": 9000
    },
    {
      "epoch": 0.19046473395084004,
      "grad_norm": 0.13426147401332855,
      "learning_rate": 2.4287260916636593e-05,
      "loss": 0.0221,
      "step": 9500
    },
    {
      "epoch": 0.2004891936324632,
      "grad_norm": 0.17509840428829193,
      "learning_rate": 2.39865271261879e-05,
      "loss": 0.0219,
      "step": 10000
    },
    {
      "epoch": 0.21051365331408636,
      "grad_norm": 0.12009692937135696,
      "learning_rate": 2.3685793335739205e-05,
      "loss": 0.0216,
      "step": 10500
    },
    {
      "epoch": 0.22053811299570952,
      "grad_norm": 0.1322207748889923,
      "learning_rate": 2.338505954529051e-05,
      "loss": 0.0211,
      "step": 11000
    },
    {
      "epoch": 0.23056257267733268,
      "grad_norm": 0.14883308112621307,
      "learning_rate": 2.308492722242271e-05,
      "loss": 0.0214,
      "step": 11500
    },
    {
      "epoch": 0.24058703235895584,
      "grad_norm": 0.13109040260314941,
      "learning_rate": 2.278419343197402e-05,
      "loss": 0.0211,
      "step": 12000
    },
    {
      "epoch": 0.25061149204057903,
      "grad_norm": 0.1747232973575592,
      "learning_rate": 2.2483459641525323e-05,
      "loss": 0.0214,
      "step": 12500
    },
    {
      "epoch": 0.26063595172220216,
      "grad_norm": 0.14633044600486755,
      "learning_rate": 2.2182725851076627e-05,
      "loss": 0.0212,
      "step": 13000
    },
    {
      "epoch": 0.27066041140382535,
      "grad_norm": 0.16928279399871826,
      "learning_rate": 2.188199206062793e-05,
      "loss": 0.0209,
      "step": 13500
    },
    {
      "epoch": 0.2806848710854485,
      "grad_norm": 0.1422567069530487,
      "learning_rate": 2.158125827017924e-05,
      "loss": 0.0206,
      "step": 14000
    }
  ],
  "logging_steps": 500,
  "max_steps": 49878,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}