{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.46403712296983757,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02320185614849188,
      "grad_norm": 29.851993456423653,
      "learning_rate": 5e-07,
      "loss": 1.6448,
      "step": 5
    },
    {
      "epoch": 0.04640371229698376,
      "grad_norm": 14.665681394229273,
      "learning_rate": 1e-06,
      "loss": 1.5147,
      "step": 10
    },
    {
      "epoch": 0.06960556844547564,
      "grad_norm": 7.818944779433003,
      "learning_rate": 9.998470286265414e-07,
      "loss": 1.2888,
      "step": 15
    },
    {
      "epoch": 0.09280742459396751,
      "grad_norm": 6.011350341653904,
      "learning_rate": 9.993882081071305e-07,
      "loss": 1.2291,
      "step": 20
    },
    {
      "epoch": 0.11600928074245939,
      "grad_norm": 5.834488035843988,
      "learning_rate": 9.986238191873872e-07,
      "loss": 1.1827,
      "step": 25
    },
    {
      "epoch": 0.13921113689095127,
      "grad_norm": 4.944851583820086,
      "learning_rate": 9.975543295858033e-07,
      "loss": 1.1637,
      "step": 30
    },
    {
      "epoch": 0.16241299303944315,
      "grad_norm": 5.051276519398371,
      "learning_rate": 9.961803937075514e-07,
      "loss": 1.1448,
      "step": 35
    },
    {
      "epoch": 0.18561484918793503,
      "grad_norm": 5.008630441799495,
      "learning_rate": 9.945028522440653e-07,
      "loss": 1.1539,
      "step": 40
    },
    {
      "epoch": 0.2088167053364269,
      "grad_norm": 4.527889171881266,
      "learning_rate": 9.925227316586314e-07,
      "loss": 1.1302,
      "step": 45
    },
    {
      "epoch": 0.23201856148491878,
      "grad_norm": 4.665448898349117,
      "learning_rate": 9.902412435583125e-07,
      "loss": 1.1315,
      "step": 50
    },
    {
      "epoch": 0.23201856148491878,
      "eval_loss": 1.1243596076965332,
      "eval_runtime": 107.3706,
      "eval_samples_per_second": 57.083,
      "eval_steps_per_second": 0.894,
      "step": 50
    },
    {
      "epoch": 0.2552204176334107,
      "grad_norm": 4.780072580854242,
      "learning_rate": 9.876597839525813e-07,
      "loss": 1.1184,
      "step": 55
    },
    {
      "epoch": 0.27842227378190254,
      "grad_norm": 4.698899616021614,
      "learning_rate": 9.847799323991233e-07,
      "loss": 1.1112,
      "step": 60
    },
    {
      "epoch": 0.30162412993039445,
      "grad_norm": 4.798544900134018,
      "learning_rate": 9.816034510373285e-07,
      "loss": 1.1022,
      "step": 65
    },
    {
      "epoch": 0.3248259860788863,
      "grad_norm": 4.7554420584590105,
      "learning_rate": 9.781322835100637e-07,
      "loss": 1.1109,
      "step": 70
    },
    {
      "epoch": 0.3480278422273782,
      "grad_norm": 4.715791460597178,
      "learning_rate": 9.743685537743856e-07,
      "loss": 1.1044,
      "step": 75
    },
    {
      "epoch": 0.37122969837587005,
      "grad_norm": 4.781446446456838,
      "learning_rate": 9.70314564801922e-07,
      "loss": 1.0982,
      "step": 80
    },
    {
      "epoch": 0.39443155452436196,
      "grad_norm": 4.616375479434296,
      "learning_rate": 9.659727971697173e-07,
      "loss": 1.0798,
      "step": 85
    },
    {
      "epoch": 0.4176334106728538,
      "grad_norm": 4.81678899141071,
      "learning_rate": 9.613459075424033e-07,
      "loss": 1.0925,
      "step": 90
    },
    {
      "epoch": 0.4408352668213457,
      "grad_norm": 4.804913673447656,
      "learning_rate": 9.564367270466245e-07,
      "loss": 1.0726,
      "step": 95
    },
    {
      "epoch": 0.46403712296983757,
      "grad_norm": 5.428814841421836,
      "learning_rate": 9.51248259538713e-07,
      "loss": 1.0732,
      "step": 100
    },
    {
      "epoch": 0.46403712296983757,
      "eval_loss": 1.079745888710022,
      "eval_runtime": 106.1023,
      "eval_samples_per_second": 57.765,
      "eval_steps_per_second": 0.905,
      "step": 100
    }
  ],
  "logging_steps": 5,
  "max_steps": 645,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 589411381149696.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}