{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.46403712296983757,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02320185614849188,
"grad_norm": 26.745355095184312,
"learning_rate": 5e-07,
"loss": 1.6569,
"step": 5
},
{
"epoch": 0.04640371229698376,
"grad_norm": 13.080188660738868,
"learning_rate": 1e-06,
"loss": 1.5099,
"step": 10
},
{
"epoch": 0.06960556844547564,
"grad_norm": 7.775505635686351,
"learning_rate": 9.998470286265414e-07,
"loss": 1.2782,
"step": 15
},
{
"epoch": 0.09280742459396751,
"grad_norm": 6.492859712313467,
"learning_rate": 9.993882081071305e-07,
"loss": 1.2359,
"step": 20
},
{
"epoch": 0.11600928074245939,
"grad_norm": 5.355551984540261,
"learning_rate": 9.986238191873872e-07,
"loss": 1.204,
"step": 25
},
{
"epoch": 0.13921113689095127,
"grad_norm": 5.125393165293572,
"learning_rate": 9.975543295858033e-07,
"loss": 1.1627,
"step": 30
},
{
"epoch": 0.16241299303944315,
"grad_norm": 4.710071299991241,
"learning_rate": 9.961803937075514e-07,
"loss": 1.1463,
"step": 35
},
{
"epoch": 0.18561484918793503,
"grad_norm": 5.029376799191572,
"learning_rate": 9.945028522440653e-07,
"loss": 1.1394,
"step": 40
},
{
"epoch": 0.2088167053364269,
"grad_norm": 4.763291242870039,
"learning_rate": 9.925227316586314e-07,
"loss": 1.1371,
"step": 45
},
{
"epoch": 0.23201856148491878,
"grad_norm": 4.903033982523367,
"learning_rate": 9.902412435583125e-07,
"loss": 1.1181,
"step": 50
},
{
"epoch": 0.23201856148491878,
"eval_loss": 1.1214605569839478,
"eval_runtime": 105.8329,
"eval_samples_per_second": 57.912,
"eval_steps_per_second": 0.907,
"step": 50
},
{
"epoch": 0.2552204176334107,
"grad_norm": 4.785575700738186,
"learning_rate": 9.876597839525813e-07,
"loss": 1.1163,
"step": 55
},
{
"epoch": 0.27842227378190254,
"grad_norm": 4.600727000401806,
"learning_rate": 9.847799323991233e-07,
"loss": 1.1232,
"step": 60
},
{
"epoch": 0.30162412993039445,
"grad_norm": 4.8200416840356315,
"learning_rate": 9.816034510373285e-07,
"loss": 1.125,
"step": 65
},
{
"epoch": 0.3248259860788863,
"grad_norm": 4.958997518359378,
"learning_rate": 9.781322835100637e-07,
"loss": 1.108,
"step": 70
},
{
"epoch": 0.3480278422273782,
"grad_norm": 4.883541365508776,
"learning_rate": 9.743685537743856e-07,
"loss": 1.106,
"step": 75
},
{
"epoch": 0.37122969837587005,
"grad_norm": 4.973507458353338,
"learning_rate": 9.70314564801922e-07,
"loss": 1.0973,
"step": 80
},
{
"epoch": 0.39443155452436196,
"grad_norm": 4.704415990191669,
"learning_rate": 9.659727971697173e-07,
"loss": 1.0964,
"step": 85
},
{
"epoch": 0.4176334106728538,
"grad_norm": 4.759885977268913,
"learning_rate": 9.613459075424033e-07,
"loss": 1.0956,
"step": 90
},
{
"epoch": 0.4408352668213457,
"grad_norm": 4.868535908803129,
"learning_rate": 9.564367270466245e-07,
"loss": 1.0787,
"step": 95
},
{
"epoch": 0.46403712296983757,
"grad_norm": 5.180286116736628,
"learning_rate": 9.51248259538713e-07,
"loss": 1.0765,
"step": 100
},
{
"epoch": 0.46403712296983757,
"eval_loss": 1.0775035619735718,
"eval_runtime": 105.5293,
"eval_samples_per_second": 58.079,
"eval_steps_per_second": 0.91,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 645,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 589411381149696.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}