{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6956521739130435,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.034782608695652174,
"grad_norm": 27.628698806495482,
"learning_rate": 5e-07,
"loss": 1.6404,
"step": 5
},
{
"epoch": 0.06956521739130435,
"grad_norm": 14.379307335524663,
"learning_rate": 1e-06,
"loss": 1.5079,
"step": 10
},
{
"epoch": 0.10434782608695652,
"grad_norm": 7.6171415097012805,
"learning_rate": 9.99648681635985e-07,
"loss": 1.2858,
"step": 15
},
{
"epoch": 0.1391304347826087,
"grad_norm": 5.902380140367801,
"learning_rate": 9.985952202423114e-07,
"loss": 1.2292,
"step": 20
},
{
"epoch": 0.17391304347826086,
"grad_norm": 5.431235666701782,
"learning_rate": 9.96841096220313e-07,
"loss": 1.2035,
"step": 25
},
{
"epoch": 0.20869565217391303,
"grad_norm": 5.011022329519287,
"learning_rate": 9.943887745939163e-07,
"loss": 1.1615,
"step": 30
},
{
"epoch": 0.24347826086956523,
"grad_norm": 4.933604072753844,
"learning_rate": 9.912417015456088e-07,
"loss": 1.1627,
"step": 35
},
{
"epoch": 0.2782608695652174,
"grad_norm": 4.904429773196176,
"learning_rate": 9.874042995736093e-07,
"loss": 1.1468,
"step": 40
},
{
"epoch": 0.3130434782608696,
"grad_norm": 4.566916992899494,
"learning_rate": 9.828819612770495e-07,
"loss": 1.1457,
"step": 45
},
{
"epoch": 0.34782608695652173,
"grad_norm": 4.618513047328418,
"learning_rate": 9.77681041777897e-07,
"loss": 1.1363,
"step": 50
},
{
"epoch": 0.34782608695652173,
"eval_loss": 1.1214656829833984,
"eval_runtime": 71.1557,
"eval_samples_per_second": 57.423,
"eval_steps_per_second": 0.899,
"step": 50
},
{
"epoch": 0.3826086956521739,
"grad_norm": 4.482447125747761,
"learning_rate": 9.718088497902707e-07,
"loss": 1.1252,
"step": 55
},
{
"epoch": 0.41739130434782606,
"grad_norm": 4.746315495183657,
"learning_rate": 9.652736373497e-07,
"loss": 1.1144,
"step": 60
},
{
"epoch": 0.45217391304347826,
"grad_norm": 4.800644875448408,
"learning_rate": 9.580845882167572e-07,
"loss": 1.1048,
"step": 65
},
{
"epoch": 0.48695652173913045,
"grad_norm": 5.041563912063608,
"learning_rate": 9.502518049713631e-07,
"loss": 1.0967,
"step": 70
},
{
"epoch": 0.5217391304347826,
"grad_norm": 4.834159384378619,
"learning_rate": 9.417862948158997e-07,
"loss": 1.0922,
"step": 75
},
{
"epoch": 0.5565217391304348,
"grad_norm": 4.966276459660724,
"learning_rate": 9.326999541070803e-07,
"loss": 1.1065,
"step": 80
},
{
"epoch": 0.591304347826087,
"grad_norm": 4.651039844387456,
"learning_rate": 9.23005551638316e-07,
"loss": 1.08,
"step": 85
},
{
"epoch": 0.6260869565217392,
"grad_norm": 4.61712494812361,
"learning_rate": 9.127167106960681e-07,
"loss": 1.0871,
"step": 90
},
{
"epoch": 0.6608695652173913,
"grad_norm": 4.965649778107791,
"learning_rate": 9.018478899154066e-07,
"loss": 1.0791,
"step": 95
},
{
"epoch": 0.6956521739130435,
"grad_norm": 4.710879346221803,
"learning_rate": 8.904143629616732e-07,
"loss": 1.0856,
"step": 100
},
{
"epoch": 0.6956521739130435,
"eval_loss": 1.074812412261963,
"eval_runtime": 70.7335,
"eval_samples_per_second": 57.766,
"eval_steps_per_second": 0.905,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 429,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 589411381149696.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}