{
"best_metric": 1.0907771587371826,
"best_model_checkpoint": "./outputs/llava-mistral/RLAIF-V_Coocur-q0_25/checkpoint-200",
"epoch": 2.7777777777777777,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06944444444444445,
"grad_norm": 28.445383103621488,
"learning_rate": 5e-07,
"loss": 1.692,
"step": 5
},
{
"epoch": 0.1388888888888889,
"grad_norm": 15.214710637322117,
"learning_rate": 1e-06,
"loss": 1.5428,
"step": 10
},
{
"epoch": 0.20833333333333334,
"grad_norm": 7.725515360957365,
"learning_rate": 9.985471028179154e-07,
"loss": 1.3232,
"step": 15
},
{
"epoch": 0.2777777777777778,
"grad_norm": 5.771045580757361,
"learning_rate": 9.94196854912548e-07,
"loss": 1.2575,
"step": 20
},
{
"epoch": 0.3472222222222222,
"grad_norm": 5.8072030206153045,
"learning_rate": 9.869745381355905e-07,
"loss": 1.2163,
"step": 25
},
{
"epoch": 0.4166666666666667,
"grad_norm": 5.2740216973392755,
"learning_rate": 9.769221256218162e-07,
"loss": 1.1937,
"step": 30
},
{
"epoch": 0.4861111111111111,
"grad_norm": 5.030592874663904,
"learning_rate": 9.64098037858483e-07,
"loss": 1.1473,
"step": 35
},
{
"epoch": 0.5555555555555556,
"grad_norm": 4.82797980028618,
"learning_rate": 9.485768031694871e-07,
"loss": 1.1529,
"step": 40
},
{
"epoch": 0.625,
"grad_norm": 5.01413477732448,
"learning_rate": 9.304486245873971e-07,
"loss": 1.1598,
"step": 45
},
{
"epoch": 0.6944444444444444,
"grad_norm": 5.005624161351076,
"learning_rate": 9.098188556305262e-07,
"loss": 1.1343,
"step": 50
},
{
"epoch": 0.6944444444444444,
"eval_loss": 1.1338481903076172,
"eval_runtime": 35.4083,
"eval_samples_per_second": 57.698,
"eval_steps_per_second": 0.904,
"step": 50
},
{
"epoch": 0.7638888888888888,
"grad_norm": 5.121349492890328,
"learning_rate": 8.868073880316123e-07,
"loss": 1.1465,
"step": 55
},
{
"epoch": 0.8333333333333334,
"grad_norm": 4.967730285097845,
"learning_rate": 8.615479549763755e-07,
"loss": 1.1205,
"step": 60
},
{
"epoch": 0.9027777777777778,
"grad_norm": 4.888543325436437,
"learning_rate": 8.341873539012443e-07,
"loss": 1.1101,
"step": 65
},
{
"epoch": 0.9722222222222222,
"grad_norm": 5.136971847854665,
"learning_rate": 8.048845933670271e-07,
"loss": 1.1137,
"step": 70
},
{
"epoch": 1.0416666666666667,
"grad_norm": 5.65186903458913,
"learning_rate": 7.738099689665539e-07,
"loss": 1.0344,
"step": 75
},
{
"epoch": 1.1111111111111112,
"grad_norm": 5.436495818544836,
"learning_rate": 7.41144073636728e-07,
"loss": 0.9903,
"step": 80
},
{
"epoch": 1.1805555555555556,
"grad_norm": 5.1217292921898485,
"learning_rate": 7.070767481266492e-07,
"loss": 0.976,
"step": 85
},
{
"epoch": 1.25,
"grad_norm": 5.550754536917889,
"learning_rate": 6.718059777212565e-07,
"loss": 0.9731,
"step": 90
},
{
"epoch": 1.3194444444444444,
"grad_norm": 5.3291194115834095,
"learning_rate": 6.355367416322778e-07,
"loss": 0.9687,
"step": 95
},
{
"epoch": 1.3888888888888888,
"grad_norm": 5.187329472162837,
"learning_rate": 5.984798217433531e-07,
"loss": 0.9522,
"step": 100
},
{
"epoch": 1.3888888888888888,
"eval_loss": 1.0978167057037354,
"eval_runtime": 35.0302,
"eval_samples_per_second": 58.321,
"eval_steps_per_second": 0.913,
"step": 100
},
{
"epoch": 1.4583333333333333,
"grad_norm": 5.429628497748933,
"learning_rate": 5.608505776324157e-07,
"loss": 0.9579,
"step": 105
},
{
"epoch": 1.5277777777777777,
"grad_norm": 5.186690517651686,
"learning_rate": 5.228676949903973e-07,
"loss": 0.9593,
"step": 110
},
{
"epoch": 1.5972222222222223,
"grad_norm": 5.242848282685733,
"learning_rate": 4.847519147099294e-07,
"loss": 0.952,
"step": 115
},
{
"epoch": 1.6666666666666665,
"grad_norm": 5.57786673198586,
"learning_rate": 4.46724750030062e-07,
"loss": 0.9444,
"step": 120
},
{
"epoch": 1.7361111111111112,
"grad_norm": 5.398693342092869,
"learning_rate": 4.0900719919241935e-07,
"loss": 0.9505,
"step": 125
},
{
"epoch": 1.8055555555555556,
"grad_norm": 5.747268409628116,
"learning_rate": 3.7181846109031e-07,
"loss": 0.9547,
"step": 130
},
{
"epoch": 1.875,
"grad_norm": 5.567481347529886,
"learning_rate": 3.353746613749093e-07,
"loss": 0.9463,
"step": 135
},
{
"epoch": 1.9444444444444444,
"grad_norm": 5.400936083455457,
"learning_rate": 2.9988759642186093e-07,
"loss": 0.943,
"step": 140
},
{
"epoch": 2.013888888888889,
"grad_norm": 6.366675658176883,
"learning_rate": 2.655635024578483e-07,
"loss": 0.9177,
"step": 145
},
{
"epoch": 2.0833333333333335,
"grad_norm": 5.430229049864778,
"learning_rate": 2.3260185700046292e-07,
"loss": 0.8681,
"step": 150
},
{
"epoch": 2.0833333333333335,
"eval_loss": 1.084751844406128,
"eval_runtime": 35.1219,
"eval_samples_per_second": 58.169,
"eval_steps_per_second": 0.911,
"step": 150
},
{
"epoch": 2.1527777777777777,
"grad_norm": 5.491519565547442,
"learning_rate": 2.0119421957691218e-07,
"loss": 0.842,
"step": 155
},
{
"epoch": 2.2222222222222223,
"grad_norm": 5.753980047210634,
"learning_rate": 1.7152311845883094e-07,
"loss": 0.8516,
"step": 160
},
{
"epoch": 2.2916666666666665,
"grad_norm": 5.773460825469727,
"learning_rate": 1.4376098988303404e-07,
"loss": 0.8471,
"step": 165
},
{
"epoch": 2.361111111111111,
"grad_norm": 5.780876141809209,
"learning_rate": 1.1806917592302761e-07,
"loss": 0.8699,
"step": 170
},
{
"epoch": 2.4305555555555554,
"grad_norm": 5.458710876272155,
"learning_rate": 9.459698683523204e-08,
"loss": 0.8268,
"step": 175
},
{
"epoch": 2.5,
"grad_norm": 5.709014238644116,
"learning_rate": 7.348083332917926e-08,
"loss": 0.8705,
"step": 180
},
{
"epoch": 2.5694444444444446,
"grad_norm": 5.718392280522503,
"learning_rate": 5.484343380457124e-08,
"loss": 0.8466,
"step": 185
},
{
"epoch": 2.638888888888889,
"grad_norm": 5.770791652413458,
"learning_rate": 3.879310116241041e-08,
"loss": 0.8482,
"step": 190
},
{
"epoch": 2.7083333333333335,
"grad_norm": 5.555636942265803,
"learning_rate": 2.5423113334966218e-08,
"loss": 0.8456,
"step": 195
},
{
"epoch": 2.7777777777777777,
"grad_norm": 5.6835384408720735,
"learning_rate": 1.4811171192794624e-08,
"loss": 0.8498,
"step": 200
},
{
"epoch": 2.7777777777777777,
"eval_loss": 1.0907771587371826,
"eval_runtime": 35.1999,
"eval_samples_per_second": 58.04,
"eval_steps_per_second": 0.909,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 216,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1179056703799296.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}