{
"best_metric": 1.0423126220703125,
"best_model_checkpoint": "./outputs/llava-mistral/RLAIF-V-Cosi-q0_50/checkpoint-400",
"epoch": 2.9843478260869567,
"eval_steps": 50,
"global_step": 429,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.034782608695652174,
"grad_norm": 27.662987193358703,
"learning_rate": 5e-07,
"loss": 1.6833,
"step": 5
},
{
"epoch": 0.06956521739130435,
"grad_norm": 13.995087929114321,
"learning_rate": 1e-06,
"loss": 1.5247,
"step": 10
},
{
"epoch": 0.10434782608695652,
"grad_norm": 7.541630391104073,
"learning_rate": 9.99648681635985e-07,
"loss": 1.2991,
"step": 15
},
{
"epoch": 0.1391304347826087,
"grad_norm": 5.489316670983378,
"learning_rate": 9.985952202423114e-07,
"loss": 1.2447,
"step": 20
},
{
"epoch": 0.17391304347826086,
"grad_norm": 5.136089076739333,
"learning_rate": 9.96841096220313e-07,
"loss": 1.1959,
"step": 25
},
{
"epoch": 0.20869565217391303,
"grad_norm": 5.414214340511089,
"learning_rate": 9.943887745939163e-07,
"loss": 1.1638,
"step": 30
},
{
"epoch": 0.24347826086956523,
"grad_norm": 5.152832718688883,
"learning_rate": 9.912417015456088e-07,
"loss": 1.1559,
"step": 35
},
{
"epoch": 0.2782608695652174,
"grad_norm": 4.840777750826951,
"learning_rate": 9.874042995736093e-07,
"loss": 1.1673,
"step": 40
},
{
"epoch": 0.3130434782608696,
"grad_norm": 4.747365301611716,
"learning_rate": 9.828819612770495e-07,
"loss": 1.1403,
"step": 45
},
{
"epoch": 0.34782608695652173,
"grad_norm": 4.903438717062914,
"learning_rate": 9.77681041777897e-07,
"loss": 1.1387,
"step": 50
},
{
"epoch": 0.34782608695652173,
"eval_loss": 1.1293702125549316,
"eval_runtime": 71.7154,
"eval_samples_per_second": 56.975,
"eval_steps_per_second": 0.892,
"step": 50
},
{
"epoch": 0.3826086956521739,
"grad_norm": 4.784163808478027,
"learning_rate": 9.718088497902707e-07,
"loss": 1.1378,
"step": 55
},
{
"epoch": 0.41739130434782606,
"grad_norm": 4.940465636430018,
"learning_rate": 9.652736373497e-07,
"loss": 1.1359,
"step": 60
},
{
"epoch": 0.45217391304347826,
"grad_norm": 4.908919655982328,
"learning_rate": 9.580845882167572e-07,
"loss": 1.1054,
"step": 65
},
{
"epoch": 0.48695652173913045,
"grad_norm": 4.804151392451496,
"learning_rate": 9.502518049713631e-07,
"loss": 1.1003,
"step": 70
},
{
"epoch": 0.5217391304347826,
"grad_norm": 5.02157901407176,
"learning_rate": 9.417862948158997e-07,
"loss": 1.1012,
"step": 75
},
{
"epoch": 0.5565217391304348,
"grad_norm": 4.674083326212897,
"learning_rate": 9.326999541070803e-07,
"loss": 1.1004,
"step": 80
},
{
"epoch": 0.591304347826087,
"grad_norm": 4.873056311068642,
"learning_rate": 9.23005551638316e-07,
"loss": 1.1176,
"step": 85
},
{
"epoch": 0.6260869565217392,
"grad_norm": 4.8872398044511725,
"learning_rate": 9.127167106960681e-07,
"loss": 1.0953,
"step": 90
},
{
"epoch": 0.6608695652173913,
"grad_norm": 4.895825564974144,
"learning_rate": 9.018478899154066e-07,
"loss": 1.0863,
"step": 95
},
{
"epoch": 0.6956521739130435,
"grad_norm": 5.166155043032071,
"learning_rate": 8.904143629616732e-07,
"loss": 1.0986,
"step": 100
},
{
"epoch": 0.6956521739130435,
"eval_loss": 1.0837830305099487,
"eval_runtime": 70.6524,
"eval_samples_per_second": 57.832,
"eval_steps_per_second": 0.906,
"step": 100
},
{
"epoch": 0.7304347826086957,
"grad_norm": 5.2985929076708045,
"learning_rate": 8.784321970668053e-07,
"loss": 1.0741,
"step": 105
},
{
"epoch": 0.7652173913043478,
"grad_norm": 4.689563303857618,
"learning_rate": 8.659182304504808e-07,
"loss": 1.0891,
"step": 110
},
{
"epoch": 0.8,
"grad_norm": 4.9567032009809235,
"learning_rate": 8.528900486578158e-07,
"loss": 1.0738,
"step": 115
},
{
"epoch": 0.8347826086956521,
"grad_norm": 4.991558123014301,
"learning_rate": 8.393659598468642e-07,
"loss": 1.0691,
"step": 120
},
{
"epoch": 0.8695652173913043,
"grad_norm": 5.066850027771844,
"learning_rate": 8.253649690606494e-07,
"loss": 1.0759,
"step": 125
},
{
"epoch": 0.9043478260869565,
"grad_norm": 4.76302682218398,
"learning_rate": 8.10906751519882e-07,
"loss": 1.0619,
"step": 130
},
{
"epoch": 0.9391304347826087,
"grad_norm": 4.5649236697962605,
"learning_rate": 7.960116249738937e-07,
"loss": 1.06,
"step": 135
},
{
"epoch": 0.9739130434782609,
"grad_norm": 5.1197283855125795,
"learning_rate": 7.807005211486444e-07,
"loss": 1.0632,
"step": 140
},
{
"epoch": 1.008695652173913,
"grad_norm": 5.776747498694688,
"learning_rate": 7.649949563319227e-07,
"loss": 1.0377,
"step": 145
},
{
"epoch": 1.0434782608695652,
"grad_norm": 5.61630184476014,
"learning_rate": 7.489170011370779e-07,
"loss": 0.9413,
"step": 150
},
{
"epoch": 1.0434782608695652,
"eval_loss": 1.0630384683609009,
"eval_runtime": 70.2189,
"eval_samples_per_second": 58.189,
"eval_steps_per_second": 0.911,
"step": 150
},
{
"epoch": 1.0782608695652174,
"grad_norm": 5.1690625947327975,
"learning_rate": 7.324892494877733e-07,
"loss": 0.9337,
"step": 155
},
{
"epoch": 1.1130434782608696,
"grad_norm": 5.298477294496114,
"learning_rate": 7.15734786867344e-07,
"loss": 0.9139,
"step": 160
},
{
"epoch": 1.1478260869565218,
"grad_norm": 5.205807925552876,
"learning_rate": 6.986771578773811e-07,
"loss": 0.9151,
"step": 165
},
{
"epoch": 1.182608695652174,
"grad_norm": 5.913625439498387,
"learning_rate": 6.81340333151128e-07,
"loss": 0.9238,
"step": 170
},
{
"epoch": 1.2173913043478262,
"grad_norm": 5.640823940985171,
"learning_rate": 6.637486756681842e-07,
"loss": 0.9168,
"step": 175
},
{
"epoch": 1.2521739130434781,
"grad_norm": 5.877719234112348,
"learning_rate": 6.459269065178591e-07,
"loss": 0.9171,
"step": 180
},
{
"epoch": 1.2869565217391306,
"grad_norm": 5.5144114934814334,
"learning_rate": 6.279000701592794e-07,
"loss": 0.9259,
"step": 185
},
{
"epoch": 1.3217391304347825,
"grad_norm": 5.240042947267847,
"learning_rate": 6.096934992270767e-07,
"loss": 0.9273,
"step": 190
},
{
"epoch": 1.3565217391304347,
"grad_norm": 5.190013788934036,
"learning_rate": 5.913327789321077e-07,
"loss": 0.9145,
"step": 195
},
{
"epoch": 1.391304347826087,
"grad_norm": 5.967784073401065,
"learning_rate": 5.728437111072375e-07,
"loss": 0.9147,
"step": 200
},
{
"epoch": 1.391304347826087,
"eval_loss": 1.0509551763534546,
"eval_runtime": 70.5155,
"eval_samples_per_second": 57.945,
"eval_steps_per_second": 0.908,
"step": 200
},
{
"epoch": 1.4260869565217391,
"grad_norm": 5.411235956525858,
"learning_rate": 5.542522779487071e-07,
"loss": 0.9193,
"step": 205
},
{
"epoch": 1.4608695652173913,
"grad_norm": 5.59012586828579,
"learning_rate": 5.355846055040448e-07,
"loss": 0.9067,
"step": 210
},
{
"epoch": 1.4956521739130435,
"grad_norm": 5.384704251574017,
"learning_rate": 5.168669269578232e-07,
"loss": 0.9131,
"step": 215
},
{
"epoch": 1.5304347826086957,
"grad_norm": 5.57479352243273,
"learning_rate": 4.981255457668624e-07,
"loss": 0.8942,
"step": 220
},
{
"epoch": 1.5652173913043477,
"grad_norm": 5.463119101861457,
"learning_rate": 4.793867986966802e-07,
"loss": 0.9294,
"step": 225
},
{
"epoch": 1.6,
"grad_norm": 5.4708003052094005,
"learning_rate": 4.606770188111338e-07,
"loss": 0.9111,
"step": 230
},
{
"epoch": 1.634782608695652,
"grad_norm": 5.568398955000735,
"learning_rate": 4.420224984672653e-07,
"loss": 0.9099,
"step": 235
},
{
"epoch": 1.6695652173913045,
"grad_norm": 5.68039222529444,
"learning_rate": 4.2344945236734963e-07,
"loss": 0.9073,
"step": 240
},
{
"epoch": 1.7043478260869565,
"grad_norm": 5.2293512491951315,
"learning_rate": 4.049839807200688e-07,
"loss": 0.908,
"step": 245
},
{
"epoch": 1.7391304347826086,
"grad_norm": 5.782117460596377,
"learning_rate": 3.866520325625825e-07,
"loss": 0.9097,
"step": 250
},
{
"epoch": 1.7391304347826086,
"eval_loss": 1.0377378463745117,
"eval_runtime": 70.8114,
"eval_samples_per_second": 57.703,
"eval_steps_per_second": 0.904,
"step": 250
},
{
"epoch": 1.7739130434782608,
"grad_norm": 5.732500362531355,
"learning_rate": 3.684793692950344e-07,
"loss": 0.8975,
"step": 255
},
{
"epoch": 1.808695652173913,
"grad_norm": 5.818636957308969,
"learning_rate": 3.504915284787405e-07,
"loss": 0.9214,
"step": 260
},
{
"epoch": 1.8434782608695652,
"grad_norm": 5.455615333156063,
"learning_rate": 3.327137879489312e-07,
"loss": 0.9069,
"step": 265
},
{
"epoch": 1.8782608695652174,
"grad_norm": 5.7276818612093985,
"learning_rate": 3.1517113029248233e-07,
"loss": 0.9095,
"step": 270
},
{
"epoch": 1.9130434782608696,
"grad_norm": 5.482559766492109,
"learning_rate": 2.9788820774054697e-07,
"loss": 0.892,
"step": 275
},
{
"epoch": 1.9478260869565216,
"grad_norm": 5.334288743778187,
"learning_rate": 2.8088930752543063e-07,
"loss": 0.8931,
"step": 280
},
{
"epoch": 1.982608695652174,
"grad_norm": 5.688455541428037,
"learning_rate": 2.641983177503876e-07,
"loss": 0.9201,
"step": 285
},
{
"epoch": 2.017391304347826,
"grad_norm": 6.2731660854774045,
"learning_rate": 2.4783869382030424e-07,
"loss": 0.8534,
"step": 290
},
{
"epoch": 2.0521739130434784,
"grad_norm": 6.243694430077812,
"learning_rate": 2.3183342548044065e-07,
"loss": 0.8189,
"step": 295
},
{
"epoch": 2.0869565217391304,
"grad_norm": 5.92909426577586,
"learning_rate": 2.1620500450955221e-07,
"loss": 0.8125,
"step": 300
},
{
"epoch": 2.0869565217391304,
"eval_loss": 1.0442272424697876,
"eval_runtime": 70.8351,
"eval_samples_per_second": 57.683,
"eval_steps_per_second": 0.904,
"step": 300
},
{
"epoch": 2.121739130434783,
"grad_norm": 5.992567774134156,
"learning_rate": 2.0097539311278898e-07,
"loss": 0.8161,
"step": 305
},
{
"epoch": 2.1565217391304348,
"grad_norm": 5.764357489612678,
"learning_rate": 1.8616599305879333e-07,
"loss": 0.8136,
"step": 310
},
{
"epoch": 2.1913043478260867,
"grad_norm": 6.51190808882011,
"learning_rate": 1.7179761560436097e-07,
"loss": 0.8154,
"step": 315
},
{
"epoch": 2.226086956521739,
"grad_norm": 6.146915879474114,
"learning_rate": 1.5789045224893538e-07,
"loss": 0.8086,
"step": 320
},
{
"epoch": 2.260869565217391,
"grad_norm": 5.7736818396501,
"learning_rate": 1.444640463600293e-07,
"loss": 0.8099,
"step": 325
},
{
"epoch": 2.2956521739130435,
"grad_norm": 5.938531390491637,
"learning_rate": 1.3153726570944828e-07,
"loss": 0.7974,
"step": 330
},
{
"epoch": 2.3304347826086955,
"grad_norm": 6.123098412464914,
"learning_rate": 1.1912827595891312e-07,
"loss": 0.8068,
"step": 335
},
{
"epoch": 2.365217391304348,
"grad_norm": 5.919969337712625,
"learning_rate": 1.0725451513233674e-07,
"loss": 0.8067,
"step": 340
},
{
"epoch": 2.4,
"grad_norm": 5.639760238600868,
"learning_rate": 9.593266911063253e-08,
"loss": 0.8124,
"step": 345
},
{
"epoch": 2.4347826086956523,
"grad_norm": 5.962220123289774,
"learning_rate": 8.517864818348803e-08,
"loss": 0.8092,
"step": 350
},
{
"epoch": 2.4347826086956523,
"eval_loss": 1.0428001880645752,
"eval_runtime": 70.8204,
"eval_samples_per_second": 57.695,
"eval_steps_per_second": 0.904,
"step": 350
},
{
"epoch": 2.4695652173913043,
"grad_norm": 5.957837552151978,
"learning_rate": 7.500756469105818e-08,
"loss": 0.8206,
"step": 355
},
{
"epoch": 2.5043478260869563,
"grad_norm": 5.812454429730802,
"learning_rate": 6.543371178699442e-08,
"loss": 0.8063,
"step": 360
},
{
"epoch": 2.5391304347826087,
"grad_norm": 5.916588345067891,
"learning_rate": 5.647054335265489e-08,
"loss": 0.812,
"step": 365
},
{
"epoch": 2.573913043478261,
"grad_norm": 6.012523824031492,
"learning_rate": 4.813065509072278e-08,
"loss": 0.8124,
"step": 370
},
{
"epoch": 2.608695652173913,
"grad_norm": 5.568112392214681,
"learning_rate": 4.0425766824798814e-08,
"loss": 0.8066,
"step": 375
},
{
"epoch": 2.643478260869565,
"grad_norm": 5.599798167459086,
"learning_rate": 3.3366706029845096e-08,
"loss": 0.8046,
"step": 380
},
{
"epoch": 2.6782608695652175,
"grad_norm": 5.881826522090271,
"learning_rate": 2.696339261662156e-08,
"loss": 0.8165,
"step": 385
},
{
"epoch": 2.7130434782608694,
"grad_norm": 5.992599956475565,
"learning_rate": 2.122482499149869e-08,
"loss": 0.8057,
"step": 390
},
{
"epoch": 2.747826086956522,
"grad_norm": 5.663986990278859,
"learning_rate": 1.6159067411235737e-08,
"loss": 0.7964,
"step": 395
},
{
"epoch": 2.782608695652174,
"grad_norm": 5.977333309297443,
"learning_rate": 1.177323865049512e-08,
"loss": 0.8049,
"step": 400
},
{
"epoch": 2.782608695652174,
"eval_loss": 1.0423126220703125,
"eval_runtime": 70.855,
"eval_samples_per_second": 57.667,
"eval_steps_per_second": 0.903,
"step": 400
},
{
"epoch": 2.8173913043478263,
"grad_norm": 5.779458696126725,
"learning_rate": 8.073501998017152e-09,
"loss": 0.8146,
"step": 405
},
{
"epoch": 2.8521739130434782,
"grad_norm": 5.968600603380175,
"learning_rate": 5.065056595513984e-09,
"loss": 0.7988,
"step": 410
},
{
"epoch": 2.8869565217391306,
"grad_norm": 5.833084881766782,
"learning_rate": 2.752130131453756e-09,
"loss": 0.7937,
"step": 415
},
{
"epoch": 2.9217391304347826,
"grad_norm": 5.65804633124807,
"learning_rate": 1.137972900002171e-09,
"loss": 0.813,
"step": 420
},
{
"epoch": 2.9565217391304346,
"grad_norm": 6.063287910740486,
"learning_rate": 2.2485323347054552e-10,
"loss": 0.7961,
"step": 425
},
{
"epoch": 2.9843478260869567,
"step": 429,
"total_flos": 2529344492666880.0,
"train_loss": 0.9595547367087055,
"train_runtime": 6613.7892,
"train_samples_per_second": 16.679,
"train_steps_per_second": 0.065
}
],
"logging_steps": 5,
"max_steps": 429,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2529344492666880.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}