{
"best_metric": 1.0422885417938232,
"best_model_checkpoint": "./outputs/llava-mistral/RLAIF-V_Coocur-q0_50/checkpoint-300",
"epoch": 2.0869565217391304,
"eval_steps": 50,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.034782608695652174,
"grad_norm": 28.788983552687714,
"learning_rate": 5e-07,
"loss": 1.6631,
"step": 5
},
{
"epoch": 0.06956521739130435,
"grad_norm": 14.801312030106265,
"learning_rate": 1e-06,
"loss": 1.5311,
"step": 10
},
{
"epoch": 0.10434782608695652,
"grad_norm": 8.042597892857103,
"learning_rate": 9.99648681635985e-07,
"loss": 1.2909,
"step": 15
},
{
"epoch": 0.1391304347826087,
"grad_norm": 5.465265266311333,
"learning_rate": 9.985952202423114e-07,
"loss": 1.2361,
"step": 20
},
{
"epoch": 0.17391304347826086,
"grad_norm": 5.471328096600488,
"learning_rate": 9.96841096220313e-07,
"loss": 1.2008,
"step": 25
},
{
"epoch": 0.20869565217391303,
"grad_norm": 5.088536561695133,
"learning_rate": 9.943887745939163e-07,
"loss": 1.184,
"step": 30
},
{
"epoch": 0.24347826086956523,
"grad_norm": 4.979056344341446,
"learning_rate": 9.912417015456088e-07,
"loss": 1.1743,
"step": 35
},
{
"epoch": 0.2782608695652174,
"grad_norm": 5.098810591402351,
"learning_rate": 9.874042995736093e-07,
"loss": 1.1533,
"step": 40
},
{
"epoch": 0.3130434782608696,
"grad_norm": 4.9260826588669575,
"learning_rate": 9.828819612770495e-07,
"loss": 1.15,
"step": 45
},
{
"epoch": 0.34782608695652173,
"grad_norm": 5.009097422362774,
"learning_rate": 9.77681041777897e-07,
"loss": 1.1191,
"step": 50
},
{
"epoch": 0.34782608695652173,
"eval_loss": 1.126644253730774,
"eval_runtime": 71.6515,
"eval_samples_per_second": 57.026,
"eval_steps_per_second": 0.893,
"step": 50
},
{
"epoch": 0.3826086956521739,
"grad_norm": 4.710201613179453,
"learning_rate": 9.718088497902707e-07,
"loss": 1.1278,
"step": 55
},
{
"epoch": 0.41739130434782606,
"grad_norm": 4.656292887228332,
"learning_rate": 9.652736373497e-07,
"loss": 1.1259,
"step": 60
},
{
"epoch": 0.45217391304347826,
"grad_norm": 4.89840217135744,
"learning_rate": 9.580845882167572e-07,
"loss": 1.1196,
"step": 65
},
{
"epoch": 0.48695652173913045,
"grad_norm": 5.131971560708056,
"learning_rate": 9.502518049713631e-07,
"loss": 1.0982,
"step": 70
},
{
"epoch": 0.5217391304347826,
"grad_norm": 5.025459861546626,
"learning_rate": 9.417862948158997e-07,
"loss": 1.1104,
"step": 75
},
{
"epoch": 0.5565217391304348,
"grad_norm": 5.083697590841828,
"learning_rate": 9.326999541070803e-07,
"loss": 1.0856,
"step": 80
},
{
"epoch": 0.591304347826087,
"grad_norm": 5.065321153827992,
"learning_rate": 9.23005551638316e-07,
"loss": 1.1057,
"step": 85
},
{
"epoch": 0.6260869565217392,
"grad_norm": 4.793827429409211,
"learning_rate": 9.127167106960681e-07,
"loss": 1.0966,
"step": 90
},
{
"epoch": 0.6608695652173913,
"grad_norm": 4.988686751743354,
"learning_rate": 9.018478899154066e-07,
"loss": 1.0984,
"step": 95
},
{
"epoch": 0.6956521739130435,
"grad_norm": 5.137325195769105,
"learning_rate": 8.904143629616732e-07,
"loss": 1.0827,
"step": 100
},
{
"epoch": 0.6956521739130435,
"eval_loss": 1.0803709030151367,
"eval_runtime": 71.4447,
"eval_samples_per_second": 57.191,
"eval_steps_per_second": 0.896,
"step": 100
},
{
"epoch": 0.7304347826086957,
"grad_norm": 5.24024786212869,
"learning_rate": 8.784321970668053e-07,
"loss": 1.0754,
"step": 105
},
{
"epoch": 0.7652173913043478,
"grad_norm": 5.09088801959773,
"learning_rate": 8.659182304504808e-07,
"loss": 1.0796,
"step": 110
},
{
"epoch": 0.8,
"grad_norm": 5.068283621568658,
"learning_rate": 8.528900486578158e-07,
"loss": 1.0777,
"step": 115
},
{
"epoch": 0.8347826086956521,
"grad_norm": 5.100518718656821,
"learning_rate": 8.393659598468642e-07,
"loss": 1.0909,
"step": 120
},
{
"epoch": 0.8695652173913043,
"grad_norm": 5.090940478434361,
"learning_rate": 8.253649690606494e-07,
"loss": 1.0672,
"step": 125
},
{
"epoch": 0.9043478260869565,
"grad_norm": 5.14647466195505,
"learning_rate": 8.10906751519882e-07,
"loss": 1.0672,
"step": 130
},
{
"epoch": 0.9391304347826087,
"grad_norm": 4.93995490693938,
"learning_rate": 7.960116249738937e-07,
"loss": 1.056,
"step": 135
},
{
"epoch": 0.9739130434782609,
"grad_norm": 5.093588987883723,
"learning_rate": 7.807005211486444e-07,
"loss": 1.0604,
"step": 140
},
{
"epoch": 1.008695652173913,
"grad_norm": 5.489196503002481,
"learning_rate": 7.649949563319227e-07,
"loss": 1.0113,
"step": 145
},
{
"epoch": 1.0434782608695652,
"grad_norm": 6.039150932058514,
"learning_rate": 7.489170011370779e-07,
"loss": 0.9347,
"step": 150
},
{
"epoch": 1.0434782608695652,
"eval_loss": 1.0631003379821777,
"eval_runtime": 71.1863,
"eval_samples_per_second": 57.399,
"eval_steps_per_second": 0.899,
"step": 150
},
{
"epoch": 1.0782608695652174,
"grad_norm": 5.971802160671762,
"learning_rate": 7.324892494877733e-07,
"loss": 0.9277,
"step": 155
},
{
"epoch": 1.1130434782608696,
"grad_norm": 5.391675039658058,
"learning_rate": 7.15734786867344e-07,
"loss": 0.9296,
"step": 160
},
{
"epoch": 1.1478260869565218,
"grad_norm": 5.534633373232827,
"learning_rate": 6.986771578773811e-07,
"loss": 0.9147,
"step": 165
},
{
"epoch": 1.182608695652174,
"grad_norm": 5.358757256728094,
"learning_rate": 6.81340333151128e-07,
"loss": 0.9163,
"step": 170
},
{
"epoch": 1.2173913043478262,
"grad_norm": 5.543937956600803,
"learning_rate": 6.637486756681842e-07,
"loss": 0.915,
"step": 175
},
{
"epoch": 1.2521739130434781,
"grad_norm": 5.443035617719796,
"learning_rate": 6.459269065178591e-07,
"loss": 0.9234,
"step": 180
},
{
"epoch": 1.2869565217391306,
"grad_norm": 5.754731089663971,
"learning_rate": 6.279000701592794e-07,
"loss": 0.9208,
"step": 185
},
{
"epoch": 1.3217391304347825,
"grad_norm": 5.688995590179024,
"learning_rate": 6.096934992270767e-07,
"loss": 0.9173,
"step": 190
},
{
"epoch": 1.3565217391304347,
"grad_norm": 5.500423257037727,
"learning_rate": 5.913327789321077e-07,
"loss": 0.9141,
"step": 195
},
{
"epoch": 1.391304347826087,
"grad_norm": 5.827091513999703,
"learning_rate": 5.728437111072375e-07,
"loss": 0.9284,
"step": 200
},
{
"epoch": 1.391304347826087,
"eval_loss": 1.0466628074645996,
"eval_runtime": 71.2891,
"eval_samples_per_second": 57.316,
"eval_steps_per_second": 0.898,
"step": 200
},
{
"epoch": 1.4260869565217391,
"grad_norm": 5.802245310068559,
"learning_rate": 5.542522779487071e-07,
"loss": 0.9138,
"step": 205
},
{
"epoch": 1.4608695652173913,
"grad_norm": 5.61384072841023,
"learning_rate": 5.355846055040448e-07,
"loss": 0.8997,
"step": 210
},
{
"epoch": 1.4956521739130435,
"grad_norm": 5.904579252940991,
"learning_rate": 5.168669269578232e-07,
"loss": 0.8884,
"step": 215
},
{
"epoch": 1.5304347826086957,
"grad_norm": 5.271432415804347,
"learning_rate": 4.981255457668624e-07,
"loss": 0.8992,
"step": 220
},
{
"epoch": 1.5652173913043477,
"grad_norm": 5.899020033427604,
"learning_rate": 4.793867986966802e-07,
"loss": 0.8925,
"step": 225
},
{
"epoch": 1.6,
"grad_norm": 5.350779976715096,
"learning_rate": 4.606770188111338e-07,
"loss": 0.8914,
"step": 230
},
{
"epoch": 1.634782608695652,
"grad_norm": 5.512269864038317,
"learning_rate": 4.420224984672653e-07,
"loss": 0.8912,
"step": 235
},
{
"epoch": 1.6695652173913045,
"grad_norm": 5.678688235761292,
"learning_rate": 4.2344945236734963e-07,
"loss": 0.9089,
"step": 240
},
{
"epoch": 1.7043478260869565,
"grad_norm": 5.627490372225429,
"learning_rate": 4.049839807200688e-07,
"loss": 0.9176,
"step": 245
},
{
"epoch": 1.7391304347826086,
"grad_norm": 5.641266523304515,
"learning_rate": 3.866520325625825e-07,
"loss": 0.9103,
"step": 250
},
{
"epoch": 1.7391304347826086,
"eval_loss": 1.03276789188385,
"eval_runtime": 71.4756,
"eval_samples_per_second": 57.166,
"eval_steps_per_second": 0.895,
"step": 250
},
{
"epoch": 1.7739130434782608,
"grad_norm": 5.527194295602311,
"learning_rate": 3.684793692950344e-07,
"loss": 0.9032,
"step": 255
},
{
"epoch": 1.808695652173913,
"grad_norm": 5.767218475736747,
"learning_rate": 3.504915284787405e-07,
"loss": 0.8957,
"step": 260
},
{
"epoch": 1.8434782608695652,
"grad_norm": 5.498699782720146,
"learning_rate": 3.327137879489312e-07,
"loss": 0.8956,
"step": 265
},
{
"epoch": 1.8782608695652174,
"grad_norm": 5.5888005763392,
"learning_rate": 3.1517113029248233e-07,
"loss": 0.8961,
"step": 270
},
{
"epoch": 1.9130434782608696,
"grad_norm": 5.6876581687221766,
"learning_rate": 2.9788820774054697e-07,
"loss": 0.8908,
"step": 275
},
{
"epoch": 1.9478260869565216,
"grad_norm": 6.233330329395755,
"learning_rate": 2.8088930752543063e-07,
"loss": 0.8993,
"step": 280
},
{
"epoch": 1.982608695652174,
"grad_norm": 5.614794208303309,
"learning_rate": 2.641983177503876e-07,
"loss": 0.8856,
"step": 285
},
{
"epoch": 2.017391304347826,
"grad_norm": 6.378827429420443,
"learning_rate": 2.4783869382030424e-07,
"loss": 0.8606,
"step": 290
},
{
"epoch": 2.0521739130434784,
"grad_norm": 6.12687179373102,
"learning_rate": 2.3183342548044065e-07,
"loss": 0.8044,
"step": 295
},
{
"epoch": 2.0869565217391304,
"grad_norm": 5.8322466278221805,
"learning_rate": 2.1620500450955221e-07,
"loss": 0.8121,
"step": 300
},
{
"epoch": 2.0869565217391304,
"eval_loss": 1.0422885417938232,
"eval_runtime": 71.4365,
"eval_samples_per_second": 57.198,
"eval_steps_per_second": 0.896,
"step": 300
}
],
"logging_steps": 5,
"max_steps": 429,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1768702026448896.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}