{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.955223880597015,
"eval_steps": 50,
"global_step": 99,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14925373134328357,
"grad_norm": 55.56373270084105,
"learning_rate": 5e-07,
"logits/chosen": -2.712287664413452,
"logits/rejected": -2.7052791118621826,
"logps/chosen": -286.204345703125,
"logps/rejected": -230.34707641601562,
"loss": 0.6917,
"rewards/accuracies": 0.3812499940395355,
"rewards/chosen": 0.01282244361937046,
"rewards/margins": 0.007527926471084356,
"rewards/rejected": 0.005294515751302242,
"step": 5
},
{
"epoch": 0.29850746268656714,
"grad_norm": 51.05262078906845,
"learning_rate": 1e-06,
"logits/chosen": -2.6947524547576904,
"logits/rejected": -2.6870532035827637,
"logps/chosen": -274.43988037109375,
"logps/rejected": -204.74749755859375,
"loss": 0.6339,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 0.5373737215995789,
"rewards/margins": 0.23426207900047302,
"rewards/rejected": 0.3031116724014282,
"step": 10
},
{
"epoch": 0.44776119402985076,
"grad_norm": 42.09975022539425,
"learning_rate": 9.922326639307916e-07,
"logits/chosen": -2.526808738708496,
"logits/rejected": -2.524719715118408,
"logps/chosen": -264.34503173828125,
"logps/rejected": -198.8230743408203,
"loss": 0.5573,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 1.6411030292510986,
"rewards/margins": 0.87860506772995,
"rewards/rejected": 0.7624980211257935,
"step": 15
},
{
"epoch": 0.5970149253731343,
"grad_norm": 58.956809776446164,
"learning_rate": 9.691719817616146e-07,
"logits/chosen": -2.4723422527313232,
"logits/rejected": -2.4854772090911865,
"logps/chosen": -257.2549743652344,
"logps/rejected": -190.01406860351562,
"loss": 0.6028,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 1.877847671508789,
"rewards/margins": 1.1436525583267212,
"rewards/rejected": 0.7341950535774231,
"step": 20
},
{
"epoch": 0.746268656716418,
"grad_norm": 48.04735088035122,
"learning_rate": 9.315344337660421e-07,
"logits/chosen": -2.4798216819763184,
"logits/rejected": -2.456294059753418,
"logps/chosen": -241.55734252929688,
"logps/rejected": -224.9027557373047,
"loss": 0.5879,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 1.5884907245635986,
"rewards/margins": 1.6026525497436523,
"rewards/rejected": -0.014161976985633373,
"step": 25
},
{
"epoch": 0.8955223880597015,
"grad_norm": 38.642660675520226,
"learning_rate": 8.804893938804838e-07,
"logits/chosen": -2.4256484508514404,
"logits/rejected": -2.4327585697174072,
"logps/chosen": -271.9588623046875,
"logps/rejected": -227.793212890625,
"loss": 0.5446,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": 1.7691961526870728,
"rewards/margins": 1.9415054321289062,
"rewards/rejected": -0.1723092496395111,
"step": 30
},
{
"epoch": 1.044776119402985,
"grad_norm": 22.04271479326562,
"learning_rate": 8.176227980227692e-07,
"logits/chosen": -2.4325883388519287,
"logits/rejected": -2.413778781890869,
"logps/chosen": -266.7374572753906,
"logps/rejected": -215.2702178955078,
"loss": 0.4634,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.7315490245819092,
"rewards/margins": 1.657210350036621,
"rewards/rejected": 0.0743386298418045,
"step": 35
},
{
"epoch": 1.1940298507462686,
"grad_norm": 20.532663094412452,
"learning_rate": 7.448878701031142e-07,
"logits/chosen": -2.396460771560669,
"logits/rejected": -2.3917737007141113,
"logps/chosen": -255.44772338867188,
"logps/rejected": -215.0567626953125,
"loss": 0.2194,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.8698675632476807,
"rewards/margins": 2.881333112716675,
"rewards/rejected": -1.0114656686782837,
"step": 40
},
{
"epoch": 1.3432835820895521,
"grad_norm": 23.184836238799452,
"learning_rate": 6.64544436638005e-07,
"logits/chosen": -2.395986318588257,
"logits/rejected": -2.3959803581237793,
"logps/chosen": -264.5244445800781,
"logps/rejected": -234.6321258544922,
"loss": 0.2456,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 2.4978160858154297,
"rewards/margins": 3.6377322673797607,
"rewards/rejected": -1.139916181564331,
"step": 45
},
{
"epoch": 1.4925373134328357,
"grad_norm": 28.822321280194625,
"learning_rate": 5.790887154221519e-07,
"logits/chosen": -2.3735032081604004,
"logits/rejected": -2.3805091381073,
"logps/chosen": -255.3271942138672,
"logps/rejected": -220.1824951171875,
"loss": 0.2483,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 2.559195041656494,
"rewards/margins": 3.5177524089813232,
"rewards/rejected": -0.9585572481155396,
"step": 50
},
{
"epoch": 1.4925373134328357,
"eval_logits/chosen": -2.3475418090820312,
"eval_logits/rejected": -2.3466103076934814,
"eval_logps/chosen": -280.7090148925781,
"eval_logps/rejected": -235.2706756591797,
"eval_loss": 0.5524222254753113,
"eval_rewards/accuracies": 0.8083333373069763,
"eval_rewards/chosen": 2.127183198928833,
"eval_rewards/margins": 2.6592726707458496,
"eval_rewards/rejected": -0.5320896506309509,
"eval_runtime": 62.8504,
"eval_samples_per_second": 15.115,
"eval_steps_per_second": 0.239,
"step": 50
},
{
"epoch": 1.6417910447761193,
"grad_norm": 22.75352227408355,
"learning_rate": 4.911757596784357e-07,
"logits/chosen": -2.3721463680267334,
"logits/rejected": -2.3761184215545654,
"logps/chosen": -257.5892028808594,
"logps/rejected": -218.6893768310547,
"loss": 0.304,
"rewards/accuracies": 0.856249988079071,
"rewards/chosen": 2.7064647674560547,
"rewards/margins": 3.533766508102417,
"rewards/rejected": -0.8273016810417175,
"step": 55
},
{
"epoch": 1.7910447761194028,
"grad_norm": 29.311168965506727,
"learning_rate": 4.0353696729525153e-07,
"logits/chosen": -2.3819665908813477,
"logits/rejected": -2.3746392726898193,
"logps/chosen": -252.9134979248047,
"logps/rejected": -218.8928985595703,
"loss": 0.2714,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 3.3164374828338623,
"rewards/margins": 3.6505794525146484,
"rewards/rejected": -0.33414122462272644,
"step": 60
},
{
"epoch": 1.9402985074626866,
"grad_norm": 23.46365582271964,
"learning_rate": 3.1889521808515883e-07,
"logits/chosen": -2.3811304569244385,
"logits/rejected": -2.368058204650879,
"logps/chosen": -251.28176879882812,
"logps/rejected": -252.00857543945312,
"loss": 0.316,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 2.9416391849517822,
"rewards/margins": 3.783787250518799,
"rewards/rejected": -0.8421486020088196,
"step": 65
},
{
"epoch": 2.08955223880597,
"grad_norm": 14.481288651867855,
"learning_rate": 2.398802756945589e-07,
"logits/chosen": -2.3532087802886963,
"logits/rejected": -2.354099750518799,
"logps/chosen": -250.82144165039062,
"logps/rejected": -252.1094207763672,
"loss": 0.2295,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 2.9837050437927246,
"rewards/margins": 3.6309990882873535,
"rewards/rejected": -0.6472941637039185,
"step": 70
},
{
"epoch": 2.2388059701492535,
"grad_norm": 21.236319133701897,
"learning_rate": 1.689470825715998e-07,
"logits/chosen": -2.356414318084717,
"logits/rejected": -2.3434813022613525,
"logps/chosen": -221.6200714111328,
"logps/rejected": -228.5461883544922,
"loss": 0.1691,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 2.63923978805542,
"rewards/margins": 3.8326287269592285,
"rewards/rejected": -1.193388819694519,
"step": 75
},
{
"epoch": 2.388059701492537,
"grad_norm": 22.854893892176456,
"learning_rate": 1.0829948651407372e-07,
"logits/chosen": -2.3592309951782227,
"logits/rejected": -2.365086078643799,
"logps/chosen": -252.72006225585938,
"logps/rejected": -216.23495483398438,
"loss": 0.18,
"rewards/accuracies": 0.9375,
"rewards/chosen": 2.9743988513946533,
"rewards/margins": 4.134276390075684,
"rewards/rejected": -1.159877896308899,
"step": 80
},
{
"epoch": 2.5373134328358207,
"grad_norm": 15.980808006004528,
"learning_rate": 5.982176856345444e-08,
"logits/chosen": -2.3686461448669434,
"logits/rejected": -2.348933696746826,
"logps/chosen": -252.70748901367188,
"logps/rejected": -250.7736358642578,
"loss": 0.1458,
"rewards/accuracies": 0.9375,
"rewards/chosen": 2.88429594039917,
"rewards/margins": 4.176507472991943,
"rewards/rejected": -1.2922115325927734,
"step": 85
},
{
"epoch": 2.6865671641791042,
"grad_norm": 20.436727705586488,
"learning_rate": 2.5020099628504598e-08,
"logits/chosen": -2.34785532951355,
"logits/rejected": -2.3492188453674316,
"logps/chosen": -246.19711303710938,
"logps/rejected": -241.9957733154297,
"loss": 0.1655,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 3.0024290084838867,
"rewards/margins": 4.110239505767822,
"rewards/rejected": -1.1078107357025146,
"step": 90
},
{
"epoch": 2.835820895522388,
"grad_norm": 14.996067631975396,
"learning_rate": 4.975744742772847e-09,
"logits/chosen": -2.3592984676361084,
"logits/rejected": -2.341296672821045,
"logps/chosen": -241.30673217773438,
"logps/rejected": -234.73397827148438,
"loss": 0.1404,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 2.8361592292785645,
"rewards/margins": 3.8578391075134277,
"rewards/rejected": -1.0216796398162842,
"step": 95
},
{
"epoch": 2.955223880597015,
"step": 99,
"total_flos": 1167029855846400.0,
"train_loss": 0.34429408685125484,
"train_runtime": 3404.4799,
"train_samples_per_second": 7.532,
"train_steps_per_second": 0.029
}
],
"logging_steps": 5,
"max_steps": 99,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1167029855846400.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}