{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 50,
"global_step": 201,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07462686567164178,
"grad_norm": 57.12193824365218,
"learning_rate": 5e-07,
"logits/chosen": -2.716521978378296,
"logits/rejected": -2.7164063453674316,
"logps/chosen": -263.66815185546875,
"logps/rejected": -226.99600219726562,
"loss": 0.6899,
"rewards/accuracies": 0.3499999940395355,
"rewards/chosen": 0.014930379576981068,
"rewards/margins": 0.005484213586896658,
"rewards/rejected": 0.009446167387068272,
"step": 5
},
{
"epoch": 0.14925373134328357,
"grad_norm": 50.98436154009925,
"learning_rate": 1e-06,
"logits/chosen": -2.659832239151001,
"logits/rejected": -2.6706981658935547,
"logps/chosen": -240.44741821289062,
"logps/rejected": -202.6971893310547,
"loss": 0.6353,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": 0.4928322732448578,
"rewards/margins": 0.11446012556552887,
"rewards/rejected": 0.3783721327781677,
"step": 10
},
{
"epoch": 0.22388059701492538,
"grad_norm": 49.5897720682669,
"learning_rate": 9.983100718730718e-07,
"logits/chosen": -2.469956874847412,
"logits/rejected": -2.457557201385498,
"logps/chosen": -224.4867706298828,
"logps/rejected": -186.89651489257812,
"loss": 0.6504,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": 1.3739125728607178,
"rewards/margins": 0.8372632265090942,
"rewards/rejected": 0.5366495847702026,
"step": 15
},
{
"epoch": 0.29850746268656714,
"grad_norm": 38.57601617352061,
"learning_rate": 9.932517109205849e-07,
"logits/chosen": -2.329310894012451,
"logits/rejected": -2.304492235183716,
"logps/chosen": -216.98519897460938,
"logps/rejected": -203.6713104248047,
"loss": 0.6103,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 1.4170403480529785,
"rewards/margins": 1.1797821521759033,
"rewards/rejected": 0.2372581958770752,
"step": 20
},
{
"epoch": 0.373134328358209,
"grad_norm": 41.87199823484069,
"learning_rate": 9.848591102083375e-07,
"logits/chosen": -2.1940865516662598,
"logits/rejected": -2.1780757904052734,
"logps/chosen": -250.27261352539062,
"logps/rejected": -203.70118713378906,
"loss": 0.5969,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.575352430343628,
"rewards/margins": 1.1331461668014526,
"rewards/rejected": 0.44220608472824097,
"step": 25
},
{
"epoch": 0.44776119402985076,
"grad_norm": 34.36864819717641,
"learning_rate": 9.731890013043367e-07,
"logits/chosen": -2.177009105682373,
"logits/rejected": -2.1458096504211426,
"logps/chosen": -257.85015869140625,
"logps/rejected": -217.74462890625,
"loss": 0.5814,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 1.5730830430984497,
"rewards/margins": 1.5190694332122803,
"rewards/rejected": 0.054013729095458984,
"step": 30
},
{
"epoch": 0.5223880597014925,
"grad_norm": 40.73655696053411,
"learning_rate": 9.583202707897073e-07,
"logits/chosen": -2.2473671436309814,
"logits/rejected": -2.1966567039489746,
"logps/chosen": -247.3220672607422,
"logps/rejected": -202.4593963623047,
"loss": 0.5595,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.881485104560852,
"rewards/margins": 1.353430986404419,
"rewards/rejected": -0.47194600105285645,
"step": 35
},
{
"epoch": 0.5970149253731343,
"grad_norm": 41.80314492549501,
"learning_rate": 9.403534270080829e-07,
"logits/chosen": -2.27215838432312,
"logits/rejected": -2.2764623165130615,
"logps/chosen": -231.4678497314453,
"logps/rejected": -203.9759521484375,
"loss": 0.578,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.9200285077095032,
"rewards/margins": 1.0039114952087402,
"rewards/rejected": -0.0838831439614296,
"step": 40
},
{
"epoch": 0.6716417910447762,
"grad_norm": 38.467845430667744,
"learning_rate": 9.19409920658098e-07,
"logits/chosen": -2.338601589202881,
"logits/rejected": -2.3325324058532715,
"logps/chosen": -247.09738159179688,
"logps/rejected": -206.18026733398438,
"loss": 0.5433,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 1.363398551940918,
"rewards/margins": 1.4603192806243896,
"rewards/rejected": -0.09692087769508362,
"step": 45
},
{
"epoch": 0.746268656716418,
"grad_norm": 37.05742903664642,
"learning_rate": 8.956313238215823e-07,
"logits/chosen": -2.4232733249664307,
"logits/rejected": -2.413396120071411,
"logps/chosen": -252.5549774169922,
"logps/rejected": -190.77883911132812,
"loss": 0.6027,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 1.3205119371414185,
"rewards/margins": 1.3863776922225952,
"rewards/rejected": -0.06586578488349915,
"step": 50
},
{
"epoch": 0.746268656716418,
"eval_logits/chosen": -2.457545757293701,
"eval_logits/rejected": -2.435551404953003,
"eval_logps/chosen": -259.58868408203125,
"eval_logps/rejected": -201.051025390625,
"eval_loss": 0.5490748882293701,
"eval_rewards/accuracies": 0.8208333253860474,
"eval_rewards/chosen": 1.3802965879440308,
"eval_rewards/margins": 1.6785677671432495,
"eval_rewards/rejected": -0.29827114939689636,
"eval_runtime": 125.9419,
"eval_samples_per_second": 15.086,
"eval_steps_per_second": 0.238,
"step": 50
},
{
"epoch": 0.8208955223880597,
"grad_norm": 33.60802656137982,
"learning_rate": 8.691783729769873e-07,
"logits/chosen": -2.4649384021759033,
"logits/rejected": -2.4393675327301025,
"logps/chosen": -247.6337432861328,
"logps/rejected": -225.37429809570312,
"loss": 0.513,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.384879469871521,
"rewards/margins": 1.5727264881134033,
"rewards/rejected": -0.1878468245267868,
"step": 55
},
{
"epoch": 0.8955223880597015,
"grad_norm": 33.47722052756408,
"learning_rate": 8.402298824670029e-07,
"logits/chosen": -2.4343879222869873,
"logits/rejected": -2.429543972015381,
"logps/chosen": -243.7395782470703,
"logps/rejected": -216.7984161376953,
"loss": 0.5325,
"rewards/accuracies": 0.8125,
"rewards/chosen": 1.061714768409729,
"rewards/margins": 1.736096739768982,
"rewards/rejected": -0.6743819713592529,
"step": 60
},
{
"epoch": 0.9701492537313433,
"grad_norm": 30.472130365224405,
"learning_rate": 8.089815357650089e-07,
"logits/chosen": -2.3370931148529053,
"logits/rejected": -2.2993381023406982,
"logps/chosen": -250.01687622070312,
"logps/rejected": -205.3022003173828,
"loss": 0.5415,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 1.2970752716064453,
"rewards/margins": 1.5023232698440552,
"rewards/rejected": -0.2052478790283203,
"step": 65
},
{
"epoch": 1.044776119402985,
"grad_norm": 19.840952878286068,
"learning_rate": 7.756445627110522e-07,
"logits/chosen": -2.2309017181396484,
"logits/rejected": -2.2050414085388184,
"logps/chosen": -237.7880096435547,
"logps/rejected": -231.38265991210938,
"loss": 0.3696,
"rewards/accuracies": 0.8687499761581421,
"rewards/chosen": 1.7277215719223022,
"rewards/margins": 2.358109474182129,
"rewards/rejected": -0.6303879618644714,
"step": 70
},
{
"epoch": 1.1194029850746268,
"grad_norm": 21.240934428524795,
"learning_rate": 7.404443116588547e-07,
"logits/chosen": -2.2093091011047363,
"logits/rejected": -2.1994729042053223,
"logps/chosen": -260.18426513671875,
"logps/rejected": -221.51742553710938,
"loss": 0.2473,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.7431342601776123,
"rewards/margins": 2.91957950592041,
"rewards/rejected": -1.1764450073242188,
"step": 75
},
{
"epoch": 1.1940298507462686,
"grad_norm": 19.68722107584437,
"learning_rate": 7.036187261857288e-07,
"logits/chosen": -2.1707935333251953,
"logits/rejected": -2.151038408279419,
"logps/chosen": -219.67929077148438,
"logps/rejected": -219.01358032226562,
"loss": 0.2201,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.7701470851898193,
"rewards/margins": 3.296602964401245,
"rewards/rejected": -1.5264555215835571,
"step": 80
},
{
"epoch": 1.2686567164179103,
"grad_norm": 19.753765139786655,
"learning_rate": 6.654167366624008e-07,
"logits/chosen": -2.137498140335083,
"logits/rejected": -2.1242105960845947,
"logps/chosen": -235.6524658203125,
"logps/rejected": -222.9441680908203,
"loss": 0.2428,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 1.8704173564910889,
"rewards/margins": 3.3603546619415283,
"rewards/rejected": -1.4899370670318604,
"step": 85
},
{
"epoch": 1.3432835820895521,
"grad_norm": 18.35032830894322,
"learning_rate": 6.260965775552713e-07,
"logits/chosen": -2.0957112312316895,
"logits/rejected": -2.025146961212158,
"logps/chosen": -224.27377319335938,
"logps/rejected": -204.6040496826172,
"loss": 0.2536,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 2.086984395980835,
"rewards/margins": 3.093162775039673,
"rewards/rejected": -1.0061782598495483,
"step": 90
},
{
"epoch": 1.417910447761194,
"grad_norm": 22.277576113780217,
"learning_rate": 5.859240418356614e-07,
"logits/chosen": -2.0116684436798096,
"logits/rejected": -1.9373754262924194,
"logps/chosen": -225.5041046142578,
"logps/rejected": -224.0210723876953,
"loss": 0.2394,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.8472282886505127,
"rewards/margins": 3.6459250450134277,
"rewards/rejected": -1.7986968755722046,
"step": 95
},
{
"epoch": 1.4925373134328357,
"grad_norm": 21.864051938399246,
"learning_rate": 5.451706842957421e-07,
"logits/chosen": -1.985772728919983,
"logits/rejected": -1.9631462097167969,
"logps/chosen": -228.11532592773438,
"logps/rejected": -222.6101837158203,
"loss": 0.2795,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 1.6982160806655884,
"rewards/margins": 3.457064151763916,
"rewards/rejected": -1.758847951889038,
"step": 100
},
{
"epoch": 1.4925373134328357,
"eval_logits/chosen": -1.975551724433899,
"eval_logits/rejected": -1.910164713859558,
"eval_logps/chosen": -261.8016357421875,
"eval_logps/rejected": -213.1614227294922,
"eval_loss": 0.5111880302429199,
"eval_rewards/accuracies": 0.8416666388511658,
"eval_rewards/chosen": 1.159003496170044,
"eval_rewards/margins": 2.6683123111724854,
"eval_rewards/rejected": -1.509308934211731,
"eval_runtime": 126.7081,
"eval_samples_per_second": 14.995,
"eval_steps_per_second": 0.237,
"step": 100
},
{
"epoch": 1.5671641791044775,
"grad_norm": 24.045736817846212,
"learning_rate": 5.041119859162068e-07,
"logits/chosen": -1.96713387966156,
"logits/rejected": -1.9250026941299438,
"logps/chosen": -253.3628387451172,
"logps/rejected": -219.9677276611328,
"loss": 0.2536,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.8924213647842407,
"rewards/margins": 3.974386692047119,
"rewards/rejected": -2.081965446472168,
"step": 105
},
{
"epoch": 1.6417910447761193,
"grad_norm": 21.61477181672177,
"learning_rate": 4.630254916940423e-07,
"logits/chosen": -1.9149868488311768,
"logits/rejected": -1.8783435821533203,
"logps/chosen": -229.2848663330078,
"logps/rejected": -201.11834716796875,
"loss": 0.2682,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.406848669052124,
"rewards/margins": 3.400202989578247,
"rewards/rejected": -1.9933545589447021,
"step": 110
},
{
"epoch": 1.716417910447761,
"grad_norm": 21.07628352255977,
"learning_rate": 4.2218893451814e-07,
"logits/chosen": -1.9863373041152954,
"logits/rejected": -1.9536094665527344,
"logps/chosen": -241.6915740966797,
"logps/rejected": -234.1923828125,
"loss": 0.2425,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.814729928970337,
"rewards/margins": 3.653728485107422,
"rewards/rejected": -1.8389990329742432,
"step": 115
},
{
"epoch": 1.7910447761194028,
"grad_norm": 20.726924515407703,
"learning_rate": 3.8187835777481375e-07,
"logits/chosen": -2.0750255584716797,
"logits/rejected": -2.0614113807678223,
"logps/chosen": -259.0155029296875,
"logps/rejected": -212.9644012451172,
"loss": 0.2864,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.9931504726409912,
"rewards/margins": 3.505798816680908,
"rewards/rejected": -1.512648582458496,
"step": 120
},
{
"epoch": 1.8656716417910446,
"grad_norm": 20.134285007421816,
"learning_rate": 3.423662493738687e-07,
"logits/chosen": -2.130033493041992,
"logits/rejected": -2.0896992683410645,
"logps/chosen": -227.1628875732422,
"logps/rejected": -232.8570556640625,
"loss": 0.2575,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 1.4646759033203125,
"rewards/margins": 3.994136095046997,
"rewards/rejected": -2.5294601917266846,
"step": 125
},
{
"epoch": 1.9402985074626866,
"grad_norm": 18.272474363996036,
"learning_rate": 3.039196998086687e-07,
"logits/chosen": -2.158094882965088,
"logits/rejected": -2.131124973297119,
"logps/chosen": -219.0237274169922,
"logps/rejected": -218.6309051513672,
"loss": 0.2712,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.447300910949707,
"rewards/margins": 3.195319414138794,
"rewards/rejected": -1.748018503189087,
"step": 130
},
{
"epoch": 2.014925373134328,
"grad_norm": 15.864915551280724,
"learning_rate": 2.667985967011878e-07,
"logits/chosen": -2.1461024284362793,
"logits/rejected": -2.168231248855591,
"logps/chosen": -228.489501953125,
"logps/rejected": -218.523193359375,
"loss": 0.2636,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.7504692077636719,
"rewards/margins": 3.347080945968628,
"rewards/rejected": -1.596611738204956,
"step": 135
},
{
"epoch": 2.08955223880597,
"grad_norm": 20.70108938067205,
"learning_rate": 2.3125386803640183e-07,
"logits/chosen": -2.1393442153930664,
"logits/rejected": -2.13488507270813,
"logps/chosen": -227.1748046875,
"logps/rejected": -206.73635864257812,
"loss": 0.1687,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": 2.1466095447540283,
"rewards/margins": 3.6835296154022217,
"rewards/rejected": -1.5369203090667725,
"step": 140
},
{
"epoch": 2.1641791044776117,
"grad_norm": 15.3203590693748,
"learning_rate": 1.9752578596124952e-07,
"logits/chosen": -2.1212143898010254,
"logits/rejected": -2.108987808227539,
"logps/chosen": -229.7302703857422,
"logps/rejected": -227.28109741210938,
"loss": 0.1473,
"rewards/accuracies": 0.96875,
"rewards/chosen": 2.296015739440918,
"rewards/margins": 4.074695110321045,
"rewards/rejected": -1.7786792516708374,
"step": 145
},
{
"epoch": 2.2388059701492535,
"grad_norm": 23.44280872584024,
"learning_rate": 1.6584234261399532e-07,
"logits/chosen": -2.1407978534698486,
"logits/rejected": -2.152918815612793,
"logps/chosen": -229.5863800048828,
"logps/rejected": -212.45986938476562,
"loss": 0.1557,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 2.0357882976531982,
"rewards/margins": 4.068355560302734,
"rewards/rejected": -2.032567024230957,
"step": 150
},
{
"epoch": 2.2388059701492535,
"eval_logits/chosen": -2.1696274280548096,
"eval_logits/rejected": -2.1170217990875244,
"eval_logps/chosen": -259.6371765136719,
"eval_logps/rejected": -211.39309692382812,
"eval_loss": 0.5032579898834229,
"eval_rewards/accuracies": 0.8583333492279053,
"eval_rewards/chosen": 1.3754442930221558,
"eval_rewards/margins": 2.7079200744628906,
"eval_rewards/rejected": -1.3324757814407349,
"eval_runtime": 125.6191,
"eval_samples_per_second": 15.125,
"eval_steps_per_second": 0.239,
"step": 150
},
{
"epoch": 2.3134328358208958,
"grad_norm": 15.277790126219001,
"learning_rate": 1.3641770896292082e-07,
"logits/chosen": -2.171910524368286,
"logits/rejected": -2.1707565784454346,
"logps/chosen": -250.38583374023438,
"logps/rejected": -238.89022827148438,
"loss": 0.1381,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 2.417271137237549,
"rewards/margins": 4.274667739868164,
"rewards/rejected": -1.8573967218399048,
"step": 155
},
{
"epoch": 2.388059701492537,
"grad_norm": 14.868033858341619,
"learning_rate": 1.0945078707215221e-07,
"logits/chosen": -2.1999287605285645,
"logits/rejected": -2.1457407474517822,
"logps/chosen": -241.4682159423828,
"logps/rejected": -240.2734832763672,
"loss": 0.1412,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 2.0974485874176025,
"rewards/margins": 4.184462547302246,
"rewards/rejected": -2.0870139598846436,
"step": 160
},
{
"epoch": 2.4626865671641793,
"grad_norm": 23.298138909290813,
"learning_rate": 8.512386558088919e-08,
"logits/chosen": -2.1854496002197266,
"logits/rejected": -2.173795223236084,
"logps/chosen": -261.7022399902344,
"logps/rejected": -242.4682159423828,
"loss": 0.1478,
"rewards/accuracies": 0.9375,
"rewards/chosen": 2.057788133621216,
"rewards/margins": 4.235553741455078,
"rewards/rejected": -2.1777658462524414,
"step": 165
},
{
"epoch": 2.5373134328358207,
"grad_norm": 22.03814748434263,
"learning_rate": 6.360138748461013e-08,
"logits/chosen": -2.20246958732605,
"logits/rejected": -2.161410093307495,
"logps/chosen": -251.98989868164062,
"logps/rejected": -254.8340301513672,
"loss": 0.1586,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.9426151514053345,
"rewards/margins": 4.301736831665039,
"rewards/rejected": -2.359121799468994,
"step": 170
},
{
"epoch": 2.611940298507463,
"grad_norm": 19.677590731179777,
"learning_rate": 4.5028838547699346e-08,
"logits/chosen": -2.1607601642608643,
"logits/rejected": -2.127946376800537,
"logps/chosen": -221.7189178466797,
"logps/rejected": -245.9696502685547,
"loss": 0.1676,
"rewards/accuracies": 0.981249988079071,
"rewards/chosen": 1.624866247177124,
"rewards/margins": 4.7091498374938965,
"rewards/rejected": -3.0842833518981934,
"step": 175
},
{
"epoch": 2.6865671641791042,
"grad_norm": 22.8450636545283,
"learning_rate": 2.9531763861505964e-08,
"logits/chosen": -2.1652731895446777,
"logits/rejected": -2.100562334060669,
"logps/chosen": -219.1479949951172,
"logps/rejected": -211.0758056640625,
"loss": 0.1661,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.2030365467071533,
"rewards/margins": 3.858271837234497,
"rewards/rejected": -2.6552352905273438,
"step": 180
},
{
"epoch": 2.7611940298507465,
"grad_norm": 13.900949161141034,
"learning_rate": 1.7214919195619125e-08,
"logits/chosen": -2.158569574356079,
"logits/rejected": -2.139193058013916,
"logps/chosen": -241.50634765625,
"logps/rejected": -225.2047882080078,
"loss": 0.1367,
"rewards/accuracies": 0.96875,
"rewards/chosen": 1.889639139175415,
"rewards/margins": 4.425418853759766,
"rewards/rejected": -2.5357797145843506,
"step": 185
},
{
"epoch": 2.835820895522388,
"grad_norm": 22.822822022841606,
"learning_rate": 8.161562878982398e-09,
"logits/chosen": -2.134626865386963,
"logits/rejected": -2.092273235321045,
"logps/chosen": -233.07662963867188,
"logps/rejected": -203.2852783203125,
"loss": 0.1562,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.5627435445785522,
"rewards/margins": 4.164488315582275,
"rewards/rejected": -2.601745367050171,
"step": 190
},
{
"epoch": 2.91044776119403,
"grad_norm": 13.470995740623104,
"learning_rate": 2.432892997526026e-09,
"logits/chosen": -2.160935878753662,
"logits/rejected": -2.1190755367279053,
"logps/chosen": -251.3184814453125,
"logps/rejected": -268.527587890625,
"loss": 0.1325,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.8667724132537842,
"rewards/margins": 4.689689636230469,
"rewards/rejected": -2.822917938232422,
"step": 195
},
{
"epoch": 2.9850746268656714,
"grad_norm": 14.133379803838286,
"learning_rate": 6.763371270035457e-11,
"logits/chosen": -2.160177707672119,
"logits/rejected": -2.123290777206421,
"logps/chosen": -251.46255493164062,
"logps/rejected": -241.31295776367188,
"loss": 0.1338,
"rewards/accuracies": 0.96875,
"rewards/chosen": 1.7690976858139038,
"rewards/margins": 4.538566589355469,
"rewards/rejected": -2.7694690227508545,
"step": 200
},
{
"epoch": 2.9850746268656714,
"eval_logits/chosen": -2.158780097961426,
"eval_logits/rejected": -2.1046879291534424,
"eval_logps/chosen": -263.8291320800781,
"eval_logps/rejected": -217.83078002929688,
"eval_loss": 0.4983350932598114,
"eval_rewards/accuracies": 0.8500000238418579,
"eval_rewards/chosen": 0.9562520384788513,
"eval_rewards/margins": 2.932494878768921,
"eval_rewards/rejected": -1.9762426614761353,
"eval_runtime": 125.7438,
"eval_samples_per_second": 15.11,
"eval_steps_per_second": 0.239,
"step": 200
},
{
"epoch": 3.0,
"step": 201,
"total_flos": 2369906314051584.0,
"train_loss": 0.33095499672996465,
"train_runtime": 7348.1824,
"train_samples_per_second": 6.979,
"train_steps_per_second": 0.027
}
],
"logging_steps": 5,
"max_steps": 201,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2369906314051584.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}