{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.6632016632016633,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04158004158004158,
"grad_norm": 57.63412014296559,
"learning_rate": 5e-07,
"logits/chosen": -2.7145180702209473,
"logits/rejected": -2.7382097244262695,
"logps/chosen": -265.24481201171875,
"logps/rejected": -201.1150665283203,
"loss": 0.6915,
"rewards/accuracies": 0.375,
"rewards/chosen": 0.01524554193019867,
"rewards/margins": 0.008573563769459724,
"rewards/rejected": 0.006671978626400232,
"step": 5
},
{
"epoch": 0.08316008316008316,
"grad_norm": 52.91443405505758,
"learning_rate": 1e-06,
"logits/chosen": -2.690777063369751,
"logits/rejected": -2.689495325088501,
"logps/chosen": -259.32598876953125,
"logps/rejected": -236.3219757080078,
"loss": 0.6616,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": 0.4881957471370697,
"rewards/margins": 0.10135326534509659,
"rewards/rejected": 0.3868425786495209,
"step": 10
},
{
"epoch": 0.12474012474012475,
"grad_norm": 53.28329333963272,
"learning_rate": 9.994965332706572e-07,
"logits/chosen": -2.5831141471862793,
"logits/rejected": -2.580876350402832,
"logps/chosen": -237.8153839111328,
"logps/rejected": -192.7242889404297,
"loss": 0.6359,
"rewards/accuracies": 0.6875,
"rewards/chosen": 1.1336857080459595,
"rewards/margins": 0.47578057646751404,
"rewards/rejected": 0.6579049825668335,
"step": 15
},
{
"epoch": 0.16632016632016633,
"grad_norm": 59.51771261732688,
"learning_rate": 9.979871469976195e-07,
"logits/chosen": -2.5097970962524414,
"logits/rejected": -2.491774797439575,
"logps/chosen": -241.24349975585938,
"logps/rejected": -206.7909698486328,
"loss": 0.6385,
"rewards/accuracies": 0.6875,
"rewards/chosen": 1.5591249465942383,
"rewards/margins": 0.980699896812439,
"rewards/rejected": 0.5784249305725098,
"step": 20
},
{
"epoch": 0.2079002079002079,
"grad_norm": 43.932979300169734,
"learning_rate": 9.954748808839674e-07,
"logits/chosen": -2.428896427154541,
"logits/rejected": -2.426640510559082,
"logps/chosen": -260.3008117675781,
"logps/rejected": -223.4193878173828,
"loss": 0.6287,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": 1.4255732297897339,
"rewards/margins": 0.9219636917114258,
"rewards/rejected": 0.5036097764968872,
"step": 25
},
{
"epoch": 0.2494802494802495,
"grad_norm": 51.9520224365334,
"learning_rate": 9.919647942993147e-07,
"logits/chosen": -2.270231008529663,
"logits/rejected": -2.2373533248901367,
"logps/chosen": -239.1925811767578,
"logps/rejected": -199.03244018554688,
"loss": 0.6013,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 1.0962295532226562,
"rewards/margins": 0.8090855479240417,
"rewards/rejected": 0.28714415431022644,
"step": 30
},
{
"epoch": 0.2910602910602911,
"grad_norm": 42.07074967218912,
"learning_rate": 9.874639560909118e-07,
"logits/chosen": -2.2837703227996826,
"logits/rejected": -2.2773120403289795,
"logps/chosen": -245.8435516357422,
"logps/rejected": -218.3318328857422,
"loss": 0.5782,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": 0.842710018157959,
"rewards/margins": 0.7606425285339355,
"rewards/rejected": 0.08206750452518463,
"step": 35
},
{
"epoch": 0.33264033264033266,
"grad_norm": 40.52902304303911,
"learning_rate": 9.819814303479267e-07,
"logits/chosen": -2.3677680492401123,
"logits/rejected": -2.3257031440734863,
"logps/chosen": -255.2162322998047,
"logps/rejected": -206.1927490234375,
"loss": 0.5732,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.774177610874176,
"rewards/margins": 0.9143842458724976,
"rewards/rejected": -0.14020657539367676,
"step": 40
},
{
"epoch": 0.37422037422037424,
"grad_norm": 42.34370895122762,
"learning_rate": 9.755282581475767e-07,
"logits/chosen": -2.39888596534729,
"logits/rejected": -2.3836829662323,
"logps/chosen": -237.30508422851562,
"logps/rejected": -216.06588745117188,
"loss": 0.5958,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.7707290649414062,
"rewards/margins": 1.1094779968261719,
"rewards/rejected": -0.3387489914894104,
"step": 45
},
{
"epoch": 0.4158004158004158,
"grad_norm": 46.42422960165206,
"learning_rate": 9.681174353198686e-07,
"logits/chosen": -2.3984861373901367,
"logits/rejected": -2.3900020122528076,
"logps/chosen": -233.5660400390625,
"logps/rejected": -216.6641082763672,
"loss": 0.635,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.9606113433837891,
"rewards/margins": 1.124942660331726,
"rewards/rejected": -0.16433146595954895,
"step": 50
},
{
"epoch": 0.4158004158004158,
"eval_logits/chosen": -2.3899078369140625,
"eval_logits/rejected": -2.3907740116119385,
"eval_logps/chosen": -237.2779541015625,
"eval_logps/rejected": -211.79153442382812,
"eval_loss": 0.598716676235199,
"eval_rewards/accuracies": 0.7013888955116272,
"eval_rewards/chosen": 0.8451206684112549,
"eval_rewards/margins": 0.8839507102966309,
"eval_rewards/rejected": -0.038830023258924484,
"eval_runtime": 229.0134,
"eval_samples_per_second": 14.934,
"eval_steps_per_second": 0.236,
"step": 50
},
{
"epoch": 0.4573804573804574,
"grad_norm": 38.39211344385862,
"learning_rate": 9.597638862757253e-07,
"logits/chosen": -2.456921339035034,
"logits/rejected": -2.4394099712371826,
"logps/chosen": -237.6243133544922,
"logps/rejected": -233.6071014404297,
"loss": 0.6009,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.8705752491950989,
"rewards/margins": 1.126349687576294,
"rewards/rejected": -0.2557744085788727,
"step": 55
},
{
"epoch": 0.498960498960499,
"grad_norm": 37.3024427630972,
"learning_rate": 9.504844339512094e-07,
"logits/chosen": -2.521387815475464,
"logits/rejected": -2.4951186180114746,
"logps/chosen": -266.1732177734375,
"logps/rejected": -242.85897827148438,
"loss": 0.554,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.9916666150093079,
"rewards/margins": 1.3694941997528076,
"rewards/rejected": -0.3778277039527893,
"step": 60
},
{
"epoch": 0.5405405405405406,
"grad_norm": 34.5258394053866,
"learning_rate": 9.402977659283689e-07,
"logits/chosen": -2.4310925006866455,
"logits/rejected": -2.4042844772338867,
"logps/chosen": -223.9085693359375,
"logps/rejected": -203.2917022705078,
"loss": 0.5364,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": 0.5567647814750671,
"rewards/margins": 0.8208805918693542,
"rewards/rejected": -0.26411566138267517,
"step": 65
},
{
"epoch": 0.5821205821205822,
"grad_norm": 37.746071231755344,
"learning_rate": 9.29224396800933e-07,
"logits/chosen": -2.3321871757507324,
"logits/rejected": -2.2954535484313965,
"logps/chosen": -243.26156616210938,
"logps/rejected": -226.8199005126953,
"loss": 0.5423,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 0.38314875960350037,
"rewards/margins": 1.1013362407684326,
"rewards/rejected": -0.7181875109672546,
"step": 70
},
{
"epoch": 0.6237006237006237,
"grad_norm": 57.07804647920337,
"learning_rate": 9.172866268606513e-07,
"logits/chosen": -2.26948881149292,
"logits/rejected": -2.2403252124786377,
"logps/chosen": -251.2090606689453,
"logps/rejected": -240.20169067382812,
"loss": 0.5844,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 0.9941379427909851,
"rewards/margins": 1.1448538303375244,
"rewards/rejected": -0.1507158726453781,
"step": 75
},
{
"epoch": 0.6652806652806653,
"grad_norm": 36.48822907311496,
"learning_rate": 9.045084971874737e-07,
"logits/chosen": -2.212411403656006,
"logits/rejected": -2.1935043334960938,
"logps/chosen": -272.8021545410156,
"logps/rejected": -241.2564239501953,
"loss": 0.5453,
"rewards/accuracies": 0.78125,
"rewards/chosen": 1.1616096496582031,
"rewards/margins": 1.850441336631775,
"rewards/rejected": -0.6888317465782166,
"step": 80
},
{
"epoch": 0.7068607068607069,
"grad_norm": 38.5389032336389,
"learning_rate": 8.909157412340149e-07,
"logits/chosen": -2.180285930633545,
"logits/rejected": -2.126051902770996,
"logps/chosen": -253.0643310546875,
"logps/rejected": -237.400390625,
"loss": 0.5269,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.9046308398246765,
"rewards/margins": 1.5041005611419678,
"rewards/rejected": -0.599469780921936,
"step": 85
},
{
"epoch": 0.7484407484407485,
"grad_norm": 31.08445554774815,
"learning_rate": 8.765357330018055e-07,
"logits/chosen": -2.146207332611084,
"logits/rejected": -2.1198434829711914,
"logps/chosen": -246.0504608154297,
"logps/rejected": -242.79714965820312,
"loss": 0.5741,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 0.4694552421569824,
"rewards/margins": 1.537427544593811,
"rewards/rejected": -1.0679724216461182,
"step": 90
},
{
"epoch": 0.7900207900207901,
"grad_norm": 32.25150255167185,
"learning_rate": 8.613974319136957e-07,
"logits/chosen": -2.114973306655884,
"logits/rejected": -2.0754525661468506,
"logps/chosen": -249.64132690429688,
"logps/rejected": -233.89352416992188,
"loss": 0.4787,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.09324260801076889,
"rewards/margins": 1.446690320968628,
"rewards/rejected": -1.5399329662322998,
"step": 95
},
{
"epoch": 0.8316008316008316,
"grad_norm": 34.22313791470385,
"learning_rate": 8.455313244934324e-07,
"logits/chosen": -2.027057409286499,
"logits/rejected": -1.9819841384887695,
"logps/chosen": -250.86947631835938,
"logps/rejected": -241.2573699951172,
"loss": 0.4933,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": 0.10764148086309433,
"rewards/margins": 1.9850435256958008,
"rewards/rejected": -1.8774023056030273,
"step": 100
},
{
"epoch": 0.8316008316008316,
"eval_logits/chosen": -1.9530320167541504,
"eval_logits/rejected": -1.9127507209777832,
"eval_logps/chosen": -247.99232482910156,
"eval_logps/rejected": -229.55447387695312,
"eval_loss": 0.5284922122955322,
"eval_rewards/accuracies": 0.7523148059844971,
"eval_rewards/chosen": -0.22631923854351044,
"eval_rewards/margins": 1.5888041257858276,
"eval_rewards/rejected": -1.815123438835144,
"eval_runtime": 227.9451,
"eval_samples_per_second": 15.004,
"eval_steps_per_second": 0.237,
"step": 100
},
{
"epoch": 0.8731808731808732,
"grad_norm": 43.67585736596051,
"learning_rate": 8.289693629698563e-07,
"logits/chosen": -1.952876329421997,
"logits/rejected": -1.9095561504364014,
"logps/chosen": -250.580322265625,
"logps/rejected": -255.4891357421875,
"loss": 0.5324,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.0729295164346695,
"rewards/margins": 1.8105415105819702,
"rewards/rejected": -1.8834712505340576,
"step": 105
},
{
"epoch": 0.9147609147609148,
"grad_norm": 39.1719273308311,
"learning_rate": 8.117449009293668e-07,
"logits/chosen": -1.8559868335723877,
"logits/rejected": -1.799195647239685,
"logps/chosen": -248.75961303710938,
"logps/rejected": -232.480712890625,
"loss": 0.4943,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.30641165375709534,
"rewards/margins": 1.7232319116592407,
"rewards/rejected": -1.4168202877044678,
"step": 110
},
{
"epoch": 0.9563409563409564,
"grad_norm": 30.704674411877335,
"learning_rate": 7.938926261462365e-07,
"logits/chosen": -1.7355577945709229,
"logits/rejected": -1.6728878021240234,
"logps/chosen": -255.2344970703125,
"logps/rejected": -221.1670684814453,
"loss": 0.4842,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.09199291467666626,
"rewards/margins": 1.9209611415863037,
"rewards/rejected": -1.8289684057235718,
"step": 115
},
{
"epoch": 0.997920997920998,
"grad_norm": 41.39067500024576,
"learning_rate": 7.754484907260512e-07,
"logits/chosen": -1.7632324695587158,
"logits/rejected": -1.6800066232681274,
"logps/chosen": -236.336669921875,
"logps/rejected": -248.4853515625,
"loss": 0.4959,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.09794483333826065,
"rewards/margins": 2.035454273223877,
"rewards/rejected": -1.937509536743164,
"step": 120
},
{
"epoch": 1.0395010395010396,
"grad_norm": 18.479902808810362,
"learning_rate": 7.564496387029531e-07,
"logits/chosen": -1.835211157798767,
"logits/rejected": -1.744879126548767,
"logps/chosen": -246.5315704345703,
"logps/rejected": -238.4007568359375,
"loss": 0.2765,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.2002815008163452,
"rewards/margins": 3.028635025024414,
"rewards/rejected": -1.8283536434173584,
"step": 125
},
{
"epoch": 1.0810810810810811,
"grad_norm": 20.239636952023396,
"learning_rate": 7.369343312364993e-07,
"logits/chosen": -1.8902709484100342,
"logits/rejected": -1.841406226158142,
"logps/chosen": -249.97555541992188,
"logps/rejected": -225.74893188476562,
"loss": 0.2499,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.3934506177902222,
"rewards/margins": 3.1044392585754395,
"rewards/rejected": -1.7109886407852173,
"step": 130
},
{
"epoch": 1.1226611226611227,
"grad_norm": 27.020533908406442,
"learning_rate": 7.16941869558779e-07,
"logits/chosen": -2.057455062866211,
"logits/rejected": -2.003194570541382,
"logps/chosen": -232.67001342773438,
"logps/rejected": -234.6235809326172,
"loss": 0.2301,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.6992406845092773,
"rewards/margins": 3.422689914703369,
"rewards/rejected": -1.7234487533569336,
"step": 135
},
{
"epoch": 1.1642411642411643,
"grad_norm": 22.833931438432327,
"learning_rate": 6.965125158269618e-07,
"logits/chosen": -2.0912320613861084,
"logits/rejected": -2.0458855628967285,
"logps/chosen": -244.6804656982422,
"logps/rejected": -225.2838897705078,
"loss": 0.228,
"rewards/accuracies": 0.90625,
"rewards/chosen": 2.007575511932373,
"rewards/margins": 3.5606026649475098,
"rewards/rejected": -1.5530271530151367,
"step": 140
},
{
"epoch": 1.2058212058212059,
"grad_norm": 19.05547442329841,
"learning_rate": 6.756874120406714e-07,
"logits/chosen": -2.0981993675231934,
"logits/rejected": -2.0623602867126465,
"logps/chosen": -231.7192840576172,
"logps/rejected": -239.6023712158203,
"loss": 0.2467,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.801042914390564,
"rewards/margins": 3.514739513397217,
"rewards/rejected": -1.713696837425232,
"step": 145
},
{
"epoch": 1.2474012474012475,
"grad_norm": 17.479166649922572,
"learning_rate": 6.545084971874736e-07,
"logits/chosen": -2.1294615268707275,
"logits/rejected": -2.0636110305786133,
"logps/chosen": -238.5365447998047,
"logps/rejected": -244.5021514892578,
"loss": 0.2495,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.3247355222702026,
"rewards/margins": 3.6321330070495605,
"rewards/rejected": -2.3073973655700684,
"step": 150
},
{
"epoch": 1.2474012474012475,
"eval_logits/chosen": -2.1232447624206543,
"eval_logits/rejected": -2.0982720851898193,
"eval_logps/chosen": -240.15695190429688,
"eval_logps/rejected": -225.6040802001953,
"eval_loss": 0.5427103042602539,
"eval_rewards/accuracies": 0.7592592835426331,
"eval_rewards/chosen": 0.5572175979614258,
"eval_rewards/margins": 1.9773017168045044,
"eval_rewards/rejected": -1.4200841188430786,
"eval_runtime": 227.6445,
"eval_samples_per_second": 15.023,
"eval_steps_per_second": 0.237,
"step": 150
},
{
"epoch": 1.288981288981289,
"grad_norm": 23.820329780522155,
"learning_rate": 6.330184227833375e-07,
"logits/chosen": -2.1582248210906982,
"logits/rejected": -2.1403584480285645,
"logps/chosen": -229.8828582763672,
"logps/rejected": -232.87924194335938,
"loss": 0.2962,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.5254548788070679,
"rewards/margins": 3.3456084728240967,
"rewards/rejected": -1.8201534748077393,
"step": 155
},
{
"epoch": 1.3305613305613306,
"grad_norm": 17.670853805471324,
"learning_rate": 6.112604669781572e-07,
"logits/chosen": -2.1896843910217285,
"logits/rejected": -2.151392698287964,
"logps/chosen": -246.44552612304688,
"logps/rejected": -224.73641967773438,
"loss": 0.2666,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.2775523662567139,
"rewards/margins": 3.355015516281128,
"rewards/rejected": -2.077463150024414,
"step": 160
},
{
"epoch": 1.3721413721413722,
"grad_norm": 19.884309338358804,
"learning_rate": 5.892784473993183e-07,
"logits/chosen": -2.1978659629821777,
"logits/rejected": -2.139770984649658,
"logps/chosen": -244.56283569335938,
"logps/rejected": -235.612060546875,
"loss": 0.2784,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": 1.9508459568023682,
"rewards/margins": 3.806994676589966,
"rewards/rejected": -1.8561484813690186,
"step": 165
},
{
"epoch": 1.4137214137214138,
"grad_norm": 21.536221614158325,
"learning_rate": 5.671166329088277e-07,
"logits/chosen": -2.159837245941162,
"logits/rejected": -2.1259591579437256,
"logps/chosen": -235.0163116455078,
"logps/rejected": -240.001953125,
"loss": 0.2998,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 1.634421944618225,
"rewards/margins": 3.504880428314209,
"rewards/rejected": -1.870458960533142,
"step": 170
},
{
"epoch": 1.4553014553014554,
"grad_norm": 19.683343698848628,
"learning_rate": 5.448196544517167e-07,
"logits/chosen": -2.1057746410369873,
"logits/rejected": -2.0693142414093018,
"logps/chosen": -242.95492553710938,
"logps/rejected": -222.35372924804688,
"loss": 0.2881,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.408005952835083,
"rewards/margins": 3.477801561355591,
"rewards/rejected": -2.069795846939087,
"step": 175
},
{
"epoch": 1.496881496881497,
"grad_norm": 23.26853368506061,
"learning_rate": 5.224324151752575e-07,
"logits/chosen": -2.0776679515838623,
"logits/rejected": -2.0356287956237793,
"logps/chosen": -252.8184356689453,
"logps/rejected": -263.45013427734375,
"loss": 0.2876,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.3347493410110474,
"rewards/margins": 3.841991901397705,
"rewards/rejected": -2.5072426795959473,
"step": 180
},
{
"epoch": 1.5384615384615383,
"grad_norm": 19.309579616953823,
"learning_rate": 5e-07,
"logits/chosen": -2.0446431636810303,
"logits/rejected": -1.985939621925354,
"logps/chosen": -235.72598266601562,
"logps/rejected": -258.97344970703125,
"loss": 0.2758,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.3530393838882446,
"rewards/margins": 3.8046765327453613,
"rewards/rejected": -2.451636791229248,
"step": 185
},
{
"epoch": 1.5800415800415801,
"grad_norm": 18.625040552548324,
"learning_rate": 4.775675848247427e-07,
"logits/chosen": -2.0644099712371826,
"logits/rejected": -2.0216641426086426,
"logps/chosen": -241.5558624267578,
"logps/rejected": -263.6393737792969,
"loss": 0.292,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.0546023845672607,
"rewards/margins": 3.451650619506836,
"rewards/rejected": -2.397047996520996,
"step": 190
},
{
"epoch": 1.6216216216216215,
"grad_norm": 25.624591590300348,
"learning_rate": 4.5518034554828327e-07,
"logits/chosen": -2.0048115253448486,
"logits/rejected": -1.9765331745147705,
"logps/chosen": -227.64968872070312,
"logps/rejected": -238.43795776367188,
"loss": 0.2831,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.2037407159805298,
"rewards/margins": 3.231950044631958,
"rewards/rejected": -2.0282092094421387,
"step": 195
},
{
"epoch": 1.6632016632016633,
"grad_norm": 18.88665153638521,
"learning_rate": 4.328833670911724e-07,
"logits/chosen": -2.0011658668518066,
"logits/rejected": -1.9661411046981812,
"logps/chosen": -240.28115844726562,
"logps/rejected": -233.8397674560547,
"loss": 0.2753,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.5664188861846924,
"rewards/margins": 3.7791213989257812,
"rewards/rejected": -2.212702512741089,
"step": 200
},
{
"epoch": 1.6632016632016633,
"eval_logits/chosen": -2.0067989826202393,
"eval_logits/rejected": -1.9752123355865479,
"eval_logps/chosen": -239.95291137695312,
"eval_logps/rejected": -228.13818359375,
"eval_loss": 0.5260343551635742,
"eval_rewards/accuracies": 0.7870370149612427,
"eval_rewards/chosen": 0.5776218175888062,
"eval_rewards/margins": 2.2511181831359863,
"eval_rewards/rejected": -1.6734964847564697,
"eval_runtime": 227.8787,
"eval_samples_per_second": 15.008,
"eval_steps_per_second": 0.237,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 360,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2358113407598592.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}