{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.8691588785046729,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04672897196261682,
"grad_norm": 61.970988574529464,
"learning_rate": 5e-07,
"logits/chosen": -2.7293832302093506,
"logits/rejected": -2.7098002433776855,
"logps/chosen": -282.619384765625,
"logps/rejected": -220.626708984375,
"loss": 0.6898,
"rewards/accuracies": 0.34375,
"rewards/chosen": 0.016255810856819153,
"rewards/margins": 0.00715771596878767,
"rewards/rejected": 0.009098095819354057,
"step": 5
},
{
"epoch": 0.09345794392523364,
"grad_norm": 59.149513121461624,
"learning_rate": 1e-06,
"logits/chosen": -2.6811907291412354,
"logits/rejected": -2.6537957191467285,
"logps/chosen": -256.4613037109375,
"logps/rejected": -214.9097442626953,
"loss": 0.6465,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.46442437171936035,
"rewards/margins": 0.15177568793296814,
"rewards/rejected": 0.3126486837863922,
"step": 10
},
{
"epoch": 0.14018691588785046,
"grad_norm": 43.868507115118234,
"learning_rate": 9.993623730611148e-07,
"logits/chosen": -2.4990592002868652,
"logits/rejected": -2.4942288398742676,
"logps/chosen": -252.8370361328125,
"logps/rejected": -212.9906768798828,
"loss": 0.6365,
"rewards/accuracies": 0.65625,
"rewards/chosen": 1.3768469095230103,
"rewards/margins": 0.5569905042648315,
"rewards/rejected": 0.8198563456535339,
"step": 15
},
{
"epoch": 0.18691588785046728,
"grad_norm": 40.58644668089117,
"learning_rate": 9.97451118516912e-07,
"logits/chosen": -2.357009172439575,
"logits/rejected": -2.3200223445892334,
"logps/chosen": -245.38623046875,
"logps/rejected": -190.89620971679688,
"loss": 0.6273,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": 1.5086402893066406,
"rewards/margins": 0.8777653574943542,
"rewards/rejected": 0.6308748722076416,
"step": 20
},
{
"epoch": 0.2336448598130841,
"grad_norm": 46.4139582288858,
"learning_rate": 9.94271111036929e-07,
"logits/chosen": -2.2126636505126953,
"logits/rejected": -2.2065536975860596,
"logps/chosen": -242.53775024414062,
"logps/rejected": -225.9445343017578,
"loss": 0.6348,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 1.3302034139633179,
"rewards/margins": 0.9245456457138062,
"rewards/rejected": 0.4056577682495117,
"step": 25
},
{
"epoch": 0.2803738317757009,
"grad_norm": 64.74245067517272,
"learning_rate": 9.898304612549066e-07,
"logits/chosen": -2.183245897293091,
"logits/rejected": -2.1688549518585205,
"logps/chosen": -233.8648223876953,
"logps/rejected": -205.1978759765625,
"loss": 0.5938,
"rewards/accuracies": 0.71875,
"rewards/chosen": 1.1376529932022095,
"rewards/margins": 0.8598777651786804,
"rewards/rejected": 0.27777519822120667,
"step": 30
},
{
"epoch": 0.32710280373831774,
"grad_norm": 51.84730256605764,
"learning_rate": 9.841404950825536e-07,
"logits/chosen": -2.227497100830078,
"logits/rejected": -2.1977126598358154,
"logps/chosen": -237.3373565673828,
"logps/rejected": -212.15512084960938,
"loss": 0.5785,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": 1.2570827007293701,
"rewards/margins": 0.9708169102668762,
"rewards/rejected": 0.2862659692764282,
"step": 35
},
{
"epoch": 0.37383177570093457,
"grad_norm": 40.60340275031408,
"learning_rate": 9.77215724822721e-07,
"logits/chosen": -2.243427276611328,
"logits/rejected": -2.2088351249694824,
"logps/chosen": -248.1513671875,
"logps/rejected": -229.47531127929688,
"loss": 0.5829,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 0.9161311984062195,
"rewards/margins": 1.0557386875152588,
"rewards/rejected": -0.13960735499858856,
"step": 40
},
{
"epoch": 0.4205607476635514,
"grad_norm": 34.21102418831377,
"learning_rate": 9.69073812155662e-07,
"logits/chosen": -2.2237961292266846,
"logits/rejected": -2.2018837928771973,
"logps/chosen": -269.6021423339844,
"logps/rejected": -224.58645629882812,
"loss": 0.5896,
"rewards/accuracies": 0.71875,
"rewards/chosen": 1.0438940525054932,
"rewards/margins": 0.9452205896377563,
"rewards/rejected": 0.09867370873689651,
"step": 45
},
{
"epoch": 0.4672897196261682,
"grad_norm": 42.29092168681447,
"learning_rate": 9.597355230927788e-07,
"logits/chosen": -2.0892410278320312,
"logits/rejected": -2.0624351501464844,
"logps/chosen": -240.1529083251953,
"logps/rejected": -198.04843139648438,
"loss": 0.5529,
"rewards/accuracies": 0.78125,
"rewards/chosen": 1.232414722442627,
"rewards/margins": 1.3885271549224854,
"rewards/rejected": -0.156112402677536,
"step": 50
},
{
"epoch": 0.4672897196261682,
"eval_logits/chosen": -2.0593109130859375,
"eval_logits/rejected": -2.02968692779541,
"eval_logps/chosen": -239.67454528808594,
"eval_logps/rejected": -216.6856689453125,
"eval_loss": 0.5947180390357971,
"eval_rewards/accuracies": 0.7317708134651184,
"eval_rewards/chosen": 0.8738771080970764,
"eval_rewards/margins": 1.1332703828811646,
"eval_rewards/rejected": -0.2593933641910553,
"eval_runtime": 202.7096,
"eval_samples_per_second": 14.997,
"eval_steps_per_second": 0.237,
"step": 50
},
{
"epoch": 0.514018691588785,
"grad_norm": 35.93324426868974,
"learning_rate": 9.4922467501275e-07,
"logits/chosen": -2.0398037433624268,
"logits/rejected": -2.0083518028259277,
"logps/chosen": -261.76177978515625,
"logps/rejected": -210.04684448242188,
"loss": 0.5467,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.9814583659172058,
"rewards/margins": 1.524784803390503,
"rewards/rejected": -0.5433263778686523,
"step": 55
},
{
"epoch": 0.5607476635514018,
"grad_norm": 50.252777089157334,
"learning_rate": 9.375680759151206e-07,
"logits/chosen": -2.092087984085083,
"logits/rejected": -2.098419427871704,
"logps/chosen": -252.8424530029297,
"logps/rejected": -214.93258666992188,
"loss": 0.5409,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": 0.7585053443908691,
"rewards/margins": 1.2850215435028076,
"rewards/rejected": -0.5265161395072937,
"step": 60
},
{
"epoch": 0.6074766355140186,
"grad_norm": 43.0211288247323,
"learning_rate": 9.247954560462927e-07,
"logits/chosen": -2.0893867015838623,
"logits/rejected": -2.0975987911224365,
"logps/chosen": -241.7879180908203,
"logps/rejected": -239.7193145751953,
"loss": 0.5337,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.8641234636306763,
"rewards/margins": 1.493786096572876,
"rewards/rejected": -0.6296626329421997,
"step": 65
},
{
"epoch": 0.6542056074766355,
"grad_norm": 38.40101091003172,
"learning_rate": 9.109393920723001e-07,
"logits/chosen": -2.0060245990753174,
"logits/rejected": -1.9721952676773071,
"logps/chosen": -256.7455139160156,
"logps/rejected": -214.79403686523438,
"loss": 0.533,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.6738287210464478,
"rewards/margins": 1.2377485036849976,
"rewards/rejected": -0.563919723033905,
"step": 70
},
{
"epoch": 0.7009345794392523,
"grad_norm": 40.40382308924294,
"learning_rate": 8.960352239917699e-07,
"logits/chosen": -1.9107002019882202,
"logits/rejected": -1.8710416555404663,
"logps/chosen": -246.01028442382812,
"logps/rejected": -224.3345947265625,
"loss": 0.5452,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.6804067492485046,
"rewards/margins": 1.3526126146316528,
"rewards/rejected": -0.6722058653831482,
"step": 75
},
{
"epoch": 0.7476635514018691,
"grad_norm": 37.2632538141041,
"learning_rate": 8.801209650009814e-07,
"logits/chosen": -1.8342845439910889,
"logits/rejected": -1.7722011804580688,
"logps/chosen": -238.6627960205078,
"logps/rejected": -204.77720642089844,
"loss": 0.4954,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": 0.8959934115409851,
"rewards/margins": 1.759472131729126,
"rewards/rejected": -0.8634785413742065,
"step": 80
},
{
"epoch": 0.794392523364486,
"grad_norm": 41.835599660746425,
"learning_rate": 8.632372045409141e-07,
"logits/chosen": -1.9798192977905273,
"logits/rejected": -1.9717410802841187,
"logps/chosen": -251.6102294921875,
"logps/rejected": -240.1038360595703,
"loss": 0.5641,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": 1.3537020683288574,
"rewards/margins": 1.3792067766189575,
"rewards/rejected": -0.025504767894744873,
"step": 85
},
{
"epoch": 0.8411214953271028,
"grad_norm": 33.80081107648868,
"learning_rate": 8.454270047735642e-07,
"logits/chosen": -2.012608051300049,
"logits/rejected": -2.0120816230773926,
"logps/chosen": -253.6774139404297,
"logps/rejected": -194.4556884765625,
"loss": 0.5553,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": 0.8388773798942566,
"rewards/margins": 1.2849448919296265,
"rewards/rejected": -0.4460674822330475,
"step": 90
},
{
"epoch": 0.8878504672897196,
"grad_norm": 29.38305905850477,
"learning_rate": 8.267357907515661e-07,
"logits/chosen": -1.9744971990585327,
"logits/rejected": -1.9980430603027344,
"logps/chosen": -264.6416320800781,
"logps/rejected": -246.7568817138672,
"loss": 0.538,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 0.25706252455711365,
"rewards/margins": 2.062177896499634,
"rewards/rejected": -1.8051154613494873,
"step": 95
},
{
"epoch": 0.9345794392523364,
"grad_norm": 33.767105871104185,
"learning_rate": 8.072112345612433e-07,
"logits/chosen": -1.9495391845703125,
"logits/rejected": -1.8999382257461548,
"logps/chosen": -248.95272827148438,
"logps/rejected": -255.79019165039062,
"loss": 0.5159,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -0.18092432618141174,
"rewards/margins": 1.6218032836914062,
"rewards/rejected": -1.8027276992797852,
"step": 100
},
{
"epoch": 0.9345794392523364,
"eval_logits/chosen": -1.9645830392837524,
"eval_logits/rejected": -1.9168211221694946,
"eval_logps/chosen": -250.5727081298828,
"eval_logps/rejected": -234.28236389160156,
"eval_loss": 0.5285552144050598,
"eval_rewards/accuracies": 0.78125,
"eval_rewards/chosen": -0.21594171226024628,
"eval_rewards/margins": 1.8031220436096191,
"eval_rewards/rejected": -2.019063711166382,
"eval_runtime": 202.4154,
"eval_samples_per_second": 15.019,
"eval_steps_per_second": 0.237,
"step": 100
},
{
"epoch": 0.9813084112149533,
"grad_norm": 31.738611864064374,
"learning_rate": 7.869031337345827e-07,
"logits/chosen": -1.9719873666763306,
"logits/rejected": -1.923525094985962,
"logps/chosen": -260.2001953125,
"logps/rejected": -236.92495727539062,
"loss": 0.4808,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.06123792380094528,
"rewards/margins": 1.9958425760269165,
"rewards/rejected": -2.0570805072784424,
"step": 105
},
{
"epoch": 1.02803738317757,
"grad_norm": 20.57706834965762,
"learning_rate": 7.658632842402432e-07,
"logits/chosen": -2.0060174465179443,
"logits/rejected": -1.9614614248275757,
"logps/chosen": -249.8937530517578,
"logps/rejected": -242.28305053710938,
"loss": 0.3194,
"rewards/accuracies": 0.831250011920929,
"rewards/chosen": 0.40553492307662964,
"rewards/margins": 2.3744890689849854,
"rewards/rejected": -1.968954086303711,
"step": 110
},
{
"epoch": 1.074766355140187,
"grad_norm": 35.46781940178886,
"learning_rate": 7.441453483775353e-07,
"logits/chosen": -2.0191903114318848,
"logits/rejected": -1.9993083477020264,
"logps/chosen": -243.10147094726562,
"logps/rejected": -233.6897735595703,
"loss": 0.2252,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.1334301233291626,
"rewards/margins": 3.1529507637023926,
"rewards/rejected": -2.0195209980010986,
"step": 115
},
{
"epoch": 1.1214953271028036,
"grad_norm": 21.651387151346523,
"learning_rate": 7.218047179103112e-07,
"logits/chosen": -2.0854923725128174,
"logits/rejected": -2.022040605545044,
"logps/chosen": -228.9936981201172,
"logps/rejected": -211.9679412841797,
"loss": 0.2292,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.1401965618133545,
"rewards/margins": 2.9597201347351074,
"rewards/rejected": -1.8195232152938843,
"step": 120
},
{
"epoch": 1.1682242990654206,
"grad_norm": 24.010613449905463,
"learning_rate": 6.988983727898413e-07,
"logits/chosen": -2.136890411376953,
"logits/rejected": -2.12919282913208,
"logps/chosen": -243.7528076171875,
"logps/rejected": -251.5740203857422,
"loss": 0.233,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.6651722192764282,
"rewards/margins": 3.5824947357177734,
"rewards/rejected": -1.9173227548599243,
"step": 125
},
{
"epoch": 1.2149532710280373,
"grad_norm": 19.19894650607632,
"learning_rate": 6.754847358270066e-07,
"logits/chosen": -2.1517224311828613,
"logits/rejected": -2.156635046005249,
"logps/chosen": -250.22463989257812,
"logps/rejected": -209.24722290039062,
"loss": 0.2381,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.3966515064239502,
"rewards/margins": 3.149792432785034,
"rewards/rejected": -1.7531406879425049,
"step": 130
},
{
"epoch": 1.2616822429906542,
"grad_norm": 23.637404006148603,
"learning_rate": 6.516235236844661e-07,
"logits/chosen": -2.180138349533081,
"logits/rejected": -2.123582363128662,
"logps/chosen": -249.1603546142578,
"logps/rejected": -240.6315460205078,
"loss": 0.2453,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.434802770614624,
"rewards/margins": 3.7537569999694824,
"rewards/rejected": -2.3189542293548584,
"step": 135
},
{
"epoch": 1.308411214953271,
"grad_norm": 21.150266522840564,
"learning_rate": 6.273755945688457e-07,
"logits/chosen": -2.1802070140838623,
"logits/rejected": -2.1404995918273926,
"logps/chosen": -261.7569274902344,
"logps/rejected": -238.82333374023438,
"loss": 0.2056,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.6147171258926392,
"rewards/margins": 3.631941318511963,
"rewards/rejected": -2.017223596572876,
"step": 140
},
{
"epoch": 1.355140186915888,
"grad_norm": 19.216251285680404,
"learning_rate": 6.02802793011411e-07,
"logits/chosen": -2.2070083618164062,
"logits/rejected": -2.18471360206604,
"logps/chosen": -250.37966918945312,
"logps/rejected": -233.73361206054688,
"loss": 0.2691,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 0.6839503049850464,
"rewards/margins": 3.4379615783691406,
"rewards/rejected": -2.7540111541748047,
"step": 145
},
{
"epoch": 1.4018691588785046,
"grad_norm": 23.267149481370343,
"learning_rate": 5.779677921331093e-07,
"logits/chosen": -2.293395757675171,
"logits/rejected": -2.2372868061065674,
"logps/chosen": -240.39102172851562,
"logps/rejected": -243.92245483398438,
"loss": 0.2666,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.220241665840149,
"rewards/margins": 3.838820219039917,
"rewards/rejected": -2.6185781955718994,
"step": 150
},
{
"epoch": 1.4018691588785046,
"eval_logits/chosen": -2.284703254699707,
"eval_logits/rejected": -2.2442755699157715,
"eval_logps/chosen": -240.5095977783203,
"eval_logps/rejected": -230.9029083251953,
"eval_loss": 0.5667340159416199,
"eval_rewards/accuracies": 0.7890625,
"eval_rewards/chosen": 0.7903707027435303,
"eval_rewards/margins": 2.4714887142181396,
"eval_rewards/rejected": -1.6811178922653198,
"eval_runtime": 202.5833,
"eval_samples_per_second": 15.006,
"eval_steps_per_second": 0.237,
"step": 150
},
{
"epoch": 1.4485981308411215,
"grad_norm": 22.64610474864586,
"learning_rate": 5.529339337962897e-07,
"logits/chosen": -2.320802927017212,
"logits/rejected": -2.2888898849487305,
"logps/chosen": -232.2943878173828,
"logps/rejected": -249.5731658935547,
"loss": 0.2286,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 1.71249258518219,
"rewards/margins": 3.7487540245056152,
"rewards/rejected": -2.036261796951294,
"step": 155
},
{
"epoch": 1.4953271028037383,
"grad_norm": 17.267431239861295,
"learning_rate": 5.277650670507915e-07,
"logits/chosen": -2.3630456924438477,
"logits/rejected": -2.334383487701416,
"logps/chosen": -239.9236297607422,
"logps/rejected": -220.5778350830078,
"loss": 0.233,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.5703070163726807,
"rewards/margins": 3.3855056762695312,
"rewards/rejected": -1.815198540687561,
"step": 160
},
{
"epoch": 1.542056074766355,
"grad_norm": 28.93807721843343,
"learning_rate": 5.025253852864471e-07,
"logits/chosen": -2.456749439239502,
"logits/rejected": -2.4346892833709717,
"logps/chosen": -237.8841552734375,
"logps/rejected": -234.561767578125,
"loss": 0.3141,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.509019136428833,
"rewards/margins": 3.890598773956299,
"rewards/rejected": -2.3815793991088867,
"step": 165
},
{
"epoch": 1.588785046728972,
"grad_norm": 21.109754151195787,
"learning_rate": 4.77279262507344e-07,
"logits/chosen": -2.4918441772460938,
"logits/rejected": -2.464447021484375,
"logps/chosen": -242.0265350341797,
"logps/rejected": -236.3219757080078,
"loss": 0.275,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.0706355571746826,
"rewards/margins": 3.679105043411255,
"rewards/rejected": -2.6084697246551514,
"step": 170
},
{
"epoch": 1.6355140186915889,
"grad_norm": 21.650032107276107,
"learning_rate": 4.5209108914542714e-07,
"logits/chosen": -2.494457721710205,
"logits/rejected": -2.5054736137390137,
"logps/chosen": -242.4899139404297,
"logps/rejected": -258.30059814453125,
"loss": 0.2774,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.321101188659668,
"rewards/margins": 4.072310447692871,
"rewards/rejected": -2.7512094974517822,
"step": 175
},
{
"epoch": 1.6822429906542056,
"grad_norm": 21.356500983747313,
"learning_rate": 4.2702510783220475e-07,
"logits/chosen": -2.48954701423645,
"logits/rejected": -2.4850687980651855,
"logps/chosen": -227.01760864257812,
"logps/rejected": -231.8510284423828,
"loss": 0.2598,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 0.9325019121170044,
"rewards/margins": 3.5170364379882812,
"rewards/rejected": -2.5845344066619873,
"step": 180
},
{
"epoch": 1.7289719626168223,
"grad_norm": 23.401658906609267,
"learning_rate": 4.0214524954741586e-07,
"logits/chosen": -2.4770901203155518,
"logits/rejected": -2.449385404586792,
"logps/chosen": -249.5960235595703,
"logps/rejected": -240.88900756835938,
"loss": 0.2955,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.3769264221191406,
"rewards/margins": 3.714228868484497,
"rewards/rejected": -2.3373022079467773,
"step": 185
},
{
"epoch": 1.7757009345794392,
"grad_norm": 21.273588354036704,
"learning_rate": 3.7751497056257305e-07,
"logits/chosen": -2.4559922218322754,
"logits/rejected": -2.4245378971099854,
"logps/chosen": -249.3975372314453,
"logps/rejected": -253.1663818359375,
"loss": 0.2401,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.1007009744644165,
"rewards/margins": 3.69682240486145,
"rewards/rejected": -2.596121311187744,
"step": 190
},
{
"epoch": 1.8224299065420562,
"grad_norm": 26.50900109669566,
"learning_rate": 3.531970905952478e-07,
"logits/chosen": -2.4134464263916016,
"logits/rejected": -2.398714542388916,
"logps/chosen": -243.4962615966797,
"logps/rejected": -259.9742736816406,
"loss": 0.3042,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.2408723831176758,
"rewards/margins": 3.5389716625213623,
"rewards/rejected": -2.2980995178222656,
"step": 195
},
{
"epoch": 1.8691588785046729,
"grad_norm": 20.04208979237232,
"learning_rate": 3.2925363258689553e-07,
"logits/chosen": -2.4002745151519775,
"logits/rejected": -2.381742000579834,
"logps/chosen": -227.89956665039062,
"logps/rejected": -238.51956176757812,
"loss": 0.3127,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 1.159445881843567,
"rewards/margins": 3.3063011169433594,
"rewards/rejected": -2.146855115890503,
"step": 200
},
{
"epoch": 1.8691588785046729,
"eval_logits/chosen": -2.4148459434509277,
"eval_logits/rejected": -2.3878657817840576,
"eval_logps/chosen": -241.9330291748047,
"eval_logps/rejected": -232.25018310546875,
"eval_loss": 0.5355702042579651,
"eval_rewards/accuracies": 0.8046875,
"eval_rewards/chosen": 0.6480298638343811,
"eval_rewards/margins": 2.463876962661743,
"eval_rewards/rejected": -1.8158468008041382,
"eval_runtime": 202.4674,
"eval_samples_per_second": 15.015,
"eval_steps_per_second": 0.237,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 321,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2358113407598592.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}