{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 321,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04672897196261682,
      "grad_norm": 61.970988574529464,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7293832302093506,
      "logits/rejected": -2.7098002433776855,
      "logps/chosen": -282.619384765625,
      "logps/rejected": -220.626708984375,
      "loss": 0.6898,
      "rewards/accuracies": 0.34375,
      "rewards/chosen": 0.016255810856819153,
      "rewards/margins": 0.00715771596878767,
      "rewards/rejected": 0.009098095819354057,
      "step": 5
    },
    {
      "epoch": 0.09345794392523364,
      "grad_norm": 59.149513121461624,
      "learning_rate": 1e-06,
      "logits/chosen": -2.6811907291412354,
      "logits/rejected": -2.6537957191467285,
      "logps/chosen": -256.4613037109375,
      "logps/rejected": -214.9097442626953,
      "loss": 0.6465,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.46442437171936035,
      "rewards/margins": 0.15177568793296814,
      "rewards/rejected": 0.3126486837863922,
      "step": 10
    },
    {
      "epoch": 0.14018691588785046,
      "grad_norm": 43.868507115118234,
      "learning_rate": 9.993623730611148e-07,
      "logits/chosen": -2.4990592002868652,
      "logits/rejected": -2.4942288398742676,
      "logps/chosen": -252.8370361328125,
      "logps/rejected": -212.9906768798828,
      "loss": 0.6365,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 1.3768469095230103,
      "rewards/margins": 0.5569905042648315,
      "rewards/rejected": 0.8198563456535339,
      "step": 15
    },
    {
      "epoch": 0.18691588785046728,
      "grad_norm": 40.58644668089117,
      "learning_rate": 9.97451118516912e-07,
      "logits/chosen": -2.357009172439575,
      "logits/rejected": -2.3200223445892334,
      "logps/chosen": -245.38623046875,
      "logps/rejected": -190.89620971679688,
      "loss": 0.6273,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": 1.5086402893066406,
      "rewards/margins": 0.8777653574943542,
      "rewards/rejected": 0.6308748722076416,
      "step": 20
    },
    {
      "epoch": 0.2336448598130841,
      "grad_norm": 46.4139582288858,
      "learning_rate": 9.94271111036929e-07,
      "logits/chosen": -2.2126636505126953,
      "logits/rejected": -2.2065536975860596,
      "logps/chosen": -242.53775024414062,
      "logps/rejected": -225.9445343017578,
      "loss": 0.6348,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 1.3302034139633179,
      "rewards/margins": 0.9245456457138062,
      "rewards/rejected": 0.4056577682495117,
      "step": 25
    },
    {
      "epoch": 0.2803738317757009,
      "grad_norm": 64.74245067517272,
      "learning_rate": 9.898304612549066e-07,
      "logits/chosen": -2.183245897293091,
      "logits/rejected": -2.1688549518585205,
      "logps/chosen": -233.8648223876953,
      "logps/rejected": -205.1978759765625,
      "loss": 0.5938,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 1.1376529932022095,
      "rewards/margins": 0.8598777651786804,
      "rewards/rejected": 0.27777519822120667,
      "step": 30
    },
    {
      "epoch": 0.32710280373831774,
      "grad_norm": 51.84730256605764,
      "learning_rate": 9.841404950825536e-07,
      "logits/chosen": -2.227497100830078,
      "logits/rejected": -2.1977126598358154,
      "logps/chosen": -237.3373565673828,
      "logps/rejected": -212.15512084960938,
      "loss": 0.5785,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 1.2570827007293701,
      "rewards/margins": 0.9708169102668762,
      "rewards/rejected": 0.2862659692764282,
      "step": 35
    },
    {
      "epoch": 0.37383177570093457,
      "grad_norm": 40.60340275031408,
      "learning_rate": 9.77215724822721e-07,
      "logits/chosen": -2.243427276611328,
      "logits/rejected": -2.2088351249694824,
      "logps/chosen": -248.1513671875,
      "logps/rejected": -229.47531127929688,
      "loss": 0.5829,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.9161311984062195,
      "rewards/margins": 1.0557386875152588,
      "rewards/rejected": -0.13960735499858856,
      "step": 40
    },
    {
      "epoch": 0.4205607476635514,
      "grad_norm": 34.21102418831377,
      "learning_rate": 9.69073812155662e-07,
      "logits/chosen": -2.2237961292266846,
      "logits/rejected": -2.2018837928771973,
      "logps/chosen": -269.6021423339844,
      "logps/rejected": -224.58645629882812,
      "loss": 0.5896,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 1.0438940525054932,
      "rewards/margins": 0.9452205896377563,
      "rewards/rejected": 0.09867370873689651,
      "step": 45
    },
    {
      "epoch": 0.4672897196261682,
      "grad_norm": 42.29092168681447,
      "learning_rate": 9.597355230927788e-07,
      "logits/chosen": -2.0892410278320312,
      "logits/rejected": -2.0624351501464844,
      "logps/chosen": -240.1529083251953,
      "logps/rejected": -198.04843139648438,
      "loss": 0.5529,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": 1.232414722442627,
      "rewards/margins": 1.3885271549224854,
      "rewards/rejected": -0.156112402677536,
      "step": 50
    },
    {
      "epoch": 0.4672897196261682,
      "eval_logits/chosen": -2.0593109130859375,
      "eval_logits/rejected": -2.02968692779541,
      "eval_logps/chosen": -239.67454528808594,
      "eval_logps/rejected": -216.6856689453125,
      "eval_loss": 0.5947180390357971,
      "eval_rewards/accuracies": 0.7317708134651184,
      "eval_rewards/chosen": 0.8738771080970764,
      "eval_rewards/margins": 1.1332703828811646,
      "eval_rewards/rejected": -0.2593933641910553,
      "eval_runtime": 202.7096,
      "eval_samples_per_second": 14.997,
      "eval_steps_per_second": 0.237,
      "step": 50
    },
    {
      "epoch": 0.514018691588785,
      "grad_norm": 35.93324426868974,
      "learning_rate": 9.4922467501275e-07,
      "logits/chosen": -2.0398037433624268,
      "logits/rejected": -2.0083518028259277,
      "logps/chosen": -261.76177978515625,
      "logps/rejected": -210.04684448242188,
      "loss": 0.5467,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.9814583659172058,
      "rewards/margins": 1.524784803390503,
      "rewards/rejected": -0.5433263778686523,
      "step": 55
    },
    {
      "epoch": 0.5607476635514018,
      "grad_norm": 50.252777089157334,
      "learning_rate": 9.375680759151206e-07,
      "logits/chosen": -2.092087984085083,
      "logits/rejected": -2.098419427871704,
      "logps/chosen": -252.8424530029297,
      "logps/rejected": -214.93258666992188,
      "loss": 0.5409,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 0.7585053443908691,
      "rewards/margins": 1.2850215435028076,
      "rewards/rejected": -0.5265161395072937,
      "step": 60
    },
    {
      "epoch": 0.6074766355140186,
      "grad_norm": 43.0211288247323,
      "learning_rate": 9.247954560462927e-07,
      "logits/chosen": -2.0893867015838623,
      "logits/rejected": -2.0975987911224365,
      "logps/chosen": -241.7879180908203,
      "logps/rejected": -239.7193145751953,
      "loss": 0.5337,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.8641234636306763,
      "rewards/margins": 1.493786096572876,
      "rewards/rejected": -0.6296626329421997,
      "step": 65
    },
    {
      "epoch": 0.6542056074766355,
      "grad_norm": 38.40101091003172,
      "learning_rate": 9.109393920723001e-07,
      "logits/chosen": -2.0060245990753174,
      "logits/rejected": -1.9721952676773071,
      "logps/chosen": -256.7455139160156,
      "logps/rejected": -214.79403686523438,
      "loss": 0.533,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.6738287210464478,
      "rewards/margins": 1.2377485036849976,
      "rewards/rejected": -0.563919723033905,
      "step": 70
    },
    {
      "epoch": 0.7009345794392523,
      "grad_norm": 40.40382308924294,
      "learning_rate": 8.960352239917699e-07,
      "logits/chosen": -1.9107002019882202,
      "logits/rejected": -1.8710416555404663,
      "logps/chosen": -246.01028442382812,
      "logps/rejected": -224.3345947265625,
      "loss": 0.5452,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 0.6804067492485046,
      "rewards/margins": 1.3526126146316528,
      "rewards/rejected": -0.6722058653831482,
      "step": 75
    },
    {
      "epoch": 0.7476635514018691,
      "grad_norm": 37.2632538141041,
      "learning_rate": 8.801209650009814e-07,
      "logits/chosen": -1.8342845439910889,
      "logits/rejected": -1.7722011804580688,
      "logps/chosen": -238.6627960205078,
      "logps/rejected": -204.77720642089844,
      "loss": 0.4954,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": 0.8959934115409851,
      "rewards/margins": 1.759472131729126,
      "rewards/rejected": -0.8634785413742065,
      "step": 80
    },
    {
      "epoch": 0.794392523364486,
      "grad_norm": 41.835599660746425,
      "learning_rate": 8.632372045409141e-07,
      "logits/chosen": -1.9798192977905273,
      "logits/rejected": -1.9717410802841187,
      "logps/chosen": -251.6102294921875,
      "logps/rejected": -240.1038360595703,
      "loss": 0.5641,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": 1.3537020683288574,
      "rewards/margins": 1.3792067766189575,
      "rewards/rejected": -0.025504767894744873,
      "step": 85
    },
    {
      "epoch": 0.8411214953271028,
      "grad_norm": 33.80081107648868,
      "learning_rate": 8.454270047735642e-07,
      "logits/chosen": -2.012608051300049,
      "logits/rejected": -2.0120816230773926,
      "logps/chosen": -253.6774139404297,
      "logps/rejected": -194.4556884765625,
      "loss": 0.5553,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.8388773798942566,
      "rewards/margins": 1.2849448919296265,
      "rewards/rejected": -0.4460674822330475,
      "step": 90
    },
    {
      "epoch": 0.8878504672897196,
      "grad_norm": 29.38305905850477,
      "learning_rate": 8.267357907515661e-07,
      "logits/chosen": -1.9744971990585327,
      "logits/rejected": -1.9980430603027344,
      "logps/chosen": -264.6416320800781,
      "logps/rejected": -246.7568817138672,
      "loss": 0.538,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.25706252455711365,
      "rewards/margins": 2.062177896499634,
      "rewards/rejected": -1.8051154613494873,
      "step": 95
    },
    {
      "epoch": 0.9345794392523364,
      "grad_norm": 33.767105871104185,
      "learning_rate": 8.072112345612433e-07,
      "logits/chosen": -1.9495391845703125,
      "logits/rejected": -1.8999382257461548,
      "logps/chosen": -248.95272827148438,
      "logps/rejected": -255.79019165039062,
      "loss": 0.5159,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.18092432618141174,
      "rewards/margins": 1.6218032836914062,
      "rewards/rejected": -1.8027276992797852,
      "step": 100
    },
    {
      "epoch": 0.9345794392523364,
      "eval_logits/chosen": -1.9645830392837524,
      "eval_logits/rejected": -1.9168211221694946,
      "eval_logps/chosen": -250.5727081298828,
      "eval_logps/rejected": -234.28236389160156,
      "eval_loss": 0.5285552144050598,
      "eval_rewards/accuracies": 0.78125,
      "eval_rewards/chosen": -0.21594171226024628,
      "eval_rewards/margins": 1.8031220436096191,
      "eval_rewards/rejected": -2.019063711166382,
      "eval_runtime": 202.4154,
      "eval_samples_per_second": 15.019,
      "eval_steps_per_second": 0.237,
      "step": 100
    },
    {
      "epoch": 0.9813084112149533,
      "grad_norm": 31.738611864064374,
      "learning_rate": 7.869031337345827e-07,
      "logits/chosen": -1.9719873666763306,
      "logits/rejected": -1.923525094985962,
      "logps/chosen": -260.2001953125,
      "logps/rejected": -236.92495727539062,
      "loss": 0.4808,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.06123792380094528,
      "rewards/margins": 1.9958425760269165,
      "rewards/rejected": -2.0570805072784424,
      "step": 105
    },
    {
      "epoch": 1.02803738317757,
      "grad_norm": 20.57706834965762,
      "learning_rate": 7.658632842402432e-07,
      "logits/chosen": -2.0060174465179443,
      "logits/rejected": -1.9614614248275757,
      "logps/chosen": -249.8937530517578,
      "logps/rejected": -242.28305053710938,
      "loss": 0.3194,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": 0.40553492307662964,
      "rewards/margins": 2.3744890689849854,
      "rewards/rejected": -1.968954086303711,
      "step": 110
    },
    {
      "epoch": 1.074766355140187,
      "grad_norm": 35.46781940178886,
      "learning_rate": 7.441453483775353e-07,
      "logits/chosen": -2.0191903114318848,
      "logits/rejected": -1.9993083477020264,
      "logps/chosen": -243.10147094726562,
      "logps/rejected": -233.6897735595703,
      "loss": 0.2252,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 1.1334301233291626,
      "rewards/margins": 3.1529507637023926,
      "rewards/rejected": -2.0195209980010986,
      "step": 115
    },
    {
      "epoch": 1.1214953271028036,
      "grad_norm": 21.651387151346523,
      "learning_rate": 7.218047179103112e-07,
      "logits/chosen": -2.0854923725128174,
      "logits/rejected": -2.022040605545044,
      "logps/chosen": -228.9936981201172,
      "logps/rejected": -211.9679412841797,
      "loss": 0.2292,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 1.1401965618133545,
      "rewards/margins": 2.9597201347351074,
      "rewards/rejected": -1.8195232152938843,
      "step": 120
    },
    {
      "epoch": 1.1682242990654206,
      "grad_norm": 24.010613449905463,
      "learning_rate": 6.988983727898413e-07,
      "logits/chosen": -2.136890411376953,
      "logits/rejected": -2.12919282913208,
      "logps/chosen": -243.7528076171875,
      "logps/rejected": -251.5740203857422,
      "loss": 0.233,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 1.6651722192764282,
      "rewards/margins": 3.5824947357177734,
      "rewards/rejected": -1.9173227548599243,
      "step": 125
    },
    {
      "epoch": 1.2149532710280373,
      "grad_norm": 19.19894650607632,
      "learning_rate": 6.754847358270066e-07,
      "logits/chosen": -2.1517224311828613,
      "logits/rejected": -2.156635046005249,
      "logps/chosen": -250.22463989257812,
      "logps/rejected": -209.24722290039062,
      "loss": 0.2381,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 1.3966515064239502,
      "rewards/margins": 3.149792432785034,
      "rewards/rejected": -1.7531406879425049,
      "step": 130
    },
    {
      "epoch": 1.2616822429906542,
      "grad_norm": 23.637404006148603,
      "learning_rate": 6.516235236844661e-07,
      "logits/chosen": -2.180138349533081,
      "logits/rejected": -2.123582363128662,
      "logps/chosen": -249.1603546142578,
      "logps/rejected": -240.6315460205078,
      "loss": 0.2453,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 1.434802770614624,
      "rewards/margins": 3.7537569999694824,
      "rewards/rejected": -2.3189542293548584,
      "step": 135
    },
    {
      "epoch": 1.308411214953271,
      "grad_norm": 21.150266522840564,
      "learning_rate": 6.273755945688457e-07,
      "logits/chosen": -2.1802070140838623,
      "logits/rejected": -2.1404995918273926,
      "logps/chosen": -261.7569274902344,
      "logps/rejected": -238.82333374023438,
      "loss": 0.2056,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 1.6147171258926392,
      "rewards/margins": 3.631941318511963,
      "rewards/rejected": -2.017223596572876,
      "step": 140
    },
    {
      "epoch": 1.355140186915888,
      "grad_norm": 19.216251285680404,
      "learning_rate": 6.02802793011411e-07,
      "logits/chosen": -2.2070083618164062,
      "logits/rejected": -2.18471360206604,
      "logps/chosen": -250.37966918945312,
      "logps/rejected": -233.73361206054688,
      "loss": 0.2691,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": 0.6839503049850464,
      "rewards/margins": 3.4379615783691406,
      "rewards/rejected": -2.7540111541748047,
      "step": 145
    },
    {
      "epoch": 1.4018691588785046,
      "grad_norm": 23.267149481370343,
      "learning_rate": 5.779677921331093e-07,
      "logits/chosen": -2.293395757675171,
      "logits/rejected": -2.2372868061065674,
      "logps/chosen": -240.39102172851562,
      "logps/rejected": -243.92245483398438,
      "loss": 0.2666,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 1.220241665840149,
      "rewards/margins": 3.838820219039917,
      "rewards/rejected": -2.6185781955718994,
      "step": 150
    },
    {
      "epoch": 1.4018691588785046,
      "eval_logits/chosen": -2.284703254699707,
      "eval_logits/rejected": -2.2442755699157715,
      "eval_logps/chosen": -240.5095977783203,
      "eval_logps/rejected": -230.9029083251953,
      "eval_loss": 0.5667340159416199,
      "eval_rewards/accuracies": 0.7890625,
      "eval_rewards/chosen": 0.7903707027435303,
      "eval_rewards/margins": 2.4714887142181396,
      "eval_rewards/rejected": -1.6811178922653198,
      "eval_runtime": 202.5833,
      "eval_samples_per_second": 15.006,
      "eval_steps_per_second": 0.237,
      "step": 150
    },
    {
      "epoch": 1.4485981308411215,
      "grad_norm": 22.64610474864586,
      "learning_rate": 5.529339337962897e-07,
      "logits/chosen": -2.320802927017212,
      "logits/rejected": -2.2888898849487305,
      "logps/chosen": -232.2943878173828,
      "logps/rejected": -249.5731658935547,
      "loss": 0.2286,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 1.71249258518219,
      "rewards/margins": 3.7487540245056152,
      "rewards/rejected": -2.036261796951294,
      "step": 155
    },
    {
      "epoch": 1.4953271028037383,
      "grad_norm": 17.267431239861295,
      "learning_rate": 5.277650670507915e-07,
      "logits/chosen": -2.3630456924438477,
      "logits/rejected": -2.334383487701416,
      "logps/chosen": -239.9236297607422,
      "logps/rejected": -220.5778350830078,
      "loss": 0.233,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 1.5703070163726807,
      "rewards/margins": 3.3855056762695312,
      "rewards/rejected": -1.815198540687561,
      "step": 160
    },
    {
      "epoch": 1.542056074766355,
      "grad_norm": 28.93807721843343,
      "learning_rate": 5.025253852864471e-07,
      "logits/chosen": -2.456749439239502,
      "logits/rejected": -2.4346892833709717,
      "logps/chosen": -237.8841552734375,
      "logps/rejected": -234.561767578125,
      "loss": 0.3141,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 1.509019136428833,
      "rewards/margins": 3.890598773956299,
      "rewards/rejected": -2.3815793991088867,
      "step": 165
    },
    {
      "epoch": 1.588785046728972,
      "grad_norm": 21.109754151195787,
      "learning_rate": 4.77279262507344e-07,
      "logits/chosen": -2.4918441772460938,
      "logits/rejected": -2.464447021484375,
      "logps/chosen": -242.0265350341797,
      "logps/rejected": -236.3219757080078,
      "loss": 0.275,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 1.0706355571746826,
      "rewards/margins": 3.679105043411255,
      "rewards/rejected": -2.6084697246551514,
      "step": 170
    },
    {
      "epoch": 1.6355140186915889,
      "grad_norm": 21.650032107276107,
      "learning_rate": 4.5209108914542714e-07,
      "logits/chosen": -2.494457721710205,
      "logits/rejected": -2.5054736137390137,
      "logps/chosen": -242.4899139404297,
      "logps/rejected": -258.30059814453125,
      "loss": 0.2774,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 1.321101188659668,
      "rewards/margins": 4.072310447692871,
      "rewards/rejected": -2.7512094974517822,
      "step": 175
    },
    {
      "epoch": 1.6822429906542056,
      "grad_norm": 21.356500983747313,
      "learning_rate": 4.2702510783220475e-07,
      "logits/chosen": -2.48954701423645,
      "logits/rejected": -2.4850687980651855,
      "logps/chosen": -227.01760864257812,
      "logps/rejected": -231.8510284423828,
      "loss": 0.2598,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 0.9325019121170044,
      "rewards/margins": 3.5170364379882812,
      "rewards/rejected": -2.5845344066619873,
      "step": 180
    },
    {
      "epoch": 1.7289719626168223,
      "grad_norm": 23.401658906609267,
      "learning_rate": 4.0214524954741586e-07,
      "logits/chosen": -2.4770901203155518,
      "logits/rejected": -2.449385404586792,
      "logps/chosen": -249.5960235595703,
      "logps/rejected": -240.88900756835938,
      "loss": 0.2955,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 1.3769264221191406,
      "rewards/margins": 3.714228868484497,
      "rewards/rejected": -2.3373022079467773,
      "step": 185
    },
    {
      "epoch": 1.7757009345794392,
      "grad_norm": 21.273588354036704,
      "learning_rate": 3.7751497056257305e-07,
      "logits/chosen": -2.4559922218322754,
      "logits/rejected": -2.4245378971099854,
      "logps/chosen": -249.3975372314453,
      "logps/rejected": -253.1663818359375,
      "loss": 0.2401,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 1.1007009744644165,
      "rewards/margins": 3.69682240486145,
      "rewards/rejected": -2.596121311187744,
      "step": 190
    },
    {
      "epoch": 1.8224299065420562,
      "grad_norm": 26.50900109669566,
      "learning_rate": 3.531970905952478e-07,
      "logits/chosen": -2.4134464263916016,
      "logits/rejected": -2.398714542388916,
      "logps/chosen": -243.4962615966797,
      "logps/rejected": -259.9742736816406,
      "loss": 0.3042,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 1.2408723831176758,
      "rewards/margins": 3.5389716625213623,
      "rewards/rejected": -2.2980995178222656,
      "step": 195
    },
    {
      "epoch": 1.8691588785046729,
      "grad_norm": 20.04208979237232,
      "learning_rate": 3.2925363258689553e-07,
      "logits/chosen": -2.4002745151519775,
      "logits/rejected": -2.381742000579834,
      "logps/chosen": -227.89956665039062,
      "logps/rejected": -238.51956176757812,
      "loss": 0.3127,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": 1.159445881843567,
      "rewards/margins": 3.3063011169433594,
      "rewards/rejected": -2.146855115890503,
      "step": 200
    },
    {
      "epoch": 1.8691588785046729,
      "eval_logits/chosen": -2.4148459434509277,
      "eval_logits/rejected": -2.3878657817840576,
      "eval_logps/chosen": -241.9330291748047,
      "eval_logps/rejected": -232.25018310546875,
      "eval_loss": 0.5355702042579651,
      "eval_rewards/accuracies": 0.8046875,
      "eval_rewards/chosen": 0.6480298638343811,
      "eval_rewards/margins": 2.463876962661743,
      "eval_rewards/rejected": -1.8158468008041382,
      "eval_runtime": 202.4674,
      "eval_samples_per_second": 15.015,
      "eval_steps_per_second": 0.237,
      "step": 200
    },
    {
      "epoch": 1.9158878504672896,
      "grad_norm": 20.064900060265114,
      "learning_rate": 3.0574566451286086e-07,
      "logits/chosen": -2.4189305305480957,
      "logits/rejected": -2.40187406539917,
      "logps/chosen": -265.58294677734375,
      "logps/rejected": -251.65847778320312,
      "loss": 0.2513,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": 1.1464284658432007,
      "rewards/margins": 3.5169920921325684,
      "rewards/rejected": -2.3705639839172363,
      "step": 205
    },
    {
      "epoch": 1.9626168224299065,
      "grad_norm": 22.038470491161178,
      "learning_rate": 2.8273314362803333e-07,
      "logits/chosen": -2.4120736122131348,
      "logits/rejected": -2.4032583236694336,
      "logps/chosen": -282.6405334472656,
      "logps/rejected": -240.655029296875,
      "loss": 0.2764,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 1.6487640142440796,
      "rewards/margins": 3.7647461891174316,
      "rewards/rejected": -2.1159825325012207,
      "step": 210
    },
    {
      "epoch": 2.0093457943925235,
      "grad_norm": 12.721605555302636,
      "learning_rate": 2.602747635454047e-07,
      "logits/chosen": -2.4080586433410645,
      "logits/rejected": -2.3771650791168213,
      "logps/chosen": -258.2095947265625,
      "logps/rejected": -212.6840362548828,
      "loss": 0.2701,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 1.2774152755737305,
      "rewards/margins": 3.8353512287139893,
      "rewards/rejected": -2.557935953140259,
      "step": 215
    },
    {
      "epoch": 2.05607476635514,
      "grad_norm": 14.30139581032378,
      "learning_rate": 2.384278045375523e-07,
      "logits/chosen": -2.3718018531799316,
      "logits/rejected": -2.343240261077881,
      "logps/chosen": -236.43167114257812,
      "logps/rejected": -254.13290405273438,
      "loss": 0.1493,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 1.3953884840011597,
      "rewards/margins": 4.300187110900879,
      "rewards/rejected": -2.9047982692718506,
      "step": 220
    },
    {
      "epoch": 2.102803738317757,
      "grad_norm": 17.63579128313249,
      "learning_rate": 2.1724798744286071e-07,
      "logits/chosen": -2.3075740337371826,
      "logits/rejected": -2.2964611053466797,
      "logps/chosen": -247.1775360107422,
      "logps/rejected": -232.0567169189453,
      "loss": 0.1552,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 1.4666545391082764,
      "rewards/margins": 4.158020496368408,
      "rewards/rejected": -2.691365957260132,
      "step": 225
    },
    {
      "epoch": 2.149532710280374,
      "grad_norm": 13.340130576113058,
      "learning_rate": 1.9678933154909095e-07,
      "logits/chosen": -2.3146255016326904,
      "logits/rejected": -2.270624876022339,
      "logps/chosen": -234.5230712890625,
      "logps/rejected": -253.0650634765625,
      "loss": 0.1433,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 1.5044587850570679,
      "rewards/margins": 4.230677604675293,
      "rewards/rejected": -2.7262187004089355,
      "step": 230
    },
    {
      "epoch": 2.196261682242991,
      "grad_norm": 14.326810577042355,
      "learning_rate": 1.77104016816768e-07,
      "logits/chosen": -2.29292631149292,
      "logits/rejected": -2.2760777473449707,
      "logps/chosen": -235.0215606689453,
      "logps/rejected": -248.5024871826172,
      "loss": 0.1523,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 1.5524753332138062,
      "rewards/margins": 4.352492332458496,
      "rewards/rejected": -2.8000173568725586,
      "step": 235
    },
    {
      "epoch": 2.2429906542056073,
      "grad_norm": 10.917474403037492,
      "learning_rate": 1.5824225079378684e-07,
      "logits/chosen": -2.287108898162842,
      "logits/rejected": -2.2589364051818848,
      "logps/chosen": -212.53848266601562,
      "logps/rejected": -262.1377258300781,
      "loss": 0.1379,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 1.49385404586792,
      "rewards/margins": 4.177613735198975,
      "rewards/rejected": -2.6837594509124756,
      "step": 240
    },
    {
      "epoch": 2.289719626168224,
      "grad_norm": 18.70156944510107,
      "learning_rate": 1.4025214056067237e-07,
      "logits/chosen": -2.3043479919433594,
      "logits/rejected": -2.2352633476257324,
      "logps/chosen": -237.90090942382812,
      "logps/rejected": -255.3131103515625,
      "loss": 0.1504,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 1.958906888961792,
      "rewards/margins": 4.553770065307617,
      "rewards/rejected": -2.594862461090088,
      "step": 245
    },
    {
      "epoch": 2.336448598130841,
      "grad_norm": 18.58957134665332,
      "learning_rate": 1.2317957003309725e-07,
      "logits/chosen": -2.28039813041687,
      "logits/rejected": -2.269214630126953,
      "logps/chosen": -242.89535522460938,
      "logps/rejected": -228.6979217529297,
      "loss": 0.152,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": 1.4991402626037598,
      "rewards/margins": 4.210090637207031,
      "rewards/rejected": -2.7109508514404297,
      "step": 250
    },
    {
      "epoch": 2.336448598130841,
      "eval_logits/chosen": -2.28143310546875,
      "eval_logits/rejected": -2.2442617416381836,
      "eval_logps/chosen": -242.3255157470703,
      "eval_logps/rejected": -234.9364776611328,
      "eval_loss": 0.5442168116569519,
      "eval_rewards/accuracies": 0.7890625,
      "eval_rewards/chosen": 0.6087805032730103,
      "eval_rewards/margins": 2.6932570934295654,
      "eval_rewards/rejected": -2.0844767093658447,
      "eval_runtime": 202.4297,
      "eval_samples_per_second": 15.018,
      "eval_steps_per_second": 0.237,
      "step": 250
    },
    {
      "epoch": 2.383177570093458,
      "grad_norm": 12.504767667176505,
      "learning_rate": 1.0706808293459873e-07,
      "logits/chosen": -2.300673007965088,
      "logits/rejected": -2.2649149894714355,
      "logps/chosen": -240.6633758544922,
      "logps/rejected": -250.4842987060547,
      "loss": 0.1378,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 1.6441866159439087,
      "rewards/margins": 4.3823652267456055,
      "rewards/rejected": -2.7381789684295654,
      "step": 255
    },
    {
      "epoch": 2.4299065420560746,
      "grad_norm": 13.794920815357296,
      "learning_rate": 9.195877173797534e-08,
      "logits/chosen": -2.300203323364258,
      "logits/rejected": -2.258514881134033,
      "logps/chosen": -262.6177978515625,
      "logps/rejected": -264.5312194824219,
      "loss": 0.1359,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 1.8180078268051147,
      "rewards/margins": 4.726799964904785,
      "rewards/rejected": -2.908792018890381,
      "step": 260
    },
    {
      "epoch": 2.4766355140186915,
      "grad_norm": 16.46425878105061,
      "learning_rate": 7.789017285861438e-08,
      "logits/chosen": -2.2753031253814697,
      "logits/rejected": -2.2750420570373535,
      "logps/chosen": -253.7283935546875,
      "logps/rejected": -241.67990112304688,
      "loss": 0.1319,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 1.0793383121490479,
      "rewards/margins": 4.2754716873168945,
      "rewards/rejected": -3.1961333751678467,
      "step": 265
    },
    {
      "epoch": 2.5233644859813085,
      "grad_norm": 13.487096691897303,
      "learning_rate": 6.489816836706785e-08,
      "logits/chosen": -2.2921862602233887,
      "logits/rejected": -2.2367775440216064,
      "logps/chosen": -272.6159973144531,
      "logps/rejected": -244.61575317382812,
      "loss": 0.1606,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 1.8186426162719727,
      "rewards/margins": 4.736049175262451,
      "rewards/rejected": -2.9174060821533203,
      "step": 270
    },
    {
      "epoch": 2.5700934579439254,
      "grad_norm": 22.37915209288031,
      "learning_rate": 5.3015894471550914e-08,
      "logits/chosen": -2.2553036212921143,
      "logits/rejected": -2.249110460281372,
      "logps/chosen": -236.9984130859375,
      "logps/rejected": -265.2401123046875,
      "loss": 0.1559,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 1.1508326530456543,
      "rewards/margins": 4.554782390594482,
      "rewards/rejected": -3.403949737548828,
      "step": 275
    },
    {
      "epoch": 2.616822429906542,
      "grad_norm": 13.985165706457115,
      "learning_rate": 4.227365700378799e-08,
      "logits/chosen": -2.293137788772583,
      "logits/rejected": -2.2412238121032715,
      "logps/chosen": -241.9412841796875,
      "logps/rejected": -224.3672332763672,
      "loss": 0.127,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 1.2079182863235474,
      "rewards/margins": 4.391335964202881,
      "rewards/rejected": -3.183417558670044,
      "step": 280
    },
    {
      "epoch": 2.663551401869159,
      "grad_norm": 11.021506399671743,
      "learning_rate": 3.269885412375223e-08,
      "logits/chosen": -2.291661262512207,
      "logits/rejected": -2.2645070552825928,
      "logps/chosen": -234.7343292236328,
      "logps/rejected": -226.64303588867188,
      "loss": 0.124,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 1.284121036529541,
      "rewards/margins": 4.28745174407959,
      "rewards/rejected": -3.003331184387207,
      "step": 285
    },
    {
      "epoch": 2.710280373831776,
      "grad_norm": 15.975321991322621,
      "learning_rate": 2.4315906440446952e-08,
      "logits/chosen": -2.2947776317596436,
      "logits/rejected": -2.2545151710510254,
      "logps/chosen": -259.29241943359375,
      "logps/rejected": -244.57156372070312,
      "loss": 0.1606,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 1.2673962116241455,
      "rewards/margins": 4.276968002319336,
      "rewards/rejected": -3.0095717906951904,
      "step": 290
    },
    {
      "epoch": 2.7570093457943923,
      "grad_norm": 15.010245498160302,
      "learning_rate": 1.7146194726952778e-08,
      "logits/chosen": -2.317955732345581,
      "logits/rejected": -2.2629940509796143,
      "logps/chosen": -269.7888488769531,
      "logps/rejected": -267.582763671875,
      "loss": 0.1412,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 1.5963925123214722,
      "rewards/margins": 4.7224860191345215,
      "rewards/rejected": -3.126093626022339,
      "step": 295
    },
    {
      "epoch": 2.803738317757009,
      "grad_norm": 15.472628492526889,
      "learning_rate": 1.1208005388599951e-08,
      "logits/chosen": -2.2652642726898193,
      "logits/rejected": -2.266711950302124,
      "logps/chosen": -251.26358032226562,
      "logps/rejected": -226.0489044189453,
      "loss": 0.1431,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 1.2339963912963867,
      "rewards/margins": 4.164398193359375,
      "rewards/rejected": -2.9304018020629883,
      "step": 300
    },
    {
      "epoch": 2.803738317757009,
      "eval_logits/chosen": -2.282158374786377,
      "eval_logits/rejected": -2.2467496395111084,
      "eval_logps/chosen": -244.18870544433594,
      "eval_logps/rejected": -238.83473205566406,
      "eval_loss": 0.5449995398521423,
      "eval_rewards/accuracies": 0.7942708134651184,
      "eval_rewards/chosen": 0.42245936393737793,
      "eval_rewards/margins": 2.8967597484588623,
      "eval_rewards/rejected": -2.4743001461029053,
      "eval_runtime": 202.3815,
      "eval_samples_per_second": 15.021,
      "eval_steps_per_second": 0.237,
      "step": 300
    },
    {
      "epoch": 2.850467289719626,
      "grad_norm": 16.936006866528523,
      "learning_rate": 6.516483823349794e-09,
      "logits/chosen": -2.278529644012451,
      "logits/rejected": -2.2658190727233887,
      "logps/chosen": -254.26248168945312,
      "logps/rejected": -235.7794647216797,
      "loss": 0.1681,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 1.6976511478424072,
      "rewards/margins": 4.890439510345459,
      "rewards/rejected": -3.192788600921631,
      "step": 305
    },
    {
      "epoch": 2.897196261682243,
      "grad_norm": 14.960616808458298,
      "learning_rate": 3.0835957933397773e-09,
      "logits/chosen": -2.276339054107666,
      "logits/rejected": -2.2654058933258057,
      "logps/chosen": -237.9030303955078,
      "logps/rejected": -246.7018585205078,
      "loss": 0.1553,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 1.366158127784729,
      "rewards/margins": 4.665255069732666,
      "rewards/rejected": -3.2990965843200684,
      "step": 310
    },
    {
      "epoch": 2.94392523364486,
      "grad_norm": 17.16613844616987,
      "learning_rate": 9.180969061143851e-10,
      "logits/chosen": -2.2957701683044434,
      "logits/rejected": -2.253190279006958,
      "logps/chosen": -253.9457550048828,
      "logps/rejected": -231.51229858398438,
      "loss": 0.137,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 1.2239946126937866,
      "rewards/margins": 4.194064617156982,
      "rewards/rejected": -2.9700698852539062,
      "step": 315
    },
    {
      "epoch": 2.9906542056074765,
      "grad_norm": 14.055773300679348,
      "learning_rate": 2.5510283379992504e-11,
      "logits/chosen": -2.2696633338928223,
      "logits/rejected": -2.2665677070617676,
      "logps/chosen": -260.431640625,
      "logps/rejected": -258.1851806640625,
      "loss": 0.156,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 1.6931114196777344,
      "rewards/margins": 4.892333984375,
      "rewards/rejected": -3.1992225646972656,
      "step": 320
    }
  ],
  "logging_steps": 5,
  "max_steps": 321,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3785055088410624.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}