{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.3888888888888888,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06944444444444445,
"grad_norm": 36.698929739881684,
"learning_rate": 5e-07,
"logits/chosen": -2.7539002895355225,
"logits/rejected": -2.7327029705047607,
"logps/chosen": -163.69387817382812,
"logps/rejected": -163.82852172851562,
"loss": 0.693,
"rewards/accuracies": 0.33125001192092896,
"rewards/chosen": 0.0032427930273115635,
"rewards/margins": 0.003264186205342412,
"rewards/rejected": -2.1393225324572995e-05,
"step": 5
},
{
"epoch": 0.1388888888888889,
"grad_norm": 33.19716118176963,
"learning_rate": 1e-06,
"logits/chosen": -2.725349187850952,
"logits/rejected": -2.7233974933624268,
"logps/chosen": -158.5753631591797,
"logps/rejected": -163.6913299560547,
"loss": 0.6852,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": 0.1477239578962326,
"rewards/margins": 0.009601245634257793,
"rewards/rejected": 0.13812272250652313,
"step": 10
},
{
"epoch": 0.20833333333333334,
"grad_norm": 32.17261180089332,
"learning_rate": 9.985471028179154e-07,
"logits/chosen": -2.6893043518066406,
"logits/rejected": -2.6972908973693848,
"logps/chosen": -163.91458129882812,
"logps/rejected": -160.10305786132812,
"loss": 0.6754,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.5012314319610596,
"rewards/margins": 0.07390480488538742,
"rewards/rejected": 0.42732667922973633,
"step": 15
},
{
"epoch": 0.2777777777777778,
"grad_norm": 34.14748336984378,
"learning_rate": 9.94196854912548e-07,
"logits/chosen": -2.6901488304138184,
"logits/rejected": -2.706470251083374,
"logps/chosen": -160.76934814453125,
"logps/rejected": -159.16995239257812,
"loss": 0.6546,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": 0.3948652148246765,
"rewards/margins": 0.21753108501434326,
"rewards/rejected": 0.17733411490917206,
"step": 20
},
{
"epoch": 0.3472222222222222,
"grad_norm": 32.71971381381323,
"learning_rate": 9.869745381355905e-07,
"logits/chosen": -2.6964094638824463,
"logits/rejected": -2.682394504547119,
"logps/chosen": -157.96328735351562,
"logps/rejected": -162.65863037109375,
"loss": 0.6383,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": 0.2566204071044922,
"rewards/margins": 0.2564144432544708,
"rewards/rejected": 0.00020598471746779978,
"step": 25
},
{
"epoch": 0.4166666666666667,
"grad_norm": 35.675812969071345,
"learning_rate": 9.769221256218162e-07,
"logits/chosen": -2.6471545696258545,
"logits/rejected": -2.620529890060425,
"logps/chosen": -160.15093994140625,
"logps/rejected": -163.9252166748047,
"loss": 0.6267,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.07839958369731903,
"rewards/margins": 0.13437125086784363,
"rewards/rejected": -0.0559716634452343,
"step": 30
},
{
"epoch": 0.4861111111111111,
"grad_norm": 35.65139996099045,
"learning_rate": 9.64098037858483e-07,
"logits/chosen": -2.5851612091064453,
"logits/rejected": -2.5945792198181152,
"logps/chosen": -153.91452026367188,
"logps/rejected": -158.6124267578125,
"loss": 0.6275,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": 0.3417479395866394,
"rewards/margins": 0.34512001276016235,
"rewards/rejected": -0.0033720836509019136,
"step": 35
},
{
"epoch": 0.5555555555555556,
"grad_norm": 32.23578863597255,
"learning_rate": 9.485768031694871e-07,
"logits/chosen": -2.5328378677368164,
"logits/rejected": -2.5566062927246094,
"logps/chosen": -149.2768096923828,
"logps/rejected": -150.4925537109375,
"loss": 0.593,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.4136400818824768,
"rewards/margins": 0.36537498235702515,
"rewards/rejected": 0.048265062272548676,
"step": 40
},
{
"epoch": 0.625,
"grad_norm": 32.86956863981884,
"learning_rate": 9.304486245873971e-07,
"logits/chosen": -2.6404240131378174,
"logits/rejected": -2.6186232566833496,
"logps/chosen": -154.857421875,
"logps/rejected": -160.54421997070312,
"loss": 0.5804,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": 0.21952423453330994,
"rewards/margins": 0.6369880437850952,
"rewards/rejected": -0.41746383905410767,
"step": 45
},
{
"epoch": 0.6944444444444444,
"grad_norm": 32.98463521922708,
"learning_rate": 9.098188556305262e-07,
"logits/chosen": -2.7281055450439453,
"logits/rejected": -2.7109835147857666,
"logps/chosen": -164.81529235839844,
"logps/rejected": -172.00509643554688,
"loss": 0.5904,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 0.22729039192199707,
"rewards/margins": 0.6152051687240601,
"rewards/rejected": -0.3879148066043854,
"step": 50
},
{
"epoch": 0.6944444444444444,
"eval_logits/chosen": -2.7504920959472656,
"eval_logits/rejected": -2.7399511337280273,
"eval_logps/chosen": -158.1156463623047,
"eval_logps/rejected": -165.36752319335938,
"eval_loss": 0.5584720969200134,
"eval_rewards/accuracies": 0.6953125,
"eval_rewards/chosen": 0.1767115592956543,
"eval_rewards/margins": 0.707548201084137,
"eval_rewards/rejected": -0.5308365821838379,
"eval_runtime": 126.2022,
"eval_samples_per_second": 16.188,
"eval_steps_per_second": 0.254,
"step": 50
},
{
"epoch": 0.7638888888888888,
"grad_norm": 32.243042350659394,
"learning_rate": 8.868073880316123e-07,
"logits/chosen": -2.716021776199341,
"logits/rejected": -2.7205090522766113,
"logps/chosen": -160.37879943847656,
"logps/rejected": -165.23492431640625,
"loss": 0.5532,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.1015612855553627,
"rewards/margins": 0.6121625304222107,
"rewards/rejected": -0.7137238383293152,
"step": 55
},
{
"epoch": 0.8333333333333334,
"grad_norm": 35.25482149048248,
"learning_rate": 8.615479549763755e-07,
"logits/chosen": -2.6721854209899902,
"logits/rejected": -2.6540756225585938,
"logps/chosen": -154.70159912109375,
"logps/rejected": -160.74624633789062,
"loss": 0.5415,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -0.3344821035861969,
"rewards/margins": 0.9801710247993469,
"rewards/rejected": -1.3146532773971558,
"step": 60
},
{
"epoch": 0.9027777777777778,
"grad_norm": 30.012069786549304,
"learning_rate": 8.341873539012443e-07,
"logits/chosen": -2.6360578536987305,
"logits/rejected": -2.6559650897979736,
"logps/chosen": -155.54788208007812,
"logps/rejected": -159.24798583984375,
"loss": 0.534,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.024257740005850792,
"rewards/margins": 0.7708773016929626,
"rewards/rejected": -0.7466195821762085,
"step": 65
},
{
"epoch": 0.9722222222222222,
"grad_norm": 32.593167513928634,
"learning_rate": 8.048845933670271e-07,
"logits/chosen": -2.6497814655303955,
"logits/rejected": -2.643947124481201,
"logps/chosen": -175.2610626220703,
"logps/rejected": -181.9915771484375,
"loss": 0.5238,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.22413644194602966,
"rewards/margins": 0.8313905596733093,
"rewards/rejected": -1.0555269718170166,
"step": 70
},
{
"epoch": 1.0416666666666667,
"grad_norm": 18.23226121550072,
"learning_rate": 7.738099689665539e-07,
"logits/chosen": -2.7001595497131348,
"logits/rejected": -2.7125773429870605,
"logps/chosen": -158.98019409179688,
"logps/rejected": -174.9591522216797,
"loss": 0.3717,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.3584290146827698,
"rewards/margins": 1.3700459003448486,
"rewards/rejected": -1.7284749746322632,
"step": 75
},
{
"epoch": 1.1111111111111112,
"grad_norm": 17.63451132872378,
"learning_rate": 7.41144073636728e-07,
"logits/chosen": -2.7544779777526855,
"logits/rejected": -2.7487056255340576,
"logps/chosen": -172.69039916992188,
"logps/rejected": -199.56082153320312,
"loss": 0.253,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": 0.5693989396095276,
"rewards/margins": 2.6911113262176514,
"rewards/rejected": -2.1217124462127686,
"step": 80
},
{
"epoch": 1.1805555555555556,
"grad_norm": 19.435839013678418,
"learning_rate": 7.070767481266492e-07,
"logits/chosen": -2.7354109287261963,
"logits/rejected": -2.7295777797698975,
"logps/chosen": -166.5186767578125,
"logps/rejected": -190.08285522460938,
"loss": 0.2397,
"rewards/accuracies": 0.90625,
"rewards/chosen": 0.3293834328651428,
"rewards/margins": 2.566129207611084,
"rewards/rejected": -2.236745595932007,
"step": 85
},
{
"epoch": 1.25,
"grad_norm": 20.070654461149143,
"learning_rate": 6.718059777212565e-07,
"logits/chosen": -2.7108664512634277,
"logits/rejected": -2.699066638946533,
"logps/chosen": -165.696533203125,
"logps/rejected": -187.604736328125,
"loss": 0.2257,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.10144776105880737,
"rewards/margins": 2.6659486293792725,
"rewards/rejected": -2.7673964500427246,
"step": 90
},
{
"epoch": 1.3194444444444444,
"grad_norm": 21.350071509710315,
"learning_rate": 6.355367416322778e-07,
"logits/chosen": -2.6924808025360107,
"logits/rejected": -2.6694552898406982,
"logps/chosen": -162.50173950195312,
"logps/rejected": -189.10617065429688,
"loss": 0.2252,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -0.26245421171188354,
"rewards/margins": 2.784482955932617,
"rewards/rejected": -3.0469374656677246,
"step": 95
},
{
"epoch": 1.3888888888888888,
"grad_norm": 19.5783540001623,
"learning_rate": 5.984798217433531e-07,
"logits/chosen": -2.669064998626709,
"logits/rejected": -2.683467388153076,
"logps/chosen": -166.97213745117188,
"logps/rejected": -197.8631591796875,
"loss": 0.2124,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -0.2085113823413849,
"rewards/margins": 2.9906728267669678,
"rewards/rejected": -3.1991844177246094,
"step": 100
},
{
"epoch": 1.3888888888888888,
"eval_logits/chosen": -2.702752113342285,
"eval_logits/rejected": -2.684614419937134,
"eval_logps/chosen": -169.66738891601562,
"eval_logps/rejected": -182.4733123779297,
"eval_loss": 0.5329886078834534,
"eval_rewards/accuracies": 0.734375,
"eval_rewards/chosen": -0.9784606099128723,
"eval_rewards/margins": 1.2629573345184326,
"eval_rewards/rejected": -2.2414181232452393,
"eval_runtime": 125.9231,
"eval_samples_per_second": 16.224,
"eval_steps_per_second": 0.254,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 216,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1178822762299392.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
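The object above follows the standard trainer_state.json layout written by the Hugging Face Trainer when it saves a checkpoint; the rewards/* and logps/* keys suggest a DPO-style preference-tuning run. Below is a minimal inspection sketch, assuming the file is saved under its usual name trainer_state.json; the filename and the printed summary are illustrative choices, not part of the checkpoint format.

# Minimal sketch: summarize training and eval metrics from a trainer_state.json
# like the one above. Assumes the default filename; adjust the path as needed.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training log entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"logged up to step {state['global_step']} of {state['max_steps']}")
for entry in train_logs:
    print(
        f"step {entry['step']:>3}  loss {entry['loss']:.4f}  "
        f"reward margin {entry['rewards/margins']:.3f}  "
        f"accuracy {entry['rewards/accuracies']:.3f}"
    )
for entry in eval_logs:
    print(
        f"eval @ step {entry['step']:>3}  eval_loss {entry['eval_loss']:.4f}  "
        f"eval accuracy {entry['eval_rewards/accuracies']:.3f}"
    )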