{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.3888888888888888,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06944444444444445,
"grad_norm": 34.922038802469906,
"learning_rate": 5e-07,
"logits/chosen": -2.745856761932373,
"logits/rejected": -2.7519428730010986,
"logps/chosen": -158.59893798828125,
"logps/rejected": -160.2094268798828,
"loss": 0.6939,
"rewards/accuracies": 0.33125001192092896,
"rewards/chosen": 0.000581090513151139,
"rewards/margins": -0.00047404784709215164,
"rewards/rejected": 0.0010551378363743424,
"step": 5
},
{
"epoch": 0.1388888888888889,
"grad_norm": 36.413747880379944,
"learning_rate": 1e-06,
"logits/chosen": -2.736849546432495,
"logits/rejected": -2.7453856468200684,
"logps/chosen": -174.9836883544922,
"logps/rejected": -171.3789825439453,
"loss": 0.6883,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.022891324013471603,
"rewards/margins": 0.0072159310802817345,
"rewards/rejected": 0.015675392001867294,
"step": 10
},
{
"epoch": 0.20833333333333334,
"grad_norm": 37.39831623643963,
"learning_rate": 9.985471028179154e-07,
"logits/chosen": -2.6100494861602783,
"logits/rejected": -2.613814115524292,
"logps/chosen": -173.74456787109375,
"logps/rejected": -171.9447784423828,
"loss": 0.6733,
"rewards/accuracies": 0.6187499761581421,
"rewards/chosen": 0.21788981556892395,
"rewards/margins": 0.09981251507997513,
"rewards/rejected": 0.11807730048894882,
"step": 15
},
{
"epoch": 0.2777777777777778,
"grad_norm": 32.21436012894337,
"learning_rate": 9.94196854912548e-07,
"logits/chosen": -2.52358341217041,
"logits/rejected": -2.5229320526123047,
"logps/chosen": -155.1029815673828,
"logps/rejected": -159.9917755126953,
"loss": 0.653,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": 0.3672500252723694,
"rewards/margins": 0.20777833461761475,
"rewards/rejected": 0.15947169065475464,
"step": 20
},
{
"epoch": 0.3472222222222222,
"grad_norm": 32.0223445235898,
"learning_rate": 9.869745381355905e-07,
"logits/chosen": -2.4307353496551514,
"logits/rejected": -2.429652452468872,
"logps/chosen": -159.20545959472656,
"logps/rejected": -158.57797241210938,
"loss": 0.6327,
"rewards/accuracies": 0.6312500238418579,
"rewards/chosen": 0.3310449421405792,
"rewards/margins": 0.22794027626514435,
"rewards/rejected": 0.10310468822717667,
"step": 25
},
{
"epoch": 0.4166666666666667,
"grad_norm": 36.79676539267814,
"learning_rate": 9.769221256218162e-07,
"logits/chosen": -2.4173130989074707,
"logits/rejected": -2.3855977058410645,
"logps/chosen": -165.49618530273438,
"logps/rejected": -164.41111755371094,
"loss": 0.6264,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.17511680722236633,
"rewards/margins": 0.2607855200767517,
"rewards/rejected": -0.43590235710144043,
"step": 30
},
{
"epoch": 0.4861111111111111,
"grad_norm": 32.497757396344305,
"learning_rate": 9.64098037858483e-07,
"logits/chosen": -2.482489824295044,
"logits/rejected": -2.4674153327941895,
"logps/chosen": -162.7029571533203,
"logps/rejected": -164.48619079589844,
"loss": 0.5914,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -0.3191075921058655,
"rewards/margins": 0.362353652715683,
"rewards/rejected": -0.6814612150192261,
"step": 35
},
{
"epoch": 0.5555555555555556,
"grad_norm": 34.40764043054464,
"learning_rate": 9.485768031694871e-07,
"logits/chosen": -2.518897771835327,
"logits/rejected": -2.5143680572509766,
"logps/chosen": -167.1140899658203,
"logps/rejected": -168.67987060546875,
"loss": 0.58,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.037984561175107956,
"rewards/margins": 0.48854589462280273,
"rewards/rejected": -0.5265304446220398,
"step": 40
},
{
"epoch": 0.625,
"grad_norm": 33.97876407643923,
"learning_rate": 9.304486245873971e-07,
"logits/chosen": -2.542407989501953,
"logits/rejected": -2.510068416595459,
"logps/chosen": -163.48536682128906,
"logps/rejected": -164.54147338867188,
"loss": 0.5815,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.062438905239105225,
"rewards/margins": 0.5621680021286011,
"rewards/rejected": -0.6246069073677063,
"step": 45
},
{
"epoch": 0.6944444444444444,
"grad_norm": 32.726531127211416,
"learning_rate": 9.098188556305262e-07,
"logits/chosen": -2.513942241668701,
"logits/rejected": -2.51139235496521,
"logps/chosen": -165.90362548828125,
"logps/rejected": -170.9777069091797,
"loss": 0.5342,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.2061791867017746,
"rewards/margins": 0.6521804928779602,
"rewards/rejected": -0.8583596348762512,
"step": 50
},
{
"epoch": 0.6944444444444444,
"eval_logits/chosen": -2.4995272159576416,
"eval_logits/rejected": -2.4678475856781006,
"eval_logps/chosen": -159.87913513183594,
"eval_logps/rejected": -170.17739868164062,
"eval_loss": 0.5209046602249146,
"eval_rewards/accuracies": 0.71484375,
"eval_rewards/chosen": -0.20708096027374268,
"eval_rewards/margins": 0.7420069575309753,
"eval_rewards/rejected": -0.9490878582000732,
"eval_runtime": 129.0562,
"eval_samples_per_second": 15.83,
"eval_steps_per_second": 0.248,
"step": 50
},
{
"epoch": 0.7638888888888888,
"grad_norm": 30.615472770103583,
"learning_rate": 8.868073880316123e-07,
"logits/chosen": -2.500612497329712,
"logits/rejected": -2.520470142364502,
"logps/chosen": -177.12451171875,
"logps/rejected": -183.56619262695312,
"loss": 0.5577,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.32517191767692566,
"rewards/margins": 0.8004968762397766,
"rewards/rejected": -1.1256687641143799,
"step": 55
},
{
"epoch": 0.8333333333333334,
"grad_norm": 32.6842144140191,
"learning_rate": 8.615479549763755e-07,
"logits/chosen": -2.5329596996307373,
"logits/rejected": -2.5310966968536377,
"logps/chosen": -183.05056762695312,
"logps/rejected": -193.45396423339844,
"loss": 0.5335,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.4021238386631012,
"rewards/margins": 0.8954024314880371,
"rewards/rejected": -1.297526240348816,
"step": 60
},
{
"epoch": 0.9027777777777778,
"grad_norm": 31.818374577978716,
"learning_rate": 8.341873539012443e-07,
"logits/chosen": -2.518287420272827,
"logits/rejected": -2.5078444480895996,
"logps/chosen": -168.2073974609375,
"logps/rejected": -179.64515686035156,
"loss": 0.5272,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -0.6193049550056458,
"rewards/margins": 0.8720922470092773,
"rewards/rejected": -1.4913971424102783,
"step": 65
},
{
"epoch": 0.9722222222222222,
"grad_norm": 30.74282933667216,
"learning_rate": 8.048845933670271e-07,
"logits/chosen": -2.474501371383667,
"logits/rejected": -2.486248731613159,
"logps/chosen": -170.53952026367188,
"logps/rejected": -183.97750854492188,
"loss": 0.516,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -1.0543142557144165,
"rewards/margins": 0.9860897064208984,
"rewards/rejected": -2.0404040813446045,
"step": 70
},
{
"epoch": 1.0416666666666667,
"grad_norm": 20.857882794392346,
"learning_rate": 7.738099689665539e-07,
"logits/chosen": -2.4805214405059814,
"logits/rejected": -2.4716594219207764,
"logps/chosen": -179.7224884033203,
"logps/rejected": -194.9944610595703,
"loss": 0.3668,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -0.5534920692443848,
"rewards/margins": 1.5888177156448364,
"rewards/rejected": -2.1423099040985107,
"step": 75
},
{
"epoch": 1.1111111111111112,
"grad_norm": 19.198293152522673,
"learning_rate": 7.41144073636728e-07,
"logits/chosen": -2.4745230674743652,
"logits/rejected": -2.449141025543213,
"logps/chosen": -166.55409240722656,
"logps/rejected": -190.5934295654297,
"loss": 0.2405,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 0.21927456557750702,
"rewards/margins": 2.280102014541626,
"rewards/rejected": -2.0608274936676025,
"step": 80
},
{
"epoch": 1.1805555555555556,
"grad_norm": 22.513577769114043,
"learning_rate": 7.070767481266492e-07,
"logits/chosen": -2.457094192504883,
"logits/rejected": -2.434199571609497,
"logps/chosen": -168.0836944580078,
"logps/rejected": -188.27374267578125,
"loss": 0.2396,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -0.004831351339817047,
"rewards/margins": 2.4242899417877197,
"rewards/rejected": -2.429121494293213,
"step": 85
},
{
"epoch": 1.25,
"grad_norm": 18.850259072813216,
"learning_rate": 6.718059777212565e-07,
"logits/chosen": -2.4347877502441406,
"logits/rejected": -2.432413101196289,
"logps/chosen": -154.73831176757812,
"logps/rejected": -174.75621032714844,
"loss": 0.2034,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.1518968939781189,
"rewards/margins": 2.676344394683838,
"rewards/rejected": -2.8282413482666016,
"step": 90
},
{
"epoch": 1.3194444444444444,
"grad_norm": 19.10543784815799,
"learning_rate": 6.355367416322778e-07,
"logits/chosen": -2.433638572692871,
"logits/rejected": -2.4281392097473145,
"logps/chosen": -173.74961853027344,
"logps/rejected": -198.87208557128906,
"loss": 0.2048,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": -0.30015555024147034,
"rewards/margins": 2.652449369430542,
"rewards/rejected": -2.9526050090789795,
"step": 95
},
{
"epoch": 1.3888888888888888,
"grad_norm": 19.991814546360683,
"learning_rate": 5.984798217433531e-07,
"logits/chosen": -2.434572696685791,
"logits/rejected": -2.417861223220825,
"logps/chosen": -158.5247802734375,
"logps/rejected": -190.9485626220703,
"loss": 0.217,
"rewards/accuracies": 0.90625,
"rewards/chosen": -0.37773507833480835,
"rewards/margins": 2.8572323322296143,
"rewards/rejected": -3.2349674701690674,
"step": 100
},
{
"epoch": 1.3888888888888888,
"eval_logits/chosen": -2.4349896907806396,
"eval_logits/rejected": -2.40444278717041,
"eval_logps/chosen": -169.47793579101562,
"eval_logps/rejected": -184.55125427246094,
"eval_loss": 0.5166749954223633,
"eval_rewards/accuracies": 0.70703125,
"eval_rewards/chosen": -1.1669622659683228,
"eval_rewards/margins": 1.2195117473602295,
"eval_rewards/rejected": -2.386474132537842,
"eval_runtime": 128.5113,
"eval_samples_per_second": 15.897,
"eval_steps_per_second": 0.249,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 216,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1178822762299392.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}