{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9447852760736195,
  "eval_steps": 40,
  "global_step": 120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12269938650306748,
      "grad_norm": 86.20452852397518,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7444097995758057,
      "logits/rejected": -2.7274601459503174,
      "logps/chosen": -360.38885498046875,
      "logps/rejected": -252.59326171875,
      "loss": 0.686,
      "rewards/accuracies": 0.3687500059604645,
      "rewards/chosen": 0.03574826195836067,
      "rewards/margins": 0.01679060235619545,
      "rewards/rejected": 0.01895766146481037,
      "step": 5
    },
    {
      "epoch": 0.24539877300613497,
      "grad_norm": 49.11775497623949,
      "learning_rate": 1e-06,
      "logits/chosen": -2.7090232372283936,
      "logits/rejected": -2.695378065109253,
      "logps/chosen": -350.5760498046875,
      "logps/rejected": -244.6541748046875,
      "loss": 0.5653,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": 1.1125060319900513,
      "rewards/margins": 0.48822182416915894,
      "rewards/rejected": 0.6242840886116028,
      "step": 10
    },
    {
      "epoch": 0.36809815950920244,
      "grad_norm": 50.2613096520658,
      "learning_rate": 9.949107209404663e-07,
      "logits/chosen": -2.5219290256500244,
      "logits/rejected": -2.520174026489258,
      "logps/chosen": -337.64007568359375,
      "logps/rejected": -243.67855834960938,
      "loss": 0.4946,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": 3.0655035972595215,
      "rewards/margins": 1.6884444952011108,
      "rewards/rejected": 1.3770592212677002,
      "step": 15
    },
    {
      "epoch": 0.49079754601226994,
      "grad_norm": 44.523153881937354,
      "learning_rate": 9.797464868072486e-07,
      "logits/chosen": -2.4309468269348145,
      "logits/rejected": -2.414785623550415,
      "logps/chosen": -334.6103820800781,
      "logps/rejected": -232.5205078125,
      "loss": 0.5164,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": 3.701667070388794,
      "rewards/margins": 2.2039473056793213,
      "rewards/rejected": 1.497719645500183,
      "step": 20
    },
    {
      "epoch": 0.6134969325153374,
      "grad_norm": 47.41094231683796,
      "learning_rate": 9.548159976772592e-07,
      "logits/chosen": -2.3079915046691895,
      "logits/rejected": -2.274071216583252,
      "logps/chosen": -327.0514221191406,
      "logps/rejected": -242.24111938476562,
      "loss": 0.4801,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": 3.8270351886749268,
      "rewards/margins": 2.360003709793091,
      "rewards/rejected": 1.4670313596725464,
      "step": 25
    },
    {
      "epoch": 0.7361963190184049,
      "grad_norm": 47.15109197005976,
      "learning_rate": 9.206267664155906e-07,
      "logits/chosen": -2.241680860519409,
      "logits/rejected": -2.202399492263794,
      "logps/chosen": -316.30059814453125,
      "logps/rejected": -225.1979522705078,
      "loss": 0.5095,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 3.4974567890167236,
      "rewards/margins": 2.2280030250549316,
      "rewards/rejected": 1.2694536447525024,
      "step": 30
    },
    {
      "epoch": 0.8588957055214724,
      "grad_norm": 44.93610699286752,
      "learning_rate": 8.778747871771291e-07,
      "logits/chosen": -2.2071456909179688,
      "logits/rejected": -2.178459882736206,
      "logps/chosen": -339.3052673339844,
      "logps/rejected": -237.75088500976562,
      "loss": 0.437,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": 3.4862112998962402,
      "rewards/margins": 2.48161244392395,
      "rewards/rejected": 1.0045992136001587,
      "step": 35
    },
    {
      "epoch": 0.9815950920245399,
      "grad_norm": 42.025649679688485,
      "learning_rate": 8.274303669726426e-07,
      "logits/chosen": -2.189873218536377,
      "logits/rejected": -2.1432714462280273,
      "logps/chosen": -324.4537658691406,
      "logps/rejected": -253.5149383544922,
      "loss": 0.474,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 3.1547563076019287,
      "rewards/margins": 2.2278854846954346,
      "rewards/rejected": 0.926871120929718,
      "step": 40
    },
    {
      "epoch": 0.9815950920245399,
      "eval_logits/chosen": -2.2103025913238525,
      "eval_logits/rejected": -2.166433095932007,
      "eval_logps/chosen": -312.3291015625,
      "eval_logps/rejected": -221.68295288085938,
      "eval_loss": 0.4699374735355377,
      "eval_rewards/accuracies": 0.8552631735801697,
      "eval_rewards/chosen": 3.133356809616089,
      "eval_rewards/margins": 2.2975046634674072,
      "eval_rewards/rejected": 0.8358522653579712,
      "eval_runtime": 79.9413,
      "eval_samples_per_second": 14.423,
      "eval_steps_per_second": 0.238,
      "step": 40
    },
    {
      "epoch": 1.1042944785276074,
      "grad_norm": 13.276718823898912,
      "learning_rate": 7.703204087277988e-07,
      "logits/chosen": -2.1988937854766846,
      "logits/rejected": -2.1543939113616943,
      "logps/chosen": -323.3505554199219,
      "logps/rejected": -250.4222869873047,
      "loss": 0.1591,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 3.8928611278533936,
      "rewards/margins": 3.514218807220459,
      "rewards/rejected": 0.37864241003990173,
      "step": 45
    },
    {
      "epoch": 1.2269938650306749,
      "grad_norm": 11.82053956546381,
      "learning_rate": 7.077075065009433e-07,
      "logits/chosen": -2.236666202545166,
      "logits/rejected": -2.2079665660858154,
      "logps/chosen": -310.46380615234375,
      "logps/rejected": -259.19451904296875,
      "loss": 0.1047,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 4.311354160308838,
      "rewards/margins": 4.656617164611816,
      "rewards/rejected": -0.3452628552913666,
      "step": 50
    },
    {
      "epoch": 1.3496932515337423,
      "grad_norm": 20.37110322056987,
      "learning_rate": 6.408662784207149e-07,
      "logits/chosen": -2.267946243286133,
      "logits/rejected": -2.231114387512207,
      "logps/chosen": -314.8283996582031,
      "logps/rejected": -245.3289794921875,
      "loss": 0.1118,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 4.451395511627197,
      "rewards/margins": 4.734715938568115,
      "rewards/rejected": -0.28331995010375977,
      "step": 55
    },
    {
      "epoch": 1.4723926380368098,
      "grad_norm": 17.168264846885307,
      "learning_rate": 5.711574191366427e-07,
      "logits/chosen": -2.309600830078125,
      "logits/rejected": -2.2622950077056885,
      "logps/chosen": -326.2135925292969,
      "logps/rejected": -246.0592803955078,
      "loss": 0.1226,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 4.813075065612793,
      "rewards/margins": 4.994798183441162,
      "rewards/rejected": -0.18172283470630646,
      "step": 60
    },
    {
      "epoch": 1.5950920245398774,
      "grad_norm": 18.304060726243097,
      "learning_rate": 5e-07,
      "logits/chosen": -2.287087917327881,
      "logits/rejected": -2.275485038757324,
      "logps/chosen": -332.449462890625,
      "logps/rejected": -237.60238647460938,
      "loss": 0.1335,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 4.406615734100342,
      "rewards/margins": 4.897850036621094,
      "rewards/rejected": -0.4912341237068176,
      "step": 65
    },
    {
      "epoch": 1.7177914110429446,
      "grad_norm": 20.292317388094563,
      "learning_rate": 4.2884258086335745e-07,
      "logits/chosen": -2.3330864906311035,
      "logits/rejected": -2.2979342937469482,
      "logps/chosen": -308.66900634765625,
      "logps/rejected": -257.72625732421875,
      "loss": 0.1206,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 4.669040679931641,
      "rewards/margins": 5.335470199584961,
      "rewards/rejected": -0.6664296984672546,
      "step": 70
    },
    {
      "epoch": 1.8404907975460123,
      "grad_norm": 20.9581029042108,
      "learning_rate": 3.591337215792851e-07,
      "logits/chosen": -2.3408362865448,
      "logits/rejected": -2.3560574054718018,
      "logps/chosen": -291.6307678222656,
      "logps/rejected": -275.3473815917969,
      "loss": 0.1405,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 4.201301574707031,
      "rewards/margins": 4.819304466247559,
      "rewards/rejected": -0.6180022954940796,
      "step": 75
    },
    {
      "epoch": 1.9631901840490797,
      "grad_norm": 18.92082476950038,
      "learning_rate": 2.922924934990568e-07,
      "logits/chosen": -2.380004405975342,
      "logits/rejected": -2.3659119606018066,
      "logps/chosen": -306.8321228027344,
      "logps/rejected": -278.3690185546875,
      "loss": 0.1662,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 4.020644187927246,
      "rewards/margins": 5.037873268127441,
      "rewards/rejected": -1.0172293186187744,
      "step": 80
    },
    {
      "epoch": 1.9631901840490797,
      "eval_logits/chosen": -2.3872616291046143,
      "eval_logits/rejected": -2.3660154342651367,
      "eval_logps/chosen": -309.1402282714844,
      "eval_logps/rejected": -227.7744903564453,
      "eval_loss": 0.5018076300621033,
      "eval_rewards/accuracies": 0.8618420958518982,
      "eval_rewards/chosen": 3.452242374420166,
      "eval_rewards/margins": 3.2255470752716064,
      "eval_rewards/rejected": 0.22669506072998047,
      "eval_runtime": 79.1104,
      "eval_samples_per_second": 14.575,
      "eval_steps_per_second": 0.24,
      "step": 80
    },
    {
      "epoch": 2.085889570552147,
      "grad_norm": 12.572625564658129,
      "learning_rate": 2.2967959127220137e-07,
      "logits/chosen": -2.3931784629821777,
      "logits/rejected": -2.3873510360717773,
      "logps/chosen": -308.5347595214844,
      "logps/rejected": -261.3822021484375,
      "loss": 0.0795,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 4.7503461837768555,
      "rewards/margins": 5.960836887359619,
      "rewards/rejected": -1.210490345954895,
      "step": 85
    },
    {
      "epoch": 2.208588957055215,
      "grad_norm": 16.240567224727286,
      "learning_rate": 1.725696330273575e-07,
      "logits/chosen": -2.376250743865967,
      "logits/rejected": -2.346761703491211,
      "logps/chosen": -332.0423889160156,
      "logps/rejected": -256.23468017578125,
      "loss": 0.0523,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": 4.7371039390563965,
      "rewards/margins": 5.809103965759277,
      "rewards/rejected": -1.0720010995864868,
      "step": 90
    },
    {
      "epoch": 2.331288343558282,
      "grad_norm": 7.52423631295579,
      "learning_rate": 1.2212521282287093e-07,
      "logits/chosen": -2.381354808807373,
      "logits/rejected": -2.3519160747528076,
      "logps/chosen": -301.87396240234375,
      "logps/rejected": -252.88778686523438,
      "loss": 0.0418,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 4.879428863525391,
      "rewards/margins": 5.731529712677002,
      "rewards/rejected": -0.8520998954772949,
      "step": 95
    },
    {
      "epoch": 2.4539877300613497,
      "grad_norm": 7.461131760503096,
      "learning_rate": 7.937323358440934e-08,
      "logits/chosen": -2.3626861572265625,
      "logits/rejected": -2.370225667953491,
      "logps/chosen": -320.0696716308594,
      "logps/rejected": -272.7176208496094,
      "loss": 0.0398,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 4.83002233505249,
      "rewards/margins": 5.829442977905273,
      "rewards/rejected": -0.9994203448295593,
      "step": 100
    },
    {
      "epoch": 2.5766871165644174,
      "grad_norm": 8.366519661393047,
      "learning_rate": 4.518400232274078e-08,
      "logits/chosen": -2.362389087677002,
      "logits/rejected": -2.341630220413208,
      "logps/chosen": -301.3256530761719,
      "logps/rejected": -280.96820068359375,
      "loss": 0.0387,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": 4.926737308502197,
      "rewards/margins": 5.911121368408203,
      "rewards/rejected": -0.9843841791152954,
      "step": 105
    },
    {
      "epoch": 2.6993865030674846,
      "grad_norm": 15.561698800102333,
      "learning_rate": 2.025351319275137e-08,
      "logits/chosen": -2.3739213943481445,
      "logits/rejected": -2.350820779800415,
      "logps/chosen": -292.09820556640625,
      "logps/rejected": -268.2146301269531,
      "loss": 0.056,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 4.876989364624023,
      "rewards/margins": 5.988296985626221,
      "rewards/rejected": -1.1113078594207764,
      "step": 110
    },
    {
      "epoch": 2.8220858895705523,
      "grad_norm": 11.646568178986305,
      "learning_rate": 5.0892790595336575e-09,
      "logits/chosen": -2.364748001098633,
      "logits/rejected": -2.348578453063965,
      "logps/chosen": -293.08319091796875,
      "logps/rejected": -246.6769256591797,
      "loss": 0.0543,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 4.639247894287109,
      "rewards/margins": 5.979287147521973,
      "rewards/rejected": -1.3400390148162842,
      "step": 115
    },
    {
      "epoch": 2.9447852760736195,
      "grad_norm": 7.012491202589339,
      "learning_rate": 0.0,
      "logits/chosen": -2.355649709701538,
      "logits/rejected": -2.3410375118255615,
      "logps/chosen": -299.503173828125,
      "logps/rejected": -282.52447509765625,
      "loss": 0.0328,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": 4.639992713928223,
      "rewards/margins": 6.344868183135986,
      "rewards/rejected": -1.704876184463501,
      "step": 120
    },
    {
      "epoch": 2.9447852760736195,
      "eval_logits/chosen": -2.3613531589508057,
      "eval_logits/rejected": -2.339138984680176,
      "eval_logps/chosen": -307.6524353027344,
      "eval_logps/rejected": -228.04098510742188,
      "eval_loss": 0.5027558207511902,
      "eval_rewards/accuracies": 0.875,
      "eval_rewards/chosen": 3.6010191440582275,
      "eval_rewards/margins": 3.400972843170166,
      "eval_rewards/rejected": 0.2000463604927063,
      "eval_runtime": 79.1483,
      "eval_samples_per_second": 14.568,
      "eval_steps_per_second": 0.24,
      "step": 120
    },
    {
      "epoch": 2.9447852760736195,
      "step": 120,
      "total_flos": 1414680891359232.0,
      "train_loss": 0.23404777497053147,
      "train_runtime": 4394.1396,
      "train_samples_per_second": 7.082,
      "train_steps_per_second": 0.027
    }
  ],
  "logging_steps": 5,
  "max_steps": 120,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 40,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1414680891359232.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}