{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 216,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06944444444444445,
      "grad_norm": 34.922038802469906,
      "learning_rate": 5e-07,
      "logits/chosen": -2.745856761932373,
      "logits/rejected": -2.7519428730010986,
      "logps/chosen": -158.59893798828125,
      "logps/rejected": -160.2094268798828,
      "loss": 0.6939,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": 0.000581090513151139,
      "rewards/margins": -0.00047404784709215164,
      "rewards/rejected": 0.0010551378363743424,
      "step": 5
    },
    {
      "epoch": 0.1388888888888889,
      "grad_norm": 36.413747880379944,
      "learning_rate": 1e-06,
      "logits/chosen": -2.736849546432495,
      "logits/rejected": -2.7453856468200684,
      "logps/chosen": -174.9836883544922,
      "logps/rejected": -171.3789825439453,
      "loss": 0.6883,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.022891324013471603,
      "rewards/margins": 0.0072159310802817345,
      "rewards/rejected": 0.015675392001867294,
      "step": 10
    },
    {
      "epoch": 0.20833333333333334,
      "grad_norm": 37.39831623643963,
      "learning_rate": 9.985471028179154e-07,
      "logits/chosen": -2.6100494861602783,
      "logits/rejected": -2.613814115524292,
      "logps/chosen": -173.74456787109375,
      "logps/rejected": -171.9447784423828,
      "loss": 0.6733,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": 0.21788981556892395,
      "rewards/margins": 0.09981251507997513,
      "rewards/rejected": 0.11807730048894882,
      "step": 15
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 32.21436012894337,
      "learning_rate": 9.94196854912548e-07,
      "logits/chosen": -2.52358341217041,
      "logits/rejected": -2.5229320526123047,
      "logps/chosen": -155.1029815673828,
      "logps/rejected": -159.9917755126953,
      "loss": 0.653,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.3672500252723694,
      "rewards/margins": 0.20777833461761475,
      "rewards/rejected": 0.15947169065475464,
      "step": 20
    },
    {
      "epoch": 0.3472222222222222,
      "grad_norm": 32.0223445235898,
      "learning_rate": 9.869745381355905e-07,
      "logits/chosen": -2.4307353496551514,
      "logits/rejected": -2.429652452468872,
      "logps/chosen": -159.20545959472656,
      "logps/rejected": -158.57797241210938,
      "loss": 0.6327,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": 0.3310449421405792,
      "rewards/margins": 0.22794027626514435,
      "rewards/rejected": 0.10310468822717667,
      "step": 25
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 36.79676539267814,
      "learning_rate": 9.769221256218162e-07,
      "logits/chosen": -2.4173130989074707,
      "logits/rejected": -2.3855977058410645,
      "logps/chosen": -165.49618530273438,
      "logps/rejected": -164.41111755371094,
      "loss": 0.6264,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.17511680722236633,
      "rewards/margins": 0.2607855200767517,
      "rewards/rejected": -0.43590235710144043,
      "step": 30
    },
    {
      "epoch": 0.4861111111111111,
      "grad_norm": 32.497757396344305,
      "learning_rate": 9.64098037858483e-07,
      "logits/chosen": -2.482489824295044,
      "logits/rejected": -2.4674153327941895,
      "logps/chosen": -162.7029571533203,
      "logps/rejected": -164.48619079589844,
      "loss": 0.5914,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.3191075921058655,
      "rewards/margins": 0.362353652715683,
      "rewards/rejected": -0.6814612150192261,
      "step": 35
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 34.40764043054464,
      "learning_rate": 9.485768031694871e-07,
      "logits/chosen": -2.518897771835327,
      "logits/rejected": -2.5143680572509766,
      "logps/chosen": -167.1140899658203,
      "logps/rejected": -168.67987060546875,
      "loss": 0.58,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.037984561175107956,
      "rewards/margins": 0.48854589462280273,
      "rewards/rejected": -0.5265304446220398,
      "step": 40
    },
    {
      "epoch": 0.625,
      "grad_norm": 33.97876407643923,
      "learning_rate": 9.304486245873971e-07,
      "logits/chosen": -2.542407989501953,
      "logits/rejected": -2.510068416595459,
      "logps/chosen": -163.48536682128906,
      "logps/rejected": -164.54147338867188,
      "loss": 0.5815,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.062438905239105225,
      "rewards/margins": 0.5621680021286011,
      "rewards/rejected": -0.6246069073677063,
      "step": 45
    },
    {
      "epoch": 0.6944444444444444,
      "grad_norm": 32.726531127211416,
      "learning_rate": 9.098188556305262e-07,
      "logits/chosen": -2.513942241668701,
      "logits/rejected": -2.51139235496521,
      "logps/chosen": -165.90362548828125,
      "logps/rejected": -170.9777069091797,
      "loss": 0.5342,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.2061791867017746,
      "rewards/margins": 0.6521804928779602,
      "rewards/rejected": -0.8583596348762512,
      "step": 50
    },
    {
      "epoch": 0.6944444444444444,
      "eval_logits/chosen": -2.4995272159576416,
      "eval_logits/rejected": -2.4678475856781006,
      "eval_logps/chosen": -159.87913513183594,
      "eval_logps/rejected": -170.17739868164062,
      "eval_loss": 0.5209046602249146,
      "eval_rewards/accuracies": 0.71484375,
      "eval_rewards/chosen": -0.20708096027374268,
      "eval_rewards/margins": 0.7420069575309753,
      "eval_rewards/rejected": -0.9490878582000732,
      "eval_runtime": 129.0562,
      "eval_samples_per_second": 15.83,
      "eval_steps_per_second": 0.248,
      "step": 50
    },
    {
      "epoch": 0.7638888888888888,
      "grad_norm": 30.615472770103583,
      "learning_rate": 8.868073880316123e-07,
      "logits/chosen": -2.500612497329712,
      "logits/rejected": -2.520470142364502,
      "logps/chosen": -177.12451171875,
      "logps/rejected": -183.56619262695312,
      "loss": 0.5577,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.32517191767692566,
      "rewards/margins": 0.8004968762397766,
      "rewards/rejected": -1.1256687641143799,
      "step": 55
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 32.6842144140191,
      "learning_rate": 8.615479549763755e-07,
      "logits/chosen": -2.5329596996307373,
      "logits/rejected": -2.5310966968536377,
      "logps/chosen": -183.05056762695312,
      "logps/rejected": -193.45396423339844,
      "loss": 0.5335,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.4021238386631012,
      "rewards/margins": 0.8954024314880371,
      "rewards/rejected": -1.297526240348816,
      "step": 60
    },
    {
      "epoch": 0.9027777777777778,
      "grad_norm": 31.818374577978716,
      "learning_rate": 8.341873539012443e-07,
      "logits/chosen": -2.518287420272827,
      "logits/rejected": -2.5078444480895996,
      "logps/chosen": -168.2073974609375,
      "logps/rejected": -179.64515686035156,
      "loss": 0.5272,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.6193049550056458,
      "rewards/margins": 0.8720922470092773,
      "rewards/rejected": -1.4913971424102783,
      "step": 65
    },
    {
      "epoch": 0.9722222222222222,
      "grad_norm": 30.74282933667216,
      "learning_rate": 8.048845933670271e-07,
      "logits/chosen": -2.474501371383667,
      "logits/rejected": -2.486248731613159,
      "logps/chosen": -170.53952026367188,
      "logps/rejected": -183.97750854492188,
      "loss": 0.516,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -1.0543142557144165,
      "rewards/margins": 0.9860897064208984,
      "rewards/rejected": -2.0404040813446045,
      "step": 70
    },
    {
      "epoch": 1.0416666666666667,
      "grad_norm": 20.857882794392346,
      "learning_rate": 7.738099689665539e-07,
      "logits/chosen": -2.4805214405059814,
      "logits/rejected": -2.4716594219207764,
      "logps/chosen": -179.7224884033203,
      "logps/rejected": -194.9944610595703,
      "loss": 0.3668,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.5534920692443848,
      "rewards/margins": 1.5888177156448364,
      "rewards/rejected": -2.1423099040985107,
      "step": 75
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 19.198293152522673,
      "learning_rate": 7.41144073636728e-07,
      "logits/chosen": -2.4745230674743652,
      "logits/rejected": -2.449141025543213,
      "logps/chosen": -166.55409240722656,
      "logps/rejected": -190.5934295654297,
      "loss": 0.2405,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 0.21927456557750702,
      "rewards/margins": 2.280102014541626,
      "rewards/rejected": -2.0608274936676025,
      "step": 80
    },
    {
      "epoch": 1.1805555555555556,
      "grad_norm": 22.513577769114043,
      "learning_rate": 7.070767481266492e-07,
      "logits/chosen": -2.457094192504883,
      "logits/rejected": -2.434199571609497,
      "logps/chosen": -168.0836944580078,
      "logps/rejected": -188.27374267578125,
      "loss": 0.2396,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.004831351339817047,
      "rewards/margins": 2.4242899417877197,
      "rewards/rejected": -2.429121494293213,
      "step": 85
    },
    {
      "epoch": 1.25,
      "grad_norm": 18.850259072813216,
      "learning_rate": 6.718059777212565e-07,
      "logits/chosen": -2.4347877502441406,
      "logits/rejected": -2.432413101196289,
      "logps/chosen": -154.73831176757812,
      "logps/rejected": -174.75621032714844,
      "loss": 0.2034,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.1518968939781189,
      "rewards/margins": 2.676344394683838,
      "rewards/rejected": -2.8282413482666016,
      "step": 90
    },
    {
      "epoch": 1.3194444444444444,
      "grad_norm": 19.10543784815799,
      "learning_rate": 6.355367416322778e-07,
      "logits/chosen": -2.433638572692871,
      "logits/rejected": -2.4281392097473145,
      "logps/chosen": -173.74961853027344,
      "logps/rejected": -198.87208557128906,
      "loss": 0.2048,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -0.30015555024147034,
      "rewards/margins": 2.652449369430542,
      "rewards/rejected": -2.9526050090789795,
      "step": 95
    },
    {
      "epoch": 1.3888888888888888,
      "grad_norm": 19.991814546360683,
      "learning_rate": 5.984798217433531e-07,
      "logits/chosen": -2.434572696685791,
      "logits/rejected": -2.417861223220825,
      "logps/chosen": -158.5247802734375,
      "logps/rejected": -190.9485626220703,
      "loss": 0.217,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": -0.37773507833480835,
      "rewards/margins": 2.8572323322296143,
      "rewards/rejected": -3.2349674701690674,
      "step": 100
    },
    {
      "epoch": 1.3888888888888888,
      "eval_logits/chosen": -2.4349896907806396,
      "eval_logits/rejected": -2.40444278717041,
      "eval_logps/chosen": -169.47793579101562,
      "eval_logps/rejected": -184.55125427246094,
      "eval_loss": 0.5166749954223633,
      "eval_rewards/accuracies": 0.70703125,
      "eval_rewards/chosen": -1.1669622659683228,
      "eval_rewards/margins": 1.2195117473602295,
      "eval_rewards/rejected": -2.386474132537842,
      "eval_runtime": 128.5113,
      "eval_samples_per_second": 15.897,
      "eval_steps_per_second": 0.249,
      "step": 100
    },
    {
      "epoch": 1.4583333333333333,
      "grad_norm": 28.299743381287744,
      "learning_rate": 5.608505776324157e-07,
      "logits/chosen": -2.4249603748321533,
      "logits/rejected": -2.4180781841278076,
      "logps/chosen": -167.8837432861328,
      "logps/rejected": -196.94412231445312,
      "loss": 0.2013,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.45704206824302673,
      "rewards/margins": 3.021711826324463,
      "rewards/rejected": -3.4787545204162598,
      "step": 105
    },
    {
      "epoch": 1.5277777777777777,
      "grad_norm": 20.695727476520528,
      "learning_rate": 5.228676949903973e-07,
      "logits/chosen": -2.374063491821289,
      "logits/rejected": -2.3558177947998047,
      "logps/chosen": -176.4818572998047,
      "logps/rejected": -208.8624267578125,
      "loss": 0.1943,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": -0.3167189359664917,
      "rewards/margins": 3.428766965866089,
      "rewards/rejected": -3.7454864978790283,
      "step": 110
    },
    {
      "epoch": 1.5972222222222223,
      "grad_norm": 25.767538093776388,
      "learning_rate": 4.847519147099294e-07,
      "logits/chosen": -2.302668333053589,
      "logits/rejected": -2.28350567817688,
      "logps/chosen": -169.49490356445312,
      "logps/rejected": -201.66481018066406,
      "loss": 0.1966,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -0.7796251177787781,
      "rewards/margins": 2.979997158050537,
      "rewards/rejected": -3.7596218585968018,
      "step": 115
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 25.2139533526534,
      "learning_rate": 4.46724750030062e-07,
      "logits/chosen": -2.2816123962402344,
      "logits/rejected": -2.2772622108459473,
      "logps/chosen": -172.01380920410156,
      "logps/rejected": -204.12197875976562,
      "loss": 0.1904,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": -0.6298034191131592,
      "rewards/margins": 3.174367904663086,
      "rewards/rejected": -3.804171323776245,
      "step": 120
    },
    {
      "epoch": 1.7361111111111112,
      "grad_norm": 23.046670885435994,
      "learning_rate": 4.0900719919241935e-07,
      "logits/chosen": -2.2773830890655518,
      "logits/rejected": -2.252821207046509,
      "logps/chosen": -174.70835876464844,
      "logps/rejected": -208.0013885498047,
      "loss": 0.1926,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.6799504160881042,
      "rewards/margins": 3.1291940212249756,
      "rewards/rejected": -3.8091444969177246,
      "step": 125
    },
    {
      "epoch": 1.8055555555555556,
      "grad_norm": 19.935190350579614,
      "learning_rate": 3.7181846109031e-07,
      "logits/chosen": -2.232686996459961,
      "logits/rejected": -2.187178134918213,
      "logps/chosen": -160.58908081054688,
      "logps/rejected": -191.36898803710938,
      "loss": 0.2,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": -0.7336459159851074,
      "rewards/margins": 2.8751566410064697,
      "rewards/rejected": -3.608802318572998,
      "step": 130
    },
    {
      "epoch": 1.875,
      "grad_norm": 22.85505851727533,
      "learning_rate": 3.353746613749093e-07,
      "logits/chosen": -2.229166030883789,
      "logits/rejected": -2.2128987312316895,
      "logps/chosen": -171.8442840576172,
      "logps/rejected": -203.36578369140625,
      "loss": 0.1983,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": -0.3919256329536438,
      "rewards/margins": 3.1109907627105713,
      "rewards/rejected": -3.5029163360595703,
      "step": 135
    },
    {
      "epoch": 1.9444444444444444,
      "grad_norm": 20.994239018606788,
      "learning_rate": 2.9988759642186093e-07,
      "logits/chosen": -2.2110824584960938,
      "logits/rejected": -2.1866531372070312,
      "logps/chosen": -160.40597534179688,
      "logps/rejected": -193.88641357421875,
      "loss": 0.1803,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": -0.6127753257751465,
      "rewards/margins": 3.024134397506714,
      "rewards/rejected": -3.6369099617004395,
      "step": 140
    },
    {
      "epoch": 2.013888888888889,
      "grad_norm": 18.007789953128686,
      "learning_rate": 2.655635024578483e-07,
      "logits/chosen": -2.2402732372283936,
      "logits/rejected": -2.176499843597412,
      "logps/chosen": -165.99827575683594,
      "logps/rejected": -203.1928253173828,
      "loss": 0.1666,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -0.5927726030349731,
      "rewards/margins": 3.829090118408203,
      "rewards/rejected": -4.421862602233887,
      "step": 145
    },
    {
      "epoch": 2.0833333333333335,
      "grad_norm": 17.845221787073083,
      "learning_rate": 2.3260185700046292e-07,
      "logits/chosen": -2.213283061981201,
      "logits/rejected": -2.1884255409240723,
      "logps/chosen": -163.85293579101562,
      "logps/rejected": -204.19644165039062,
      "loss": 0.105,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -0.7582882046699524,
      "rewards/margins": 3.970874786376953,
      "rewards/rejected": -4.729162693023682,
      "step": 150
    },
    {
      "epoch": 2.0833333333333335,
      "eval_logits/chosen": -2.2022910118103027,
      "eval_logits/rejected": -2.163472890853882,
      "eval_logps/chosen": -181.17343139648438,
      "eval_logps/rejected": -200.37542724609375,
      "eval_loss": 0.5275049805641174,
      "eval_rewards/accuracies": 0.73828125,
      "eval_rewards/chosen": -2.336512565612793,
      "eval_rewards/margins": 1.6323777437210083,
      "eval_rewards/rejected": -3.9688901901245117,
      "eval_runtime": 128.4507,
      "eval_samples_per_second": 15.905,
      "eval_steps_per_second": 0.249,
      "step": 150
    },
    {
      "epoch": 2.1527777777777777,
      "grad_norm": 16.5358223115486,
      "learning_rate": 2.0119421957691218e-07,
      "logits/chosen": -2.2433078289031982,
      "logits/rejected": -2.192546844482422,
      "logps/chosen": -183.214599609375,
      "logps/rejected": -228.466064453125,
      "loss": 0.0922,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.2263261079788208,
      "rewards/margins": 4.63826847076416,
      "rewards/rejected": -5.86459493637085,
      "step": 155
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 16.243253588165253,
      "learning_rate": 1.7152311845883094e-07,
      "logits/chosen": -2.2243053913116455,
      "logits/rejected": -2.1990137100219727,
      "logps/chosen": -183.52928161621094,
      "logps/rejected": -225.4740753173828,
      "loss": 0.0882,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.0933525562286377,
      "rewards/margins": 4.847748756408691,
      "rewards/rejected": -5.941100120544434,
      "step": 160
    },
    {
      "epoch": 2.2916666666666665,
      "grad_norm": 14.143249761494847,
      "learning_rate": 1.4376098988303404e-07,
      "logits/chosen": -2.265902042388916,
      "logits/rejected": -2.2371320724487305,
      "logps/chosen": -178.1345977783203,
      "logps/rejected": -228.50906372070312,
      "loss": 0.0853,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.1710445880889893,
      "rewards/margins": 4.946959018707275,
      "rewards/rejected": -6.1180033683776855,
      "step": 165
    },
    {
      "epoch": 2.361111111111111,
      "grad_norm": 17.773700067929525,
      "learning_rate": 1.1806917592302761e-07,
      "logits/chosen": -2.2693705558776855,
      "logits/rejected": -2.2570414543151855,
      "logps/chosen": -169.011474609375,
      "logps/rejected": -220.2297821044922,
      "loss": 0.0769,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.048015832901001,
      "rewards/margins": 4.869006633758545,
      "rewards/rejected": -5.917021751403809,
      "step": 170
    },
    {
      "epoch": 2.4305555555555554,
      "grad_norm": 16.131832286323938,
      "learning_rate": 9.459698683523204e-08,
      "logits/chosen": -2.2916018962860107,
      "logits/rejected": -2.2644224166870117,
      "logps/chosen": -179.8535919189453,
      "logps/rejected": -232.84213256835938,
      "loss": 0.0838,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -1.5861546993255615,
      "rewards/margins": 4.993525981903076,
      "rewards/rejected": -6.579680442810059,
      "step": 175
    },
    {
      "epoch": 2.5,
      "grad_norm": 25.688615524154926,
      "learning_rate": 7.348083332917926e-08,
      "logits/chosen": -2.3048667907714844,
      "logits/rejected": -2.2622156143188477,
      "logps/chosen": -187.1690216064453,
      "logps/rejected": -231.0559844970703,
      "loss": 0.1002,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -1.6769403219223022,
      "rewards/margins": 4.427250862121582,
      "rewards/rejected": -6.104191780090332,
      "step": 180
    },
    {
      "epoch": 2.5694444444444446,
      "grad_norm": 17.118168827295822,
      "learning_rate": 5.484343380457124e-08,
      "logits/chosen": -2.2761294841766357,
      "logits/rejected": -2.241340160369873,
      "logps/chosen": -165.86204528808594,
      "logps/rejected": -211.8990936279297,
      "loss": 0.0951,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -1.2597142457962036,
      "rewards/margins": 4.591185092926025,
      "rewards/rejected": -5.850900173187256,
      "step": 185
    },
    {
      "epoch": 2.638888888888889,
      "grad_norm": 14.646194270585417,
      "learning_rate": 3.879310116241041e-08,
      "logits/chosen": -2.2710745334625244,
      "logits/rejected": -2.2402360439300537,
      "logps/chosen": -171.69906616210938,
      "logps/rejected": -220.8769073486328,
      "loss": 0.074,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -0.8112651705741882,
      "rewards/margins": 4.888184547424316,
      "rewards/rejected": -5.699450492858887,
      "step": 190
    },
    {
      "epoch": 2.7083333333333335,
      "grad_norm": 14.441833627511247,
      "learning_rate": 2.5423113334966218e-08,
      "logits/chosen": -2.294968605041504,
      "logits/rejected": -2.231370449066162,
      "logps/chosen": -172.13613891601562,
      "logps/rejected": -215.5753631591797,
      "loss": 0.081,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -0.8212493658065796,
      "rewards/margins": 4.537470817565918,
      "rewards/rejected": -5.358720302581787,
      "step": 195
    },
    {
      "epoch": 2.7777777777777777,
      "grad_norm": 17.8264257139916,
      "learning_rate": 1.4811171192794624e-08,
      "logits/chosen": -2.2626068592071533,
      "logits/rejected": -2.2442564964294434,
      "logps/chosen": -178.3024139404297,
      "logps/rejected": -224.14501953125,
      "loss": 0.0911,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -1.1280481815338135,
      "rewards/margins": 4.719998359680176,
      "rewards/rejected": -5.848046779632568,
      "step": 200
    },
    {
      "epoch": 2.7777777777777777,
      "eval_logits/chosen": -2.256969690322876,
      "eval_logits/rejected": -2.2202577590942383,
      "eval_logps/chosen": -183.34054565429688,
      "eval_logps/rejected": -202.82171630859375,
      "eval_loss": 0.5503418445587158,
      "eval_rewards/accuracies": 0.72265625,
      "eval_rewards/chosen": -2.5532219409942627,
      "eval_rewards/margins": 1.6602981090545654,
      "eval_rewards/rejected": -4.213520050048828,
      "eval_runtime": 128.2168,
      "eval_samples_per_second": 15.934,
      "eval_steps_per_second": 0.25,
      "step": 200
    },
    {
      "epoch": 2.8472222222222223,
      "grad_norm": 16.324224179238254,
      "learning_rate": 7.018946979234997e-09,
      "logits/chosen": -2.2659378051757812,
      "logits/rejected": -2.2529799938201904,
      "logps/chosen": -168.2804412841797,
      "logps/rejected": -219.5007781982422,
      "loss": 0.0741,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -1.1989504098892212,
      "rewards/margins": 4.789656162261963,
      "rewards/rejected": -5.9886064529418945,
      "step": 205
    },
    {
      "epoch": 2.9166666666666665,
      "grad_norm": 17.2695146985847,
      "learning_rate": 2.0917258966953734e-09,
      "logits/chosen": -2.2585835456848145,
      "logits/rejected": -2.247985363006592,
      "logps/chosen": -184.29794311523438,
      "logps/rejected": -223.56027221679688,
      "loss": 0.0895,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.2817625999450684,
      "rewards/margins": 4.492359161376953,
      "rewards/rejected": -5.774121284484863,
      "step": 210
    },
    {
      "epoch": 2.986111111111111,
      "grad_norm": 23.734981579706652,
      "learning_rate": 5.814292768108187e-11,
      "logits/chosen": -2.2835826873779297,
      "logits/rejected": -2.2543907165527344,
      "logps/chosen": -181.99880981445312,
      "logps/rejected": -226.61776733398438,
      "loss": 0.083,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.232177495956421,
      "rewards/margins": 4.671173095703125,
      "rewards/rejected": -5.903350353240967,
      "step": 215
    },
    {
      "epoch": 3.0,
      "step": 216,
      "total_flos": 2546799910846464.0,
      "train_loss": 0.29666884402158084,
      "train_runtime": 7681.0779,
      "train_samples_per_second": 7.181,
      "train_steps_per_second": 0.028
    }
  ],
  "logging_steps": 5,
  "max_steps": 216,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2546799910846464.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}