{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9907692307692306,
  "eval_steps": 40,
  "global_step": 243,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06153846153846154,
      "grad_norm": 67.0723563361413,
      "learning_rate": 5e-07,
      "logits/chosen": -2.709871530532837,
      "logits/rejected": -2.7169337272644043,
      "logps/chosen": -301.914306640625,
      "logps/rejected": -224.25662231445312,
      "loss": 0.6893,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": 0.021815577521920204,
      "rewards/margins": 0.006514790467917919,
      "rewards/rejected": 0.015300785191357136,
      "step": 5
    },
    {
      "epoch": 0.12307692307692308,
      "grad_norm": 63.50419416702775,
      "learning_rate": 1e-06,
      "logits/chosen": -2.673720598220825,
      "logits/rejected": -2.6661205291748047,
      "logps/chosen": -289.4712829589844,
      "logps/rejected": -227.0055694580078,
      "loss": 0.6229,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.7337898015975952,
      "rewards/margins": 0.23366117477416992,
      "rewards/rejected": 0.5001285672187805,
      "step": 10
    },
    {
      "epoch": 0.18461538461538463,
      "grad_norm": 50.38404433953605,
      "learning_rate": 9.98864195911451e-07,
      "logits/chosen": -2.5014748573303223,
      "logits/rejected": -2.4808177947998047,
      "logps/chosen": -269.33612060546875,
      "logps/rejected": -221.9925079345703,
      "loss": 0.5587,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 1.8688875436782837,
      "rewards/margins": 0.7238284945487976,
      "rewards/rejected": 1.1450591087341309,
      "step": 15
    },
    {
      "epoch": 0.24615384615384617,
      "grad_norm": 46.1003816476988,
      "learning_rate": 9.95461943849514e-07,
      "logits/chosen": -2.3574154376983643,
      "logits/rejected": -2.3489270210266113,
      "logps/chosen": -279.2587585449219,
      "logps/rejected": -240.3167724609375,
      "loss": 0.5578,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 2.3487281799316406,
      "rewards/margins": 1.1014046669006348,
      "rewards/rejected": 1.2473232746124268,
      "step": 20
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 46.94795150121206,
      "learning_rate": 9.898087009813985e-07,
      "logits/chosen": -2.298891544342041,
      "logits/rejected": -2.2741196155548096,
      "logps/chosen": -274.5340576171875,
      "logps/rejected": -243.38784790039062,
      "loss": 0.5498,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": 2.5186121463775635,
      "rewards/margins": 1.7126624584197998,
      "rewards/rejected": 0.8059496879577637,
      "step": 25
    },
    {
      "epoch": 0.36923076923076925,
      "grad_norm": 46.91929359569832,
      "learning_rate": 9.819301512125564e-07,
      "logits/chosen": -2.2155728340148926,
      "logits/rejected": -2.1926393508911133,
      "logps/chosen": -303.2185363769531,
      "logps/rejected": -230.5345001220703,
      "loss": 0.5365,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 2.6670188903808594,
      "rewards/margins": 1.5836530923843384,
      "rewards/rejected": 1.0833656787872314,
      "step": 30
    },
    {
      "epoch": 0.4307692307692308,
      "grad_norm": 39.966430884552075,
      "learning_rate": 9.718620884991454e-07,
      "logits/chosen": -2.1086268424987793,
      "logits/rejected": -2.077179193496704,
      "logps/chosen": -293.8847351074219,
      "logps/rejected": -250.50009155273438,
      "loss": 0.539,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 2.3622381687164307,
      "rewards/margins": 1.3279569149017334,
      "rewards/rejected": 1.0342811346054077,
      "step": 35
    },
    {
      "epoch": 0.49230769230769234,
      "grad_norm": 39.51653677279534,
      "learning_rate": 9.596502542283398e-07,
      "logits/chosen": -2.0062007904052734,
      "logits/rejected": -1.9648046493530273,
      "logps/chosen": -271.65533447265625,
      "logps/rejected": -209.97476196289062,
      "loss": 0.5436,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": 2.328826665878296,
      "rewards/margins": 1.5009214878082275,
      "rewards/rejected": 0.8279051780700684,
      "step": 40
    },
    {
      "epoch": 0.49230769230769234,
      "eval_logits/chosen": -2.035125732421875,
      "eval_logits/rejected": -2.0087647438049316,
      "eval_logps/chosen": -303.9334411621094,
      "eval_logps/rejected": -223.16908264160156,
      "eval_loss": 0.5011464357376099,
      "eval_rewards/accuracies": 0.7736486196517944,
      "eval_rewards/chosen": 2.5953011512756348,
      "eval_rewards/margins": 1.6505608558654785,
      "eval_rewards/rejected": 0.9447402954101562,
      "eval_runtime": 156.8895,
      "eval_samples_per_second": 14.692,
      "eval_steps_per_second": 0.236,
      "step": 40
    },
    {
      "epoch": 0.5538461538461539,
      "grad_norm": 39.53692648423416,
      "learning_rate": 9.453501294053137e-07,
      "logits/chosen": -2.023144245147705,
      "logits/rejected": -2.0024471282958984,
      "logps/chosen": -288.55859375,
      "logps/rejected": -237.53579711914062,
      "loss": 0.5107,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 2.411022663116455,
      "rewards/margins": 1.7200405597686768,
      "rewards/rejected": 0.6909819841384888,
      "step": 45
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 45.18002841742545,
      "learning_rate": 9.2902668259103e-07,
      "logits/chosen": -2.0483999252319336,
      "logits/rejected": -1.9713916778564453,
      "logps/chosen": -285.7639465332031,
      "logps/rejected": -219.0935516357422,
      "loss": 0.5436,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 2.238405466079712,
      "rewards/margins": 1.7145936489105225,
      "rewards/rejected": 0.5238116979598999,
      "step": 50
    },
    {
      "epoch": 0.676923076923077,
      "grad_norm": 36.94795750119324,
      "learning_rate": 9.107540747360123e-07,
      "logits/chosen": -2.019026756286621,
      "logits/rejected": -1.9596664905548096,
      "logps/chosen": -288.3727111816406,
      "logps/rejected": -236.1953582763672,
      "loss": 0.5107,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": 2.0629780292510986,
      "rewards/margins": 1.6129209995269775,
      "rewards/rejected": 0.45005717873573303,
      "step": 55
    },
    {
      "epoch": 0.7384615384615385,
      "grad_norm": 45.823356835075316,
      "learning_rate": 8.906153222511012e-07,
      "logits/chosen": -2.1011242866516113,
      "logits/rejected": -2.058206081390381,
      "logps/chosen": -267.6994934082031,
      "logps/rejected": -240.79833984375,
      "loss": 0.5475,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 2.305724620819092,
      "rewards/margins": 1.3877848386764526,
      "rewards/rejected": 0.9179398417472839,
      "step": 60
    },
    {
      "epoch": 0.8,
      "grad_norm": 41.349964597831146,
      "learning_rate": 8.687019198459393e-07,
      "logits/chosen": -2.26322078704834,
      "logits/rejected": -2.2332780361175537,
      "logps/chosen": -284.81500244140625,
      "logps/rejected": -243.53466796875,
      "loss": 0.5145,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 2.149193525314331,
      "rewards/margins": 1.4158048629760742,
      "rewards/rejected": 0.7333890199661255,
      "step": 65
    },
    {
      "epoch": 0.8615384615384616,
      "grad_norm": 35.03982177026574,
      "learning_rate": 8.451134248487099e-07,
      "logits/chosen": -2.3586983680725098,
      "logits/rejected": -2.338914394378662,
      "logps/chosen": -301.6893005371094,
      "logps/rejected": -228.70260620117188,
      "loss": 0.4817,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": 2.474177360534668,
      "rewards/margins": 2.0998575687408447,
      "rewards/rejected": 0.3743199408054352,
      "step": 70
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 36.788375659444604,
      "learning_rate": 8.199570048956553e-07,
      "logits/chosen": -2.4227261543273926,
      "logits/rejected": -2.4093117713928223,
      "logps/chosen": -297.9197082519531,
      "logps/rejected": -216.85360717773438,
      "loss": 0.4727,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 1.9344415664672852,
      "rewards/margins": 2.026484727859497,
      "rewards/rejected": -0.09204334020614624,
      "step": 75
    },
    {
      "epoch": 0.9846153846153847,
      "grad_norm": 29.272816983661464,
      "learning_rate": 7.933469510453187e-07,
      "logits/chosen": -2.3556580543518066,
      "logits/rejected": -2.3152148723602295,
      "logps/chosen": -286.50604248046875,
      "logps/rejected": -264.03253173828125,
      "loss": 0.4689,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": 1.6535011529922485,
      "rewards/margins": 2.2574427127838135,
      "rewards/rejected": -0.6039413213729858,
      "step": 80
    },
    {
      "epoch": 0.9846153846153847,
      "eval_logits/chosen": -2.2517545223236084,
      "eval_logits/rejected": -2.229193925857544,
      "eval_logps/chosen": -311.55841064453125,
      "eval_logps/rejected": -235.54119873046875,
      "eval_loss": 0.47693783044815063,
      "eval_rewards/accuracies": 0.8074324131011963,
      "eval_rewards/chosen": 1.8328040838241577,
      "eval_rewards/margins": 2.1252739429473877,
      "eval_rewards/rejected": -0.2924700975418091,
      "eval_runtime": 154.8855,
      "eval_samples_per_second": 14.882,
      "eval_steps_per_second": 0.239,
      "step": 80
    },
    {
      "epoch": 1.0461538461538462,
      "grad_norm": 15.165513534162447,
      "learning_rate": 7.654041585295399e-07,
      "logits/chosen": -2.1545214653015137,
      "logits/rejected": -2.1319217681884766,
      "logps/chosen": -286.4499816894531,
      "logps/rejected": -262.167724609375,
      "loss": 0.2094,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 2.428976058959961,
      "rewards/margins": 3.554943084716797,
      "rewards/rejected": -1.125967264175415,
      "step": 85
    },
    {
      "epoch": 1.1076923076923078,
      "grad_norm": 22.133936868649922,
      "learning_rate": 7.362555775002579e-07,
      "logits/chosen": -2.101616859436035,
      "logits/rejected": -2.094041347503662,
      "logps/chosen": -280.25897216796875,
      "logps/rejected": -252.6195526123047,
      "loss": 0.1923,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 3.1136231422424316,
      "rewards/margins": 3.7958176136016846,
      "rewards/rejected": -0.6821939945220947,
      "step": 90
    },
    {
      "epoch": 1.1692307692307693,
      "grad_norm": 21.235888542941137,
      "learning_rate": 7.060336362675068e-07,
      "logits/chosen": -2.1554484367370605,
      "logits/rejected": -2.087667226791382,
      "logps/chosen": -284.35028076171875,
      "logps/rejected": -240.01785278320312,
      "loss": 0.1724,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 3.559795379638672,
      "rewards/margins": 4.128841400146484,
      "rewards/rejected": -0.569046139717102,
      "step": 95
    },
    {
      "epoch": 1.2307692307692308,
      "grad_norm": 19.651603274408373,
      "learning_rate": 6.748756396489505e-07,
      "logits/chosen": -2.1568312644958496,
      "logits/rejected": -2.117281436920166,
      "logps/chosen": -269.5829162597656,
      "logps/rejected": -248.9884033203125,
      "loss": 0.1757,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 2.9166793823242188,
      "rewards/margins": 4.183014869689941,
      "rewards/rejected": -1.2663352489471436,
      "step": 100
    },
    {
      "epoch": 1.2923076923076924,
      "grad_norm": 21.2287191738782,
      "learning_rate": 6.429231451643906e-07,
      "logits/chosen": -2.1700432300567627,
      "logits/rejected": -2.1068949699401855,
      "logps/chosen": -274.2152404785156,
      "logps/rejected": -229.031005859375,
      "loss": 0.1806,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 3.072680711746216,
      "rewards/margins": 4.2831034660339355,
      "rewards/rejected": -1.210423231124878,
      "step": 105
    },
    {
      "epoch": 1.353846153846154,
      "grad_norm": 17.119914873342427,
      "learning_rate": 6.103213199093267e-07,
      "logits/chosen": -2.1095035076141357,
      "logits/rejected": -2.0814878940582275,
      "logps/chosen": -289.4352111816406,
      "logps/rejected": -246.68759155273438,
      "loss": 0.1544,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 3.430999755859375,
      "rewards/margins": 4.2461419105529785,
      "rewards/rejected": -0.8151422739028931,
      "step": 110
    },
    {
      "epoch": 1.4153846153846155,
      "grad_norm": 22.13412271462813,
      "learning_rate": 5.772182810294344e-07,
      "logits/chosen": -2.121804714202881,
      "logits/rejected": -2.097852945327759,
      "logps/chosen": -265.60552978515625,
      "logps/rejected": -243.84390258789062,
      "loss": 0.1949,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 2.722719192504883,
      "rewards/margins": 4.1747965812683105,
      "rewards/rejected": -1.452077031135559,
      "step": 115
    },
    {
      "epoch": 1.476923076923077,
      "grad_norm": 19.045988439656412,
      "learning_rate": 5.43764422792326e-07,
      "logits/chosen": -2.1834075450897217,
      "logits/rejected": -2.1550724506378174,
      "logps/chosen": -287.23614501953125,
      "logps/rejected": -262.32855224609375,
      "loss": 0.1825,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 3.010335683822632,
      "rewards/margins": 4.589625358581543,
      "rewards/rejected": -1.5792896747589111,
      "step": 120
    },
    {
      "epoch": 1.476923076923077,
      "eval_logits/chosen": -2.2158141136169434,
      "eval_logits/rejected": -2.195758819580078,
      "eval_logps/chosen": -310.03826904296875,
      "eval_logps/rejected": -240.60853576660156,
      "eval_loss": 0.5121302604675293,
      "eval_rewards/accuracies": 0.8141891956329346,
      "eval_rewards/chosen": 1.9848158359527588,
      "eval_rewards/margins": 2.784022092819214,
      "eval_rewards/rejected": -0.7992062568664551,
      "eval_runtime": 154.8152,
      "eval_samples_per_second": 14.889,
      "eval_steps_per_second": 0.239,
      "step": 120
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 18.888758026934507,
      "learning_rate": 5.101117333138557e-07,
      "logits/chosen": -2.2510101795196533,
      "logits/rejected": -2.2272305488586426,
      "logps/chosen": -291.73980712890625,
      "logps/rejected": -265.9889221191406,
      "loss": 0.1577,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 3.0048184394836426,
      "rewards/margins": 4.716927528381348,
      "rewards/rejected": -1.712108850479126,
      "step": 125
    },
    {
      "epoch": 1.6,
      "grad_norm": 19.29495102896881,
      "learning_rate": 4.764131040432247e-07,
      "logits/chosen": -2.225930690765381,
      "logits/rejected": -2.253312349319458,
      "logps/chosen": -274.7572021484375,
      "logps/rejected": -255.572998046875,
      "loss": 0.1913,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 2.918527126312256,
      "rewards/margins": 4.673203945159912,
      "rewards/rejected": -1.754677414894104,
      "step": 130
    },
    {
      "epoch": 1.6615384615384614,
      "grad_norm": 25.5621828915234,
      "learning_rate": 4.428216351440491e-07,
      "logits/chosen": -2.251063585281372,
      "logits/rejected": -2.2444169521331787,
      "logps/chosen": -276.546630859375,
      "logps/rejected": -242.7362823486328,
      "loss": 0.2192,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 3.2366764545440674,
      "rewards/margins": 4.48799991607666,
      "rewards/rejected": -1.2513238191604614,
      "step": 135
    },
    {
      "epoch": 1.7230769230769232,
      "grad_norm": 22.24001791940726,
      "learning_rate": 4.0948993992719343e-07,
      "logits/chosen": -2.2279458045959473,
      "logits/rejected": -2.211503744125366,
      "logps/chosen": -269.0787658691406,
      "logps/rejected": -245.9545440673828,
      "loss": 0.2094,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 3.218578338623047,
      "rewards/margins": 4.087203502655029,
      "rewards/rejected": -0.8686248660087585,
      "step": 140
    },
    {
      "epoch": 1.7846153846153845,
      "grad_norm": 20.544231797693058,
      "learning_rate": 3.765694514954795e-07,
      "logits/chosen": -2.15586256980896,
      "logits/rejected": -2.1282958984375,
      "logps/chosen": -286.4849548339844,
      "logps/rejected": -253.61160278320312,
      "loss": 0.2194,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": 3.1202824115753174,
      "rewards/margins": 4.100513458251953,
      "rewards/rejected": -0.9802314043045044,
      "step": 145
    },
    {
      "epoch": 1.8461538461538463,
      "grad_norm": 19.256578235605794,
      "learning_rate": 3.4420973475033887e-07,
      "logits/chosen": -2.118685722351074,
      "logits/rejected": -2.0710177421569824,
      "logps/chosen": -275.2586364746094,
      "logps/rejected": -253.2464141845703,
      "loss": 0.2143,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 3.2161738872528076,
      "rewards/margins": 4.078971862792969,
      "rewards/rejected": -0.862797737121582,
      "step": 150
    },
    {
      "epoch": 1.9076923076923076,
      "grad_norm": 23.98963369524157,
      "learning_rate": 3.1255780688610506e-07,
      "logits/chosen": -2.166189432144165,
      "logits/rejected": -2.098736047744751,
      "logps/chosen": -276.6828308105469,
      "logps/rejected": -248.3531036376953,
      "loss": 0.1708,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 3.080463409423828,
      "rewards/margins": 4.5233073234558105,
      "rewards/rejected": -1.4428437948226929,
      "step": 155
    },
    {
      "epoch": 1.9692307692307693,
      "grad_norm": 19.206785735628465,
      "learning_rate": 2.8175746945909274e-07,
      "logits/chosen": -2.137047290802002,
      "logits/rejected": -2.1248533725738525,
      "logps/chosen": -272.5418395996094,
      "logps/rejected": -239.5931854248047,
      "loss": 0.2112,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": 3.2736434936523438,
      "rewards/margins": 4.470524787902832,
      "rewards/rejected": -1.1968815326690674,
      "step": 160
    },
    {
      "epoch": 1.9692307692307693,
      "eval_logits/chosen": -2.181283712387085,
      "eval_logits/rejected": -2.160254716873169,
      "eval_logps/chosen": -305.28289794921875,
      "eval_logps/rejected": -235.87991333007812,
      "eval_loss": 0.48850271105766296,
      "eval_rewards/accuracies": 0.8175675868988037,
      "eval_rewards/chosen": 2.460352897644043,
      "eval_rewards/margins": 2.78669810295105,
      "eval_rewards/rejected": -0.32634544372558594,
      "eval_runtime": 154.8615,
      "eval_samples_per_second": 14.884,
      "eval_steps_per_second": 0.239,
      "step": 160
    },
    {
      "epoch": 2.0307692307692307,
      "grad_norm": 13.040095905596985,
      "learning_rate": 2.51948655066015e-07,
      "logits/chosen": -2.1852829456329346,
      "logits/rejected": -2.1388115882873535,
      "logps/chosen": -276.78057861328125,
      "logps/rejected": -241.5438690185547,
      "loss": 0.1728,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 3.1819839477539062,
      "rewards/margins": 4.5904693603515625,
      "rewards/rejected": -1.4084855318069458,
      "step": 165
    },
    {
      "epoch": 2.0923076923076924,
      "grad_norm": 9.499778107585676,
      "learning_rate": 2.2326679159992156e-07,
      "logits/chosen": -2.2025036811828613,
      "logits/rejected": -2.180441379547119,
      "logps/chosen": -282.98736572265625,
      "logps/rejected": -248.19552612304688,
      "loss": 0.0866,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": 3.148271083831787,
      "rewards/margins": 4.650267601013184,
      "rewards/rejected": -1.5019972324371338,
      "step": 170
    },
    {
      "epoch": 2.1538461538461537,
      "grad_norm": 15.892804029114844,
      "learning_rate": 1.9584218697198068e-07,
      "logits/chosen": -2.2108588218688965,
      "logits/rejected": -2.2013297080993652,
      "logps/chosen": -273.5797119140625,
      "logps/rejected": -248.348876953125,
      "loss": 0.1031,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 3.147243022918701,
      "rewards/margins": 4.491833209991455,
      "rewards/rejected": -1.3445903062820435,
      "step": 175
    },
    {
      "epoch": 2.2153846153846155,
      "grad_norm": 14.22570343292715,
      "learning_rate": 1.6979943709444517e-07,
      "logits/chosen": -2.190584421157837,
      "logits/rejected": -2.174020290374756,
      "logps/chosen": -262.9644470214844,
      "logps/rejected": -239.3780975341797,
      "loss": 0.0932,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 2.893162965774536,
      "rewards/margins": 4.69353723526001,
      "rewards/rejected": -1.8003742694854736,
      "step": 180
    },
    {
      "epoch": 2.276923076923077,
      "grad_norm": 12.806415359522136,
      "learning_rate": 1.4525685981446679e-07,
      "logits/chosen": -2.1847140789031982,
      "logits/rejected": -2.2051777839660645,
      "logps/chosen": -252.59158325195312,
      "logps/rejected": -246.86373901367188,
      "loss": 0.0929,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 2.550929307937622,
      "rewards/margins": 4.282719612121582,
      "rewards/rejected": -1.7317907810211182,
      "step": 185
    },
    {
      "epoch": 2.3384615384615386,
      "grad_norm": 10.861908687655884,
      "learning_rate": 1.2232595737051837e-07,
      "logits/chosen": -2.1893796920776367,
      "logits/rejected": -2.158172369003296,
      "logps/chosen": -281.5319519042969,
      "logps/rejected": -253.09927368164062,
      "loss": 0.0853,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": 3.348022937774658,
      "rewards/margins": 4.836338996887207,
      "rewards/rejected": -1.488316297531128,
      "step": 190
    },
    {
      "epoch": 2.4,
      "grad_norm": 13.046588546578379,
      "learning_rate": 1.011109098135996e-07,
      "logits/chosen": -2.18019437789917,
      "logits/rejected": -2.1145331859588623,
      "logps/chosen": -299.452880859375,
      "logps/rejected": -278.765869140625,
      "loss": 0.0841,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": 3.1544060707092285,
      "rewards/margins": 5.230926990509033,
      "rewards/rejected": -2.0765209197998047,
      "step": 195
    },
    {
      "epoch": 2.4615384615384617,
      "grad_norm": 17.569771715724876,
      "learning_rate": 8.170810169472592e-08,
      "logits/chosen": -2.1487338542938232,
      "logits/rejected": -2.103668689727783,
      "logps/chosen": -285.69757080078125,
      "logps/rejected": -238.96499633789062,
      "loss": 0.1059,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 3.010953187942505,
      "rewards/margins": 4.7436604499816895,
      "rewards/rejected": -1.7327070236206055,
      "step": 200
    },
    {
      "epoch": 2.4615384615384617,
      "eval_logits/chosen": -2.1317200660705566,
      "eval_logits/rejected": -2.1061103343963623,
      "eval_logps/chosen": -306.38006591796875,
      "eval_logps/rejected": -239.89051818847656,
      "eval_loss": 0.49472159147262573,
      "eval_rewards/accuracies": 0.8108108043670654,
      "eval_rewards/chosen": 2.350633144378662,
      "eval_rewards/margins": 3.0780346393585205,
      "eval_rewards/rejected": -0.7274015545845032,
      "eval_runtime": 154.9725,
      "eval_samples_per_second": 14.874,
      "eval_steps_per_second": 0.239,
      "step": 200
    },
    {
      "epoch": 2.523076923076923,
      "grad_norm": 12.526662899067567,
      "learning_rate": 6.420568416906058e-08,
      "logits/chosen": -2.110624074935913,
      "logits/rejected": -2.0670769214630127,
      "logps/chosen": -267.602294921875,
      "logps/rejected": -255.283203125,
      "loss": 0.0798,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 3.103508472442627,
      "rewards/margins": 5.05588436126709,
      "rewards/rejected": -1.9523770809173584,
      "step": 205
    },
    {
      "epoch": 2.5846153846153848,
      "grad_norm": 15.586951889499693,
      "learning_rate": 4.8683174506144064e-08,
      "logits/chosen": -2.1358416080474854,
      "logits/rejected": -2.0804855823516846,
      "logps/chosen": -267.86932373046875,
      "logps/rejected": -266.5166320800781,
      "loss": 0.0799,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": 3.441650390625,
      "rewards/margins": 5.49423885345459,
      "rewards/rejected": -2.0525882244110107,
      "step": 210
    },
    {
      "epoch": 2.646153846153846,
      "grad_norm": 12.921163363324,
      "learning_rate": 3.5211094825735145e-08,
      "logits/chosen": -2.1344826221466064,
      "logits/rejected": -2.095686435699463,
      "logps/chosen": -273.34466552734375,
      "logps/rejected": -259.7888488769531,
      "loss": 0.0906,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": 3.337371826171875,
      "rewards/margins": 5.457432270050049,
      "rewards/rejected": -2.1200602054595947,
      "step": 215
    },
    {
      "epoch": 2.707692307692308,
      "grad_norm": 11.698637983314368,
      "learning_rate": 2.385065170056283e-08,
      "logits/chosen": -2.1433401107788086,
      "logits/rejected": -2.081160545349121,
      "logps/chosen": -273.98004150390625,
      "logps/rejected": -262.38897705078125,
      "loss": 0.0834,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": 3.1992526054382324,
      "rewards/margins": 5.575494766235352,
      "rewards/rejected": -2.376242160797119,
      "step": 220
    },
    {
      "epoch": 2.769230769230769,
      "grad_norm": 12.58060499267709,
      "learning_rate": 1.465345808162427e-08,
      "logits/chosen": -2.1173667907714844,
      "logits/rejected": -2.0692567825317383,
      "logps/chosen": -273.2080383300781,
      "logps/rejected": -258.080810546875,
      "loss": 0.0774,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": 3.0734853744506836,
      "rewards/margins": 5.315826892852783,
      "rewards/rejected": -2.2423415184020996,
      "step": 225
    },
    {
      "epoch": 2.830769230769231,
      "grad_norm": 11.786260598928802,
      "learning_rate": 7.661298809381877e-09,
      "logits/chosen": -2.126652717590332,
      "logits/rejected": -2.076254367828369,
      "logps/chosen": -289.486083984375,
      "logps/rejected": -265.6861877441406,
      "loss": 0.1024,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 3.2749016284942627,
      "rewards/margins": 5.374606132507324,
      "rewards/rejected": -2.0997047424316406,
      "step": 230
    },
    {
      "epoch": 2.8923076923076922,
      "grad_norm": 17.82006078758849,
      "learning_rate": 2.9059407761923836e-09,
      "logits/chosen": -2.1355929374694824,
      "logits/rejected": -2.1044511795043945,
      "logps/chosen": -269.39984130859375,
      "logps/rejected": -247.22671508789062,
      "loss": 0.0907,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 3.20941424369812,
      "rewards/margins": 4.841259956359863,
      "rewards/rejected": -1.6318458318710327,
      "step": 235
    },
    {
      "epoch": 2.953846153846154,
      "grad_norm": 17.458154920989255,
      "learning_rate": 4.0898860244215074e-10,
      "logits/chosen": -2.1104207038879395,
      "logits/rejected": -2.0696866512298584,
      "logps/chosen": -259.77471923828125,
      "logps/rejected": -241.05380249023438,
      "loss": 0.1134,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 2.9383902549743652,
      "rewards/margins": 4.824470520019531,
      "rewards/rejected": -1.8860807418823242,
      "step": 240
    },
    {
      "epoch": 2.953846153846154,
      "eval_logits/chosen": -2.1210827827453613,
      "eval_logits/rejected": -2.094482660293579,
      "eval_logps/chosen": -306.5203552246094,
      "eval_logps/rejected": -240.8570556640625,
      "eval_loss": 0.4984860122203827,
      "eval_rewards/accuracies": 0.8108108043670654,
      "eval_rewards/chosen": 2.3366057872772217,
      "eval_rewards/margins": 3.1606650352478027,
      "eval_rewards/rejected": -0.8240591287612915,
      "eval_runtime": 154.8776,
      "eval_samples_per_second": 14.883,
      "eval_steps_per_second": 0.239,
      "step": 240
    },
    {
      "epoch": 2.9907692307692306,
      "step": 243,
      "total_flos": 2865208385077248.0,
      "train_loss": 0.2735099794933335,
      "train_runtime": 9352.9566,
      "train_samples_per_second": 6.652,
      "train_steps_per_second": 0.026
    }
  ],
  "logging_steps": 5,
  "max_steps": 243,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 40,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2865208385077248.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}