{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 216,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06944444444444445,
      "grad_norm": 36.698929739881684,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7539002895355225,
      "logits/rejected": -2.7327029705047607,
      "logps/chosen": -163.69387817382812,
      "logps/rejected": -163.82852172851562,
      "loss": 0.693,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": 0.0032427930273115635,
      "rewards/margins": 0.003264186205342412,
      "rewards/rejected": -2.1393225324572995e-05,
      "step": 5
    },
    {
      "epoch": 0.1388888888888889,
      "grad_norm": 33.19716118176963,
      "learning_rate": 1e-06,
      "logits/chosen": -2.725349187850952,
      "logits/rejected": -2.7233974933624268,
      "logps/chosen": -158.5753631591797,
      "logps/rejected": -163.6913299560547,
      "loss": 0.6852,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": 0.1477239578962326,
      "rewards/margins": 0.009601245634257793,
      "rewards/rejected": 0.13812272250652313,
      "step": 10
    },
    {
      "epoch": 0.20833333333333334,
      "grad_norm": 32.17261180089332,
      "learning_rate": 9.985471028179154e-07,
      "logits/chosen": -2.6893043518066406,
      "logits/rejected": -2.6972908973693848,
      "logps/chosen": -163.91458129882812,
      "logps/rejected": -160.10305786132812,
      "loss": 0.6754,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.5012314319610596,
      "rewards/margins": 0.07390480488538742,
      "rewards/rejected": 0.42732667922973633,
      "step": 15
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 34.14748336984378,
      "learning_rate": 9.94196854912548e-07,
      "logits/chosen": -2.6901488304138184,
      "logits/rejected": -2.706470251083374,
      "logps/chosen": -160.76934814453125,
      "logps/rejected": -159.16995239257812,
      "loss": 0.6546,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.3948652148246765,
      "rewards/margins": 0.21753108501434326,
      "rewards/rejected": 0.17733411490917206,
      "step": 20
    },
    {
      "epoch": 0.3472222222222222,
      "grad_norm": 32.71971381381323,
      "learning_rate": 9.869745381355905e-07,
      "logits/chosen": -2.6964094638824463,
      "logits/rejected": -2.682394504547119,
      "logps/chosen": -157.96328735351562,
      "logps/rejected": -162.65863037109375,
      "loss": 0.6383,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.2566204071044922,
      "rewards/margins": 0.2564144432544708,
      "rewards/rejected": 0.00020598471746779978,
      "step": 25
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 35.675812969071345,
      "learning_rate": 9.769221256218162e-07,
      "logits/chosen": -2.6471545696258545,
      "logits/rejected": -2.620529890060425,
      "logps/chosen": -160.15093994140625,
      "logps/rejected": -163.9252166748047,
      "loss": 0.6267,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.07839958369731903,
      "rewards/margins": 0.13437125086784363,
      "rewards/rejected": -0.0559716634452343,
      "step": 30
    },
    {
      "epoch": 0.4861111111111111,
      "grad_norm": 35.65139996099045,
      "learning_rate": 9.64098037858483e-07,
      "logits/chosen": -2.5851612091064453,
      "logits/rejected": -2.5945792198181152,
      "logps/chosen": -153.91452026367188,
      "logps/rejected": -158.6124267578125,
      "loss": 0.6275,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.3417479395866394,
      "rewards/margins": 0.34512001276016235,
      "rewards/rejected": -0.0033720836509019136,
      "step": 35
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 32.23578863597255,
      "learning_rate": 9.485768031694871e-07,
      "logits/chosen": -2.5328378677368164,
      "logits/rejected": -2.5566062927246094,
      "logps/chosen": -149.2768096923828,
      "logps/rejected": -150.4925537109375,
      "loss": 0.593,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.4136400818824768,
      "rewards/margins": 0.36537498235702515,
      "rewards/rejected": 0.048265062272548676,
      "step": 40
    },
    {
      "epoch": 0.625,
      "grad_norm": 32.86956863981884,
      "learning_rate": 9.304486245873971e-07,
      "logits/chosen": -2.6404240131378174,
      "logits/rejected": -2.6186232566833496,
      "logps/chosen": -154.857421875,
      "logps/rejected": -160.54421997070312,
      "loss": 0.5804,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.21952423453330994,
      "rewards/margins": 0.6369880437850952,
      "rewards/rejected": -0.41746383905410767,
      "step": 45
    },
    {
      "epoch": 0.6944444444444444,
      "grad_norm": 32.98463521922708,
      "learning_rate": 9.098188556305262e-07,
      "logits/chosen": -2.7281055450439453,
      "logits/rejected": -2.7109835147857666,
      "logps/chosen": -164.81529235839844,
      "logps/rejected": -172.00509643554688,
      "loss": 0.5904,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.22729039192199707,
      "rewards/margins": 0.6152051687240601,
      "rewards/rejected": -0.3879148066043854,
      "step": 50
    },
    {
      "epoch": 0.6944444444444444,
      "eval_logits/chosen": -2.7504920959472656,
      "eval_logits/rejected": -2.7399511337280273,
      "eval_logps/chosen": -158.1156463623047,
      "eval_logps/rejected": -165.36752319335938,
      "eval_loss": 0.5584720969200134,
      "eval_rewards/accuracies": 0.6953125,
      "eval_rewards/chosen": 0.1767115592956543,
      "eval_rewards/margins": 0.707548201084137,
      "eval_rewards/rejected": -0.5308365821838379,
      "eval_runtime": 126.2022,
      "eval_samples_per_second": 16.188,
      "eval_steps_per_second": 0.254,
      "step": 50
    },
    {
      "epoch": 0.7638888888888888,
      "grad_norm": 32.243042350659394,
      "learning_rate": 8.868073880316123e-07,
      "logits/chosen": -2.716021776199341,
      "logits/rejected": -2.7205090522766113,
      "logps/chosen": -160.37879943847656,
      "logps/rejected": -165.23492431640625,
      "loss": 0.5532,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.1015612855553627,
      "rewards/margins": 0.6121625304222107,
      "rewards/rejected": -0.7137238383293152,
      "step": 55
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 35.25482149048248,
      "learning_rate": 8.615479549763755e-07,
      "logits/chosen": -2.6721854209899902,
      "logits/rejected": -2.6540756225585938,
      "logps/chosen": -154.70159912109375,
      "logps/rejected": -160.74624633789062,
      "loss": 0.5415,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.3344821035861969,
      "rewards/margins": 0.9801710247993469,
      "rewards/rejected": -1.3146532773971558,
      "step": 60
    },
    {
      "epoch": 0.9027777777777778,
      "grad_norm": 30.012069786549304,
      "learning_rate": 8.341873539012443e-07,
      "logits/chosen": -2.6360578536987305,
      "logits/rejected": -2.6559650897979736,
      "logps/chosen": -155.54788208007812,
      "logps/rejected": -159.24798583984375,
      "loss": 0.534,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.024257740005850792,
      "rewards/margins": 0.7708773016929626,
      "rewards/rejected": -0.7466195821762085,
      "step": 65
    },
    {
      "epoch": 0.9722222222222222,
      "grad_norm": 32.593167513928634,
      "learning_rate": 8.048845933670271e-07,
      "logits/chosen": -2.6497814655303955,
      "logits/rejected": -2.643947124481201,
      "logps/chosen": -175.2610626220703,
      "logps/rejected": -181.9915771484375,
      "loss": 0.5238,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.22413644194602966,
      "rewards/margins": 0.8313905596733093,
      "rewards/rejected": -1.0555269718170166,
      "step": 70
    },
    {
      "epoch": 1.0416666666666667,
      "grad_norm": 18.23226121550072,
      "learning_rate": 7.738099689665539e-07,
      "logits/chosen": -2.7001595497131348,
      "logits/rejected": -2.7125773429870605,
      "logps/chosen": -158.98019409179688,
      "logps/rejected": -174.9591522216797,
      "loss": 0.3717,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.3584290146827698,
      "rewards/margins": 1.3700459003448486,
      "rewards/rejected": -1.7284749746322632,
      "step": 75
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 17.63451132872378,
      "learning_rate": 7.41144073636728e-07,
      "logits/chosen": -2.7544779777526855,
      "logits/rejected": -2.7487056255340576,
      "logps/chosen": -172.69039916992188,
      "logps/rejected": -199.56082153320312,
      "loss": 0.253,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 0.5693989396095276,
      "rewards/margins": 2.6911113262176514,
      "rewards/rejected": -2.1217124462127686,
      "step": 80
    },
    {
      "epoch": 1.1805555555555556,
      "grad_norm": 19.435839013678418,
      "learning_rate": 7.070767481266492e-07,
      "logits/chosen": -2.7354109287261963,
      "logits/rejected": -2.7295777797698975,
      "logps/chosen": -166.5186767578125,
      "logps/rejected": -190.08285522460938,
      "loss": 0.2397,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 0.3293834328651428,
      "rewards/margins": 2.566129207611084,
      "rewards/rejected": -2.236745595932007,
      "step": 85
    },
    {
      "epoch": 1.25,
      "grad_norm": 20.070654461149143,
      "learning_rate": 6.718059777212565e-07,
      "logits/chosen": -2.7108664512634277,
      "logits/rejected": -2.699066638946533,
      "logps/chosen": -165.696533203125,
      "logps/rejected": -187.604736328125,
      "loss": 0.2257,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.10144776105880737,
      "rewards/margins": 2.6659486293792725,
      "rewards/rejected": -2.7673964500427246,
      "step": 90
    },
    {
      "epoch": 1.3194444444444444,
      "grad_norm": 21.350071509710315,
      "learning_rate": 6.355367416322778e-07,
      "logits/chosen": -2.6924808025360107,
      "logits/rejected": -2.6694552898406982,
      "logps/chosen": -162.50173950195312,
      "logps/rejected": -189.10617065429688,
      "loss": 0.2252,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.26245421171188354,
      "rewards/margins": 2.784482955932617,
      "rewards/rejected": -3.0469374656677246,
      "step": 95
    },
    {
      "epoch": 1.3888888888888888,
      "grad_norm": 19.5783540001623,
      "learning_rate": 5.984798217433531e-07,
      "logits/chosen": -2.669064998626709,
      "logits/rejected": -2.683467388153076,
      "logps/chosen": -166.97213745117188,
      "logps/rejected": -197.8631591796875,
      "loss": 0.2124,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.2085113823413849,
      "rewards/margins": 2.9906728267669678,
      "rewards/rejected": -3.1991844177246094,
      "step": 100
    },
    {
      "epoch": 1.3888888888888888,
      "eval_logits/chosen": -2.702752113342285,
      "eval_logits/rejected": -2.684614419937134,
      "eval_logps/chosen": -169.66738891601562,
      "eval_logps/rejected": -182.4733123779297,
      "eval_loss": 0.5329886078834534,
      "eval_rewards/accuracies": 0.734375,
      "eval_rewards/chosen": -0.9784606099128723,
      "eval_rewards/margins": 1.2629573345184326,
      "eval_rewards/rejected": -2.2414181232452393,
      "eval_runtime": 125.9231,
      "eval_samples_per_second": 16.224,
      "eval_steps_per_second": 0.254,
      "step": 100
    },
    {
      "epoch": 1.4583333333333333,
      "grad_norm": 21.958809178033764,
      "learning_rate": 5.608505776324157e-07,
      "logits/chosen": -2.6888201236724854,
      "logits/rejected": -2.695568561553955,
      "logps/chosen": -166.43875122070312,
      "logps/rejected": -191.87063598632812,
      "loss": 0.2121,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.37919527292251587,
      "rewards/margins": 2.9825637340545654,
      "rewards/rejected": -3.3617591857910156,
      "step": 105
    },
    {
      "epoch": 1.5277777777777777,
      "grad_norm": 20.249637616589567,
      "learning_rate": 5.228676949903973e-07,
      "logits/chosen": -2.682009696960449,
      "logits/rejected": -2.689685106277466,
      "logps/chosen": -163.05067443847656,
      "logps/rejected": -196.00839233398438,
      "loss": 0.205,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.09015237540006638,
      "rewards/margins": 3.487103223800659,
      "rewards/rejected": -3.5772557258605957,
      "step": 110
    },
    {
      "epoch": 1.5972222222222223,
      "grad_norm": 22.752461736398654,
      "learning_rate": 4.847519147099294e-07,
      "logits/chosen": -2.688514232635498,
      "logits/rejected": -2.678856372833252,
      "logps/chosen": -164.12435913085938,
      "logps/rejected": -198.7073516845703,
      "loss": 0.1898,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -0.3626613914966583,
      "rewards/margins": 3.352776288986206,
      "rewards/rejected": -3.7154381275177,
      "step": 115
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 28.552521399491905,
      "learning_rate": 4.46724750030062e-07,
      "logits/chosen": -2.6869266033172607,
      "logits/rejected": -2.6809396743774414,
      "logps/chosen": -165.9976043701172,
      "logps/rejected": -198.72543334960938,
      "loss": 0.2231,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.41630274057388306,
      "rewards/margins": 3.22212553024292,
      "rewards/rejected": -3.6384284496307373,
      "step": 120
    },
    {
      "epoch": 1.7361111111111112,
      "grad_norm": 23.105210217645727,
      "learning_rate": 4.0900719919241935e-07,
      "logits/chosen": -2.6787171363830566,
      "logits/rejected": -2.685763120651245,
      "logps/chosen": -171.46243286132812,
      "logps/rejected": -196.81906127929688,
      "loss": 0.1911,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": -0.20612892508506775,
      "rewards/margins": 3.1734466552734375,
      "rewards/rejected": -3.379575252532959,
      "step": 125
    },
    {
      "epoch": 1.8055555555555556,
      "grad_norm": 23.168860883635634,
      "learning_rate": 3.7181846109031e-07,
      "logits/chosen": -2.701108455657959,
      "logits/rejected": -2.7029197216033936,
      "logps/chosen": -165.6434326171875,
      "logps/rejected": -197.2202911376953,
      "loss": 0.1896,
      "rewards/accuracies": 0.918749988079071,
      "rewards/chosen": -0.21247120201587677,
      "rewards/margins": 3.4415907859802246,
      "rewards/rejected": -3.654061794281006,
      "step": 130
    },
    {
      "epoch": 1.875,
      "grad_norm": 20.925035284646686,
      "learning_rate": 3.353746613749093e-07,
      "logits/chosen": -2.707535982131958,
      "logits/rejected": -2.6970787048339844,
      "logps/chosen": -165.4237518310547,
      "logps/rejected": -192.59902954101562,
      "loss": 0.1835,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.5198689103126526,
      "rewards/margins": 3.2268478870391846,
      "rewards/rejected": -3.7467167377471924,
      "step": 135
    },
    {
      "epoch": 1.9444444444444444,
      "grad_norm": 23.165784110794785,
      "learning_rate": 2.9988759642186093e-07,
      "logits/chosen": -2.7198643684387207,
      "logits/rejected": -2.680373191833496,
      "logps/chosen": -163.90203857421875,
      "logps/rejected": -195.30250549316406,
      "loss": 0.1933,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -0.40515241026878357,
      "rewards/margins": 3.571967601776123,
      "rewards/rejected": -3.9771201610565186,
      "step": 140
    },
    {
      "epoch": 2.013888888888889,
      "grad_norm": 15.582805467012191,
      "learning_rate": 2.655635024578483e-07,
      "logits/chosen": -2.7195186614990234,
      "logits/rejected": -2.723917007446289,
      "logps/chosen": -163.361572265625,
      "logps/rejected": -190.98854064941406,
      "loss": 0.1807,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": -0.5500528216362,
      "rewards/margins": 3.374781847000122,
      "rewards/rejected": -3.924835205078125,
      "step": 145
    },
    {
      "epoch": 2.0833333333333335,
      "grad_norm": 12.541018024884982,
      "learning_rate": 2.3260185700046292e-07,
      "logits/chosen": -2.763014316558838,
      "logits/rejected": -2.7388086318969727,
      "logps/chosen": -163.94528198242188,
      "logps/rejected": -203.96595764160156,
      "loss": 0.1027,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -0.01648128405213356,
      "rewards/margins": 4.084949493408203,
      "rewards/rejected": -4.101430416107178,
      "step": 150
    },
    {
      "epoch": 2.0833333333333335,
      "eval_logits/chosen": -2.7841079235076904,
      "eval_logits/rejected": -2.764777898788452,
      "eval_logps/chosen": -173.171875,
      "eval_logps/rejected": -187.4415283203125,
      "eval_loss": 0.5208835005760193,
      "eval_rewards/accuracies": 0.73046875,
      "eval_rewards/chosen": -1.3289111852645874,
      "eval_rewards/margins": 1.4093263149261475,
      "eval_rewards/rejected": -2.7382373809814453,
      "eval_runtime": 126.0823,
      "eval_samples_per_second": 16.204,
      "eval_steps_per_second": 0.254,
      "step": 150
    },
    {
      "epoch": 2.1527777777777777,
      "grad_norm": 15.046831939350444,
      "learning_rate": 2.0119421957691218e-07,
      "logits/chosen": -2.763632297515869,
      "logits/rejected": -2.7723591327667236,
      "logps/chosen": -164.19580078125,
      "logps/rejected": -203.69476318359375,
      "loss": 0.1026,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -0.08720869570970535,
      "rewards/margins": 4.142312049865723,
      "rewards/rejected": -4.229520797729492,
      "step": 155
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 15.165797448924506,
      "learning_rate": 1.7152311845883094e-07,
      "logits/chosen": -2.7780723571777344,
      "logits/rejected": -2.7640511989593506,
      "logps/chosen": -169.73117065429688,
      "logps/rejected": -211.1389617919922,
      "loss": 0.0982,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -0.3978654444217682,
      "rewards/margins": 4.3727922439575195,
      "rewards/rejected": -4.770657539367676,
      "step": 160
    },
    {
      "epoch": 2.2916666666666665,
      "grad_norm": 15.482018650732057,
      "learning_rate": 1.4376098988303404e-07,
      "logits/chosen": -2.759021282196045,
      "logits/rejected": -2.748340368270874,
      "logps/chosen": -162.560791015625,
      "logps/rejected": -204.7227783203125,
      "loss": 0.0902,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -0.4122535288333893,
      "rewards/margins": 4.691542625427246,
      "rewards/rejected": -5.103796482086182,
      "step": 165
    },
    {
      "epoch": 2.361111111111111,
      "grad_norm": 14.070010302114309,
      "learning_rate": 1.1806917592302761e-07,
      "logits/chosen": -2.7598907947540283,
      "logits/rejected": -2.764383316040039,
      "logps/chosen": -175.9230499267578,
      "logps/rejected": -222.24502563476562,
      "loss": 0.0837,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -0.6161278486251831,
      "rewards/margins": 4.77976655960083,
      "rewards/rejected": -5.395894527435303,
      "step": 170
    },
    {
      "epoch": 2.4305555555555554,
      "grad_norm": 21.006851996636122,
      "learning_rate": 9.459698683523204e-08,
      "logits/chosen": -2.7295148372650146,
      "logits/rejected": -2.7130894660949707,
      "logps/chosen": -167.0711669921875,
      "logps/rejected": -206.5072021484375,
      "loss": 0.0974,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -0.7671406269073486,
      "rewards/margins": 4.370370388031006,
      "rewards/rejected": -5.137511253356934,
      "step": 175
    },
    {
      "epoch": 2.5,
      "grad_norm": 15.576491460606624,
      "learning_rate": 7.348083332917926e-08,
      "logits/chosen": -2.7286458015441895,
      "logits/rejected": -2.745143413543701,
      "logps/chosen": -158.9204559326172,
      "logps/rejected": -203.44619750976562,
      "loss": 0.0952,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -0.859430193901062,
      "rewards/margins": 4.667559623718262,
      "rewards/rejected": -5.526989936828613,
      "step": 180
    },
    {
      "epoch": 2.5694444444444446,
      "grad_norm": 19.664847074777622,
      "learning_rate": 5.484343380457124e-08,
      "logits/chosen": -2.7284622192382812,
      "logits/rejected": -2.7399046421051025,
      "logps/chosen": -172.6531219482422,
      "logps/rejected": -218.3181915283203,
      "loss": 0.092,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -0.9713869094848633,
      "rewards/margins": 4.435851097106934,
      "rewards/rejected": -5.407237529754639,
      "step": 185
    },
    {
      "epoch": 2.638888888888889,
      "grad_norm": 18.716443063570917,
      "learning_rate": 3.879310116241041e-08,
      "logits/chosen": -2.7296249866485596,
      "logits/rejected": -2.6954047679901123,
      "logps/chosen": -168.70761108398438,
      "logps/rejected": -215.40487670898438,
      "loss": 0.0906,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -0.9057639241218567,
      "rewards/margins": 4.902363300323486,
      "rewards/rejected": -5.808127403259277,
      "step": 190
    },
    {
      "epoch": 2.7083333333333335,
      "grad_norm": 14.846476512042852,
      "learning_rate": 2.5423113334966218e-08,
      "logits/chosen": -2.7083740234375,
      "logits/rejected": -2.718740463256836,
      "logps/chosen": -182.84317016601562,
      "logps/rejected": -217.2157440185547,
      "loss": 0.0828,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -1.0764333009719849,
      "rewards/margins": 4.455503940582275,
      "rewards/rejected": -5.5319366455078125,
      "step": 195
    },
    {
      "epoch": 2.7777777777777777,
      "grad_norm": 18.16022354751141,
      "learning_rate": 1.4811171192794624e-08,
      "logits/chosen": -2.7187182903289795,
      "logits/rejected": -2.70548415184021,
      "logps/chosen": -163.11203002929688,
      "logps/rejected": -208.163330078125,
      "loss": 0.0793,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -0.7775343656539917,
      "rewards/margins": 4.664699554443359,
      "rewards/rejected": -5.442234039306641,
      "step": 200
    },
    {
      "epoch": 2.7777777777777777,
      "eval_logits/chosen": -2.7315986156463623,
      "eval_logits/rejected": -2.7054669857025146,
      "eval_logps/chosen": -183.64117431640625,
      "eval_logps/rejected": -201.37167358398438,
      "eval_loss": 0.5435395836830139,
      "eval_rewards/accuracies": 0.72265625,
      "eval_rewards/chosen": -2.3758413791656494,
      "eval_rewards/margins": 1.7554093599319458,
      "eval_rewards/rejected": -4.131250381469727,
      "eval_runtime": 126.0967,
      "eval_samples_per_second": 16.202,
      "eval_steps_per_second": 0.254,
      "step": 200
    },
    {
      "epoch": 2.8472222222222223,
      "grad_norm": 14.047137266998277,
      "learning_rate": 7.018946979234997e-09,
      "logits/chosen": -2.7222084999084473,
      "logits/rejected": -2.7124929428100586,
      "logps/chosen": -169.2677459716797,
      "logps/rejected": -219.9378204345703,
      "loss": 0.0924,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -1.0015312433242798,
      "rewards/margins": 4.620343208312988,
      "rewards/rejected": -5.621874809265137,
      "step": 205
    },
    {
      "epoch": 2.9166666666666665,
      "grad_norm": 17.522380122796704,
      "learning_rate": 2.0917258966953734e-09,
      "logits/chosen": -2.718588352203369,
      "logits/rejected": -2.6965277194976807,
      "logps/chosen": -176.5627899169922,
      "logps/rejected": -226.1285400390625,
      "loss": 0.0823,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.2789057493209839,
      "rewards/margins": 4.813643455505371,
      "rewards/rejected": -6.0925493240356445,
      "step": 210
    },
    {
      "epoch": 2.986111111111111,
      "grad_norm": 15.800377863252164,
      "learning_rate": 5.814292768108187e-11,
      "logits/chosen": -2.7308130264282227,
      "logits/rejected": -2.712651252746582,
      "logps/chosen": -180.72364807128906,
      "logps/rejected": -226.88345336914062,
      "loss": 0.0929,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -0.9098930358886719,
      "rewards/margins": 4.751838207244873,
      "rewards/rejected": -5.661731719970703,
      "step": 215
    },
    {
      "epoch": 3.0,
      "step": 216,
      "total_flos": 2546799910846464.0,
      "train_loss": 0.30356262255184074,
      "train_runtime": 7672.2284,
      "train_samples_per_second": 7.189,
      "train_steps_per_second": 0.028
    }
  ],
  "logging_steps": 5,
  "max_steps": 216,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2546799910846464.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}