|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2898550724637681,
  "eval_steps": 40,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.036231884057971016,
      "grad_norm": 61.66789827681496,
      "learning_rate": 5e-07,
      "logits/chosen": -2.7321553230285645,
      "logits/rejected": -2.7100937366485596,
      "logps/chosen": -182.5845489501953,
      "logps/rejected": -189.55001831054688,
      "loss": 0.689,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": -0.0021577859297394753,
      "rewards/margins": 0.005646524019539356,
      "rewards/rejected": -0.007804309483617544,
      "step": 5
    },
    {
      "epoch": 0.07246376811594203,
      "grad_norm": 44.96703657621111,
      "learning_rate": 1e-06,
      "logits/chosen": -2.753889799118042,
      "logits/rejected": -2.7519516944885254,
      "logps/chosen": -197.34320068359375,
      "logps/rejected": -184.00961303710938,
      "loss": 0.6275,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.030874451622366905,
      "rewards/margins": 0.18904080986976624,
      "rewards/rejected": -0.15816636383533478,
      "step": 10
    },
    {
      "epoch": 0.10869565217391304,
      "grad_norm": 51.54402636298773,
      "learning_rate": 9.996221126793764e-07,
      "logits/chosen": -2.694437265396118,
      "logits/rejected": -2.691904067993164,
      "logps/chosen": -203.14883422851562,
      "logps/rejected": -204.52386474609375,
      "loss": 0.5851,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.6205412745475769,
      "rewards/margins": 0.9350436925888062,
      "rewards/rejected": -0.314502477645874,
      "step": 15
    },
    {
      "epoch": 0.14492753623188406,
      "grad_norm": 35.11282014809116,
      "learning_rate": 9.984890219128145e-07,
      "logits/chosen": -2.609405517578125,
      "logits/rejected": -2.5795176029205322,
      "logps/chosen": -188.33395385742188,
      "logps/rejected": -192.52633666992188,
      "loss": 0.5144,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.8838651776313782,
      "rewards/margins": 1.275496244430542,
      "rewards/rejected": -0.39163118600845337,
      "step": 20
    },
    {
      "epoch": 0.18115942028985507,
      "grad_norm": 37.05591291167376,
      "learning_rate": 9.966024404228493e-07,
      "logits/chosen": -2.4429798126220703,
      "logits/rejected": -2.4225034713745117,
      "logps/chosen": -179.79977416992188,
      "logps/rejected": -179.25279235839844,
      "loss": 0.5036,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": 0.40532931685447693,
      "rewards/margins": 0.8947975039482117,
      "rewards/rejected": -0.4894680976867676,
      "step": 25
    },
    {
      "epoch": 0.21739130434782608,
      "grad_norm": 33.27618771325657,
      "learning_rate": 9.939652198703783e-07,
      "logits/chosen": -2.319044589996338,
      "logits/rejected": -2.320253372192383,
      "logps/chosen": -188.62039184570312,
      "logps/rejected": -193.8806915283203,
      "loss": 0.4987,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.6481167078018188,
      "rewards/margins": 1.2183058261871338,
      "rewards/rejected": -0.5701891183853149,
      "step": 30
    },
    {
      "epoch": 0.2536231884057971,
      "grad_norm": 38.645073192304736,
      "learning_rate": 9.905813465442354e-07,
      "logits/chosen": -2.242053747177124,
      "logits/rejected": -2.2155580520629883,
      "logps/chosen": -203.94546508789062,
      "logps/rejected": -194.8705291748047,
      "loss": 0.5091,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.8871867060661316,
      "rewards/margins": 1.2737131118774414,
      "rewards/rejected": -0.3865264356136322,
      "step": 35
    },
    {
      "epoch": 0.2898550724637681,
      "grad_norm": 30.937642063399068,
      "learning_rate": 9.864559353357187e-07,
      "logits/chosen": -2.0998620986938477,
      "logits/rejected": -2.094836473464966,
      "logps/chosen": -182.6898956298828,
      "logps/rejected": -185.73983764648438,
      "loss": 0.4889,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 1.0410809516906738,
      "rewards/margins": 1.0222995281219482,
      "rewards/rejected": 0.018781563267111778,
      "step": 40
    },
    {
      "epoch": 0.2898550724637681,
      "eval_logits/chosen": -2.035081624984741,
      "eval_logits/rejected": -1.9875677824020386,
      "eval_logps/chosen": -188.77523803710938,
      "eval_logps/rejected": -200.09498596191406,
      "eval_loss": 0.4642000198364258,
      "eval_rewards/accuracies": 0.7943548560142517,
      "eval_rewards/chosen": 1.1544201374053955,
      "eval_rewards/margins": 1.3431099653244019,
      "eval_rewards/rejected": -0.18868987262248993,
      "eval_runtime": 248.6586,
      "eval_samples_per_second": 15.773,
      "eval_steps_per_second": 0.249,
      "step": 40
    }
  ],
  "logging_steps": 5,
  "max_steps": 414,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 40,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 471248375119872.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}