|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0030034314203978047,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.5017157101989023e-05,
      "eval_loss": 3.4940359592437744,
      "eval_runtime": 2298.8386,
      "eval_samples_per_second": 12.197,
      "eval_steps_per_second": 6.099,
      "step": 1
    },
    {
      "epoch": 7.508578550994511e-05,
      "grad_norm": 0.3800654113292694,
      "learning_rate": 5e-05,
      "loss": 1.406,
      "step": 5
    },
    {
      "epoch": 0.00015017157101989022,
      "grad_norm": 0.7993976473808289,
      "learning_rate": 0.0001,
      "loss": 1.6444,
      "step": 10
    },
    {
      "epoch": 0.00022525735652983533,
      "grad_norm": 0.8274301290512085,
      "learning_rate": 9.98292246503335e-05,
      "loss": 1.6831,
      "step": 15
    },
    {
      "epoch": 0.00030034314203978044,
      "grad_norm": 1.1854416131973267,
      "learning_rate": 9.931806517013612e-05,
      "loss": 1.8049,
      "step": 20
    },
    {
      "epoch": 0.0003754289275497256,
      "grad_norm": 1.5541913509368896,
      "learning_rate": 9.847001329696653e-05,
      "loss": 1.7347,
      "step": 25
    },
    {
      "epoch": 0.00045051471305967067,
      "grad_norm": 1.7877317667007446,
      "learning_rate": 9.729086208503174e-05,
      "loss": 1.8298,
      "step": 30
    },
    {
      "epoch": 0.0005256004985696158,
      "grad_norm": 2.2849998474121094,
      "learning_rate": 9.578866633275288e-05,
      "loss": 2.3391,
      "step": 35
    },
    {
      "epoch": 0.0006006862840795609,
      "grad_norm": 4.458853244781494,
      "learning_rate": 9.397368756032445e-05,
      "loss": 3.3874,
      "step": 40
    },
    {
      "epoch": 0.000675772069589506,
      "grad_norm": 5.58752965927124,
      "learning_rate": 9.185832391312644e-05,
      "loss": 4.1727,
      "step": 45
    },
    {
      "epoch": 0.0007508578550994512,
      "grad_norm": 17.630456924438477,
      "learning_rate": 8.945702546981969e-05,
      "loss": 4.9483,
      "step": 50
    },
    {
      "epoch": 0.0007508578550994512,
      "eval_loss": 2.8145010471343994,
      "eval_runtime": 2308.5092,
      "eval_samples_per_second": 12.146,
      "eval_steps_per_second": 6.073,
      "step": 50
    },
    {
      "epoch": 0.0008259436406093962,
      "grad_norm": 1.356021761894226,
      "learning_rate": 8.678619553365659e-05,
      "loss": 1.5806,
      "step": 55
    },
    {
      "epoch": 0.0009010294261193413,
      "grad_norm": 0.8718690872192383,
      "learning_rate": 8.386407858128706e-05,
      "loss": 1.4852,
      "step": 60
    },
    {
      "epoch": 0.0009761152116292864,
      "grad_norm": 1.0226633548736572,
      "learning_rate": 8.07106356344834e-05,
      "loss": 1.6258,
      "step": 65
    },
    {
      "epoch": 0.0010512009971392316,
      "grad_norm": 0.9398789405822754,
      "learning_rate": 7.734740790612136e-05,
      "loss": 1.7249,
      "step": 70
    },
    {
      "epoch": 0.0011262867826491766,
      "grad_norm": 1.301905632019043,
      "learning_rate": 7.379736965185368e-05,
      "loss": 1.8587,
      "step": 75
    },
    {
      "epoch": 0.0012013725681591218,
      "grad_norm": 1.5729906558990479,
      "learning_rate": 7.008477123264848e-05,
      "loss": 1.8419,
      "step": 80
    },
    {
      "epoch": 0.001276458353669067,
      "grad_norm": 1.849243402481079,
      "learning_rate": 6.623497346023418e-05,
      "loss": 2.12,
      "step": 85
    },
    {
      "epoch": 0.001351544139179012,
      "grad_norm": 3.608657121658325,
      "learning_rate": 6.227427435703997e-05,
      "loss": 3.2251,
      "step": 90
    },
    {
      "epoch": 0.0014266299246889571,
      "grad_norm": 5.073084831237793,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 4.0405,
      "step": 95
    },
    {
      "epoch": 0.0015017157101989023,
      "grad_norm": 9.735648155212402,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 4.5536,
      "step": 100
    },
    {
      "epoch": 0.0015017157101989023,
      "eval_loss": 2.4490885734558105,
      "eval_runtime": 2312.1302,
      "eval_samples_per_second": 12.127,
      "eval_steps_per_second": 6.064,
      "step": 100
    },
    {
      "epoch": 0.0015768014957088473,
      "grad_norm": 0.8634369969367981,
      "learning_rate": 5e-05,
      "loss": 1.3816,
      "step": 105
    },
    {
      "epoch": 0.0016518872812187925,
      "grad_norm": 0.7216693758964539,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 1.5009,
      "step": 110
    },
    {
      "epoch": 0.0017269730667287377,
      "grad_norm": 0.9396697282791138,
      "learning_rate": 4.17702704859633e-05,
      "loss": 1.6304,
      "step": 115
    },
    {
      "epoch": 0.0018020588522386827,
      "grad_norm": 1.026105284690857,
      "learning_rate": 3.772572564296005e-05,
      "loss": 1.674,
      "step": 120
    },
    {
      "epoch": 0.0018771446377486279,
      "grad_norm": 1.0835193395614624,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 1.531,
      "step": 125
    },
    {
      "epoch": 0.0019522304232585728,
      "grad_norm": 1.1509310007095337,
      "learning_rate": 2.991522876735154e-05,
      "loss": 1.7271,
      "step": 130
    },
    {
      "epoch": 0.002027316208768518,
      "grad_norm": 2.0080721378326416,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 2.1363,
      "step": 135
    },
    {
      "epoch": 0.0021024019942784632,
      "grad_norm": 2.767570734024048,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 2.8779,
      "step": 140
    },
    {
      "epoch": 0.0021774877797884084,
      "grad_norm": 4.044328212738037,
      "learning_rate": 1.928936436551661e-05,
      "loss": 3.7799,
      "step": 145
    },
    {
      "epoch": 0.002252573565298353,
      "grad_norm": 12.717976570129395,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 4.5699,
      "step": 150
    },
    {
      "epoch": 0.002252573565298353,
      "eval_loss": 2.327751636505127,
      "eval_runtime": 2313.9453,
      "eval_samples_per_second": 12.117,
      "eval_steps_per_second": 6.059,
      "step": 150
    },
    {
      "epoch": 0.0023276593508082984,
      "grad_norm": 0.4896831214427948,
      "learning_rate": 1.3213804466343421e-05,
      "loss": 1.3756,
      "step": 155
    },
    {
      "epoch": 0.0024027451363182436,
      "grad_norm": 0.7484576106071472,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 1.4297,
      "step": 160
    },
    {
      "epoch": 0.0024778309218281887,
      "grad_norm": 0.7907517552375793,
      "learning_rate": 8.141676086873572e-06,
      "loss": 1.531,
      "step": 165
    },
    {
      "epoch": 0.002552916707338134,
      "grad_norm": 0.9451369643211365,
      "learning_rate": 6.026312439675552e-06,
      "loss": 1.5736,
      "step": 170
    },
    {
      "epoch": 0.002628002492848079,
      "grad_norm": 1.0756996870040894,
      "learning_rate": 4.2113336672471245e-06,
      "loss": 1.6789,
      "step": 175
    },
    {
      "epoch": 0.002703088278358024,
      "grad_norm": 1.4432721138000488,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 1.8192,
      "step": 180
    },
    {
      "epoch": 0.002778174063867969,
      "grad_norm": 1.6177784204483032,
      "learning_rate": 1.5299867030334814e-06,
      "loss": 2.0462,
      "step": 185
    },
    {
      "epoch": 0.0028532598493779143,
      "grad_norm": 3.0353176593780518,
      "learning_rate": 6.819348298638839e-07,
      "loss": 2.6835,
      "step": 190
    },
    {
      "epoch": 0.0029283456348878595,
      "grad_norm": 3.7709476947784424,
      "learning_rate": 1.7077534966650766e-07,
      "loss": 3.513,
      "step": 195
    },
    {
      "epoch": 0.0030034314203978047,
      "grad_norm": 10.600866317749023,
      "learning_rate": 0.0,
      "loss": 4.5757,
      "step": 200
    },
    {
      "epoch": 0.0030034314203978047,
      "eval_loss": 2.316987991333008,
      "eval_runtime": 2314.3781,
      "eval_samples_per_second": 12.115,
      "eval_steps_per_second": 6.058,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.444496098787328e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}