{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.16796136888515642, "eval_steps": 13, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.003359227377703128, "grad_norm": NaN, "learning_rate": 1e-05, "loss": 1.9546, "step": 1 }, { "epoch": 0.003359227377703128, "eval_loss": NaN, "eval_runtime": 61.1706, "eval_samples_per_second": 8.207, "eval_steps_per_second": 4.103, "step": 1 }, { "epoch": 0.006718454755406256, "grad_norm": NaN, "learning_rate": 2e-05, "loss": 2.0227, "step": 2 }, { "epoch": 0.010077682133109386, "grad_norm": NaN, "learning_rate": 3e-05, "loss": 4.1069, "step": 3 }, { "epoch": 0.013436909510812512, "grad_norm": 0.3233916461467743, "learning_rate": 4e-05, "loss": 1.5745, "step": 4 }, { "epoch": 0.016796136888515643, "grad_norm": 0.32183876633644104, "learning_rate": 5e-05, "loss": 1.5581, "step": 5 }, { "epoch": 0.02015536426621877, "grad_norm": 0.33219215273857117, "learning_rate": 6e-05, "loss": 1.5906, "step": 6 }, { "epoch": 0.023514591643921896, "grad_norm": NaN, "learning_rate": 7e-05, "loss": 2.2802, "step": 7 }, { "epoch": 0.026873819021625025, "grad_norm": NaN, "learning_rate": 8e-05, "loss": 1.7134, "step": 8 }, { "epoch": 0.030233046399328153, "grad_norm": 0.37869352102279663, "learning_rate": 9e-05, "loss": 1.5389, "step": 9 }, { "epoch": 0.033592273777031285, "grad_norm": 0.414662629365921, "learning_rate": 0.0001, "loss": 1.5451, "step": 10 }, { "epoch": 0.03695150115473441, "grad_norm": 0.5286617279052734, "learning_rate": 9.98458666866564e-05, "loss": 1.508, "step": 11 }, { "epoch": 0.04031072853243754, "grad_norm": 0.571130633354187, "learning_rate": 9.938441702975689e-05, "loss": 1.4923, "step": 12 }, { "epoch": 0.04366995591014067, "grad_norm": 0.5468776226043701, "learning_rate": 9.861849601988383e-05, "loss": 1.4145, "step": 13 }, { "epoch": 0.04366995591014067, "eval_loss": NaN, "eval_runtime": 21.2042, "eval_samples_per_second": 23.675, "eval_steps_per_second": 11.837, "step": 13 }, { "epoch": 0.04702918328784379, "grad_norm": 0.5036017894744873, "learning_rate": 9.755282581475769e-05, "loss": 1.3422, "step": 14 }, { "epoch": 0.050388410665546925, "grad_norm": 0.429851233959198, "learning_rate": 9.619397662556435e-05, "loss": 1.2768, "step": 15 }, { "epoch": 0.05374763804325005, "grad_norm": 0.43012356758117676, "learning_rate": 9.45503262094184e-05, "loss": 1.2485, "step": 16 }, { "epoch": 0.05710686542095318, "grad_norm": 0.43632248044013977, "learning_rate": 9.263200821770461e-05, "loss": 1.1458, "step": 17 }, { "epoch": 0.06046609279865631, "grad_norm": 0.4527662992477417, "learning_rate": 9.045084971874738e-05, "loss": 1.1514, "step": 18 }, { "epoch": 0.06382532017635943, "grad_norm": NaN, "learning_rate": 8.802029828000156e-05, "loss": 1.8809, "step": 19 }, { "epoch": 0.06718454755406257, "grad_norm": 0.43924033641815186, "learning_rate": 8.535533905932738e-05, "loss": 1.1503, "step": 20 }, { "epoch": 0.0705437749317657, "grad_norm": NaN, "learning_rate": 8.247240241650918e-05, "loss": 1.9837, "step": 21 }, { "epoch": 0.07390300230946882, "grad_norm": NaN, "learning_rate": 7.938926261462366e-05, "loss": 1.4764, "step": 22 }, { "epoch": 0.07726222968717195, "grad_norm": 0.39424705505371094, "learning_rate": 7.612492823579745e-05, "loss": 1.1039, "step": 23 }, { "epoch": 0.08062145706487509, "grad_norm": NaN, "learning_rate": 7.269952498697734e-05, "loss": 1.4906, "step": 24 }, { "epoch": 0.08398068444257821, "grad_norm": 
0.34690049290657043, "learning_rate": 6.91341716182545e-05, "loss": 1.0607, "step": 25 }, { "epoch": 0.08733991182028134, "grad_norm": 0.34878969192504883, "learning_rate": 6.545084971874738e-05, "loss": 1.0336, "step": 26 }, { "epoch": 0.08733991182028134, "eval_loss": NaN, "eval_runtime": 21.1636, "eval_samples_per_second": 23.72, "eval_steps_per_second": 11.86, "step": 26 }, { "epoch": 0.09069913919798446, "grad_norm": 0.3356166183948517, "learning_rate": 6.167226819279528e-05, "loss": 1.0614, "step": 27 }, { "epoch": 0.09405836657568759, "grad_norm": NaN, "learning_rate": 5.782172325201155e-05, "loss": 1.5224, "step": 28 }, { "epoch": 0.09741759395339072, "grad_norm": NaN, "learning_rate": 5.392295478639225e-05, "loss": 1.774, "step": 29 }, { "epoch": 0.10077682133109385, "grad_norm": 0.33320266008377075, "learning_rate": 5e-05, "loss": 1.0382, "step": 30 }, { "epoch": 0.10413604870879697, "grad_norm": 0.3378564417362213, "learning_rate": 4.607704521360776e-05, "loss": 0.9827, "step": 31 }, { "epoch": 0.1074952760865001, "grad_norm": NaN, "learning_rate": 4.2178276747988446e-05, "loss": 1.4901, "step": 32 }, { "epoch": 0.11085450346420324, "grad_norm": 0.3424948453903198, "learning_rate": 3.832773180720475e-05, "loss": 0.9419, "step": 33 }, { "epoch": 0.11421373084190636, "grad_norm": 0.3185945749282837, "learning_rate": 3.4549150281252636e-05, "loss": 0.9325, "step": 34 }, { "epoch": 0.11757295821960949, "grad_norm": NaN, "learning_rate": 3.086582838174551e-05, "loss": 1.1159, "step": 35 }, { "epoch": 0.12093218559731261, "grad_norm": 0.3276839554309845, "learning_rate": 2.7300475013022663e-05, "loss": 0.9843, "step": 36 }, { "epoch": 0.12429141297501575, "grad_norm": NaN, "learning_rate": 2.3875071764202563e-05, "loss": 0.9924, "step": 37 }, { "epoch": 0.12765064035271886, "grad_norm": NaN, "learning_rate": 2.061073738537635e-05, "loss": 1.3187, "step": 38 }, { "epoch": 0.131009867730422, "grad_norm": 0.31996309757232666, "learning_rate": 1.7527597583490822e-05, "loss": 0.9506, "step": 39 }, { "epoch": 0.131009867730422, "eval_loss": NaN, "eval_runtime": 21.1859, "eval_samples_per_second": 23.695, "eval_steps_per_second": 11.847, "step": 39 }, { "epoch": 0.13436909510812514, "grad_norm": NaN, "learning_rate": 1.4644660940672627e-05, "loss": 1.2932, "step": 40 }, { "epoch": 0.13772832248582825, "grad_norm": NaN, "learning_rate": 1.1979701719998453e-05, "loss": 1.5242, "step": 41 }, { "epoch": 0.1410875498635314, "grad_norm": 0.3238716125488281, "learning_rate": 9.549150281252633e-06, "loss": 0.9333, "step": 42 }, { "epoch": 0.1444467772412345, "grad_norm": 0.3042464852333069, "learning_rate": 7.367991782295391e-06, "loss": 0.9319, "step": 43 }, { "epoch": 0.14780600461893764, "grad_norm": 0.31937065720558167, "learning_rate": 5.449673790581611e-06, "loss": 0.8955, "step": 44 }, { "epoch": 0.15116523199664078, "grad_norm": 0.30120527744293213, "learning_rate": 3.8060233744356633e-06, "loss": 0.9155, "step": 45 }, { "epoch": 0.1545244593743439, "grad_norm": NaN, "learning_rate": 2.4471741852423237e-06, "loss": 1.735, "step": 46 }, { "epoch": 0.15788368675204703, "grad_norm": NaN, "learning_rate": 1.3815039801161721e-06, "loss": 1.5965, "step": 47 }, { "epoch": 0.16124291412975017, "grad_norm": 0.3180822730064392, "learning_rate": 6.15582970243117e-07, "loss": 0.9638, "step": 48 }, { "epoch": 0.16460214150745328, "grad_norm": NaN, "learning_rate": 1.5413331334360182e-07, "loss": 0.9644, "step": 49 }, { "epoch": 0.16796136888515642, "grad_norm": 0.3325708508491516, "learning_rate": 0.0, 
"loss": 0.9511, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 13, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 6.57265690411008e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }