{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.9914040114613183, "eval_steps": 50, "global_step": 261, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.05730659025787966, "grad_norm": 13.615523112232149, "learning_rate": 5e-07, "loss": 1.2695, "step": 5 }, { "epoch": 0.11461318051575932, "grad_norm": 8.507104558172857, "learning_rate": 1e-06, "loss": 1.1722, "step": 10 }, { "epoch": 0.17191977077363896, "grad_norm": 5.216528499988659, "learning_rate": 9.990212076323586e-07, "loss": 1.067, "step": 15 }, { "epoch": 0.22922636103151864, "grad_norm": 3.9965836016587044, "learning_rate": 9.9608866266743e-07, "loss": 1.0137, "step": 20 }, { "epoch": 0.28653295128939826, "grad_norm": 3.90312687677931, "learning_rate": 9.912138465157323e-07, "loss": 1.0128, "step": 25 }, { "epoch": 0.3438395415472779, "grad_norm": 3.5852669105149593, "learning_rate": 9.84415844908637e-07, "loss": 0.9835, "step": 30 }, { "epoch": 0.40114613180515757, "grad_norm": 3.544317847930178, "learning_rate": 9.757212731744973e-07, "loss": 0.967, "step": 35 }, { "epoch": 0.4584527220630373, "grad_norm": 3.581414511816451, "learning_rate": 9.65164172035126e-07, "loss": 0.9694, "step": 40 }, { "epoch": 0.5157593123209169, "grad_norm": 3.4337266155445634, "learning_rate": 9.527858743306018e-07, "loss": 0.9532, "step": 45 }, { "epoch": 0.5730659025787965, "grad_norm": 3.4769930108810807, "learning_rate": 9.386348431941952e-07, "loss": 0.9472, "step": 50 }, { "epoch": 0.5730659025787965, "eval_loss": 0.9284669160842896, "eval_runtime": 42.918, "eval_samples_per_second": 57.761, "eval_steps_per_second": 0.909, "step": 50 }, { "epoch": 0.6303724928366762, "grad_norm": 3.550987444482616, "learning_rate": 9.227664823109882e-07, "loss": 0.9391, "step": 55 }, { "epoch": 0.6876790830945558, "grad_norm": 3.4297326670171917, "learning_rate": 9.052429190030588e-07, "loss": 0.9422, "step": 60 }, { "epoch": 0.7449856733524355, "grad_norm": 3.507501066789746, "learning_rate": 8.861327609904857e-07, "loss": 0.9428, "step": 65 }, { "epoch": 0.8022922636103151, "grad_norm": 3.5906687662141117, "learning_rate": 8.655108277804975e-07, "loss": 0.9253, "step": 70 }, { "epoch": 0.8595988538681948, "grad_norm": 3.500303702885294, "learning_rate": 8.434578577364217e-07, "loss": 0.9285, "step": 75 }, { "epoch": 0.9169054441260746, "grad_norm": 3.9526314008449996, "learning_rate": 8.200601919733105e-07, "loss": 0.8996, "step": 80 }, { "epoch": 0.9742120343839542, "grad_norm": 3.516323615141867, "learning_rate": 7.954094363178421e-07, "loss": 0.895, "step": 85 }, { "epoch": 1.0315186246418337, "grad_norm": 4.35173055618085, "learning_rate": 7.696021026559849e-07, "loss": 0.8505, "step": 90 }, { "epoch": 1.0888252148997135, "grad_norm": 4.085589351210672, "learning_rate": 7.427392310726087e-07, "loss": 0.7964, "step": 95 }, { "epoch": 1.146131805157593, "grad_norm": 3.9343561192796934, "learning_rate": 7.149259942624286e-07, "loss": 0.7918, "step": 100 }, { "epoch": 1.146131805157593, "eval_loss": 0.8878653049468994, "eval_runtime": 42.3514, "eval_samples_per_second": 58.534, "eval_steps_per_second": 0.921, "step": 100 }, { "epoch": 1.2034383954154728, "grad_norm": 3.882747185049188, "learning_rate": 6.862712857610811e-07, "loss": 0.7851, "step": 105 }, { "epoch": 1.2607449856733524, "grad_norm": 3.9752866239432185, "learning_rate": 6.568872936084788e-07, "loss": 0.7941, "step": 110 }, { "epoch": 1.3180515759312321, "grad_norm": 3.6828284446292505, "learning_rate": 
6.26889061113621e-07, "loss": 0.7818, "step": 115 }, { "epoch": 1.3753581661891117, "grad_norm": 4.0779987976559795, "learning_rate": 5.963940364405425e-07, "loss": 0.7888, "step": 120 }, { "epoch": 1.4326647564469914, "grad_norm": 3.658488328256016, "learning_rate": 5.655216127788472e-07, "loss": 0.7783, "step": 125 }, { "epoch": 1.4899713467048712, "grad_norm": 3.9469181853717594, "learning_rate": 5.343926608991379e-07, "loss": 0.7718, "step": 130 }, { "epoch": 1.5472779369627507, "grad_norm": 3.9580241692624294, "learning_rate": 5.031290559234649e-07, "loss": 0.7789, "step": 135 }, { "epoch": 1.6045845272206303, "grad_norm": 3.9554511858811114, "learning_rate": 4.718532001635686e-07, "loss": 0.7749, "step": 140 }, { "epoch": 1.66189111747851, "grad_norm": 3.91874392198526, "learning_rate": 4.406875438950861e-07, "loss": 0.7639, "step": 145 }, { "epoch": 1.7191977077363898, "grad_norm": 3.775379766157052, "learning_rate": 4.097541059439698e-07, "loss": 0.7632, "step": 150 }, { "epoch": 1.7191977077363898, "eval_loss": 0.8642120361328125, "eval_runtime": 42.3256, "eval_samples_per_second": 58.57, "eval_steps_per_second": 0.921, "step": 150 }, { "epoch": 1.7765042979942693, "grad_norm": 3.9001768665312837, "learning_rate": 3.7917399596210535e-07, "loss": 0.7686, "step": 155 }, { "epoch": 1.8338108882521489, "grad_norm": 3.776759348608646, "learning_rate": 3.490669402625007e-07, "loss": 0.7652, "step": 160 }, { "epoch": 1.8911174785100286, "grad_norm": 3.954259341942418, "learning_rate": 3.195508130704795e-07, "loss": 0.7608, "step": 165 }, { "epoch": 1.9484240687679084, "grad_norm": 3.9914749419039275, "learning_rate": 2.9074117502611296e-07, "loss": 0.7559, "step": 170 }, { "epoch": 2.005730659025788, "grad_norm": 4.489725135115642, "learning_rate": 2.6275082074473075e-07, "loss": 0.7395, "step": 175 }, { "epoch": 2.0630372492836675, "grad_norm": 4.317747898535887, "learning_rate": 2.3568933720688543e-07, "loss": 0.6774, "step": 180 }, { "epoch": 2.1203438395415475, "grad_norm": 4.302108159226055, "learning_rate": 2.096626747067527e-07, "loss": 0.6677, "step": 185 }, { "epoch": 2.177650429799427, "grad_norm": 4.307581231308638, "learning_rate": 1.8477273203877398e-07, "loss": 0.6876, "step": 190 }, { "epoch": 2.2349570200573066, "grad_norm": 4.196370522955929, "learning_rate": 1.6111695754660664e-07, "loss": 0.6838, "step": 195 }, { "epoch": 2.292263610315186, "grad_norm": 4.075486625054549, "learning_rate": 1.3878796759634542e-07, "loss": 0.6847, "step": 200 }, { "epoch": 2.292263610315186, "eval_loss": 0.8614758253097534, "eval_runtime": 42.3657, "eval_samples_per_second": 58.514, "eval_steps_per_second": 0.921, "step": 200 }, { "epoch": 2.349570200573066, "grad_norm": 4.204097889941997, "learning_rate": 1.1787318396775186e-07, "loss": 0.6856, "step": 205 }, { "epoch": 2.4068767908309456, "grad_norm": 4.419337399898955, "learning_rate": 9.845449158317215e-08, "loss": 0.6915, "step": 210 }, { "epoch": 2.464183381088825, "grad_norm": 4.134150727696896, "learning_rate": 8.060791791418886e-08, "loss": 0.6697, "step": 215 }, { "epoch": 2.5214899713467047, "grad_norm": 4.168834465021675, "learning_rate": 6.440333532118503e-08, "loss": 0.6839, "step": 220 }, { "epoch": 2.5787965616045847, "grad_norm": 4.273918640205208, "learning_rate": 4.990418749121178e-08, "loss": 0.6739, "step": 225 }, { "epoch": 2.6361031518624642, "grad_norm": 4.307832832639697, "learning_rate": 3.716724104520247e-08, "loss": 0.6754, "step": 230 }, { "epoch": 2.693409742120344, "grad_norm": 4.0666737832982385, 
"learning_rate": 2.624236328703061e-08, "loss": 0.6954, "step": 235 }, { "epoch": 2.7507163323782233, "grad_norm": 3.9813588727036855, "learning_rate": 1.7172326964564775e-08, "loss": 0.6716, "step": 240 }, { "epoch": 2.8080229226361033, "grad_norm": 4.208145042622316, "learning_rate": 9.992642807111484e-09, "loss": 0.6893, "step": 245 }, { "epoch": 2.865329512893983, "grad_norm": 4.214969116203108, "learning_rate": 4.7314204948923354e-09, "loss": 0.685, "step": 250 }, { "epoch": 2.865329512893983, "eval_loss": 0.8606014847755432, "eval_runtime": 42.4569, "eval_samples_per_second": 58.389, "eval_steps_per_second": 0.919, "step": 250 }, { "epoch": 2.9226361031518624, "grad_norm": 4.130008937743228, "learning_rate": 1.4092586048820575e-09, "loss": 0.6778, "step": 255 }, { "epoch": 2.9799426934097424, "grad_norm": 4.115719334147374, "learning_rate": 3.91639638886998e-11, "loss": 0.6874, "step": 260 }, { "epoch": 2.9914040114613183, "step": 261, "total_flos": 1538740350615552.0, "train_loss": 0.8155917972897204, "train_runtime": 3563.3225, "train_samples_per_second": 18.784, "train_steps_per_second": 0.073 } ], "logging_steps": 5, "max_steps": 261, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1538740350615552.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }