{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.984771573604061, "eval_steps": 50, "global_step": 441, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0338409475465313, "grad_norm": 18.879603600162138, "learning_rate": 5e-07, "loss": 1.7268, "step": 5 }, { "epoch": 0.0676818950930626, "grad_norm": 12.562690441872693, "learning_rate": 1e-06, "loss": 1.5925, "step": 10 }, { "epoch": 0.10152284263959391, "grad_norm": 8.05925907057448, "learning_rate": 9.996679701338661e-07, "loss": 1.269, "step": 15 }, { "epoch": 0.1353637901861252, "grad_norm": 4.106846211376464, "learning_rate": 9.986723215107924e-07, "loss": 1.1436, "step": 20 }, { "epoch": 0.1692047377326565, "grad_norm": 4.065415826389198, "learning_rate": 9.97014376471095e-07, "loss": 1.0624, "step": 25 }, { "epoch": 0.20304568527918782, "grad_norm": 3.5507350425506274, "learning_rate": 9.946963369638524e-07, "loss": 1.0302, "step": 30 }, { "epoch": 0.23688663282571912, "grad_norm": 3.620116416000427, "learning_rate": 9.917212816224536e-07, "loss": 1.0156, "step": 35 }, { "epoch": 0.2707275803722504, "grad_norm": 3.392556379414998, "learning_rate": 9.880931616758056e-07, "loss": 0.9731, "step": 40 }, { "epoch": 0.30456852791878175, "grad_norm": 3.6283098411860224, "learning_rate": 9.838167957006293e-07, "loss": 0.9453, "step": 45 }, { "epoch": 0.338409475465313, "grad_norm": 3.643003094042682, "learning_rate": 9.788978632218138e-07, "loss": 0.9333, "step": 50 }, { "epoch": 0.338409475465313, "eval_loss": 0.9387685656547546, "eval_runtime": 73.7594, "eval_samples_per_second": 56.942, "eval_steps_per_second": 0.895, "step": 50 }, { "epoch": 0.37225042301184436, "grad_norm": 3.5187728461211143, "learning_rate": 9.73342897169329e-07, "loss": 0.933, "step": 55 }, { "epoch": 0.40609137055837563, "grad_norm": 3.221032807973684, "learning_rate": 9.671592752017137e-07, "loss": 0.9244, "step": 60 }, { "epoch": 0.43993231810490696, "grad_norm": 3.6068076034822982, "learning_rate": 9.603552099076648e-07, "loss": 0.9217, "step": 65 }, { "epoch": 0.47377326565143824, "grad_norm": 3.522728399930581, "learning_rate": 9.52939737898737e-07, "loss": 0.9251, "step": 70 }, { "epoch": 0.5076142131979695, "grad_norm": 3.4156164712414805, "learning_rate": 9.449227078076443e-07, "loss": 0.8951, "step": 75 }, { "epoch": 0.5414551607445008, "grad_norm": 3.5241377443178594, "learning_rate": 9.363147672080985e-07, "loss": 0.893, "step": 80 }, { "epoch": 0.5752961082910322, "grad_norm": 3.480593037844269, "learning_rate": 9.271273484735592e-07, "loss": 0.8702, "step": 85 }, { "epoch": 0.6091370558375635, "grad_norm": 3.3606090648530906, "learning_rate": 9.173726535936766e-07, "loss": 0.8931, "step": 90 }, { "epoch": 0.6429780033840947, "grad_norm": 3.3802398568272545, "learning_rate": 9.070636379685915e-07, "loss": 0.8689, "step": 95 }, { "epoch": 0.676818950930626, "grad_norm": 3.7340957238116013, "learning_rate": 8.962139932026156e-07, "loss": 0.8736, "step": 100 }, { "epoch": 0.676818950930626, "eval_loss": 0.8820675015449524, "eval_runtime": 73.2211, "eval_samples_per_second": 57.361, "eval_steps_per_second": 0.901, "step": 100 }, { "epoch": 0.7106598984771574, "grad_norm": 3.491575137149294, "learning_rate": 8.848381289201459e-07, "loss": 0.8658, "step": 105 }, { "epoch": 0.7445008460236887, "grad_norm": 3.3971585698046023, "learning_rate": 8.72951153627962e-07, "loss": 0.85, "step": 110 }, { "epoch": 0.7783417935702199, "grad_norm": 3.4606145381962574, "learning_rate": 
8.605688546493238e-07, "loss": 0.8831, "step": 115 }, { "epoch": 0.8121827411167513, "grad_norm": 3.312370481580055, "learning_rate": 8.477076771565202e-07, "loss": 0.8542, "step": 120 }, { "epoch": 0.8460236886632826, "grad_norm": 3.479568204762634, "learning_rate": 8.343847023297169e-07, "loss": 0.8538, "step": 125 }, { "epoch": 0.8798646362098139, "grad_norm": 3.470397617651747, "learning_rate": 8.206176246711065e-07, "loss": 0.8568, "step": 130 }, { "epoch": 0.9137055837563451, "grad_norm": 3.4164690427045974, "learning_rate": 8.064247285044972e-07, "loss": 0.8566, "step": 135 }, { "epoch": 0.9475465313028765, "grad_norm": 3.5885722316358994, "learning_rate": 7.918248636915459e-07, "loss": 0.8671, "step": 140 }, { "epoch": 0.9813874788494078, "grad_norm": 3.573742166264298, "learning_rate": 7.768374205968906e-07, "loss": 0.8607, "step": 145 }, { "epoch": 1.015228426395939, "grad_norm": 3.7440804811259376, "learning_rate": 7.614823043354285e-07, "loss": 0.8459, "step": 150 }, { "epoch": 1.015228426395939, "eval_loss": 0.8576342463493347, "eval_runtime": 73.2015, "eval_samples_per_second": 57.376, "eval_steps_per_second": 0.902, "step": 150 }, { "epoch": 1.0490693739424704, "grad_norm": 3.836769449038388, "learning_rate": 7.457799083359471e-07, "loss": 0.7928, "step": 155 }, { "epoch": 1.0829103214890017, "grad_norm": 3.776692818561464, "learning_rate": 7.297510872562131e-07, "loss": 0.7801, "step": 160 }, { "epoch": 1.116751269035533, "grad_norm": 3.5436254123817075, "learning_rate": 7.134171292854955e-07, "loss": 0.7915, "step": 165 }, { "epoch": 1.1505922165820643, "grad_norm": 3.637774293664913, "learning_rate": 6.967997278713089e-07, "loss": 0.7831, "step": 170 }, { "epoch": 1.1844331641285957, "grad_norm": 3.6828430568432817, "learning_rate": 6.79920952907921e-07, "loss": 0.7911, "step": 175 }, { "epoch": 1.218274111675127, "grad_norm": 3.3501836574132673, "learning_rate": 6.628032214248982e-07, "loss": 0.7816, "step": 180 }, { "epoch": 1.252115059221658, "grad_norm": 3.678603436999248, "learning_rate": 6.454692678146119e-07, "loss": 0.7742, "step": 185 }, { "epoch": 1.2859560067681894, "grad_norm": 3.469023528162151, "learning_rate": 6.279421136382494e-07, "loss": 0.7932, "step": 190 }, { "epoch": 1.3197969543147208, "grad_norm": 3.5311154474090007, "learning_rate": 6.102450370504299e-07, "loss": 0.7852, "step": 195 }, { "epoch": 1.353637901861252, "grad_norm": 3.509395500338986, "learning_rate": 5.924015418830354e-07, "loss": 0.7682, "step": 200 }, { "epoch": 1.353637901861252, "eval_loss": 0.8485246300697327, "eval_runtime": 73.3335, "eval_samples_per_second": 57.273, "eval_steps_per_second": 0.9, "step": 200 }, { "epoch": 1.3874788494077834, "grad_norm": 3.5828255600676226, "learning_rate": 5.74435326429313e-07, "loss": 0.7784, "step": 205 }, { "epoch": 1.4213197969543148, "grad_norm": 3.8406423248810526, "learning_rate": 5.563702519697108e-07, "loss": 0.7789, "step": 210 }, { "epoch": 1.455160744500846, "grad_norm": 3.5396413573689482, "learning_rate": 5.382303110812466e-07, "loss": 0.7843, "step": 215 }, { "epoch": 1.4890016920473772, "grad_norm": 3.9366608589473184, "learning_rate": 5.200395957725005e-07, "loss": 0.7734, "step": 220 }, { "epoch": 1.5228426395939088, "grad_norm": 3.6167320799092644, "learning_rate": 5.018222654865471e-07, "loss": 0.777, "step": 225 }, { "epoch": 1.5566835871404399, "grad_norm": 3.508615499961612, "learning_rate": 4.836025150143318e-07, "loss": 0.7777, "step": 230 }, { "epoch": 1.5905245346869712, "grad_norm": 3.8323856939840133, 
"learning_rate": 4.654045423610952e-07, "loss": 0.7744, "step": 235 }, { "epoch": 1.6243654822335025, "grad_norm": 3.48551950556471, "learning_rate": 4.4725251660853357e-07, "loss": 0.7812, "step": 240 }, { "epoch": 1.6582064297800339, "grad_norm": 3.2586294819951815, "learning_rate": 4.2917054581536926e-07, "loss": 0.768, "step": 245 }, { "epoch": 1.6920473773265652, "grad_norm": 3.587138715732753, "learning_rate": 4.1118264499897003e-07, "loss": 0.7797, "step": 250 }, { "epoch": 1.6920473773265652, "eval_loss": 0.8384872078895569, "eval_runtime": 73.1511, "eval_samples_per_second": 57.415, "eval_steps_per_second": 0.902, "step": 250 }, { "epoch": 1.7258883248730963, "grad_norm": 3.497575413714801, "learning_rate": 3.9331270424053616e-07, "loss": 0.7617, "step": 255 }, { "epoch": 1.7597292724196278, "grad_norm": 3.410968439354172, "learning_rate": 3.755844569562191e-07, "loss": 0.7687, "step": 260 }, { "epoch": 1.793570219966159, "grad_norm": 3.609149780281463, "learning_rate": 3.580214483763093e-07, "loss": 0.7602, "step": 265 }, { "epoch": 1.8274111675126905, "grad_norm": 3.493512578978129, "learning_rate": 3.406470042743574e-07, "loss": 0.7717, "step": 270 }, { "epoch": 1.8612521150592216, "grad_norm": 3.6002811799241714, "learning_rate": 3.23484199987761e-07, "loss": 0.7496, "step": 275 }, { "epoch": 1.895093062605753, "grad_norm": 3.6855175892280303, "learning_rate": 3.065558297709588e-07, "loss": 0.7695, "step": 280 }, { "epoch": 1.9289340101522843, "grad_norm": 3.6642552773973254, "learning_rate": 2.898843765219388e-07, "loss": 0.7628, "step": 285 }, { "epoch": 1.9627749576988156, "grad_norm": 3.7846627756438913, "learning_rate": 2.7349198192226295e-07, "loss": 0.7732, "step": 290 }, { "epoch": 1.996615905245347, "grad_norm": 3.4513200002016022, "learning_rate": 2.574004170302696e-07, "loss": 0.7684, "step": 295 }, { "epoch": 2.030456852791878, "grad_norm": 3.8311631709862968, "learning_rate": 2.4163105336650643e-07, "loss": 0.7246, "step": 300 }, { "epoch": 2.030456852791878, "eval_loss": 0.8336632251739502, "eval_runtime": 73.1743, "eval_samples_per_second": 57.397, "eval_steps_per_second": 0.902, "step": 300 }, { "epoch": 2.0642978003384096, "grad_norm": 3.9190275346300107, "learning_rate": 2.2620483452979887e-07, "loss": 0.7305, "step": 305 }, { "epoch": 2.0981387478849407, "grad_norm": 3.6908653316747446, "learning_rate": 2.1114224838164806e-07, "loss": 0.7351, "step": 310 }, { "epoch": 2.1319796954314723, "grad_norm": 3.802531438077045, "learning_rate": 1.964632998359036e-07, "loss": 0.7367, "step": 315 }, { "epoch": 2.1658206429780034, "grad_norm": 3.775491751588902, "learning_rate": 1.8218748428984782e-07, "loss": 0.7233, "step": 320 }, { "epoch": 2.199661590524535, "grad_norm": 3.5862593065525, "learning_rate": 1.6833376173198005e-07, "loss": 0.7233, "step": 325 }, { "epoch": 2.233502538071066, "grad_norm": 3.5427938243892, "learning_rate": 1.5492053156088498e-07, "loss": 0.7098, "step": 330 }, { "epoch": 2.267343485617597, "grad_norm": 3.61152784910624, "learning_rate": 1.4196560814863567e-07, "loss": 0.7129, "step": 335 }, { "epoch": 2.3011844331641287, "grad_norm": 3.807265016749999, "learning_rate": 1.294861971811773e-07, "loss": 0.7074, "step": 340 }, { "epoch": 2.33502538071066, "grad_norm": 3.6719299115770783, "learning_rate": 1.1749887280712161e-07, "loss": 0.7121, "step": 345 }, { "epoch": 2.3688663282571913, "grad_norm": 3.5588978514727305, "learning_rate": 1.0601955562529774e-07, "loss": 0.7082, "step": 350 }, { "epoch": 2.3688663282571913, "eval_loss": 
0.8359954357147217, "eval_runtime": 73.1022, "eval_samples_per_second": 57.454, "eval_steps_per_second": 0.903, "step": 350 }, { "epoch": 2.4027072758037225, "grad_norm": 3.72273582563589, "learning_rate": 9.506349154029425e-08, "loss": 0.7117, "step": 355 }, { "epoch": 2.436548223350254, "grad_norm": 3.777312137275298, "learning_rate": 8.46452315140772e-08, "loss": 0.7238, "step": 360 }, { "epoch": 2.470389170896785, "grad_norm": 3.9355287879984404, "learning_rate": 7.477861224057403e-08, "loss": 0.7385, "step": 365 }, { "epoch": 2.504230118443316, "grad_norm": 3.612978118989858, "learning_rate": 6.547673776889095e-08, "loss": 0.7239, "step": 370 }, { "epoch": 2.5380710659898478, "grad_norm": 3.73334551587451, "learning_rate": 5.6751962099570396e-08, "loss": 0.6999, "step": 375 }, { "epoch": 2.571912013536379, "grad_norm": 3.7980379085271974, "learning_rate": 4.861587277700274e-08, "loss": 0.7253, "step": 380 }, { "epoch": 2.6057529610829104, "grad_norm": 3.8953973449204105, "learning_rate": 4.107927549978235e-08, "loss": 0.7267, "step": 385 }, { "epoch": 2.6395939086294415, "grad_norm": 4.0344872537944525, "learning_rate": 3.4152179769449396e-08, "loss": 0.7179, "step": 390 }, { "epoch": 2.673434856175973, "grad_norm": 3.8358198567516935, "learning_rate": 2.784378559667622e-08, "loss": 0.7043, "step": 395 }, { "epoch": 2.707275803722504, "grad_norm": 3.5246037765259532, "learning_rate": 2.2162471282553553e-08, "loss": 0.7007, "step": 400 }, { "epoch": 2.707275803722504, "eval_loss": 0.8346139788627625, "eval_runtime": 73.1876, "eval_samples_per_second": 57.387, "eval_steps_per_second": 0.902, "step": 400 }, { "epoch": 2.7411167512690353, "grad_norm": 3.808011429657169, "learning_rate": 1.7115782291206082e-08, "loss": 0.7232, "step": 405 }, { "epoch": 2.774957698815567, "grad_norm": 3.8868492324851003, "learning_rate": 1.2710421228514733e-08, "loss": 0.7213, "step": 410 }, { "epoch": 2.808798646362098, "grad_norm": 3.8016855221337953, "learning_rate": 8.952238940255153e-09, "loss": 0.7114, "step": 415 }, { "epoch": 2.8426395939086295, "grad_norm": 3.6712328006089763, "learning_rate": 5.846226741475557e-09, "loss": 0.7148, "step": 420 }, { "epoch": 2.8764805414551606, "grad_norm": 3.7375800014295417, "learning_rate": 3.3965097874343872e-09, "loss": 0.731, "step": 425 }, { "epoch": 2.910321489001692, "grad_norm": 3.6510115083695562, "learning_rate": 1.6063415949008618e-09, "loss": 0.7146, "step": 430 }, { "epoch": 2.9441624365482233, "grad_norm": 4.038300563245875, "learning_rate": 4.780997210962478e-10, "loss": 0.7113, "step": 435 }, { "epoch": 2.9780033840947544, "grad_norm": 3.6454952565628376, "learning_rate": 1.328260601385356e-11, "loss": 0.701, "step": 440 }, { "epoch": 2.984771573604061, "step": 441, "total_flos": 2600101931384832.0, "train_loss": 0.8247822573125497, "train_runtime": 6397.793, "train_samples_per_second": 17.725, "train_steps_per_second": 0.069 } ], "logging_steps": 5, "max_steps": 441, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 10000, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 2600101931384832.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }