{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 50,
  "global_step": 198,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005050505050505051,
      "grad_norm": 23.029043197631836,
      "learning_rate": 2e-05,
      "loss": 2.6769,
      "step": 1
    },
    {
      "epoch": 0.010101010101010102,
      "grad_norm": 634.5021362304688,
      "learning_rate": 4e-05,
      "loss": 3.2691,
      "step": 2
    },
    {
      "epoch": 0.015151515151515152,
      "grad_norm": 87.77198028564453,
      "learning_rate": 6e-05,
      "loss": 2.2667,
      "step": 3
    },
    {
      "epoch": 0.020202020202020204,
      "grad_norm": 27.21422576904297,
      "learning_rate": 8e-05,
      "loss": 3.2698,
      "step": 4
    },
    {
      "epoch": 0.025252525252525252,
      "grad_norm": 17.437393188476562,
      "learning_rate": 0.0001,
      "loss": 2.2537,
      "step": 5
    },
    {
      "epoch": 0.030303030303030304,
      "grad_norm": 21.726999282836914,
      "learning_rate": 0.00012,
      "loss": 2.7696,
      "step": 6
    },
    {
      "epoch": 0.03535353535353535,
      "grad_norm": 14.650723457336426,
      "learning_rate": 0.00014,
      "loss": 1.979,
      "step": 7
    },
    {
      "epoch": 0.04040404040404041,
      "grad_norm": 15.102992057800293,
      "learning_rate": 0.00016,
      "loss": 1.5791,
      "step": 8
    },
    {
      "epoch": 0.045454545454545456,
      "grad_norm": 17.626737594604492,
      "learning_rate": 0.00018,
      "loss": 1.4636,
      "step": 9
    },
    {
      "epoch": 0.050505050505050504,
      "grad_norm": 25.798032760620117,
      "learning_rate": 0.0002,
      "loss": 1.5067,
      "step": 10
    },
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 27.792566299438477,
      "learning_rate": 0.00019998603811858571,
      "loss": 1.4289,
      "step": 11
    },
    {
      "epoch": 0.06060606060606061,
      "grad_norm": 46.56501770019531,
      "learning_rate": 0.00019994415637302547,
      "loss": 1.7922,
      "step": 12
    },
    {
      "epoch": 0.06565656565656566,
      "grad_norm": 26.839624404907227,
      "learning_rate": 0.0001998743664582786,
      "loss": 1.1899,
      "step": 13
    },
    {
      "epoch": 0.0707070707070707,
      "grad_norm": 25.198179244995117,
      "learning_rate": 0.00019977668786231534,
      "loss": 1.0497,
      "step": 14
    },
    {
      "epoch": 0.07575757575757576,
      "grad_norm": 51.9806022644043,
      "learning_rate": 0.00019965114786067516,
      "loss": 1.3165,
      "step": 15
    },
    {
      "epoch": 0.08080808080808081,
      "grad_norm": 28.80169677734375,
      "learning_rate": 0.00019949778150885042,
      "loss": 1.111,
      "step": 16
    },
    {
      "epoch": 0.08585858585858586,
      "grad_norm": 25.198205947875977,
      "learning_rate": 0.00019931663163249742,
      "loss": 1.0153,
      "step": 17
    },
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 25.36805534362793,
      "learning_rate": 0.000199107748815478,
      "loss": 0.8575,
      "step": 18
    },
    {
      "epoch": 0.09595959595959595,
      "grad_norm": 28.688661575317383,
      "learning_rate": 0.0001988711913857346,
      "loss": 0.7617,
      "step": 19
    },
    {
      "epoch": 0.10101010101010101,
      "grad_norm": 22.180784225463867,
      "learning_rate": 0.00019860702539900287,
      "loss": 1.0212,
      "step": 20
    },
    {
      "epoch": 0.10606060606060606,
      "grad_norm": 20.960046768188477,
      "learning_rate": 0.00019831532462036636,
      "loss": 0.892,
      "step": 21
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 20.003198623657227,
      "learning_rate": 0.0001979961705036587,
      "loss": 0.8259,
      "step": 22
    },
    {
      "epoch": 0.11616161616161616,
      "grad_norm": 20.527767181396484,
      "learning_rate": 0.00019764965216871846,
      "loss": 0.7981,
      "step": 23
    },
    {
      "epoch": 0.12121212121212122,
      "grad_norm": 24.499876022338867,
      "learning_rate": 0.00019727586637650373,
      "loss": 0.7407,
      "step": 24
    },
    {
      "epoch": 0.12626262626262627,
      "grad_norm": 27.962913513183594,
      "learning_rate": 0.00019687491750207254,
      "loss": 0.8106,
      "step": 25
    },
    {
      "epoch": 0.13131313131313133,
      "grad_norm": 20.95252227783203,
      "learning_rate": 0.00019644691750543767,
      "loss": 0.8093,
      "step": 26
    },
    {
      "epoch": 0.13636363636363635,
      "grad_norm": 24.588537216186523,
      "learning_rate": 0.0001959919859003031,
      "loss": 0.9619,
      "step": 27
    },
    {
      "epoch": 0.1414141414141414,
      "grad_norm": 25.922687530517578,
      "learning_rate": 0.00019551024972069126,
      "loss": 1.0955,
      "step": 28
    },
    {
      "epoch": 0.14646464646464646,
      "grad_norm": 18.61737823486328,
      "learning_rate": 0.00019500184348547042,
      "loss": 0.7048,
      "step": 29
    },
    {
      "epoch": 0.15151515151515152,
      "grad_norm": 17.320585250854492,
      "learning_rate": 0.0001944669091607919,
      "loss": 0.5604,
      "step": 30
    },
    {
      "epoch": 0.15656565656565657,
      "grad_norm": 16.542715072631836,
      "learning_rate": 0.0001939055961204478,
      "loss": 0.7072,
      "step": 31
    },
    {
      "epoch": 0.16161616161616163,
      "grad_norm": 22.6462459564209,
      "learning_rate": 0.00019331806110416027,
      "loss": 0.7732,
      "step": 32
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 33.40491485595703,
      "learning_rate": 0.00019270446817381377,
      "loss": 0.6016,
      "step": 33
    },
    {
      "epoch": 0.1717171717171717,
      "grad_norm": 21.087663650512695,
      "learning_rate": 0.00019206498866764288,
      "loss": 0.61,
      "step": 34
    },
    {
      "epoch": 0.17676767676767677,
      "grad_norm": 17.59093475341797,
      "learning_rate": 0.00019139980115238827,
      "loss": 0.6075,
      "step": 35
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 19.23078155517578,
      "learning_rate": 0.00019070909137343408,
      "loss": 0.694,
      "step": 36
    },
    {
      "epoch": 0.18686868686868688,
      "grad_norm": 26.05833625793457,
      "learning_rate": 0.0001899930522029408,
      "loss": 0.7611,
      "step": 37
    },
    {
      "epoch": 0.1919191919191919,
      "grad_norm": 27.84595489501953,
      "learning_rate": 0.00018925188358598813,
      "loss": 0.6542,
      "step": 38
    },
    {
      "epoch": 0.19696969696969696,
      "grad_norm": 26.004344940185547,
      "learning_rate": 0.00018848579248474288,
      "loss": 0.6681,
      "step": 39
    },
    {
      "epoch": 0.20202020202020202,
      "grad_norm": 20.24230194091797,
      "learning_rate": 0.00018769499282066717,
      "loss": 0.5088,
      "step": 40
    },
    {
      "epoch": 0.20707070707070707,
      "grad_norm": 21.711565017700195,
      "learning_rate": 0.00018687970541478364,
      "loss": 0.74,
      "step": 41
    },
    {
      "epoch": 0.21212121212121213,
      "grad_norm": 17.97589111328125,
      "learning_rate": 0.00018604015792601396,
      "loss": 0.4864,
      "step": 42
    },
    {
      "epoch": 0.21717171717171718,
      "grad_norm": 34.957908630371094,
      "learning_rate": 0.0001851765847876076,
      "loss": 0.6905,
      "step": 43
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 26.597530364990234,
      "learning_rate": 0.0001842892271416797,
      "loss": 0.7611,
      "step": 44
    },
    {
      "epoch": 0.22727272727272727,
      "grad_norm": 16.2249755859375,
      "learning_rate": 0.00018337833277187472,
      "loss": 0.4251,
      "step": 45
    },
    {
      "epoch": 0.23232323232323232,
      "grad_norm": 24.652427673339844,
      "learning_rate": 0.00018244415603417603,
      "loss": 0.9778,
      "step": 46
    },
    {
      "epoch": 0.23737373737373738,
      "grad_norm": 17.19180679321289,
      "learning_rate": 0.00018148695778588033,
      "loss": 0.6035,
      "step": 47
    },
    {
      "epoch": 0.24242424242424243,
      "grad_norm": 21.772842407226562,
      "learning_rate": 0.0001805070053127563,
      "loss": 0.9396,
      "step": 48
    },
    {
      "epoch": 0.2474747474747475,
      "grad_norm": 30.497249603271484,
      "learning_rate": 0.0001795045722544083,
      "loss": 1.0868,
      "step": 49
    },
    {
      "epoch": 0.25252525252525254,
      "grad_norm": 20.648635864257812,
      "learning_rate": 0.0001784799385278661,
      "loss": 0.6751,
      "step": 50
    },
    {
      "epoch": 0.25252525252525254,
      "eval_loss": 0.4286772310733795,
      "eval_runtime": 12.8415,
      "eval_samples_per_second": 6.541,
      "eval_steps_per_second": 3.271,
      "step": 50
    },
    {
      "epoch": 0.25757575757575757,
      "grad_norm": 15.56528091430664,
      "learning_rate": 0.00017743339024942135,
      "loss": 0.5069,
      "step": 51
    },
    {
      "epoch": 0.26262626262626265,
      "grad_norm": 13.587965965270996,
      "learning_rate": 0.00017636521965473323,
      "loss": 0.5898,
      "step": 52
    },
    {
      "epoch": 0.2676767676767677,
      "grad_norm": 11.232004165649414,
      "learning_rate": 0.00017527572501722512,
      "loss": 0.5344,
      "step": 53
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 48.71158981323242,
      "learning_rate": 0.00017416521056479577,
      "loss": 0.5318,
      "step": 54
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 13.308910369873047,
      "learning_rate": 0.00017303398639486695,
      "loss": 0.456,
      "step": 55
    },
    {
      "epoch": 0.2828282828282828,
      "grad_norm": 17.141597747802734,
      "learning_rate": 0.00017188236838779295,
      "loss": 0.4883,
      "step": 56
    },
    {
      "epoch": 0.2878787878787879,
      "grad_norm": 11.742911338806152,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.4722,
      "step": 57
    },
    {
      "epoch": 0.29292929292929293,
      "grad_norm": 12.313322067260742,
      "learning_rate": 0.00016951924276746425,
      "loss": 0.4694,
      "step": 58
    },
    {
      "epoch": 0.29797979797979796,
      "grad_norm": 15.240520477294922,
      "learning_rate": 0.0001683083950278031,
      "loss": 0.6767,
      "step": 59
    },
    {
      "epoch": 0.30303030303030304,
      "grad_norm": 18.384809494018555,
      "learning_rate": 0.00016707847301392236,
      "loss": 0.5452,
      "step": 60
    },
    {
      "epoch": 0.30808080808080807,
      "grad_norm": 24.02840805053711,
      "learning_rate": 0.00016582982016632818,
      "loss": 0.4739,
      "step": 61
    },
    {
      "epoch": 0.31313131313131315,
      "grad_norm": 25.153413772583008,
      "learning_rate": 0.00016456278515588024,
      "loss": 0.5608,
      "step": 62
    },
    {
      "epoch": 0.3181818181818182,
      "grad_norm": 16.113597869873047,
      "learning_rate": 0.00016327772178642986,
      "loss": 0.5061,
      "step": 63
    },
    {
      "epoch": 0.32323232323232326,
      "grad_norm": 14.654685020446777,
      "learning_rate": 0.00016197498889602448,
      "loss": 0.5645,
      "step": 64
    },
    {
      "epoch": 0.3282828282828283,
      "grad_norm": 13.464080810546875,
      "learning_rate": 0.00016065495025670675,
      "loss": 0.46,
      "step": 65
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 16.236915588378906,
      "learning_rate": 0.00015931797447293552,
      "loss": 0.4963,
      "step": 66
    },
    {
      "epoch": 0.3383838383838384,
      "grad_norm": 15.136520385742188,
      "learning_rate": 0.00015796443487865776,
      "loss": 0.3889,
      "step": 67
    },
    {
      "epoch": 0.3434343434343434,
      "grad_norm": 17.57411766052246,
      "learning_rate": 0.00015659470943305955,
      "loss": 0.6627,
      "step": 68
    },
    {
      "epoch": 0.3484848484848485,
      "grad_norm": 26.13228988647461,
      "learning_rate": 0.00015520918061502569,
      "loss": 0.5681,
      "step": 69
    },
    {
      "epoch": 0.35353535353535354,
      "grad_norm": 19.557939529418945,
      "learning_rate": 0.00015380823531633729,
      "loss": 0.4217,
      "step": 70
    },
    {
      "epoch": 0.35858585858585856,
      "grad_norm": 20.687942504882812,
      "learning_rate": 0.00015239226473363687,
      "loss": 0.4085,
      "step": 71
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 18.704753875732422,
      "learning_rate": 0.00015096166425919175,
      "loss": 0.5309,
      "step": 72
    },
    {
      "epoch": 0.3686868686868687,
      "grad_norm": 14.950682640075684,
      "learning_rate": 0.00014951683337048537,
      "loss": 0.5447,
      "step": 73
    },
    {
      "epoch": 0.37373737373737376,
      "grad_norm": 15.215104103088379,
      "learning_rate": 0.00014805817551866838,
      "loss": 0.4135,
      "step": 74
    },
    {
      "epoch": 0.3787878787878788,
      "grad_norm": 20.362154006958008,
      "learning_rate": 0.00014658609801589982,
      "loss": 0.5322,
      "step": 75
    },
    {
      "epoch": 0.3838383838383838,
      "grad_norm": 17.189462661743164,
      "learning_rate": 0.00014510101192161018,
      "loss": 0.6402,
      "step": 76
    },
    {
      "epoch": 0.3888888888888889,
      "grad_norm": 14.426923751831055,
      "learning_rate": 0.0001436033319277183,
      "loss": 0.4401,
      "step": 77
    },
    {
      "epoch": 0.3939393939393939,
      "grad_norm": 16.389694213867188,
      "learning_rate": 0.0001420934762428335,
      "loss": 0.3578,
      "step": 78
    },
    {
      "epoch": 0.398989898989899,
      "grad_norm": 13.852417945861816,
      "learning_rate": 0.0001405718664754764,
      "loss": 0.4186,
      "step": 79
    },
    {
      "epoch": 0.40404040404040403,
      "grad_norm": 14.556455612182617,
      "learning_rate": 0.00013903892751634947,
      "loss": 0.5181,
      "step": 80
    },
    {
      "epoch": 0.4090909090909091,
      "grad_norm": 11.96506404876709,
      "learning_rate": 0.00013749508741969213,
      "loss": 0.5548,
      "step": 81
    },
    {
      "epoch": 0.41414141414141414,
      "grad_norm": 20.171615600585938,
      "learning_rate": 0.00013594077728375128,
      "loss": 0.5334,
      "step": 82
    },
    {
      "epoch": 0.41919191919191917,
      "grad_norm": 14.336783409118652,
      "learning_rate": 0.00013437643113040301,
      "loss": 0.543,
      "step": 83
    },
    {
      "epoch": 0.42424242424242425,
      "grad_norm": 31.304283142089844,
      "learning_rate": 0.0001328024857839569,
      "loss": 0.5402,
      "step": 84
    },
    {
      "epoch": 0.4292929292929293,
      "grad_norm": 16.0805721282959,
      "learning_rate": 0.00013121938074917865,
      "loss": 0.4462,
      "step": 85
    },
    {
      "epoch": 0.43434343434343436,
      "grad_norm": 20.255895614624023,
      "learning_rate": 0.00012962755808856342,
      "loss": 0.3971,
      "step": 86
    },
    {
      "epoch": 0.4393939393939394,
      "grad_norm": 14.014909744262695,
      "learning_rate": 0.00012802746229889563,
      "loss": 0.4082,
      "step": 87
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 16.093183517456055,
      "learning_rate": 0.00012641954018712863,
      "loss": 0.5894,
      "step": 88
    },
    {
      "epoch": 0.4494949494949495,
      "grad_norm": 19.122770309448242,
      "learning_rate": 0.00012480424074561933,
      "loss": 0.6092,
      "step": 89
    },
    {
      "epoch": 0.45454545454545453,
      "grad_norm": 22.975881576538086,
      "learning_rate": 0.00012318201502675285,
      "loss": 0.6141,
      "step": 90
    },
    {
      "epoch": 0.4595959595959596,
      "grad_norm": 12.433504104614258,
      "learning_rate": 0.00012155331601699136,
      "loss": 0.4174,
      "step": 91
    },
    {
      "epoch": 0.46464646464646464,
      "grad_norm": 18.699106216430664,
      "learning_rate": 0.0001199185985103836,
      "loss": 0.5296,
      "step": 92
    },
    {
      "epoch": 0.4696969696969697,
      "grad_norm": 17.323938369750977,
      "learning_rate": 0.00011827831898156905,
      "loss": 0.6469,
      "step": 93
    },
    {
      "epoch": 0.47474747474747475,
      "grad_norm": 11.476202964782715,
      "learning_rate": 0.00011663293545831302,
      "loss": 0.4282,
      "step": 94
    },
    {
      "epoch": 0.4797979797979798,
      "grad_norm": 15.700027465820312,
      "learning_rate": 0.00011498290739360815,
      "loss": 0.5744,
      "step": 95
    },
    {
      "epoch": 0.48484848484848486,
      "grad_norm": 30.623720169067383,
      "learning_rate": 0.0001133286955373779,
      "loss": 0.7131,
      "step": 96
    },
    {
      "epoch": 0.4898989898989899,
      "grad_norm": 17.045047760009766,
      "learning_rate": 0.00011167076180781764,
      "loss": 0.5727,
      "step": 97
    },
    {
      "epoch": 0.494949494949495,
      "grad_norm": 15.74181842803955,
      "learning_rate": 0.00011000956916240985,
      "loss": 0.5673,
      "step": 98
    },
    {
      "epoch": 0.5,
      "grad_norm": 16.612184524536133,
      "learning_rate": 0.000108345581468649,
      "loss": 0.5101,
      "step": 99
    },
    {
      "epoch": 0.5050505050505051,
      "grad_norm": 18.710609436035156,
      "learning_rate": 0.00010667926337451217,
      "loss": 0.5985,
      "step": 100
    },
    {
      "epoch": 0.5050505050505051,
      "eval_loss": 0.3532155752182007,
      "eval_runtime": 12.0059,
      "eval_samples_per_second": 6.997,
      "eval_steps_per_second": 3.498,
      "step": 100
    },
    {
      "epoch": 0.51010101010101,
      "grad_norm": 11.24027156829834,
      "learning_rate": 0.00010501108017871192,
      "loss": 0.4537,
      "step": 101
    },
    {
      "epoch": 0.5151515151515151,
      "grad_norm": 14.357158660888672,
      "learning_rate": 0.00010334149770076747,
      "loss": 0.4567,
      "step": 102
    },
    {
      "epoch": 0.5202020202020202,
      "grad_norm": 12.492400169372559,
      "learning_rate": 0.00010167098215093009,
      "loss": 0.4684,
      "step": 103
    },
    {
      "epoch": 0.5252525252525253,
      "grad_norm": 11.547194480895996,
      "learning_rate": 0.0001,
      "loss": 0.4345,
      "step": 104
    },
    {
      "epoch": 0.5303030303030303,
      "grad_norm": 13.756988525390625,
      "learning_rate": 9.83290178490699e-05,
      "loss": 0.4538,
      "step": 105
    },
    {
      "epoch": 0.5353535353535354,
      "grad_norm": 13.91006088256836,
      "learning_rate": 9.665850229923258e-05,
      "loss": 0.3966,
      "step": 106
    },
    {
      "epoch": 0.5404040404040404,
      "grad_norm": 13.23215103149414,
      "learning_rate": 9.498891982128809e-05,
      "loss": 0.3571,
      "step": 107
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 13.87959098815918,
      "learning_rate": 9.332073662548784e-05,
      "loss": 0.4389,
      "step": 108
    },
    {
      "epoch": 0.5505050505050505,
      "grad_norm": 12.538737297058105,
      "learning_rate": 9.165441853135104e-05,
      "loss": 0.4184,
      "step": 109
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 23.718013763427734,
      "learning_rate": 8.999043083759017e-05,
      "loss": 0.3822,
      "step": 110
    },
    {
      "epoch": 0.5606060606060606,
      "grad_norm": 70.29689025878906,
      "learning_rate": 8.832923819218238e-05,
      "loss": 0.3815,
      "step": 111
    },
    {
      "epoch": 0.5656565656565656,
      "grad_norm": 11.808393478393555,
      "learning_rate": 8.667130446262214e-05,
      "loss": 0.362,
      "step": 112
    },
    {
      "epoch": 0.5707070707070707,
      "grad_norm": 13.301114082336426,
      "learning_rate": 8.501709260639186e-05,
      "loss": 0.5234,
      "step": 113
    },
    {
      "epoch": 0.5757575757575758,
      "grad_norm": 11.385530471801758,
      "learning_rate": 8.336706454168701e-05,
      "loss": 0.546,
      "step": 114
    },
    {
      "epoch": 0.5808080808080808,
      "grad_norm": 17.040523529052734,
      "learning_rate": 8.172168101843099e-05,
      "loss": 0.4199,
      "step": 115
    },
    {
      "epoch": 0.5858585858585859,
      "grad_norm": 18.70421028137207,
      "learning_rate": 8.008140148961641e-05,
      "loss": 0.5603,
      "step": 116
    },
    {
      "epoch": 0.5909090909090909,
      "grad_norm": 13.10011100769043,
      "learning_rate": 7.844668398300865e-05,
      "loss": 0.6309,
      "step": 117
    },
    {
      "epoch": 0.5959595959595959,
      "grad_norm": 13.03792953491211,
      "learning_rate": 7.681798497324716e-05,
      "loss": 0.4718,
      "step": 118
    },
    {
      "epoch": 0.601010101010101,
      "grad_norm": 14.050755500793457,
      "learning_rate": 7.519575925438067e-05,
      "loss": 0.372,
      "step": 119
    },
    {
      "epoch": 0.6060606060606061,
      "grad_norm": 13.505302429199219,
      "learning_rate": 7.358045981287141e-05,
      "loss": 0.5525,
      "step": 120
    },
    {
      "epoch": 0.6111111111111112,
      "grad_norm": 13.572732925415039,
      "learning_rate": 7.197253770110438e-05,
      "loss": 0.4607,
      "step": 121
    },
    {
      "epoch": 0.6161616161616161,
      "grad_norm": 22.72136878967285,
      "learning_rate": 7.037244191143661e-05,
      "loss": 0.632,
      "step": 122
    },
    {
      "epoch": 0.6212121212121212,
      "grad_norm": 12.22665786743164,
      "learning_rate": 6.878061925082137e-05,
      "loss": 0.429,
      "step": 123
    },
    {
      "epoch": 0.6262626262626263,
      "grad_norm": 10.075366973876953,
      "learning_rate": 6.719751421604309e-05,
      "loss": 0.4592,
      "step": 124
    },
    {
      "epoch": 0.6313131313131313,
      "grad_norm": 16.798250198364258,
      "learning_rate": 6.562356886959704e-05,
      "loss": 0.368,
      "step": 125
    },
    {
      "epoch": 0.6363636363636364,
      "grad_norm": 10.359375953674316,
      "learning_rate": 6.405922271624874e-05,
      "loss": 0.2825,
      "step": 126
    },
    {
      "epoch": 0.6414141414141414,
      "grad_norm": 12.54139232635498,
      "learning_rate": 6.250491258030791e-05,
      "loss": 0.479,
      "step": 127
    },
    {
      "epoch": 0.6464646464646465,
      "grad_norm": 15.544010162353516,
      "learning_rate": 6.0961072483650526e-05,
      "loss": 0.4355,
      "step": 128
    },
    {
      "epoch": 0.6515151515151515,
      "grad_norm": 13.150715827941895,
      "learning_rate": 5.9428133524523646e-05,
      "loss": 0.3998,
      "step": 129
    },
    {
      "epoch": 0.6565656565656566,
      "grad_norm": 11.6818265914917,
      "learning_rate": 5.790652375716652e-05,
      "loss": 0.4009,
      "step": 130
    },
    {
      "epoch": 0.6616161616161617,
      "grad_norm": 16.20279884338379,
      "learning_rate": 5.639666807228175e-05,
      "loss": 0.5703,
      "step": 131
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 15.14217758178711,
      "learning_rate": 5.48989880783898e-05,
      "loss": 0.4918,
      "step": 132
    },
    {
      "epoch": 0.6717171717171717,
      "grad_norm": 17.63346290588379,
      "learning_rate": 5.341390198410019e-05,
      "loss": 0.4146,
      "step": 133
    },
    {
      "epoch": 0.6767676767676768,
      "grad_norm": 17.88300323486328,
      "learning_rate": 5.1941824481331626e-05,
      "loss": 0.4518,
      "step": 134
    },
    {
      "epoch": 0.6818181818181818,
      "grad_norm": 19.94394302368164,
      "learning_rate": 5.0483166629514654e-05,
      "loss": 0.6016,
      "step": 135
    },
    {
      "epoch": 0.6868686868686869,
      "grad_norm": 14.756821632385254,
      "learning_rate": 4.903833574080825e-05,
      "loss": 0.3996,
      "step": 136
    },
    {
      "epoch": 0.6919191919191919,
      "grad_norm": 19.392858505249023,
      "learning_rate": 4.760773526636315e-05,
      "loss": 0.6053,
      "step": 137
    },
    {
      "epoch": 0.696969696969697,
      "grad_norm": 7.531655788421631,
      "learning_rate": 4.6191764683662744e-05,
      "loss": 0.2299,
      "step": 138
    },
    {
      "epoch": 0.702020202020202,
      "grad_norm": 23.472288131713867,
      "learning_rate": 4.479081938497435e-05,
      "loss": 0.5437,
      "step": 139
    },
    {
      "epoch": 0.7070707070707071,
      "grad_norm": 17.57956314086914,
      "learning_rate": 4.340529056694047e-05,
      "loss": 0.5943,
      "step": 140
    },
    {
      "epoch": 0.7121212121212122,
      "grad_norm": 11.99634075164795,
      "learning_rate": 4.2035565121342246e-05,
      "loss": 0.2477,
      "step": 141
    },
    {
      "epoch": 0.7171717171717171,
      "grad_norm": 34.67549133300781,
      "learning_rate": 4.0682025527064486e-05,
      "loss": 0.4986,
      "step": 142
    },
    {
      "epoch": 0.7222222222222222,
      "grad_norm": 12.304537773132324,
      "learning_rate": 3.934504974329326e-05,
      "loss": 0.5063,
      "step": 143
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 14.232462882995605,
      "learning_rate": 3.802501110397553e-05,
      "loss": 0.5271,
      "step": 144
    },
    {
      "epoch": 0.7323232323232324,
      "grad_norm": 23.888545989990234,
      "learning_rate": 3.672227821357014e-05,
      "loss": 0.6568,
      "step": 145
    },
    {
      "epoch": 0.7373737373737373,
      "grad_norm": 16.624494552612305,
      "learning_rate": 3.543721484411976e-05,
      "loss": 0.4932,
      "step": 146
    },
    {
      "epoch": 0.7424242424242424,
      "grad_norm": 15.917621612548828,
      "learning_rate": 3.4170179833671846e-05,
      "loss": 0.6934,
      "step": 147
    },
    {
      "epoch": 0.7474747474747475,
      "grad_norm": 9.06427001953125,
      "learning_rate": 3.292152698607768e-05,
      "loss": 0.3849,
      "step": 148
    },
    {
      "epoch": 0.7525252525252525,
      "grad_norm": 17.903718948364258,
      "learning_rate": 3.169160497219692e-05,
      "loss": 0.3646,
      "step": 149
    },
    {
      "epoch": 0.7575757575757576,
      "grad_norm": 11.98536205291748,
      "learning_rate": 3.0480757232535772e-05,
      "loss": 0.46,
      "step": 150
    },
    {
      "epoch": 0.7575757575757576,
      "eval_loss": 0.3067511022090912,
      "eval_runtime": 12.2523,
      "eval_samples_per_second": 6.856,
      "eval_steps_per_second": 3.428,
      "step": 150
    },
    {
      "epoch": 0.7626262626262627,
      "grad_norm": 9.82388687133789,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.302,
      "step": 151
    },
    {
      "epoch": 0.7676767676767676,
      "grad_norm": 12.946221351623535,
      "learning_rate": 2.8117631612207084e-05,
      "loss": 0.4361,
      "step": 152
    },
    {
      "epoch": 0.7727272727272727,
      "grad_norm": 9.764445304870605,
      "learning_rate": 2.6966013605133088e-05,
      "loss": 0.3317,
      "step": 153
    },
    {
      "epoch": 0.7777777777777778,
      "grad_norm": 14.5607271194458,
      "learning_rate": 2.5834789435204243e-05,
      "loss": 0.3403,
      "step": 154
    },
    {
      "epoch": 0.7828282828282829,
      "grad_norm": 11.619050979614258,
      "learning_rate": 2.4724274982774865e-05,
      "loss": 0.4053,
      "step": 155
    },
    {
      "epoch": 0.7878787878787878,
      "grad_norm": 14.43159008026123,
      "learning_rate": 2.3634780345266806e-05,
      "loss": 0.3843,
      "step": 156
    },
    {
      "epoch": 0.7929292929292929,
      "grad_norm": 15.145977020263672,
      "learning_rate": 2.2566609750578673e-05,
      "loss": 0.4364,
      "step": 157
    },
    {
      "epoch": 0.797979797979798,
      "grad_norm": 11.833556175231934,
      "learning_rate": 2.1520061472133902e-05,
      "loss": 0.3154,
      "step": 158
    },
    {
      "epoch": 0.803030303030303,
      "grad_norm": 12.835911750793457,
      "learning_rate": 2.04954277455917e-05,
      "loss": 0.3429,
      "step": 159
    },
    {
      "epoch": 0.8080808080808081,
      "grad_norm": 11.492507934570312,
      "learning_rate": 1.9492994687243714e-05,
      "loss": 0.3757,
      "step": 160
    },
    {
      "epoch": 0.8131313131313131,
      "grad_norm": 13.120503425598145,
      "learning_rate": 1.851304221411967e-05,
      "loss": 0.4127,
      "step": 161
    },
    {
      "epoch": 0.8181818181818182,
      "grad_norm": 20.865995407104492,
      "learning_rate": 1.7555843965823992e-05,
      "loss": 0.4,
      "step": 162
    },
    {
      "epoch": 0.8232323232323232,
      "grad_norm": 11.817997932434082,
      "learning_rate": 1.6621667228125302e-05,
      "loss": 0.4647,
      "step": 163
    },
    {
      "epoch": 0.8282828282828283,
      "grad_norm": 10.320054054260254,
      "learning_rate": 1.57107728583203e-05,
      "loss": 0.3413,
      "step": 164
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 11.233556747436523,
      "learning_rate": 1.4823415212392377e-05,
      "loss": 0.2859,
      "step": 165
    },
    {
      "epoch": 0.8383838383838383,
      "grad_norm": 10.825108528137207,
      "learning_rate": 1.3959842073986085e-05,
      "loss": 0.2771,
      "step": 166
    },
    {
      "epoch": 0.8434343434343434,
      "grad_norm": 11.109395027160645,
      "learning_rate": 1.3120294585216353e-05,
      "loss": 0.3611,
      "step": 167
    },
    {
      "epoch": 0.8484848484848485,
      "grad_norm": 10.571539878845215,
      "learning_rate": 1.230500717933285e-05,
      "loss": 0.3888,
      "step": 168
    },
    {
      "epoch": 0.8535353535353535,
      "grad_norm": 11.521129608154297,
      "learning_rate": 1.1514207515257147e-05,
      "loss": 0.3247,
      "step": 169
    },
    {
      "epoch": 0.8585858585858586,
      "grad_norm": 11.530449867248535,
      "learning_rate": 1.0748116414011888e-05,
      "loss": 0.2623,
      "step": 170
    },
    {
      "epoch": 0.8636363636363636,
      "grad_norm": 10.529777526855469,
      "learning_rate": 1.0006947797059219e-05,
      "loss": 0.3313,
      "step": 171
    },
    {
      "epoch": 0.8686868686868687,
      "grad_norm": 9.755709648132324,
      "learning_rate": 9.29090862656593e-06,
      "loss": 0.3945,
      "step": 172
    },
    {
      "epoch": 0.8737373737373737,
      "grad_norm": 10.26276969909668,
      "learning_rate": 8.600198847611729e-06,
      "loss": 0.2629,
      "step": 173
    },
    {
      "epoch": 0.8787878787878788,
      "grad_norm": 18.38811492919922,
      "learning_rate": 7.935011332357112e-06,
      "loss": 0.408,
      "step": 174
    },
    {
      "epoch": 0.8838383838383839,
      "grad_norm": 11.992220878601074,
      "learning_rate": 7.295531826186264e-06,
      "loss": 0.4615,
      "step": 175
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 14.000274658203125,
      "learning_rate": 6.681938895839746e-06,
      "loss": 0.3583,
      "step": 176
    },
    {
      "epoch": 0.8939393939393939,
      "grad_norm": 11.123028755187988,
      "learning_rate": 6.094403879552213e-06,
      "loss": 0.2686,
      "step": 177
    },
    {
      "epoch": 0.898989898989899,
      "grad_norm": 11.463796615600586,
      "learning_rate": 5.533090839208133e-06,
      "loss": 0.3694,
      "step": 178
    },
    {
      "epoch": 0.9040404040404041,
      "grad_norm": 14.426941871643066,
      "learning_rate": 4.998156514529595e-06,
      "loss": 0.4453,
      "step": 179
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 11.056812286376953,
      "learning_rate": 4.489750279308757e-06,
      "loss": 0.3627,
      "step": 180
    },
    {
      "epoch": 0.9141414141414141,
      "grad_norm": 15.307928085327148,
      "learning_rate": 4.008014099696922e-06,
      "loss": 0.5187,
      "step": 181
    },
    {
      "epoch": 0.9191919191919192,
      "grad_norm": 16.49620246887207,
      "learning_rate": 3.5530824945623542e-06,
      "loss": 0.4363,
      "step": 182
    },
    {
      "epoch": 0.9242424242424242,
      "grad_norm": 14.4350004196167,
      "learning_rate": 3.1250824979274675e-06,
      "loss": 0.422,
      "step": 183
    },
    {
      "epoch": 0.9292929292929293,
      "grad_norm": 8.324783325195312,
      "learning_rate": 2.7241336234962944e-06,
      "loss": 0.3985,
      "step": 184
    },
    {
      "epoch": 0.9343434343434344,
      "grad_norm": 11.766496658325195,
      "learning_rate": 2.3503478312815298e-06,
      "loss": 0.3377,
      "step": 185
    },
    {
      "epoch": 0.9393939393939394,
      "grad_norm": 15.193984031677246,
      "learning_rate": 2.003829496341325e-06,
      "loss": 0.3802,
      "step": 186
    },
    {
      "epoch": 0.9444444444444444,
      "grad_norm": 20.59272003173828,
      "learning_rate": 1.684675379633649e-06,
      "loss": 0.4727,
      "step": 187
    },
    {
      "epoch": 0.9494949494949495,
      "grad_norm": 10.777263641357422,
      "learning_rate": 1.3929746009971433e-06,
      "loss": 0.3044,
      "step": 188
    },
    {
      "epoch": 0.9545454545454546,
      "grad_norm": 20.804996490478516,
      "learning_rate": 1.1288086142653864e-06,
      "loss": 0.3692,
      "step": 189
    },
    {
      "epoch": 0.9595959595959596,
      "grad_norm": 19.473365783691406,
      "learning_rate": 8.922511845219971e-07,
      "loss": 0.321,
      "step": 190
    },
    {
      "epoch": 0.9646464646464646,
      "grad_norm": 10.078756332397461,
      "learning_rate": 6.833683675025904e-07,
      "loss": 0.3689,
      "step": 191
    },
    {
      "epoch": 0.9696969696969697,
      "grad_norm": 10.66713809967041,
      "learning_rate": 5.022184911495864e-07,
      "loss": 0.2831,
      "step": 192
    },
    {
      "epoch": 0.9747474747474747,
      "grad_norm": 13.519158363342285,
      "learning_rate": 3.488521393248401e-07,
      "loss": 0.3693,
      "step": 193
    },
    {
      "epoch": 0.9797979797979798,
      "grad_norm": 18.351518630981445,
      "learning_rate": 2.2331213768468363e-07,
      "loss": 0.5692,
      "step": 194
    },
    {
      "epoch": 0.9848484848484849,
      "grad_norm": 20.144264221191406,
      "learning_rate": 1.2563354172142606e-07,
      "loss": 0.4896,
      "step": 195
    },
    {
      "epoch": 0.98989898989899,
      "grad_norm": 21.47068977355957,
      "learning_rate": 5.584362697453882e-08,
      "loss": 0.4884,
      "step": 196
    },
    {
      "epoch": 0.9949494949494949,
      "grad_norm": 14.894364356994629,
      "learning_rate": 1.3961881414292778e-08,
      "loss": 0.4101,
      "step": 197
    },
    {
      "epoch": 1.0,
      "grad_norm": 20.294527053833008,
      "learning_rate": 0.0,
      "loss": 0.5037,
      "step": 198
    }
  ],
  "logging_steps": 1,
  "max_steps": 198,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.812060362394829e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}