{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9927710843373494,
  "eval_steps": 500,
  "global_step": 103,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00963855421686747,
      "grad_norm": 0.7313525676727295,
      "learning_rate": 4.998837209058379e-05,
      "loss": 1.2542,
      "num_input_tokens_seen": 2097152,
      "step": 1
    },
    {
      "epoch": 0.01927710843373494,
      "grad_norm": 0.7724310159683228,
      "learning_rate": 4.9953499178997346e-05,
      "loss": 1.2368,
      "num_input_tokens_seen": 4194304,
      "step": 2
    },
    {
      "epoch": 0.02891566265060241,
      "grad_norm": 0.7648841738700867,
      "learning_rate": 4.9895413705165234e-05,
      "loss": 1.1874,
      "num_input_tokens_seen": 6291456,
      "step": 3
    },
    {
      "epoch": 0.03855421686746988,
      "grad_norm": 0.6278133392333984,
      "learning_rate": 4.98141697020977e-05,
      "loss": 1.1094,
      "num_input_tokens_seen": 8388608,
      "step": 4
    },
    {
      "epoch": 0.04819277108433735,
      "grad_norm": 0.5364275574684143,
      "learning_rate": 4.970984274562741e-05,
      "loss": 1.0771,
      "num_input_tokens_seen": 10485760,
      "step": 5
    },
    {
      "epoch": 0.05783132530120482,
      "grad_norm": 0.4229595363140106,
      "learning_rate": 4.958252988410631e-05,
      "loss": 1.0465,
      "num_input_tokens_seen": 12582912,
      "step": 6
    },
    {
      "epoch": 0.06746987951807229,
      "grad_norm": 0.3765781819820404,
      "learning_rate": 4.9432349548128124e-05,
      "loss": 1.0828,
      "num_input_tokens_seen": 14680064,
      "step": 7
    },
    {
      "epoch": 0.07710843373493977,
      "grad_norm": 0.3276417553424835,
      "learning_rate": 4.925944144036026e-05,
      "loss": 1.0584,
      "num_input_tokens_seen": 16777216,
      "step": 8
    },
    {
      "epoch": 0.08674698795180723,
      "grad_norm": 0.2784905433654785,
      "learning_rate": 4.90639664055879e-05,
      "loss": 1.0203,
      "num_input_tokens_seen": 18874368,
      "step": 9
    },
    {
      "epoch": 0.0963855421686747,
      "grad_norm": 0.24603000283241272,
      "learning_rate": 4.884610628109082e-05,
      "loss": 1.0074,
      "num_input_tokens_seen": 20971520,
      "step": 10
    },
    {
      "epoch": 0.10602409638554217,
      "grad_norm": 0.21994058787822723,
      "learning_rate": 4.860606372749247e-05,
      "loss": 1.0076,
      "num_input_tokens_seen": 23068672,
      "step": 11
    },
    {
      "epoch": 0.11566265060240964,
      "grad_norm": 0.1868952363729477,
      "learning_rate": 4.8344062040238395e-05,
      "loss": 0.9358,
      "num_input_tokens_seen": 25165824,
      "step": 12
    },
    {
      "epoch": 0.12530120481927712,
      "grad_norm": 0.1867723912000656,
      "learning_rate": 4.806034494187949e-05,
      "loss": 0.9963,
      "num_input_tokens_seen": 27262976,
      "step": 13
    },
    {
      "epoch": 0.13493975903614458,
      "grad_norm": 0.1620677411556244,
      "learning_rate": 4.775517635535332e-05,
      "loss": 0.9646,
      "num_input_tokens_seen": 29360128,
      "step": 14
    },
    {
      "epoch": 0.14457831325301204,
      "grad_norm": 0.14515823125839233,
      "learning_rate": 4.742884015847436e-05,
      "loss": 0.9749,
      "num_input_tokens_seen": 31457280,
      "step": 15
    },
    {
      "epoch": 0.15421686746987953,
      "grad_norm": 0.12092889100313187,
      "learning_rate": 4.708163991986152e-05,
      "loss": 0.9222,
      "num_input_tokens_seen": 33554432,
      "step": 16
    },
    {
      "epoch": 0.163855421686747,
      "grad_norm": 0.11052511632442474,
      "learning_rate": 4.6713898616548724e-05,
      "loss": 0.9341,
      "num_input_tokens_seen": 35651584,
      "step": 17
    },
    {
      "epoch": 0.17349397590361446,
      "grad_norm": 0.09935510903596878,
      "learning_rate": 4.6325958333541044e-05,
      "loss": 0.9262,
      "num_input_tokens_seen": 37748736,
      "step": 18
    },
    {
      "epoch": 0.18313253012048192,
      "grad_norm": 0.09447427093982697,
      "learning_rate": 4.591817994559605e-05,
      "loss": 0.9563,
      "num_input_tokens_seen": 39845888,
      "step": 19
    },
    {
      "epoch": 0.1927710843373494,
      "grad_norm": 0.08354643732309341,
      "learning_rate": 4.5490942781526316e-05,
      "loss": 0.9378,
      "num_input_tokens_seen": 41943040,
      "step": 20
    },
    {
      "epoch": 0.20240963855421687,
      "grad_norm": 0.08155971020460129,
      "learning_rate": 4.504464427133527e-05,
      "loss": 0.9421,
      "num_input_tokens_seen": 44040192,
      "step": 21
    },
    {
      "epoch": 0.21204819277108433,
      "grad_norm": 0.07310276478528976,
      "learning_rate": 4.457969957651484e-05,
      "loss": 0.9219,
      "num_input_tokens_seen": 46137344,
      "step": 22
    },
    {
      "epoch": 0.2216867469879518,
      "grad_norm": 0.06967604160308838,
      "learning_rate": 4.409654120384862e-05,
      "loss": 0.9108,
      "num_input_tokens_seen": 48234496,
      "step": 23
    },
    {
      "epoch": 0.23132530120481928,
      "grad_norm": 0.06935103982686996,
      "learning_rate": 4.35956186030799e-05,
      "loss": 0.9331,
      "num_input_tokens_seen": 50331648,
      "step": 24
    },
    {
      "epoch": 0.24096385542168675,
      "grad_norm": 0.06473473459482193,
      "learning_rate": 4.307739774881878e-05,
      "loss": 0.9405,
      "num_input_tokens_seen": 52428800,
      "step": 25
    },
    {
      "epoch": 0.25060240963855424,
      "grad_norm": 0.06642216444015503,
      "learning_rate": 4.254236070707733e-05,
      "loss": 0.9667,
      "num_input_tokens_seen": 54525952,
      "step": 26
    },
    {
      "epoch": 0.26024096385542167,
      "grad_norm": 0.06075766310095787,
      "learning_rate": 4.1991005186836005e-05,
      "loss": 0.935,
      "num_input_tokens_seen": 56623104,
      "step": 27
    },
    {
      "epoch": 0.26987951807228916,
      "grad_norm": 0.05267713591456413,
      "learning_rate": 4.142384407705846e-05,
      "loss": 0.8985,
      "num_input_tokens_seen": 58720256,
      "step": 28
    },
    {
      "epoch": 0.27951807228915665,
      "grad_norm": 0.052896805107593536,
      "learning_rate": 4.084140496958538e-05,
      "loss": 0.8879,
      "num_input_tokens_seen": 60817408,
      "step": 29
    },
    {
      "epoch": 0.2891566265060241,
      "grad_norm": 0.054511502385139465,
      "learning_rate": 4.024422966835136e-05,
      "loss": 0.9086,
      "num_input_tokens_seen": 62914560,
      "step": 30
    },
    {
      "epoch": 0.2987951807228916,
      "grad_norm": 0.05628665164113045,
      "learning_rate": 3.963287368538106e-05,
      "loss": 0.9437,
      "num_input_tokens_seen": 65011712,
      "step": 31
    },
    {
      "epoch": 0.30843373493975906,
      "grad_norm": 0.05085182189941406,
      "learning_rate": 3.900790572403376e-05,
      "loss": 0.916,
      "num_input_tokens_seen": 67108864,
      "step": 32
    },
    {
      "epoch": 0.3180722891566265,
      "grad_norm": 0.05213847756385803,
      "learning_rate": 3.836990714997686e-05,
      "loss": 0.9433,
      "num_input_tokens_seen": 69206016,
      "step": 33
    },
    {
      "epoch": 0.327710843373494,
      "grad_norm": 0.050841424614191055,
      "learning_rate": 3.7719471450380514e-05,
      "loss": 0.9276,
      "num_input_tokens_seen": 71303168,
      "step": 34
    },
    {
      "epoch": 0.3373493975903614,
      "grad_norm": 0.050464361906051636,
      "learning_rate": 3.7057203681836406e-05,
      "loss": 0.918,
      "num_input_tokens_seen": 73400320,
      "step": 35
    },
    {
      "epoch": 0.3469879518072289,
      "grad_norm": 0.047950029373168945,
      "learning_rate": 3.638371990751428e-05,
      "loss": 0.8979,
      "num_input_tokens_seen": 75497472,
      "step": 36
    },
    {
      "epoch": 0.3566265060240964,
      "grad_norm": 0.04698570817708969,
      "learning_rate": 3.569964662407983e-05,
      "loss": 0.9065,
      "num_input_tokens_seen": 77594624,
      "step": 37
    },
    {
      "epoch": 0.36626506024096384,
      "grad_norm": 0.047698017209768295,
      "learning_rate": 3.500562017890695e-05,
      "loss": 0.9405,
      "num_input_tokens_seen": 79691776,
      "step": 38
    },
    {
      "epoch": 0.3759036144578313,
      "grad_norm": 0.04816911369562149,
      "learning_rate": 3.430228617812661e-05,
      "loss": 0.9263,
      "num_input_tokens_seen": 81788928,
      "step": 39
    },
    {
      "epoch": 0.3855421686746988,
      "grad_norm": 0.04724708944559097,
      "learning_rate": 3.3590298886062833e-05,
      "loss": 0.9243,
      "num_input_tokens_seen": 83886080,
      "step": 40
    },
    {
      "epoch": 0.39518072289156625,
      "grad_norm": 0.04571336507797241,
      "learning_rate": 3.2870320616614626e-05,
      "loss": 0.9201,
      "num_input_tokens_seen": 85983232,
      "step": 41
    },
    {
      "epoch": 0.40481927710843374,
      "grad_norm": 0.045291587710380554,
      "learning_rate": 3.21430211171499e-05,
      "loss": 0.9015,
      "num_input_tokens_seen": 88080384,
      "step": 42
    },
    {
      "epoch": 0.41445783132530123,
      "grad_norm": 0.04433094710111618,
      "learning_rate": 3.140907694548451e-05,
      "loss": 0.9104,
      "num_input_tokens_seen": 90177536,
      "step": 43
    },
    {
      "epoch": 0.42409638554216866,
      "grad_norm": 0.04381513595581055,
      "learning_rate": 3.066917084052603e-05,
      "loss": 0.9194,
      "num_input_tokens_seen": 92274688,
      "step": 44
    },
    {
      "epoch": 0.43373493975903615,
      "grad_norm": 0.04662506654858589,
      "learning_rate": 2.9923991087167658e-05,
      "loss": 0.9575,
      "num_input_tokens_seen": 94371840,
      "step": 45
    },
    {
      "epoch": 0.4433734939759036,
      "grad_norm": 0.044867418706417084,
      "learning_rate": 2.9174230876023058e-05,
      "loss": 0.8922,
      "num_input_tokens_seen": 96468992,
      "step": 46
    },
    {
      "epoch": 0.4530120481927711,
      "grad_norm": 0.04386744648218155,
      "learning_rate": 2.8420587658597757e-05,
      "loss": 0.9125,
      "num_input_tokens_seen": 98566144,
      "step": 47
    },
    {
      "epoch": 0.46265060240963857,
      "grad_norm": 0.044598404318094254,
      "learning_rate": 2.7663762498496905e-05,
      "loss": 0.8989,
      "num_input_tokens_seen": 100663296,
      "step": 48
    },
    {
      "epoch": 0.472289156626506,
      "grad_norm": 0.04060734063386917,
      "learning_rate": 2.6904459419272955e-05,
      "loss": 0.8719,
      "num_input_tokens_seen": 102760448,
      "step": 49
    },
    {
      "epoch": 0.4819277108433735,
      "grad_norm": 0.045836638659238815,
      "learning_rate": 2.6143384749519866e-05,
      "loss": 0.9074,
      "num_input_tokens_seen": 104857600,
      "step": 50
    },
    {
      "epoch": 0.491566265060241,
      "grad_norm": 0.04294141009449959,
      "learning_rate": 2.538124646582315e-05,
      "loss": 0.8911,
      "num_input_tokens_seen": 106954752,
      "step": 51
    },
    {
      "epoch": 0.5012048192771085,
      "grad_norm": 0.045149002224206924,
      "learning_rate": 2.4618753534176856e-05,
      "loss": 0.9314,
      "num_input_tokens_seen": 109051904,
      "step": 52
    },
    {
      "epoch": 0.5108433734939759,
      "grad_norm": 0.041385799646377563,
      "learning_rate": 2.385661525048014e-05,
      "loss": 0.9104,
      "num_input_tokens_seen": 111149056,
      "step": 53
    },
    {
      "epoch": 0.5204819277108433,
      "grad_norm": 0.043126098811626434,
      "learning_rate": 2.3095540580727055e-05,
      "loss": 0.8854,
      "num_input_tokens_seen": 113246208,
      "step": 54
    },
    {
      "epoch": 0.5301204819277109,
      "grad_norm": 0.04146340861916542,
      "learning_rate": 2.23362375015031e-05,
      "loss": 0.8827,
      "num_input_tokens_seen": 115343360,
      "step": 55
    },
    {
      "epoch": 0.5397590361445783,
      "grad_norm": 0.04183032736182213,
      "learning_rate": 2.157941234140225e-05,
      "loss": 0.9197,
      "num_input_tokens_seen": 117440512,
      "step": 56
    },
    {
      "epoch": 0.5493975903614458,
      "grad_norm": 0.040669653564691544,
      "learning_rate": 2.082576912397695e-05,
      "loss": 0.9112,
      "num_input_tokens_seen": 119537664,
      "step": 57
    },
    {
      "epoch": 0.5590361445783133,
      "grad_norm": 0.03980317711830139,
      "learning_rate": 2.0076008912832355e-05,
      "loss": 0.8954,
      "num_input_tokens_seen": 121634816,
      "step": 58
    },
    {
      "epoch": 0.5686746987951807,
      "grad_norm": 0.039486825466156006,
      "learning_rate": 1.933082915947398e-05,
      "loss": 0.8575,
      "num_input_tokens_seen": 123731968,
      "step": 59
    },
    {
      "epoch": 0.5783132530120482,
      "grad_norm": 0.04150194674730301,
      "learning_rate": 1.8590923054515503e-05,
      "loss": 0.8983,
      "num_input_tokens_seen": 125829120,
      "step": 60
    },
    {
      "epoch": 0.5879518072289157,
      "grad_norm": 0.041630711406469345,
      "learning_rate": 1.7856978882850113e-05,
      "loss": 0.8819,
      "num_input_tokens_seen": 127926272,
      "step": 61
    },
    {
      "epoch": 0.5975903614457831,
      "grad_norm": 0.04325857758522034,
      "learning_rate": 1.7129679383385383e-05,
      "loss": 0.9067,
      "num_input_tokens_seen": 130023424,
      "step": 62
    },
    {
      "epoch": 0.6072289156626506,
      "grad_norm": 0.041175901889801025,
      "learning_rate": 1.6409701113937183e-05,
      "loss": 0.9054,
      "num_input_tokens_seen": 132120576,
      "step": 63
    },
    {
      "epoch": 0.6168674698795181,
      "grad_norm": 0.04035747051239014,
      "learning_rate": 1.5697713821873398e-05,
      "loss": 0.8938,
      "num_input_tokens_seen": 134217728,
      "step": 64
    },
    {
      "epoch": 0.6265060240963856,
      "grad_norm": 0.040536992251873016,
      "learning_rate": 1.4994379821093049e-05,
      "loss": 0.8964,
      "num_input_tokens_seen": 136314880,
      "step": 65
    },
    {
      "epoch": 0.636144578313253,
      "grad_norm": 0.03936683386564255,
      "learning_rate": 1.430035337592018e-05,
      "loss": 0.8979,
      "num_input_tokens_seen": 138412032,
      "step": 66
    },
    {
      "epoch": 0.6457831325301204,
      "grad_norm": 0.042347777634859085,
      "learning_rate": 1.3616280092485717e-05,
      "loss": 0.9085,
      "num_input_tokens_seen": 140509184,
      "step": 67
    },
    {
      "epoch": 0.655421686746988,
      "grad_norm": 0.039885543286800385,
      "learning_rate": 1.2942796318163595e-05,
      "loss": 0.9091,
      "num_input_tokens_seen": 142606336,
      "step": 68
    },
    {
      "epoch": 0.6650602409638554,
      "grad_norm": 0.04115555062890053,
      "learning_rate": 1.2280528549619485e-05,
      "loss": 0.8615,
      "num_input_tokens_seen": 144703488,
      "step": 69
    },
    {
      "epoch": 0.6746987951807228,
      "grad_norm": 0.041039761155843735,
      "learning_rate": 1.1630092850023147e-05,
      "loss": 0.8873,
      "num_input_tokens_seen": 146800640,
      "step": 70
    },
    {
      "epoch": 0.6843373493975904,
      "grad_norm": 0.04071088135242462,
      "learning_rate": 1.0992094275966256e-05,
      "loss": 0.8958,
      "num_input_tokens_seen": 148897792,
      "step": 71
    },
    {
      "epoch": 0.6939759036144578,
      "grad_norm": 0.04651007801294327,
      "learning_rate": 1.0367126314618947e-05,
      "loss": 0.9171,
      "num_input_tokens_seen": 150994944,
      "step": 72
    },
    {
      "epoch": 0.7036144578313253,
      "grad_norm": 0.03853911906480789,
      "learning_rate": 9.755770331648642e-06,
      "loss": 0.9064,
      "num_input_tokens_seen": 153092096,
      "step": 73
    },
    {
      "epoch": 0.7132530120481928,
      "grad_norm": 0.03924622759222984,
      "learning_rate": 9.15859503041462e-06,
      "loss": 0.9176,
      "num_input_tokens_seen": 155189248,
      "step": 74
    },
    {
      "epoch": 0.7228915662650602,
      "grad_norm": 0.04030469059944153,
      "learning_rate": 8.576155922941548e-06,
      "loss": 0.8882,
      "num_input_tokens_seen": 157286400,
      "step": 75
    },
    {
      "epoch": 0.7325301204819277,
      "grad_norm": 0.03955009952187538,
      "learning_rate": 8.008994813163995e-06,
      "loss": 0.9081,
      "num_input_tokens_seen": 159383552,
      "step": 76
    },
    {
      "epoch": 0.7421686746987952,
      "grad_norm": 0.041652191430330276,
      "learning_rate": 7.457639292922675e-06,
      "loss": 0.8861,
      "num_input_tokens_seen": 161480704,
      "step": 77
    },
    {
      "epoch": 0.7518072289156627,
      "grad_norm": 0.041008081287145615,
      "learning_rate": 6.92260225118122e-06,
      "loss": 0.8766,
      "num_input_tokens_seen": 163577856,
      "step": 78
    },
    {
      "epoch": 0.7614457831325301,
      "grad_norm": 0.039367541670799255,
      "learning_rate": 6.4043813969201004e-06,
      "loss": 0.8789,
      "num_input_tokens_seen": 165675008,
      "step": 79
    },
    {
      "epoch": 0.7710843373493976,
      "grad_norm": 0.039804939180612564,
      "learning_rate": 5.903458796151381e-06,
      "loss": 0.8952,
      "num_input_tokens_seen": 167772160,
      "step": 80
    },
    {
      "epoch": 0.7807228915662651,
      "grad_norm": 0.03992246836423874,
      "learning_rate": 5.420300423485167e-06,
      "loss": 0.8847,
      "num_input_tokens_seen": 169869312,
      "step": 81
    },
    {
      "epoch": 0.7903614457831325,
      "grad_norm": 0.03980246186256409,
      "learning_rate": 4.95535572866474e-06,
      "loss": 0.8848,
      "num_input_tokens_seen": 171966464,
      "step": 82
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.038608700037002563,
      "learning_rate": 4.5090572184736864e-06,
      "loss": 0.8884,
      "num_input_tokens_seen": 174063616,
      "step": 83
    },
    {
      "epoch": 0.8096385542168675,
      "grad_norm": 0.03972884640097618,
      "learning_rate": 4.081820054403948e-06,
      "loss": 0.8885,
      "num_input_tokens_seen": 176160768,
      "step": 84
    },
    {
      "epoch": 0.8192771084337349,
      "grad_norm": 0.037002868950366974,
      "learning_rate": 3.674041666458963e-06,
      "loss": 0.8821,
      "num_input_tokens_seen": 178257920,
      "step": 85
    },
    {
      "epoch": 0.8289156626506025,
      "grad_norm": 0.039480820298194885,
      "learning_rate": 3.2861013834512846e-06,
      "loss": 0.9095,
      "num_input_tokens_seen": 180355072,
      "step": 86
    },
    {
      "epoch": 0.8385542168674699,
      "grad_norm": 0.039000798016786575,
      "learning_rate": 2.918360080138485e-06,
      "loss": 0.8505,
      "num_input_tokens_seen": 182452224,
      "step": 87
    },
    {
      "epoch": 0.8481927710843373,
      "grad_norm": 0.03906833752989769,
      "learning_rate": 2.57115984152565e-06,
      "loss": 0.8776,
      "num_input_tokens_seen": 184549376,
      "step": 88
    },
    {
      "epoch": 0.8578313253012049,
      "grad_norm": 0.040421612560749054,
      "learning_rate": 2.2448236446466846e-06,
      "loss": 0.8932,
      "num_input_tokens_seen": 186646528,
      "step": 89
    },
    {
      "epoch": 0.8674698795180723,
      "grad_norm": 0.03807380422949791,
      "learning_rate": 1.939655058120521e-06,
      "loss": 0.8887,
      "num_input_tokens_seen": 188743680,
      "step": 90
    },
    {
      "epoch": 0.8771084337349397,
      "grad_norm": 0.03808167204260826,
      "learning_rate": 1.6559379597616137e-06,
      "loss": 0.8613,
      "num_input_tokens_seen": 190840832,
      "step": 91
    },
    {
      "epoch": 0.8867469879518072,
      "grad_norm": 0.03954986855387688,
      "learning_rate": 1.3939362725075345e-06,
      "loss": 0.9218,
      "num_input_tokens_seen": 192937984,
      "step": 92
    },
    {
      "epoch": 0.8963855421686747,
      "grad_norm": 0.03984986990690231,
      "learning_rate": 1.1538937189091825e-06,
      "loss": 0.9037,
      "num_input_tokens_seen": 195035136,
      "step": 93
    },
    {
      "epoch": 0.9060240963855422,
      "grad_norm": 0.03950276970863342,
      "learning_rate": 9.360335944121029e-07,
      "loss": 0.8971,
      "num_input_tokens_seen": 197132288,
      "step": 94
    },
    {
      "epoch": 0.9156626506024096,
      "grad_norm": 0.04189635440707207,
      "learning_rate": 7.405585596397313e-07,
      "loss": 0.9131,
      "num_input_tokens_seen": 199229440,
      "step": 95
    },
    {
      "epoch": 0.9253012048192771,
      "grad_norm": 0.04141265153884888,
      "learning_rate": 5.676504518718761e-07,
      "loss": 0.9085,
      "num_input_tokens_seen": 201326592,
      "step": 96
    },
    {
      "epoch": 0.9349397590361446,
      "grad_norm": 0.03829219564795494,
      "learning_rate": 4.1747011589368947e-07,
      "loss": 0.8804,
      "num_input_tokens_seen": 203423744,
      "step": 97
    },
    {
      "epoch": 0.944578313253012,
      "grad_norm": 0.0407879464328289,
      "learning_rate": 2.9015725437259724e-07,
      "loss": 0.8935,
      "num_input_tokens_seen": 205520896,
      "step": 98
    },
    {
      "epoch": 0.9542168674698795,
      "grad_norm": 0.04065724089741707,
      "learning_rate": 1.8583029790230355e-07,
      "loss": 0.8984,
      "num_input_tokens_seen": 207618048,
      "step": 99
    },
    {
      "epoch": 0.963855421686747,
      "grad_norm": 0.04242575168609619,
      "learning_rate": 1.0458629483476867e-07,
      "loss": 0.9129,
      "num_input_tokens_seen": 209715200,
      "step": 100
    },
    {
      "epoch": 0.9734939759036144,
      "grad_norm": 0.04202008619904518,
      "learning_rate": 4.650082100265407e-08,
      "loss": 0.9183,
      "num_input_tokens_seen": 211812352,
      "step": 101
    },
    {
      "epoch": 0.983132530120482,
      "grad_norm": 0.04002172499895096,
      "learning_rate": 1.1627909416211947e-08,
      "loss": 0.8792,
      "num_input_tokens_seen": 213909504,
      "step": 102
    },
    {
      "epoch": 0.9927710843373494,
      "grad_norm": 0.03849833831191063,
      "learning_rate": 0.0,
      "loss": 0.8806,
      "num_input_tokens_seen": 216006656,
      "step": 103
    },
    {
      "epoch": 0.9927710843373494,
      "num_input_tokens_seen": 216006656,
      "step": 103,
      "total_flos": 9.558846039650206e+18,
      "train_loss": 0.9284970314757338,
      "train_runtime": 17321.2957,
      "train_samples_per_second": 3.064,
      "train_steps_per_second": 0.006
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 103,
  "num_input_tokens_seen": 216006656,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.558846039650206e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}