{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.979683972911964,
"eval_steps": 50,
"global_step": 330,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.045146726862302484,
"grad_norm": 18.47902498006474,
"learning_rate": 5e-07,
"loss": 1.7294,
"step": 5
},
{
"epoch": 0.09029345372460497,
"grad_norm": 12.268601512539195,
"learning_rate": 1e-06,
"loss": 1.5978,
"step": 10
},
{
"epoch": 0.13544018058690746,
"grad_norm": 7.473453913673138,
"learning_rate": 9.993977281025862e-07,
"loss": 1.2909,
"step": 15
},
{
"epoch": 0.18058690744920994,
"grad_norm": 4.242889467689194,
"learning_rate": 9.975923633360984e-07,
"loss": 1.1281,
"step": 20
},
{
"epoch": 0.22573363431151242,
"grad_norm": 3.7511187742202794,
"learning_rate": 9.945882549823904e-07,
"loss": 1.0669,
"step": 25
},
{
"epoch": 0.2708803611738149,
"grad_norm": 3.7686497891232653,
"learning_rate": 9.90392640201615e-07,
"loss": 1.0131,
"step": 30
},
{
"epoch": 0.3160270880361174,
"grad_norm": 3.62119206079501,
"learning_rate": 9.85015626597272e-07,
"loss": 1.0054,
"step": 35
},
{
"epoch": 0.3611738148984199,
"grad_norm": 3.849448662188167,
"learning_rate": 9.784701678661044e-07,
"loss": 0.9772,
"step": 40
},
{
"epoch": 0.40632054176072235,
"grad_norm": 3.717637360909183,
"learning_rate": 9.707720325915103e-07,
"loss": 0.9666,
"step": 45
},
{
"epoch": 0.45146726862302483,
"grad_norm": 3.335114517852652,
"learning_rate": 9.619397662556433e-07,
"loss": 0.9349,
"step": 50
},
{
"epoch": 0.45146726862302483,
"eval_loss": 0.9357019066810608,
"eval_runtime": 56.3673,
"eval_samples_per_second": 55.883,
"eval_steps_per_second": 0.887,
"step": 50
},
{
"epoch": 0.4966139954853273,
"grad_norm": 3.5546589218606477,
"learning_rate": 9.519946465617217e-07,
"loss": 0.9345,
"step": 55
},
{
"epoch": 0.5417607223476298,
"grad_norm": 3.531001152680907,
"learning_rate": 9.409606321741774e-07,
"loss": 0.9188,
"step": 60
},
{
"epoch": 0.5869074492099323,
"grad_norm": 3.5692359406823804,
"learning_rate": 9.28864305000136e-07,
"loss": 0.9116,
"step": 65
},
{
"epoch": 0.6320541760722348,
"grad_norm": 3.702385519615215,
"learning_rate": 9.157348061512726e-07,
"loss": 0.9161,
"step": 70
},
{
"epoch": 0.6772009029345373,
"grad_norm": 3.4298013712048716,
"learning_rate": 9.016037657403223e-07,
"loss": 0.8958,
"step": 75
},
{
"epoch": 0.7223476297968398,
"grad_norm": 3.4040303578876725,
"learning_rate": 8.865052266813685e-07,
"loss": 0.8658,
"step": 80
},
{
"epoch": 0.7674943566591422,
"grad_norm": 3.5133332427335895,
"learning_rate": 8.704755626774795e-07,
"loss": 0.8939,
"step": 85
},
{
"epoch": 0.8126410835214447,
"grad_norm": 3.500121545145948,
"learning_rate": 8.535533905932737e-07,
"loss": 0.8927,
"step": 90
},
{
"epoch": 0.8577878103837472,
"grad_norm": 3.2225864446847545,
"learning_rate": 8.357794774235092e-07,
"loss": 0.8711,
"step": 95
},
{
"epoch": 0.9029345372460497,
"grad_norm": 3.632794695134748,
"learning_rate": 8.171966420818227e-07,
"loss": 0.892,
"step": 100
},
{
"epoch": 0.9029345372460497,
"eval_loss": 0.8807913064956665,
"eval_runtime": 55.4559,
"eval_samples_per_second": 56.802,
"eval_steps_per_second": 0.902,
"step": 100
},
{
"epoch": 0.9480812641083521,
"grad_norm": 3.8825573929061155,
"learning_rate": 7.978496522462167e-07,
"loss": 0.8746,
"step": 105
},
{
"epoch": 0.9932279909706546,
"grad_norm": 3.325010031368483,
"learning_rate": 7.777851165098011e-07,
"loss": 0.8682,
"step": 110
},
{
"epoch": 1.0383747178329572,
"grad_norm": 3.349437996562217,
"learning_rate": 7.570513720966107e-07,
"loss": 0.8283,
"step": 115
},
{
"epoch": 1.0835214446952597,
"grad_norm": 3.5602949032422853,
"learning_rate": 7.356983684129989e-07,
"loss": 0.8152,
"step": 120
},
{
"epoch": 1.1286681715575622,
"grad_norm": 3.4642871655890644,
"learning_rate": 7.13777546715141e-07,
"loss": 0.8002,
"step": 125
},
{
"epoch": 1.1738148984198646,
"grad_norm": 3.4004037196199204,
"learning_rate": 6.913417161825449e-07,
"loss": 0.7893,
"step": 130
},
{
"epoch": 1.2189616252821671,
"grad_norm": 3.865156874647086,
"learning_rate": 6.684449266961099e-07,
"loss": 0.7828,
"step": 135
},
{
"epoch": 1.2641083521444696,
"grad_norm": 3.298117419702452,
"learning_rate": 6.451423386272311e-07,
"loss": 0.792,
"step": 140
},
{
"epoch": 1.309255079006772,
"grad_norm": 3.5667007043819354,
"learning_rate": 6.21490089951632e-07,
"loss": 0.8115,
"step": 145
},
{
"epoch": 1.3544018058690745,
"grad_norm": 3.662661160157946,
"learning_rate": 5.975451610080642e-07,
"loss": 0.8055,
"step": 150
},
{
"epoch": 1.3544018058690745,
"eval_loss": 0.8635040521621704,
"eval_runtime": 55.5373,
"eval_samples_per_second": 56.719,
"eval_steps_per_second": 0.9,
"step": 150
},
{
"epoch": 1.399548532731377,
"grad_norm": 3.452424328507076,
"learning_rate": 5.733652372276809e-07,
"loss": 0.8008,
"step": 155
},
{
"epoch": 1.4446952595936795,
"grad_norm": 3.619887615175037,
"learning_rate": 5.490085701647804e-07,
"loss": 0.7882,
"step": 160
},
{
"epoch": 1.489841986455982,
"grad_norm": 3.706405826898614,
"learning_rate": 5.245338371637091e-07,
"loss": 0.8086,
"step": 165
},
{
"epoch": 1.5349887133182845,
"grad_norm": 3.5950186032164213,
"learning_rate": 5e-07,
"loss": 0.7867,
"step": 170
},
{
"epoch": 1.580135440180587,
"grad_norm": 3.3585631265829803,
"learning_rate": 4.75466162836291e-07,
"loss": 0.7887,
"step": 175
},
{
"epoch": 1.6252821670428894,
"grad_norm": 3.460817031523478,
"learning_rate": 4.5099142983521963e-07,
"loss": 0.7808,
"step": 180
},
{
"epoch": 1.670428893905192,
"grad_norm": 3.400007334758639,
"learning_rate": 4.2663476277231915e-07,
"loss": 0.7903,
"step": 185
},
{
"epoch": 1.7155756207674944,
"grad_norm": 3.480860463387537,
"learning_rate": 4.0245483899193586e-07,
"loss": 0.7836,
"step": 190
},
{
"epoch": 1.7607223476297968,
"grad_norm": 3.589587047175883,
"learning_rate": 3.785099100483681e-07,
"loss": 0.7762,
"step": 195
},
{
"epoch": 1.8058690744920993,
"grad_norm": 3.6274088132542195,
"learning_rate": 3.548576613727689e-07,
"loss": 0.7883,
"step": 200
},
{
"epoch": 1.8058690744920993,
"eval_loss": 0.851189374923706,
"eval_runtime": 55.4816,
"eval_samples_per_second": 56.776,
"eval_steps_per_second": 0.901,
"step": 200
},
{
"epoch": 1.8510158013544018,
"grad_norm": 3.372357535141687,
"learning_rate": 3.3155507330388996e-07,
"loss": 0.7971,
"step": 205
},
{
"epoch": 1.8961625282167043,
"grad_norm": 3.6104920795328432,
"learning_rate": 3.086582838174551e-07,
"loss": 0.7746,
"step": 210
},
{
"epoch": 1.9413092550790068,
"grad_norm": 3.4811562266179483,
"learning_rate": 2.8622245328485907e-07,
"loss": 0.7953,
"step": 215
},
{
"epoch": 1.9864559819413092,
"grad_norm": 3.3951785256342033,
"learning_rate": 2.6430163158700113e-07,
"loss": 0.7852,
"step": 220
},
{
"epoch": 2.0316027088036117,
"grad_norm": 3.7824382386544113,
"learning_rate": 2.4294862790338916e-07,
"loss": 0.7419,
"step": 225
},
{
"epoch": 2.0767494356659144,
"grad_norm": 3.5530924921187497,
"learning_rate": 2.2221488349019902e-07,
"loss": 0.7353,
"step": 230
},
{
"epoch": 2.1218961625282167,
"grad_norm": 3.926435566036555,
"learning_rate": 2.021503477537833e-07,
"loss": 0.7404,
"step": 235
},
{
"epoch": 2.1670428893905194,
"grad_norm": 3.6537792748989526,
"learning_rate": 1.828033579181773e-07,
"loss": 0.7461,
"step": 240
},
{
"epoch": 2.2121896162528216,
"grad_norm": 3.722026306821089,
"learning_rate": 1.6422052257649077e-07,
"loss": 0.7317,
"step": 245
},
{
"epoch": 2.2573363431151243,
"grad_norm": 3.5765809981409293,
"learning_rate": 1.4644660940672627e-07,
"loss": 0.7298,
"step": 250
},
{
"epoch": 2.2573363431151243,
"eval_loss": 0.8508689403533936,
"eval_runtime": 55.5219,
"eval_samples_per_second": 56.734,
"eval_steps_per_second": 0.901,
"step": 250
},
{
"epoch": 2.3024830699774266,
"grad_norm": 3.7853317798872657,
"learning_rate": 1.2952443732252054e-07,
"loss": 0.7424,
"step": 255
},
{
"epoch": 2.3476297968397293,
"grad_norm": 3.7759516091418424,
"learning_rate": 1.134947733186315e-07,
"loss": 0.7368,
"step": 260
},
{
"epoch": 2.3927765237020315,
"grad_norm": 3.720889448963161,
"learning_rate": 9.839623425967758e-08,
"loss": 0.7319,
"step": 265
},
{
"epoch": 2.4379232505643342,
"grad_norm": 3.7006360348993064,
"learning_rate": 8.426519384872732e-08,
"loss": 0.7323,
"step": 270
},
{
"epoch": 2.4830699774266365,
"grad_norm": 3.7423771001420145,
"learning_rate": 7.1135694999864e-08,
"loss": 0.718,
"step": 275
},
{
"epoch": 2.528216704288939,
"grad_norm": 3.8150063518186688,
"learning_rate": 5.9039367825822526e-08,
"loss": 0.7243,
"step": 280
},
{
"epoch": 2.5733634311512414,
"grad_norm": 3.4978009765519373,
"learning_rate": 4.800535343827833e-08,
"loss": 0.7284,
"step": 285
},
{
"epoch": 2.618510158013544,
"grad_norm": 3.6340807089756075,
"learning_rate": 3.806023374435663e-08,
"loss": 0.7304,
"step": 290
},
{
"epoch": 2.6636568848758464,
"grad_norm": 3.8051705983031003,
"learning_rate": 2.922796740848965e-08,
"loss": 0.7372,
"step": 295
},
{
"epoch": 2.708803611738149,
"grad_norm": 4.590077919016769,
"learning_rate": 2.1529832133895588e-08,
"loss": 0.7224,
"step": 300
},
{
"epoch": 2.708803611738149,
"eval_loss": 0.849506676197052,
"eval_runtime": 55.5608,
"eval_samples_per_second": 56.695,
"eval_steps_per_second": 0.9,
"step": 300
},
{
"epoch": 2.7539503386004514,
"grad_norm": 3.6534823714780393,
"learning_rate": 1.4984373402728012e-08,
"loss": 0.7413,
"step": 305
},
{
"epoch": 2.799097065462754,
"grad_norm": 3.564552616439515,
"learning_rate": 9.607359798384784e-09,
"loss": 0.7415,
"step": 310
},
{
"epoch": 2.8442437923250563,
"grad_norm": 3.9260807148979766,
"learning_rate": 5.411745017609493e-09,
"loss": 0.736,
"step": 315
},
{
"epoch": 2.889390519187359,
"grad_norm": 3.63819093973674,
"learning_rate": 2.407636663901591e-09,
"loss": 0.7229,
"step": 320
},
{
"epoch": 2.9345372460496613,
"grad_norm": 3.9737291373937405,
"learning_rate": 6.022718974137975e-10,
"loss": 0.7351,
"step": 325
},
{
"epoch": 2.979683972911964,
"grad_norm": 3.64396905938945,
"learning_rate": 0.0,
"loss": 0.7152,
"step": 330
},
{
"epoch": 2.979683972911964,
"step": 330,
"total_flos": 1945595623243776.0,
"train_loss": 0.8490383249340635,
"train_runtime": 4686.7005,
"train_samples_per_second": 18.147,
"train_steps_per_second": 0.07
}
],
"logging_steps": 5,
"max_steps": 330,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1945595623243776.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}