{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9997197702115734,
"eval_steps": 50,
"global_step": 892,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02241838307412078,
"grad_norm": 11.57065531644996,
"learning_rate": 1.9775784753363228e-06,
"loss": 0.2054,
"step": 10
},
{
"epoch": 0.04483676614824156,
"grad_norm": 10.25086765406488,
"learning_rate": 1.9551569506726456e-06,
"loss": 0.1492,
"step": 20
},
{
"epoch": 0.06725514922236234,
"grad_norm": 7.822767237983853,
"learning_rate": 1.9327354260089685e-06,
"loss": 0.152,
"step": 30
},
{
"epoch": 0.08967353229648312,
"grad_norm": 8.157708200242721,
"learning_rate": 1.9103139013452914e-06,
"loss": 0.1472,
"step": 40
},
{
"epoch": 0.1120919153706039,
"grad_norm": 12.8257632132892,
"learning_rate": 1.8878923766816142e-06,
"loss": 0.1356,
"step": 50
},
{
"epoch": 0.1120919153706039,
"eval_loss": 0.1209246814250946,
"eval_runtime": 477.8616,
"eval_samples_per_second": 6.288,
"eval_steps_per_second": 0.787,
"step": 50
},
{
"epoch": 0.13451029844472467,
"grad_norm": 8.90816614814061,
"learning_rate": 1.865470852017937e-06,
"loss": 0.1281,
"step": 60
},
{
"epoch": 0.15692868151884545,
"grad_norm": 9.628880600516572,
"learning_rate": 1.84304932735426e-06,
"loss": 0.1381,
"step": 70
},
{
"epoch": 0.17934706459296623,
"grad_norm": 8.899868902286775,
"learning_rate": 1.8206278026905828e-06,
"loss": 0.1324,
"step": 80
},
{
"epoch": 0.201765447667087,
"grad_norm": 130.29750942761115,
"learning_rate": 1.798206278026906e-06,
"loss": 0.1277,
"step": 90
},
{
"epoch": 0.2241838307412078,
"grad_norm": 6.4686996638845145,
"learning_rate": 1.7757847533632286e-06,
"loss": 0.1221,
"step": 100
},
{
"epoch": 0.2241838307412078,
"eval_loss": 0.11111436039209366,
"eval_runtime": 477.824,
"eval_samples_per_second": 6.289,
"eval_steps_per_second": 0.787,
"step": 100
},
{
"epoch": 0.24660221381532857,
"grad_norm": 8.356076121379255,
"learning_rate": 1.7533632286995514e-06,
"loss": 0.1198,
"step": 110
},
{
"epoch": 0.26902059688944935,
"grad_norm": 6.743428651631778,
"learning_rate": 1.7309417040358743e-06,
"loss": 0.1076,
"step": 120
},
{
"epoch": 0.29143897996357016,
"grad_norm": 6.8645809938665465,
"learning_rate": 1.7085201793721974e-06,
"loss": 0.1187,
"step": 130
},
{
"epoch": 0.3138573630376909,
"grad_norm": 5.999842993813071,
"learning_rate": 1.68609865470852e-06,
"loss": 0.1182,
"step": 140
},
{
"epoch": 0.3362757461118117,
"grad_norm": 7.7478230858078305,
"learning_rate": 1.6636771300448429e-06,
"loss": 0.1127,
"step": 150
},
{
"epoch": 0.3362757461118117,
"eval_loss": 0.10507839918136597,
"eval_runtime": 477.8167,
"eval_samples_per_second": 6.289,
"eval_steps_per_second": 0.787,
"step": 150
},
{
"epoch": 0.35869412918593246,
"grad_norm": 6.564504796683449,
"learning_rate": 1.641255605381166e-06,
"loss": 0.1144,
"step": 160
},
{
"epoch": 0.38111251226005327,
"grad_norm": 20.75971255482363,
"learning_rate": 1.6188340807174888e-06,
"loss": 0.1078,
"step": 170
},
{
"epoch": 0.403530895334174,
"grad_norm": 6.913685427209001,
"learning_rate": 1.5964125560538115e-06,
"loss": 0.1189,
"step": 180
},
{
"epoch": 0.42594927840829483,
"grad_norm": 11.143255383687194,
"learning_rate": 1.5739910313901345e-06,
"loss": 0.1189,
"step": 190
},
{
"epoch": 0.4483676614824156,
"grad_norm": 4.747347623410045,
"learning_rate": 1.5515695067264574e-06,
"loss": 0.1072,
"step": 200
},
{
"epoch": 0.4483676614824156,
"eval_loss": 0.09710206091403961,
"eval_runtime": 477.8045,
"eval_samples_per_second": 6.289,
"eval_steps_per_second": 0.787,
"step": 200
},
{
"epoch": 0.4707860445565364,
"grad_norm": 5.512284850848945,
"learning_rate": 1.5291479820627803e-06,
"loss": 0.1136,
"step": 210
},
{
"epoch": 0.49320442763065714,
"grad_norm": 17.278200474599828,
"learning_rate": 1.506726457399103e-06,
"loss": 0.1094,
"step": 220
},
{
"epoch": 0.5156228107047779,
"grad_norm": 46.879816629413476,
"learning_rate": 1.484304932735426e-06,
"loss": 0.1091,
"step": 230
},
{
"epoch": 0.5380411937788987,
"grad_norm": 8.493451669290872,
"learning_rate": 1.4618834080717489e-06,
"loss": 0.1019,
"step": 240
},
{
"epoch": 0.5604595768530195,
"grad_norm": 9.286929395356434,
"learning_rate": 1.4394618834080715e-06,
"loss": 0.1075,
"step": 250
},
{
"epoch": 0.5604595768530195,
"eval_loss": 0.09229769557714462,
"eval_runtime": 477.6411,
"eval_samples_per_second": 6.291,
"eval_steps_per_second": 0.787,
"step": 250
},
{
"epoch": 0.5828779599271403,
"grad_norm": 7.559299755990858,
"learning_rate": 1.4170403587443946e-06,
"loss": 0.0985,
"step": 260
},
{
"epoch": 0.605296343001261,
"grad_norm": 3.9026812483393387,
"learning_rate": 1.3946188340807175e-06,
"loss": 0.0896,
"step": 270
},
{
"epoch": 0.6277147260753818,
"grad_norm": 8.335653074155596,
"learning_rate": 1.3721973094170403e-06,
"loss": 0.0893,
"step": 280
},
{
"epoch": 0.6501331091495026,
"grad_norm": 6.897239653871222,
"learning_rate": 1.349775784753363e-06,
"loss": 0.0914,
"step": 290
},
{
"epoch": 0.6725514922236234,
"grad_norm": 29.648594106464095,
"learning_rate": 1.327354260089686e-06,
"loss": 0.0972,
"step": 300
},
{
"epoch": 0.6725514922236234,
"eval_loss": 0.08544214069843292,
"eval_runtime": 477.8106,
"eval_samples_per_second": 6.289,
"eval_steps_per_second": 0.787,
"step": 300
},
{
"epoch": 0.6949698752977441,
"grad_norm": 3.969809179016744,
"learning_rate": 1.304932735426009e-06,
"loss": 0.089,
"step": 310
},
{
"epoch": 0.7173882583718649,
"grad_norm": 5.34402617506164,
"learning_rate": 1.2825112107623318e-06,
"loss": 0.0867,
"step": 320
},
{
"epoch": 0.7398066414459857,
"grad_norm": 5.506697389437645,
"learning_rate": 1.2600896860986546e-06,
"loss": 0.0803,
"step": 330
},
{
"epoch": 0.7622250245201065,
"grad_norm": 8.056193416126446,
"learning_rate": 1.2376681614349775e-06,
"loss": 0.0917,
"step": 340
},
{
"epoch": 0.7846434075942272,
"grad_norm": 4.7612681555615,
"learning_rate": 1.2152466367713004e-06,
"loss": 0.089,
"step": 350
},
{
"epoch": 0.7846434075942272,
"eval_loss": 0.07997283339500427,
"eval_runtime": 477.8824,
"eval_samples_per_second": 6.288,
"eval_steps_per_second": 0.787,
"step": 350
},
{
"epoch": 0.807061790668348,
"grad_norm": 9.702063628974917,
"learning_rate": 1.1928251121076232e-06,
"loss": 0.0886,
"step": 360
},
{
"epoch": 0.8294801737424689,
"grad_norm": 4.937196401368392,
"learning_rate": 1.170403587443946e-06,
"loss": 0.0805,
"step": 370
},
{
"epoch": 0.8518985568165897,
"grad_norm": 6.8769697579900555,
"learning_rate": 1.147982062780269e-06,
"loss": 0.0754,
"step": 380
},
{
"epoch": 0.8743169398907104,
"grad_norm": 10.270492248069893,
"learning_rate": 1.1255605381165918e-06,
"loss": 0.0855,
"step": 390
},
{
"epoch": 0.8967353229648312,
"grad_norm": 8.129704483519474,
"learning_rate": 1.103139013452915e-06,
"loss": 0.0898,
"step": 400
},
{
"epoch": 0.8967353229648312,
"eval_loss": 0.0756540298461914,
"eval_runtime": 477.7284,
"eval_samples_per_second": 6.29,
"eval_steps_per_second": 0.787,
"step": 400
},
{
"epoch": 0.919153706038952,
"grad_norm": 7.785886269800572,
"learning_rate": 1.0807174887892376e-06,
"loss": 0.0827,
"step": 410
},
{
"epoch": 0.9415720891130728,
"grad_norm": 7.119394503978009,
"learning_rate": 1.0582959641255604e-06,
"loss": 0.0862,
"step": 420
},
{
"epoch": 0.9639904721871935,
"grad_norm": 5.06264160032142,
"learning_rate": 1.0358744394618835e-06,
"loss": 0.078,
"step": 430
},
{
"epoch": 0.9864088552613143,
"grad_norm": 6.034467278570911,
"learning_rate": 1.0134529147982064e-06,
"loss": 0.0745,
"step": 440
},
{
"epoch": 1.008827238335435,
"grad_norm": 6.107563557049032,
"learning_rate": 9.91031390134529e-07,
"loss": 0.0635,
"step": 450
},
{
"epoch": 1.008827238335435,
"eval_loss": 0.07114721089601517,
"eval_runtime": 477.6398,
"eval_samples_per_second": 6.291,
"eval_steps_per_second": 0.787,
"step": 450
},
{
"epoch": 1.0312456214095558,
"grad_norm": 8.472989007522791,
"learning_rate": 9.68609865470852e-07,
"loss": 0.0687,
"step": 460
},
{
"epoch": 1.0536640044836767,
"grad_norm": 5.080789399491274,
"learning_rate": 9.461883408071749e-07,
"loss": 0.0635,
"step": 470
},
{
"epoch": 1.0760823875577974,
"grad_norm": 8.06976721278842,
"learning_rate": 9.237668161434977e-07,
"loss": 0.0674,
"step": 480
},
{
"epoch": 1.098500770631918,
"grad_norm": 9.00400557879772,
"learning_rate": 9.013452914798207e-07,
"loss": 0.0623,
"step": 490
},
{
"epoch": 1.120919153706039,
"grad_norm": 10.419684143484417,
"learning_rate": 8.789237668161434e-07,
"loss": 0.0742,
"step": 500
},
{
"epoch": 1.120919153706039,
"eval_loss": 0.06726241111755371,
"eval_runtime": 478.2051,
"eval_samples_per_second": 6.284,
"eval_steps_per_second": 0.786,
"step": 500
},
{
"epoch": 1.1433375367801597,
"grad_norm": 5.834457752004053,
"learning_rate": 8.565022421524663e-07,
"loss": 0.0698,
"step": 510
},
{
"epoch": 1.1657559198542806,
"grad_norm": 5.920575213015667,
"learning_rate": 8.340807174887892e-07,
"loss": 0.061,
"step": 520
},
{
"epoch": 1.1881743029284013,
"grad_norm": 11.4664445988588,
"learning_rate": 8.11659192825112e-07,
"loss": 0.0612,
"step": 530
},
{
"epoch": 1.210592686002522,
"grad_norm": 5.074793088570693,
"learning_rate": 7.892376681614349e-07,
"loss": 0.0582,
"step": 540
},
{
"epoch": 1.233011069076643,
"grad_norm": 4.964609158362768,
"learning_rate": 7.668161434977578e-07,
"loss": 0.0627,
"step": 550
},
{
"epoch": 1.233011069076643,
"eval_loss": 0.06428983807563782,
"eval_runtime": 477.8813,
"eval_samples_per_second": 6.288,
"eval_steps_per_second": 0.787,
"step": 550
},
{
"epoch": 1.2554294521507636,
"grad_norm": 5.775433133659037,
"learning_rate": 7.443946188340807e-07,
"loss": 0.0613,
"step": 560
},
{
"epoch": 1.2778478352248843,
"grad_norm": 7.504088139984216,
"learning_rate": 7.219730941704035e-07,
"loss": 0.0625,
"step": 570
},
{
"epoch": 1.3002662182990052,
"grad_norm": 12.423874958836857,
"learning_rate": 6.995515695067265e-07,
"loss": 0.0541,
"step": 580
},
{
"epoch": 1.322684601373126,
"grad_norm": 13.037612748873672,
"learning_rate": 6.771300448430492e-07,
"loss": 0.0638,
"step": 590
},
{
"epoch": 1.3451029844472466,
"grad_norm": 7.643633731840427,
"learning_rate": 6.547085201793722e-07,
"loss": 0.0528,
"step": 600
},
{
"epoch": 1.3451029844472466,
"eval_loss": 0.06060384213924408,
"eval_runtime": 477.628,
"eval_samples_per_second": 6.292,
"eval_steps_per_second": 0.787,
"step": 600
},
{
"epoch": 1.3675213675213675,
"grad_norm": 9.627024940339941,
"learning_rate": 6.322869955156951e-07,
"loss": 0.0569,
"step": 610
},
{
"epoch": 1.3899397505954882,
"grad_norm": 14.780208722543843,
"learning_rate": 6.098654708520179e-07,
"loss": 0.0504,
"step": 620
},
{
"epoch": 1.4123581336696092,
"grad_norm": 7.999021557044027,
"learning_rate": 5.874439461883408e-07,
"loss": 0.0609,
"step": 630
},
{
"epoch": 1.4347765167437299,
"grad_norm": 8.443684543866313,
"learning_rate": 5.650224215246636e-07,
"loss": 0.0531,
"step": 640
},
{
"epoch": 1.4571948998178508,
"grad_norm": 3.5781417864085086,
"learning_rate": 5.426008968609865e-07,
"loss": 0.0542,
"step": 650
},
{
"epoch": 1.4571948998178508,
"eval_loss": 0.057715680450201035,
"eval_runtime": 477.9099,
"eval_samples_per_second": 6.288,
"eval_steps_per_second": 0.787,
"step": 650
},
{
"epoch": 1.4796132828919715,
"grad_norm": 6.394299603034078,
"learning_rate": 5.201793721973094e-07,
"loss": 0.0549,
"step": 660
},
{
"epoch": 1.5020316659660922,
"grad_norm": 6.251887881398542,
"learning_rate": 4.977578475336322e-07,
"loss": 0.0536,
"step": 670
},
{
"epoch": 1.524450049040213,
"grad_norm": 4.45363233623469,
"learning_rate": 4.753363228699551e-07,
"loss": 0.0519,
"step": 680
},
{
"epoch": 1.5468684321143338,
"grad_norm": 7.236100371063919,
"learning_rate": 4.5291479820627797e-07,
"loss": 0.0516,
"step": 690
},
{
"epoch": 1.5692868151884545,
"grad_norm": 11.475278894303377,
"learning_rate": 4.304932735426009e-07,
"loss": 0.0463,
"step": 700
},
{
"epoch": 1.5692868151884545,
"eval_loss": 0.05437139794230461,
"eval_runtime": 477.9054,
"eval_samples_per_second": 6.288,
"eval_steps_per_second": 0.787,
"step": 700
},
{
"epoch": 1.5917051982625754,
"grad_norm": 6.676408323109306,
"learning_rate": 4.0807174887892375e-07,
"loss": 0.0532,
"step": 710
},
{
"epoch": 1.614123581336696,
"grad_norm": 4.965760634983937,
"learning_rate": 3.856502242152466e-07,
"loss": 0.0545,
"step": 720
},
{
"epoch": 1.6365419644108168,
"grad_norm": 27.900762279082834,
"learning_rate": 3.632286995515695e-07,
"loss": 0.0544,
"step": 730
},
{
"epoch": 1.6589603474849377,
"grad_norm": 5.498723625213236,
"learning_rate": 3.4080717488789235e-07,
"loss": 0.0518,
"step": 740
},
{
"epoch": 1.6813787305590584,
"grad_norm": 5.5744858185141775,
"learning_rate": 3.183856502242152e-07,
"loss": 0.0463,
"step": 750
},
{
"epoch": 1.6813787305590584,
"eval_loss": 0.05197111889719963,
"eval_runtime": 477.8827,
"eval_samples_per_second": 6.288,
"eval_steps_per_second": 0.787,
"step": 750
},
{
"epoch": 1.703797113633179,
"grad_norm": 5.741468990313337,
"learning_rate": 2.9596412556053813e-07,
"loss": 0.0454,
"step": 760
},
{
"epoch": 1.7262154967073,
"grad_norm": 9.331526709026493,
"learning_rate": 2.73542600896861e-07,
"loss": 0.0448,
"step": 770
},
{
"epoch": 1.748633879781421,
"grad_norm": 8.801924259928679,
"learning_rate": 2.5112107623318386e-07,
"loss": 0.0491,
"step": 780
},
{
"epoch": 1.7710522628555414,
"grad_norm": 3.9216347240361435,
"learning_rate": 2.2869955156950672e-07,
"loss": 0.0456,
"step": 790
},
{
"epoch": 1.7934706459296623,
"grad_norm": 5.775711795911055,
"learning_rate": 2.062780269058296e-07,
"loss": 0.0434,
"step": 800
},
{
"epoch": 1.7934706459296623,
"eval_loss": 0.04976892098784447,
"eval_runtime": 477.8498,
"eval_samples_per_second": 6.289,
"eval_steps_per_second": 0.787,
"step": 800
},
{
"epoch": 1.8158890290037832,
"grad_norm": 15.832420424237657,
"learning_rate": 1.8385650224215245e-07,
"loss": 0.0549,
"step": 810
},
{
"epoch": 1.838307412077904,
"grad_norm": 11.331927257096979,
"learning_rate": 1.6143497757847531e-07,
"loss": 0.0479,
"step": 820
},
{
"epoch": 1.8607257951520246,
"grad_norm": 3.4779552694261846,
"learning_rate": 1.390134529147982e-07,
"loss": 0.045,
"step": 830
},
{
"epoch": 1.8831441782261455,
"grad_norm": 14.096117466941674,
"learning_rate": 1.1659192825112107e-07,
"loss": 0.0468,
"step": 840
},
{
"epoch": 1.9055625613002662,
"grad_norm": 4.258023342595773,
"learning_rate": 9.417040358744393e-08,
"loss": 0.043,
"step": 850
},
{
"epoch": 1.9055625613002662,
"eval_loss": 0.047411367297172546,
"eval_runtime": 478.0538,
"eval_samples_per_second": 6.286,
"eval_steps_per_second": 0.787,
"step": 850
},
{
"epoch": 1.927980944374387,
"grad_norm": 4.472323710821738,
"learning_rate": 7.174887892376681e-08,
"loss": 0.0485,
"step": 860
},
{
"epoch": 1.9503993274485079,
"grad_norm": 8.238550394871776,
"learning_rate": 4.932735426008968e-08,
"loss": 0.0461,
"step": 870
},
{
"epoch": 1.9728177105226286,
"grad_norm": 3.661996756017377,
"learning_rate": 2.6905829596412556e-08,
"loss": 0.0414,
"step": 880
},
{
"epoch": 1.9952360935967492,
"grad_norm": 4.240752961770031,
"learning_rate": 4.484304932735426e-09,
"loss": 0.0496,
"step": 890
}
],
"logging_steps": 10,
"max_steps": 892,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 360,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1364495346169283e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
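
Note: the "log_history" array above interleaves training entries (keyed by "loss" and "grad_norm") with evaluation entries (keyed by "eval_loss"), both sharing the "step" field. A minimal sketch for inspecting the file outside the Trainer, assuming Python with matplotlib installed and the file in the working directory (the file name and output path here are illustrative):

import json
import matplotlib.pyplot as plt

# Load the trainer state written by transformers.Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Split log_history into training and evaluation records by their keys.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

# Plot both curves against the global step.
plt.plot([s for s, _ in train], [v for _, v in train], label="train loss")
plt.plot([s for s, _ in evals], [v for _, v in evals], label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")  # illustrative output path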