medical_gemma3_1b_grpo / trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 46,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.021739130434782608,
"grad_norm": 21.980525970458984,
"learning_rate": 0.0,
"loss": 2.2134,
"step": 1
},
{
"epoch": 0.043478260869565216,
"grad_norm": 20.06768798828125,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.5251,
"step": 2
},
{
"epoch": 0.06521739130434782,
"grad_norm": 23.364561080932617,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.2033,
"step": 3
},
{
"epoch": 0.08695652173913043,
"grad_norm": 19.796825408935547,
"learning_rate": 3e-06,
"loss": 1.9776,
"step": 4
},
{
"epoch": 0.10869565217391304,
"grad_norm": 21.887332916259766,
"learning_rate": 4.000000000000001e-06,
"loss": 2.2999,
"step": 5
},
{
"epoch": 0.13043478260869565,
"grad_norm": 20.906566619873047,
"learning_rate": 5e-06,
"loss": 1.9261,
"step": 6
},
{
"epoch": 0.15217391304347827,
"grad_norm": 20.153642654418945,
"learning_rate": 4.992664502959351e-06,
"loss": 1.8537,
"step": 7
},
{
"epoch": 0.17391304347826086,
"grad_norm": 19.199687957763672,
"learning_rate": 4.970701059450872e-06,
"loss": 1.9019,
"step": 8
},
{
"epoch": 0.1956521739130435,
"grad_norm": 17.73782730102539,
"learning_rate": 4.934238559694448e-06,
"loss": 1.564,
"step": 9
},
{
"epoch": 0.21739130434782608,
"grad_norm": 16.246173858642578,
"learning_rate": 4.883490980137327e-06,
"loss": 1.6703,
"step": 10
},
{
"epoch": 0.2391304347826087,
"grad_norm": 1828651.875,
"learning_rate": 4.8187561277552376e-06,
"loss": 1.4389,
"step": 11
},
{
"epoch": 0.2608695652173913,
"grad_norm": 13.8928861618042,
"learning_rate": 4.740413892402639e-06,
"loss": 1.3761,
"step": 12
},
{
"epoch": 0.2826086956521739,
"grad_norm": 720403.375,
"learning_rate": 4.648924017468003e-06,
"loss": 1.212,
"step": 13
},
{
"epoch": 0.30434782608695654,
"grad_norm": 14.764030456542969,
"learning_rate": 4.544823401916794e-06,
"loss": 1.5574,
"step": 14
},
{
"epoch": 0.32608695652173914,
"grad_norm": 12.913108825683594,
"learning_rate": 4.428722949554858e-06,
"loss": 1.4192,
"step": 15
},
{
"epoch": 0.34782608695652173,
"grad_norm": 10.674172401428223,
"learning_rate": 4.3013039840019675e-06,
"loss": 1.2746,
"step": 16
},
{
"epoch": 0.3695652173913043,
"grad_norm": 9.80074691772461,
"learning_rate": 4.163314250413913e-06,
"loss": 1.072,
"step": 17
},
{
"epoch": 0.391304347826087,
"grad_norm": 10.42562484741211,
"learning_rate": 4.015563527416596e-06,
"loss": 0.9013,
"step": 18
},
{
"epoch": 0.41304347826086957,
"grad_norm": 10.28138256072998,
"learning_rate": 3.858918875003053e-06,
"loss": 1.3421,
"step": 19
},
{
"epoch": 0.43478260869565216,
"grad_norm": 9.2878999710083,
"learning_rate": 3.6942995462806574e-06,
"loss": 1.1947,
"step": 20
},
{
"epoch": 0.45652173913043476,
"grad_norm": 10.689302444458008,
"learning_rate": 3.5226715929283507e-06,
"loss": 1.1843,
"step": 21
},
{
"epoch": 0.4782608695652174,
"grad_norm": 11.090900421142578,
"learning_rate": 3.345042196021257e-06,
"loss": 1.2872,
"step": 22
},
{
"epoch": 0.5,
"grad_norm": 8.946170806884766,
"learning_rate": 3.162453755491655e-06,
"loss": 1.1389,
"step": 23
},
{
"epoch": 0.5217391304347826,
"grad_norm": 7.288694381713867,
"learning_rate": 2.975977772911671e-06,
"loss": 1.0438,
"step": 24
},
{
"epoch": 0.5434782608695652,
"grad_norm": 7.09510612487793,
"learning_rate": 2.786708563496002e-06,
"loss": 0.7908,
"step": 25
},
{
"epoch": 0.5652173913043478,
"grad_norm": 7.513128280639648,
"learning_rate": 2.595756834225089e-06,
"loss": 0.8884,
"step": 26
},
{
"epoch": 0.5869565217391305,
"grad_norm": 16309.765625,
"learning_rate": 2.404243165774912e-06,
"loss": 1.3966,
"step": 27
},
{
"epoch": 0.6086956521739131,
"grad_norm": 9.216928482055664,
"learning_rate": 2.2132914365039993e-06,
"loss": 0.7335,
"step": 28
},
{
"epoch": 0.6304347826086957,
"grad_norm": 8.130733489990234,
"learning_rate": 2.024022227088329e-06,
"loss": 1.4854,
"step": 29
},
{
"epoch": 0.6521739130434783,
"grad_norm": 7.4941534996032715,
"learning_rate": 1.8375462445083464e-06,
"loss": 0.6889,
"step": 30
},
{
"epoch": 0.6739130434782609,
"grad_norm": 8.134994506835938,
"learning_rate": 1.6549578039787436e-06,
"loss": 1.0505,
"step": 31
},
{
"epoch": 0.6956521739130435,
"grad_norm": 7.74264669418335,
"learning_rate": 1.4773284070716504e-06,
"loss": 0.9686,
"step": 32
},
{
"epoch": 0.717391304347826,
"grad_norm": 8.992671012878418,
"learning_rate": 1.3057004537193424e-06,
"loss": 1.0842,
"step": 33
},
{
"epoch": 0.7391304347826086,
"grad_norm": 7.693203449249268,
"learning_rate": 1.1410811249969475e-06,
"loss": 0.8311,
"step": 34
},
{
"epoch": 0.7608695652173914,
"grad_norm": 6.577566623687744,
"learning_rate": 9.844364725834058e-07,
"loss": 0.8217,
"step": 35
},
{
"epoch": 0.782608695652174,
"grad_norm": 10.145994186401367,
"learning_rate": 8.366857495860869e-07,
"loss": 0.8935,
"step": 36
},
{
"epoch": 0.8043478260869565,
"grad_norm": 6.9202375411987305,
"learning_rate": 6.986960159980327e-07,
"loss": 0.8456,
"step": 37
},
{
"epoch": 0.8260869565217391,
"grad_norm": 6.42577600479126,
"learning_rate": 5.712770504451426e-07,
"loss": 0.7391,
"step": 38
},
{
"epoch": 0.8478260869565217,
"grad_norm": 8.795654296875,
"learning_rate": 4.55176598083206e-07,
"loss": 1.1633,
"step": 39
},
{
"epoch": 0.8695652173913043,
"grad_norm": 9.373461723327637,
"learning_rate": 3.510759825319976e-07,
"loss": 0.8627,
"step": 40
},
{
"epoch": 0.8913043478260869,
"grad_norm": 7.181187629699707,
"learning_rate": 2.5958610759736133e-07,
"loss": 0.8084,
"step": 41
},
{
"epoch": 0.9130434782608695,
"grad_norm": 7.831035137176514,
"learning_rate": 1.8124387224476347e-07,
"loss": 0.8626,
"step": 42
},
{
"epoch": 0.9347826086956522,
"grad_norm": 8.75483512878418,
"learning_rate": 1.1650901986267365e-07,
"loss": 1.0262,
"step": 43
},
{
"epoch": 0.9565217391304348,
"grad_norm": 6.860268592834473,
"learning_rate": 6.576144030555259e-08,
"loss": 0.9124,
"step": 44
},
{
"epoch": 0.9782608695652174,
"grad_norm": 8.375471115112305,
"learning_rate": 2.9298940549128962e-08,
"loss": 0.9084,
"step": 45
},
{
"epoch": 1.0,
"grad_norm": 9.354442596435547,
"learning_rate": 7.335497040648898e-09,
"loss": 1.0854,
"step": 46
}
],
"logging_steps": 1,
"max_steps": 46,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 514794267553536.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
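The log above is the standard Hugging Face Trainer state for a single-epoch run (46 optimizer steps, per-device train batch size 4, logging every step). The learning_rate column appears consistent with a linear warmup to a peak of 5e-06 over the first 5 steps followed by cosine decay over the remaining steps. The sketch below (Python, not part of the uploaded file) reads the log back and compares the logged learning rates against that reconstructed schedule; the peak LR, warmup length, and total-step count are inferred from the logged values, not taken from a stored training config.

# Minimal sketch (an assumption-based reconstruction, not the training script):
# load trainer_state.json and compare each logged learning rate against a
# cosine-with-linear-warmup schedule. peak_lr=5e-06, warmup=5, and total=46
# are inferred from the values in log_history above.
import json
import math

with open("trainer_state.json") as f:
    state = json.load(f)

def inferred_lr(step, peak_lr=5e-06, warmup=5, total=46):
    # The value logged at `step` is the LR after `step - 1` scheduler updates.
    current = step - 1
    if current < warmup:
        return peak_lr * current / warmup              # linear warmup: 0 -> peak
    progress = (current - warmup) / (total - warmup)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay

for entry in state["log_history"]:
    step, logged = entry["step"], entry["learning_rate"]
    print(f"step {step:2d}  loss {entry['loss']:.4f}  "
          f"lr logged {logged:.3e}  reconstructed {inferred_lr(step):.3e}")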