easylm-helpsteer-rm-gemma-2-2b / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1605,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03115264797507788,
"grad_norm": 57.86160659790039,
"learning_rate": 9.976073288648913e-07,
"loss": 0.7289,
"step": 50
},
{
"epoch": 0.06230529595015576,
"grad_norm": 60.39614486694336,
"learning_rate": 9.904522149602086e-07,
"loss": 0.6972,
"step": 100
},
{
"epoch": 0.09345794392523364,
"grad_norm": 151.33900451660156,
"learning_rate": 9.78603137623984e-07,
"loss": 0.5746,
"step": 150
},
{
"epoch": 0.12461059190031153,
"grad_norm": 52.87775802612305,
"learning_rate": 9.621735006374983e-07,
"loss": 0.4166,
"step": 200
},
{
"epoch": 0.1557632398753894,
"grad_norm": 50.08223342895508,
"learning_rate": 9.413205468734626e-07,
"loss": 0.4558,
"step": 250
},
{
"epoch": 0.18691588785046728,
"grad_norm": 517.3528442382812,
"learning_rate": 9.162438533740891e-07,
"loss": 0.3246,
"step": 300
},
{
"epoch": 0.21806853582554517,
"grad_norm": 1556.693359375,
"learning_rate": 8.871834212621772e-07,
"loss": 0.4801,
"step": 350
},
{
"epoch": 0.24922118380062305,
"grad_norm": 16.822763442993164,
"learning_rate": 8.544173787660787e-07,
"loss": 0.2867,
"step": 400
},
{
"epoch": 0.2803738317757009,
"grad_norm": 2.0897529125213623,
"learning_rate": 8.182593193421624e-07,
"loss": 0.4674,
"step": 450
},
{
"epoch": 0.3115264797507788,
"grad_norm": 19.26500129699707,
"learning_rate": 7.790553003707689e-07,
"loss": 0.5003,
"step": 500
},
{
"epoch": 0.3115264797507788,
"eval_accuracy": 0.6294117647058823,
"eval_loss": 2.713998317718506,
"eval_runtime": 9.3954,
"eval_samples_per_second": 36.188,
"eval_steps_per_second": 9.047,
"step": 500
},
{
"epoch": 0.3426791277258567,
"grad_norm": 0.8728576898574829,
"learning_rate": 7.371805311501904e-07,
"loss": 0.5136,
"step": 550
},
{
"epoch": 0.37383177570093457,
"grad_norm": 167.8850860595703,
"learning_rate": 6.930357818868407e-07,
"loss": 0.2441,
"step": 600
},
{
"epoch": 0.40498442367601245,
"grad_norm": 177.43258666992188,
"learning_rate": 6.470435480500362e-07,
"loss": 0.4716,
"step": 650
},
{
"epoch": 0.43613707165109034,
"grad_norm": 289.399658203125,
"learning_rate": 5.996440068011383e-07,
"loss": 0.3897,
"step": 700
},
{
"epoch": 0.4672897196261682,
"grad_norm": 787.5913696289062,
"learning_rate": 5.512908041968018e-07,
"loss": 0.4029,
"step": 750
},
{
"epoch": 0.4984423676012461,
"grad_norm": 1.4492032960333745e-06,
"learning_rate": 5.024467134856725e-07,
"loss": 0.3285,
"step": 800
},
{
"epoch": 0.5295950155763239,
"grad_norm": 3.293706356544135e-07,
"learning_rate": 4.5357920605161114e-07,
"loss": 0.435,
"step": 850
},
{
"epoch": 0.5607476635514018,
"grad_norm": 1.7141993045806885,
"learning_rate": 4.0515597739254617e-07,
"loss": 0.3973,
"step": 900
},
{
"epoch": 0.5919003115264797,
"grad_norm": 2.922891617629375e-08,
"learning_rate": 3.576404709544031e-07,
"loss": 0.2163,
"step": 950
},
{
"epoch": 0.6230529595015576,
"grad_norm": 560.4365844726562,
"learning_rate": 3.114874426600802e-07,
"loss": 0.3939,
"step": 1000
},
{
"epoch": 0.6230529595015576,
"eval_accuracy": 0.65,
"eval_loss": 3.343003988265991,
"eval_runtime": 8.9452,
"eval_samples_per_second": 38.009,
"eval_steps_per_second": 9.502,
"step": 1000
},
{
"epoch": 0.6542056074766355,
"grad_norm": 0.0010744519531726837,
"learning_rate": 2.671386085839682e-07,
"loss": 0.2876,
"step": 1050
},
{
"epoch": 0.6853582554517134,
"grad_norm": 2111.46875,
"learning_rate": 2.2501841742674588e-07,
"loss": 0.3456,
"step": 1100
},
{
"epoch": 0.7165109034267912,
"grad_norm": 0.022361410781741142,
"learning_rate": 1.855299882507616e-07,
"loss": 0.3517,
"step": 1150
},
{
"epoch": 0.7476635514018691,
"grad_norm": 2.9614204422045987e-08,
"learning_rate": 1.490512523546559e-07,
"loss": 0.4218,
"step": 1200
},
{
"epoch": 0.778816199376947,
"grad_norm": 1.0811158546175648e-07,
"learning_rate": 1.1593133621212453e-07,
"loss": 0.2904,
"step": 1250
},
{
"epoch": 0.8099688473520249,
"grad_norm": 1.012383222579956,
"learning_rate": 8.648722009257315e-08,
"loss": 0.4572,
"step": 1300
},
{
"epoch": 0.8411214953271028,
"grad_norm": 3.705447113588889e-07,
"learning_rate": 6.10007043429538e-08,
"loss": 0.187,
"step": 1350
},
{
"epoch": 0.8722741433021807,
"grad_norm": 9.330016837338917e-06,
"learning_rate": 3.971571236554117e-08,
"loss": 0.1274,
"step": 1400
},
{
"epoch": 0.9034267912772586,
"grad_norm": 0.08515036106109619,
"learning_rate": 2.2835956103996524e-08,
"loss": 0.2892,
"step": 1450
},
{
"epoch": 0.9345794392523364,
"grad_norm": 0.008006788790225983,
"learning_rate": 1.0522986380618604e-08,
"loss": 0.065,
"step": 1500
},
{
"epoch": 0.9345794392523364,
"eval_accuracy": 0.6323529411764706,
"eval_loss": 2.9739811420440674,
"eval_runtime": 8.9019,
"eval_samples_per_second": 38.194,
"eval_steps_per_second": 9.549,
"step": 1500
},
{
"epoch": 0.9657320872274143,
"grad_norm": 1.7190651533383061e-06,
"learning_rate": 2.894646744385887e-09,
"loss": 0.288,
"step": 1550
},
{
"epoch": 0.9968847352024922,
"grad_norm": 909.2741088867188,
"learning_rate": 2.3945627523891398e-11,
"loss": 0.1994,
"step": 1600
},
{
"epoch": 1.0,
"step": 1605,
"total_flos": 0.0,
"train_loss": 0.3749335529650852,
"train_runtime": 2207.4532,
"train_samples_per_second": 2.908,
"train_steps_per_second": 0.727
}
],
"logging_steps": 50,
"max_steps": 1605,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
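
The file above is the standard `trainer_state.json` that the Hugging Face `Trainer` writes next to its checkpoints. A minimal sketch for inspecting it offline, assuming the file has been downloaded locally (the path is a placeholder; the key names follow the JSON shown above):

```python
import json

# Placeholder path: point this at a local copy of the trainer_state.json above.
STATE_PATH = "trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Each log_history entry is either a training log (has "loss"),
# an evaluation log (has "eval_accuracy"), or the final run summary
# (has "train_runtime" / "train_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]

print(f"logged training steps: {len(train_logs)} (every {state['logging_steps']} steps)")
print(f"last logged train loss: {train_logs[-1]['loss']}")

for e in eval_logs:
    print(f"step {e['step']:>5}: eval_accuracy={e['eval_accuracy']:.4f}, "
          f"eval_loss={e['eval_loss']:.4f}")
```

Per the entries above, this run logs every 50 steps, evaluates every 500 steps, and reaches its best eval_accuracy of 0.65 at step 1000.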