XinYuan-Qwen2-7B / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9999310202110782,
"eval_steps": 500,
"global_step": 453,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022073532454990687,
"grad_norm": 1.114385199189024,
"learning_rate": 1.0869565217391305e-05,
"loss": 0.8555,
"step": 10
},
{
"epoch": 0.044147064909981375,
"grad_norm": 0.4068262714558307,
"learning_rate": 2.173913043478261e-05,
"loss": 0.7997,
"step": 20
},
{
"epoch": 0.06622059736497206,
"grad_norm": 0.38632787233483873,
"learning_rate": 3.260869565217392e-05,
"loss": 0.7758,
"step": 30
},
{
"epoch": 0.08829412981996275,
"grad_norm": 0.33047790751954587,
"learning_rate": 4.347826086956522e-05,
"loss": 0.7616,
"step": 40
},
{
"epoch": 0.11036766227495344,
"grad_norm": 1.0326147215252546,
"learning_rate": 4.9988084660498037e-05,
"loss": 0.7609,
"step": 50
},
{
"epoch": 0.13244119472994412,
"grad_norm": 0.36413735150639703,
"learning_rate": 4.985416749673074e-05,
"loss": 0.7677,
"step": 60
},
{
"epoch": 0.1545147271849348,
"grad_norm": 0.23884890145764054,
"learning_rate": 4.957223915853709e-05,
"loss": 0.7558,
"step": 70
},
{
"epoch": 0.1765882596399255,
"grad_norm": 0.35792840187365693,
"learning_rate": 4.9143978581429445e-05,
"loss": 0.7492,
"step": 80
},
{
"epoch": 0.1986617920949162,
"grad_norm": 0.24556724333359267,
"learning_rate": 4.857193613652711e-05,
"loss": 0.7497,
"step": 90
},
{
"epoch": 0.22073532454990688,
"grad_norm": 0.2087162197069783,
"learning_rate": 4.78595184426236e-05,
"loss": 0.7433,
"step": 100
},
{
"epoch": 0.24280885700489757,
"grad_norm": 0.30705087020634647,
"learning_rate": 4.7010968079140294e-05,
"loss": 0.744,
"step": 110
},
{
"epoch": 0.26488238945988823,
"grad_norm": 0.2021404929324958,
"learning_rate": 4.6031338320779534e-05,
"loss": 0.7378,
"step": 120
},
{
"epoch": 0.28695592191487895,
"grad_norm": 0.3679260523738371,
"learning_rate": 4.492646304433711e-05,
"loss": 0.7388,
"step": 130
},
{
"epoch": 0.3090294543698696,
"grad_norm": 0.2575945484350903,
"learning_rate": 4.3702921986884574e-05,
"loss": 0.7301,
"step": 140
},
{
"epoch": 0.33110298682486033,
"grad_norm": 0.1987923782298564,
"learning_rate": 4.236800156221536e-05,
"loss": 0.7407,
"step": 150
},
{
"epoch": 0.353176519279851,
"grad_norm": 0.22541496226647023,
"learning_rate": 4.092965146890002e-05,
"loss": 0.7319,
"step": 160
},
{
"epoch": 0.3752500517348417,
"grad_norm": 0.1903443234063274,
"learning_rate": 3.9396437348357684e-05,
"loss": 0.7292,
"step": 170
},
{
"epoch": 0.3973235841898324,
"grad_norm": 0.2786630443541303,
"learning_rate": 3.777748977487366e-05,
"loss": 0.7488,
"step": 180
},
{
"epoch": 0.41939711664482304,
"grad_norm": 0.22941720588230605,
"learning_rate": 3.608244988133713e-05,
"loss": 0.7305,
"step": 190
},
{
"epoch": 0.44147064909981376,
"grad_norm": 0.2244920841625261,
"learning_rate": 3.432141194450772e-05,
"loss": 0.7329,
"step": 200
},
{
"epoch": 0.4635441815548044,
"grad_norm": 0.20271098423902761,
"learning_rate": 3.2504863271726286e-05,
"loss": 0.729,
"step": 210
},
{
"epoch": 0.48561771400979514,
"grad_norm": 0.21017591515648715,
"learning_rate": 3.064362174705578e-05,
"loss": 0.7392,
"step": 220
},
{
"epoch": 0.5076912464647858,
"grad_norm": 0.16688378969283219,
"learning_rate": 2.8748771408776466e-05,
"loss": 0.7323,
"step": 230
},
{
"epoch": 0.5297647789197765,
"grad_norm": 0.18531081231895546,
"learning_rate": 2.683159644188339e-05,
"loss": 0.7391,
"step": 240
},
{
"epoch": 0.5518383113747672,
"grad_norm": 0.1949891552768074,
"learning_rate": 2.4903513978673077e-05,
"loss": 0.7304,
"step": 250
},
{
"epoch": 0.5739118438297579,
"grad_norm": 0.1847538114841563,
"learning_rate": 2.2976006107604482e-05,
"loss": 0.7248,
"step": 260
},
{
"epoch": 0.5959853762847486,
"grad_norm": 0.17808591988598846,
"learning_rate": 2.1060551495333818e-05,
"loss": 0.7361,
"step": 270
},
{
"epoch": 0.6180589087397392,
"grad_norm": 0.15634167603442348,
"learning_rate": 1.9168557029126963e-05,
"loss": 0.7205,
"step": 280
},
{
"epoch": 0.6401324411947299,
"grad_norm": 0.16800926021112392,
"learning_rate": 1.7311289886731408e-05,
"loss": 0.7215,
"step": 290
},
{
"epoch": 0.6622059736497207,
"grad_norm": 0.15374716011431305,
"learning_rate": 1.549981043824425e-05,
"loss": 0.7204,
"step": 300
},
{
"epoch": 0.6842795061047113,
"grad_norm": 0.1358494068080939,
"learning_rate": 1.3744906379558165e-05,
"loss": 0.7217,
"step": 310
},
{
"epoch": 0.706353038559702,
"grad_norm": 0.1598338460219363,
"learning_rate": 1.2057028489632682e-05,
"loss": 0.7288,
"step": 320
},
{
"epoch": 0.7284265710146927,
"grad_norm": 0.1552517836620765,
"learning_rate": 1.0446228394168356e-05,
"loss": 0.7128,
"step": 330
},
{
"epoch": 0.7505001034696834,
"grad_norm": 0.1363267921768715,
"learning_rate": 8.922098706312548e-06,
"loss": 0.7212,
"step": 340
},
{
"epoch": 0.7725736359246741,
"grad_norm": 0.1417947472753852,
"learning_rate": 7.493715900870027e-06,
"loss": 0.7109,
"step": 350
},
{
"epoch": 0.7946471683796648,
"grad_norm": 0.13261081741759453,
"learning_rate": 6.169586262213081e-06,
"loss": 0.7206,
"step": 360
},
{
"epoch": 0.8167207008346554,
"grad_norm": 0.13329678652766908,
"learning_rate": 4.957595227781395e-06,
"loss": 0.7166,
"step": 370
},
{
"epoch": 0.8387942332896461,
"grad_norm": 0.13079371602746745,
"learning_rate": 3.864960428840375e-06,
"loss": 0.7095,
"step": 380
},
{
"epoch": 0.8608677657446369,
"grad_norm": 0.12980162903604736,
"learning_rate": 2.8981887081491576e-06,
"loss": 0.7088,
"step": 390
},
{
"epoch": 0.8829412981996275,
"grad_norm": 0.12204737322722421,
"learning_rate": 2.0630373705058407e-06,
"loss": 0.7189,
"step": 400
},
{
"epoch": 0.9050148306546182,
"grad_norm": 0.12227603852992606,
"learning_rate": 1.3644798969302403e-06,
"loss": 0.7215,
"step": 410
},
{
"epoch": 0.9270883631096088,
"grad_norm": 0.12433131124525597,
"learning_rate": 8.066763266625282e-07,
"loss": 0.7155,
"step": 420
},
{
"epoch": 0.9491618955645996,
"grad_norm": 0.12167710585090329,
"learning_rate": 3.929484833584546e-07,
"loss": 0.7076,
"step": 430
},
{
"epoch": 0.9712354280195903,
"grad_norm": 0.12190898417393506,
"learning_rate": 1.2576019301373532e-07,
"loss": 0.7176,
"step": 440
},
{
"epoch": 0.993308960474581,
"grad_norm": 0.1232187443009331,
"learning_rate": 6.702611423550775e-09,
"loss": 0.7082,
"step": 450
},
{
"epoch": 0.9999310202110782,
"step": 453,
"total_flos": 3799534094254080.0,
"train_loss": 0.7359120230011592,
"train_runtime": 173906.7289,
"train_samples_per_second": 1.334,
"train_steps_per_second": 0.003
}
],
"logging_steps": 10,
"max_steps": 453,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3799534094254080.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}