{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 52,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.038461538461538464,
"grad_norm": 20.85754319226794,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.2667,
"step": 1
},
{
"epoch": 0.07692307692307693,
"grad_norm": 24.246870301808055,
"learning_rate": 6.666666666666667e-06,
"loss": 1.3048,
"step": 2
},
{
"epoch": 0.11538461538461539,
"grad_norm": 19.58236853178882,
"learning_rate": 1e-05,
"loss": 1.1097,
"step": 3
},
{
"epoch": 0.15384615384615385,
"grad_norm": 30.55885846320602,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.9511,
"step": 4
},
{
"epoch": 0.19230769230769232,
"grad_norm": 20.08985298506213,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.7813,
"step": 5
},
{
"epoch": 0.23076923076923078,
"grad_norm": 14.162855282317404,
"learning_rate": 2e-05,
"loss": 0.6916,
"step": 6
},
{
"epoch": 0.2692307692307692,
"grad_norm": 20.028553980740814,
"learning_rate": 1.9976687691905394e-05,
"loss": 0.6448,
"step": 7
},
{
"epoch": 0.3076923076923077,
"grad_norm": 29.62124108104504,
"learning_rate": 1.9906859460363307e-05,
"loss": 0.8493,
"step": 8
},
{
"epoch": 0.34615384615384615,
"grad_norm": 50.58398328205813,
"learning_rate": 1.979084087682323e-05,
"loss": 0.9227,
"step": 9
},
{
"epoch": 0.38461538461538464,
"grad_norm": 3.960885136166706,
"learning_rate": 1.9629172873477995e-05,
"loss": 0.495,
"step": 10
},
{
"epoch": 0.4230769230769231,
"grad_norm": 4.1877316087885035,
"learning_rate": 1.9422609221188208e-05,
"loss": 0.495,
"step": 11
},
{
"epoch": 0.46153846153846156,
"grad_norm": 4.998304642102752,
"learning_rate": 1.917211301505453e-05,
"loss": 0.4478,
"step": 12
},
{
"epoch": 0.5,
"grad_norm": 2.4596034942030065,
"learning_rate": 1.8878852184023754e-05,
"loss": 0.4657,
"step": 13
},
{
"epoch": 0.5384615384615384,
"grad_norm": 1.6865528982507465,
"learning_rate": 1.8544194045464888e-05,
"loss": 0.4078,
"step": 14
},
{
"epoch": 0.5769230769230769,
"grad_norm": 1.7910809046492557,
"learning_rate": 1.816969893010442e-05,
"loss": 0.327,
"step": 15
},
{
"epoch": 0.6153846153846154,
"grad_norm": 1.8011895949239238,
"learning_rate": 1.77571129070442e-05,
"loss": 0.3845,
"step": 16
},
{
"epoch": 0.6538461538461539,
"grad_norm": 1.5712375007924961,
"learning_rate": 1.730835964278124e-05,
"loss": 0.3304,
"step": 17
},
{
"epoch": 0.6923076923076923,
"grad_norm": 1.2482964114523387,
"learning_rate": 1.6825531432186545e-05,
"loss": 0.3108,
"step": 18
},
{
"epoch": 0.7307692307692307,
"grad_norm": 1.3162781011551596,
"learning_rate": 1.631087944326053e-05,
"loss": 0.299,
"step": 19
},
{
"epoch": 0.7692307692307693,
"grad_norm": 1.2534379605750423,
"learning_rate": 1.5766803221148676e-05,
"loss": 0.2795,
"step": 20
},
{
"epoch": 0.8076923076923077,
"grad_norm": 1.171621258475382,
"learning_rate": 1.5195839500354337e-05,
"loss": 0.2945,
"step": 21
},
{
"epoch": 0.8461538461538461,
"grad_norm": 1.3353357877056748,
"learning_rate": 1.4600650377311523e-05,
"loss": 0.3072,
"step": 22
},
{
"epoch": 0.8846153846153846,
"grad_norm": 1.3675590758455805,
"learning_rate": 1.3984010898462417e-05,
"loss": 0.3407,
"step": 23
},
{
"epoch": 0.9230769230769231,
"grad_norm": 1.1353873052293306,
"learning_rate": 1.3348796121709862e-05,
"loss": 0.2404,
"step": 24
},
{
"epoch": 0.9615384615384616,
"grad_norm": 1.1094661531678627,
"learning_rate": 1.2697967711570243e-05,
"loss": 0.2822,
"step": 25
},
{
"epoch": 1.0,
"grad_norm": 1.0173771864310912,
"learning_rate": 1.2034560130526341e-05,
"loss": 0.2431,
"step": 26
},
{
"epoch": 1.0384615384615385,
"grad_norm": 0.9157192989332598,
"learning_rate": 1.1361666490962468e-05,
"loss": 0.1622,
"step": 27
},
{
"epoch": 1.0769230769230769,
"grad_norm": 1.170626036383389,
"learning_rate": 1.0682424133646712e-05,
"loss": 0.201,
"step": 28
},
{
"epoch": 1.1153846153846154,
"grad_norm": 1.2399773954594682,
"learning_rate": 1e-05,
"loss": 0.2164,
"step": 29
},
{
"epoch": 1.1538461538461537,
"grad_norm": 1.1889650956121858,
"learning_rate": 9.317575866353293e-06,
"loss": 0.2319,
"step": 30
},
{
"epoch": 1.1923076923076923,
"grad_norm": 0.8825986428496552,
"learning_rate": 8.638333509037537e-06,
"loss": 0.1528,
"step": 31
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.8783022286148305,
"learning_rate": 7.965439869473664e-06,
"loss": 0.1559,
"step": 32
},
{
"epoch": 1.2692307692307692,
"grad_norm": 0.8516056967052127,
"learning_rate": 7.3020322884297565e-06,
"loss": 0.1342,
"step": 33
},
{
"epoch": 1.3076923076923077,
"grad_norm": 0.936682816255717,
"learning_rate": 6.651203878290139e-06,
"loss": 0.1363,
"step": 34
},
{
"epoch": 1.3461538461538463,
"grad_norm": 0.8791979974362931,
"learning_rate": 6.015989101537586e-06,
"loss": 0.1312,
"step": 35
},
{
"epoch": 1.3846153846153846,
"grad_norm": 1.015695664235406,
"learning_rate": 5.399349622688479e-06,
"loss": 0.1569,
"step": 36
},
{
"epoch": 1.4230769230769231,
"grad_norm": 0.9985258223852007,
"learning_rate": 4.804160499645667e-06,
"loss": 0.1861,
"step": 37
},
{
"epoch": 1.4615384615384617,
"grad_norm": 0.7719062123826321,
"learning_rate": 4.2331967788513295e-06,
"loss": 0.1337,
"step": 38
},
{
"epoch": 1.5,
"grad_norm": 0.8094923832822339,
"learning_rate": 3.689120556739475e-06,
"loss": 0.1124,
"step": 39
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.9478634316121923,
"learning_rate": 3.174468567813461e-06,
"loss": 0.1355,
"step": 40
},
{
"epoch": 1.5769230769230769,
"grad_norm": 0.8471602530287855,
"learning_rate": 2.691640357218759e-06,
"loss": 0.1413,
"step": 41
},
{
"epoch": 1.6153846153846154,
"grad_norm": 0.8304448666983464,
"learning_rate": 2.2428870929558012e-06,
"loss": 0.1577,
"step": 42
},
{
"epoch": 1.6538461538461537,
"grad_norm": 0.7703268512000083,
"learning_rate": 1.8303010698955803e-06,
"loss": 0.128,
"step": 43
},
{
"epoch": 1.6923076923076923,
"grad_norm": 0.729939761686708,
"learning_rate": 1.4558059545351144e-06,
"loss": 0.1228,
"step": 44
},
{
"epoch": 1.7307692307692308,
"grad_norm": 0.8254947214606744,
"learning_rate": 1.121147815976248e-06,
"loss": 0.1437,
"step": 45
},
{
"epoch": 1.7692307692307692,
"grad_norm": 0.9952352246915486,
"learning_rate": 8.278869849454718e-07,
"loss": 0.1767,
"step": 46
},
{
"epoch": 1.8076923076923077,
"grad_norm": 0.779109190144192,
"learning_rate": 5.77390778811796e-07,
"loss": 0.1246,
"step": 47
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.6748859845108214,
"learning_rate": 3.708271265220087e-07,
"loss": 0.099,
"step": 48
},
{
"epoch": 1.8846153846153846,
"grad_norm": 0.782405714927644,
"learning_rate": 2.091591231767709e-07,
"loss": 0.1078,
"step": 49
},
{
"epoch": 1.9230769230769231,
"grad_norm": 0.9311280352392743,
"learning_rate": 9.314053963669245e-08,
"loss": 0.156,
"step": 50
},
{
"epoch": 1.9615384615384617,
"grad_norm": 0.8677277651118928,
"learning_rate": 2.3312308094607382e-08,
"loss": 0.1369,
"step": 51
},
{
"epoch": 2.0,
"grad_norm": 0.616841312096149,
"learning_rate": 0.0,
"loss": 0.1033,
"step": 52
},
{
"epoch": 2.0,
"step": 52,
"total_flos": 2404393205760.0,
"train_loss": 0.3522481211962608,
"train_runtime": 190.3027,
"train_samples_per_second": 4.288,
"train_steps_per_second": 0.273
}
],
"logging_steps": 1,
"max_steps": 52,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2404393205760.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}