{
"best_metric": 0.012702367268502712,
"best_model_checkpoint": "./vit-base-beans/checkpoint-200",
"epoch": 4.0,
"eval_steps": 100,
"global_step": 260,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15,
"grad_norm": 1.5160256624221802,
"learning_rate": 0.00019230769230769233,
"loss": 0.8461,
"step": 10
},
{
"epoch": 0.31,
"grad_norm": 1.1838645935058594,
"learning_rate": 0.00018461538461538463,
"loss": 0.3715,
"step": 20
},
{
"epoch": 0.46,
"grad_norm": 1.8937193155288696,
"learning_rate": 0.00017692307692307693,
"loss": 0.354,
"step": 30
},
{
"epoch": 0.62,
"grad_norm": 1.1941872835159302,
"learning_rate": 0.00016923076923076923,
"loss": 0.2516,
"step": 40
},
{
"epoch": 0.77,
"grad_norm": 1.9662144184112549,
"learning_rate": 0.00016153846153846155,
"loss": 0.1664,
"step": 50
},
{
"epoch": 0.92,
"grad_norm": 8.811502456665039,
"learning_rate": 0.00015384615384615385,
"loss": 0.2074,
"step": 60
},
{
"epoch": 1.08,
"grad_norm": 0.2282760590314865,
"learning_rate": 0.00014615384615384615,
"loss": 0.0974,
"step": 70
},
{
"epoch": 1.23,
"grad_norm": 0.1650390475988388,
"learning_rate": 0.00013846153846153847,
"loss": 0.1168,
"step": 80
},
{
"epoch": 1.38,
"grad_norm": 2.3822529315948486,
"learning_rate": 0.00013076923076923077,
"loss": 0.0462,
"step": 90
},
{
"epoch": 1.54,
"grad_norm": 0.11838462203741074,
"learning_rate": 0.0001230769230769231,
"loss": 0.0666,
"step": 100
},
{
"epoch": 1.54,
"eval_accuracy": 0.9924812030075187,
"eval_loss": 0.032394178211688995,
"eval_runtime": 1.7903,
"eval_samples_per_second": 74.288,
"eval_steps_per_second": 9.496,
"step": 100
},
{
"epoch": 1.69,
"grad_norm": 3.3664941787719727,
"learning_rate": 0.00011538461538461538,
"loss": 0.0599,
"step": 110
},
{
"epoch": 1.85,
"grad_norm": 0.16068458557128906,
"learning_rate": 0.0001076923076923077,
"loss": 0.0993,
"step": 120
},
{
"epoch": 2.0,
"grad_norm": 0.11501188576221466,
"learning_rate": 0.0001,
"loss": 0.071,
"step": 130
},
{
"epoch": 2.15,
"grad_norm": 0.16083480417728424,
"learning_rate": 9.230769230769232e-05,
"loss": 0.0664,
"step": 140
},
{
"epoch": 2.31,
"grad_norm": 0.5593019127845764,
"learning_rate": 8.461538461538461e-05,
"loss": 0.0469,
"step": 150
},
{
"epoch": 2.46,
"grad_norm": 1.1808232069015503,
"learning_rate": 7.692307692307693e-05,
"loss": 0.0184,
"step": 160
},
{
"epoch": 2.62,
"grad_norm": 0.12332643568515778,
"learning_rate": 6.923076923076924e-05,
"loss": 0.0301,
"step": 170
},
{
"epoch": 2.77,
"grad_norm": 0.06919372081756592,
"learning_rate": 6.153846153846155e-05,
"loss": 0.0414,
"step": 180
},
{
"epoch": 2.92,
"grad_norm": 0.06933612376451492,
"learning_rate": 5.384615384615385e-05,
"loss": 0.0137,
"step": 190
},
{
"epoch": 3.08,
"grad_norm": 0.06432632356882095,
"learning_rate": 4.615384615384616e-05,
"loss": 0.0164,
"step": 200
},
{
"epoch": 3.08,
"eval_accuracy": 1.0,
"eval_loss": 0.012702367268502712,
"eval_runtime": 1.7615,
"eval_samples_per_second": 75.505,
"eval_steps_per_second": 9.651,
"step": 200
},
{
"epoch": 3.23,
"grad_norm": 0.05967758223414421,
"learning_rate": 3.846153846153846e-05,
"loss": 0.0122,
"step": 210
},
{
"epoch": 3.38,
"grad_norm": 0.06129758059978485,
"learning_rate": 3.0769230769230774e-05,
"loss": 0.0118,
"step": 220
},
{
"epoch": 3.54,
"grad_norm": 0.06324906647205353,
"learning_rate": 2.307692307692308e-05,
"loss": 0.0114,
"step": 230
},
{
"epoch": 3.69,
"grad_norm": 0.0768633708357811,
"learning_rate": 1.5384615384615387e-05,
"loss": 0.0114,
"step": 240
},
{
"epoch": 3.85,
"grad_norm": 0.05912179872393608,
"learning_rate": 7.692307692307694e-06,
"loss": 0.011,
"step": 250
},
{
"epoch": 4.0,
"grad_norm": 0.05486920475959778,
"learning_rate": 0.0,
"loss": 0.0111,
"step": 260
},
{
"epoch": 4.0,
"step": 260,
"total_flos": 3.205097416476426e+17,
"train_loss": 0.11755287733215553,
"train_runtime": 95.4129,
"train_samples_per_second": 43.348,
"train_steps_per_second": 2.725
}
],
"logging_steps": 10,
"max_steps": 260,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 100,
"total_flos": 3.205097416476426e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}