{ "best_metric": 2.5200698375701904, "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/nw_egy/checkpoint-1326", "epoch": 3.0, "eval_steps": 500, "global_step": 1326, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 1.6992121934890747, "learning_rate": 4.4200000000000004e-05, "loss": 4.0723, "step": 442 }, { "epoch": 1.0, "eval_bleu": 0.22275841970876573, "eval_loss": 2.958087921142578, "eval_rouge1": 0.3912678429308313, "eval_rouge2": 0.16481568948149655, "eval_rougeL": 0.32008248461173605, "eval_runtime": 28.7079, "eval_samples_per_second": 30.793, "eval_steps_per_second": 3.867, "step": 442 }, { "epoch": 2.0, "grad_norm": 1.676450252532959, "learning_rate": 2.6755447941888623e-05, "loss": 2.8055, "step": 884 }, { "epoch": 2.0, "eval_bleu": 0.24154624358915514, "eval_loss": 2.606330633163452, "eval_rouge1": 0.43470628711649895, "eval_rouge2": 0.19710102219495218, "eval_rougeL": 0.3776869994743499, "eval_runtime": 28.9632, "eval_samples_per_second": 30.521, "eval_steps_per_second": 3.832, "step": 884 }, { "epoch": 3.0, "grad_norm": 1.5551148653030396, "learning_rate": 0.0, "loss": 2.5505, "step": 1326 }, { "epoch": 3.0, "eval_bleu": 0.25109238074220425, "eval_loss": 2.5200698375701904, "eval_rouge1": 0.4533940426786972, "eval_rouge2": 0.21369906854716414, "eval_rougeL": 0.39838507960838365, "eval_runtime": 29.2852, "eval_samples_per_second": 30.186, "eval_steps_per_second": 3.79, "step": 1326 }, { "epoch": 3.0, "step": 1326, "total_flos": 4151799742464000.0, "train_loss": 3.1427729219692684, "train_runtime": 589.6608, "train_samples_per_second": 17.965, "train_steps_per_second": 2.249 } ], "logging_steps": 500, "max_steps": 1326, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 3, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 4151799742464000.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }