{ "best_metric": 0.9359054565429688, "best_model_checkpoint": "data/Gemma-2-2B_task-3_120-samples_config-1_full_auto/checkpoint-77", "epoch": 14.0, "eval_steps": 500, "global_step": 154, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.09090909090909091, "grad_norm": 0.44340184330940247, "learning_rate": 1.818181818181818e-06, "loss": 1.2873, "step": 1 }, { "epoch": 0.18181818181818182, "grad_norm": 0.5295612215995789, "learning_rate": 3.636363636363636e-06, "loss": 1.5104, "step": 2 }, { "epoch": 0.36363636363636365, "grad_norm": 0.41922619938850403, "learning_rate": 7.272727272727272e-06, "loss": 1.3424, "step": 4 }, { "epoch": 0.5454545454545454, "grad_norm": 0.45832565426826477, "learning_rate": 1.0909090909090909e-05, "loss": 1.3772, "step": 6 }, { "epoch": 0.7272727272727273, "grad_norm": 0.4448011815547943, "learning_rate": 1.4545454545454545e-05, "loss": 1.43, "step": 8 }, { "epoch": 0.9090909090909091, "grad_norm": 0.40825021266937256, "learning_rate": 1.8181818181818182e-05, "loss": 1.3429, "step": 10 }, { "epoch": 1.0, "eval_loss": 1.3558740615844727, "eval_runtime": 7.6804, "eval_samples_per_second": 3.125, "eval_steps_per_second": 3.125, "step": 11 }, { "epoch": 1.0909090909090908, "grad_norm": 0.4454409182071686, "learning_rate": 2.1818181818181818e-05, "loss": 1.4164, "step": 12 }, { "epoch": 1.2727272727272727, "grad_norm": 0.3652619421482086, "learning_rate": 2.5454545454545454e-05, "loss": 1.3038, "step": 14 }, { "epoch": 1.4545454545454546, "grad_norm": 0.4184987246990204, "learning_rate": 2.909090909090909e-05, "loss": 1.3385, "step": 16 }, { "epoch": 1.6363636363636362, "grad_norm": 0.35863184928894043, "learning_rate": 3.272727272727273e-05, "loss": 1.3393, "step": 18 }, { "epoch": 1.8181818181818183, "grad_norm": 0.29392459988594055, "learning_rate": 3.6363636363636364e-05, "loss": 1.2529, "step": 20 }, { "epoch": 2.0, "grad_norm": 0.26279276609420776, "learning_rate": 4e-05, "loss": 1.1584, "step": 22 }, { "epoch": 2.0, "eval_loss": 1.2111910581588745, "eval_runtime": 7.4943, "eval_samples_per_second": 3.202, "eval_steps_per_second": 3.202, "step": 22 }, { "epoch": 2.1818181818181817, "grad_norm": 0.2251596301794052, "learning_rate": 4.3636363636363636e-05, "loss": 1.1951, "step": 24 }, { "epoch": 2.3636363636363638, "grad_norm": 0.27620360255241394, "learning_rate": 4.7272727272727275e-05, "loss": 1.1708, "step": 26 }, { "epoch": 2.5454545454545454, "grad_norm": 0.25171273946762085, "learning_rate": 5.090909090909091e-05, "loss": 1.1228, "step": 28 }, { "epoch": 2.7272727272727275, "grad_norm": 0.2604583203792572, "learning_rate": 5.4545454545454546e-05, "loss": 1.1486, "step": 30 }, { "epoch": 2.909090909090909, "grad_norm": 0.2893660366535187, "learning_rate": 5.818181818181818e-05, "loss": 1.0941, "step": 32 }, { "epoch": 3.0, "eval_loss": 1.0876060724258423, "eval_runtime": 7.5096, "eval_samples_per_second": 3.196, "eval_steps_per_second": 3.196, "step": 33 }, { "epoch": 3.090909090909091, "grad_norm": 0.26696836948394775, "learning_rate": 6.181818181818182e-05, "loss": 1.0925, "step": 34 }, { "epoch": 3.2727272727272725, "grad_norm": 0.2858625650405884, "learning_rate": 6.545454545454546e-05, "loss": 1.0371, "step": 36 }, { "epoch": 3.4545454545454546, "grad_norm": 0.34232887625694275, "learning_rate": 6.90909090909091e-05, "loss": 1.0255, "step": 38 }, { "epoch": 3.6363636363636362, "grad_norm": 0.2392912358045578, "learning_rate": 7.272727272727273e-05, "loss": 0.9489, "step": 40 }, { 
"epoch": 3.8181818181818183, "grad_norm": 0.4012575149536133, "learning_rate": 7.636363636363637e-05, "loss": 0.9203, "step": 42 }, { "epoch": 4.0, "grad_norm": 0.22504228353500366, "learning_rate": 8e-05, "loss": 1.0134, "step": 44 }, { "epoch": 4.0, "eval_loss": 0.9891138076782227, "eval_runtime": 7.4936, "eval_samples_per_second": 3.203, "eval_steps_per_second": 3.203, "step": 44 }, { "epoch": 4.181818181818182, "grad_norm": 0.25609034299850464, "learning_rate": 8.363636363636364e-05, "loss": 0.9138, "step": 46 }, { "epoch": 4.363636363636363, "grad_norm": 0.2524278461933136, "learning_rate": 8.727272727272727e-05, "loss": 0.9498, "step": 48 }, { "epoch": 4.545454545454545, "grad_norm": 0.20994818210601807, "learning_rate": 9.090909090909092e-05, "loss": 0.8959, "step": 50 }, { "epoch": 4.7272727272727275, "grad_norm": 0.24883906543254852, "learning_rate": 9.454545454545455e-05, "loss": 0.9049, "step": 52 }, { "epoch": 4.909090909090909, "grad_norm": 0.18293549120426178, "learning_rate": 9.818181818181818e-05, "loss": 0.9378, "step": 54 }, { "epoch": 5.0, "eval_loss": 0.9540689587593079, "eval_runtime": 7.5014, "eval_samples_per_second": 3.199, "eval_steps_per_second": 3.199, "step": 55 }, { "epoch": 5.090909090909091, "grad_norm": 0.28788265585899353, "learning_rate": 9.999899300364532e-05, "loss": 0.8829, "step": 56 }, { "epoch": 5.2727272727272725, "grad_norm": 0.2040722519159317, "learning_rate": 9.99909372761763e-05, "loss": 0.9036, "step": 58 }, { "epoch": 5.454545454545454, "grad_norm": 0.2116885632276535, "learning_rate": 9.997482711915927e-05, "loss": 0.8431, "step": 60 }, { "epoch": 5.636363636363637, "grad_norm": 0.21737386286258698, "learning_rate": 9.99506651282272e-05, "loss": 0.8668, "step": 62 }, { "epoch": 5.818181818181818, "grad_norm": 0.21680592000484467, "learning_rate": 9.991845519630678e-05, "loss": 0.8547, "step": 64 }, { "epoch": 6.0, "grad_norm": 0.31425392627716064, "learning_rate": 9.987820251299122e-05, "loss": 0.8949, "step": 66 }, { "epoch": 6.0, "eval_loss": 0.9391613602638245, "eval_runtime": 7.508, "eval_samples_per_second": 3.197, "eval_steps_per_second": 3.197, "step": 66 }, { "epoch": 6.181818181818182, "grad_norm": 0.25037598609924316, "learning_rate": 9.982991356370404e-05, "loss": 0.8083, "step": 68 }, { "epoch": 6.363636363636363, "grad_norm": 0.22885167598724365, "learning_rate": 9.977359612865423e-05, "loss": 0.8632, "step": 70 }, { "epoch": 6.545454545454545, "grad_norm": 0.25459662079811096, "learning_rate": 9.970925928158274e-05, "loss": 0.7854, "step": 72 }, { "epoch": 6.7272727272727275, "grad_norm": 0.25617337226867676, "learning_rate": 9.963691338830044e-05, "loss": 0.8213, "step": 74 }, { "epoch": 6.909090909090909, "grad_norm": 0.2213735282421112, "learning_rate": 9.955657010501806e-05, "loss": 0.8546, "step": 76 }, { "epoch": 7.0, "eval_loss": 0.9359054565429688, "eval_runtime": 7.4985, "eval_samples_per_second": 3.201, "eval_steps_per_second": 3.201, "step": 77 }, { "epoch": 7.090909090909091, "grad_norm": 0.22576187551021576, "learning_rate": 9.946824237646824e-05, "loss": 0.8968, "step": 78 }, { "epoch": 7.2727272727272725, "grad_norm": 0.2886678874492645, "learning_rate": 9.937194443381972e-05, "loss": 0.7526, "step": 80 }, { "epoch": 7.454545454545454, "grad_norm": 0.27642518281936646, "learning_rate": 9.926769179238466e-05, "loss": 0.7545, "step": 82 }, { "epoch": 7.636363636363637, "grad_norm": 0.48505523800849915, "learning_rate": 9.915550124911866e-05, "loss": 0.7889, "step": 84 }, { "epoch": 7.818181818181818, "grad_norm": 
0.3221338987350464, "learning_rate": 9.903539087991462e-05, "loss": 0.7757, "step": 86 }, { "epoch": 8.0, "grad_norm": 0.2804253399372101, "learning_rate": 9.890738003669029e-05, "loss": 0.822, "step": 88 }, { "epoch": 8.0, "eval_loss": 0.9410832524299622, "eval_runtime": 7.4993, "eval_samples_per_second": 3.2, "eval_steps_per_second": 3.2, "step": 88 }, { "epoch": 8.181818181818182, "grad_norm": 0.3164221942424774, "learning_rate": 9.877148934427037e-05, "loss": 0.7738, "step": 90 }, { "epoch": 8.363636363636363, "grad_norm": 0.36224448680877686, "learning_rate": 9.862774069706346e-05, "loss": 0.7169, "step": 92 }, { "epoch": 8.545454545454545, "grad_norm": 0.35652151703834534, "learning_rate": 9.847615725553456e-05, "loss": 0.7579, "step": 94 }, { "epoch": 8.727272727272727, "grad_norm": 0.4091964066028595, "learning_rate": 9.831676344247342e-05, "loss": 0.6862, "step": 96 }, { "epoch": 8.909090909090908, "grad_norm": 0.41904446482658386, "learning_rate": 9.814958493905963e-05, "loss": 0.7437, "step": 98 }, { "epoch": 9.0, "eval_loss": 0.9579834342002869, "eval_runtime": 7.5118, "eval_samples_per_second": 3.195, "eval_steps_per_second": 3.195, "step": 99 }, { "epoch": 9.090909090909092, "grad_norm": 0.4185417592525482, "learning_rate": 9.797464868072488e-05, "loss": 0.7455, "step": 100 }, { "epoch": 9.272727272727273, "grad_norm": 0.4688882529735565, "learning_rate": 9.779198285281325e-05, "loss": 0.6541, "step": 102 }, { "epoch": 9.454545454545455, "grad_norm": 0.6384677290916443, "learning_rate": 9.760161688604008e-05, "loss": 0.6505, "step": 104 }, { "epoch": 9.636363636363637, "grad_norm": 0.5532212257385254, "learning_rate": 9.740358145174998e-05, "loss": 0.7368, "step": 106 }, { "epoch": 9.818181818181818, "grad_norm": 0.5052789449691772, "learning_rate": 9.719790845697533e-05, "loss": 0.6758, "step": 108 }, { "epoch": 10.0, "grad_norm": 0.674145519733429, "learning_rate": 9.698463103929542e-05, "loss": 0.6443, "step": 110 }, { "epoch": 10.0, "eval_loss": 1.0089081525802612, "eval_runtime": 7.4975, "eval_samples_per_second": 3.201, "eval_steps_per_second": 3.201, "step": 110 }, { "epoch": 10.181818181818182, "grad_norm": 0.5758641362190247, "learning_rate": 9.676378356149734e-05, "loss": 0.5998, "step": 112 }, { "epoch": 10.363636363636363, "grad_norm": 0.6187158823013306, "learning_rate": 9.653540160603956e-05, "loss": 0.5899, "step": 114 }, { "epoch": 10.545454545454545, "grad_norm": 0.6771482825279236, "learning_rate": 9.629952196931901e-05, "loss": 0.5955, "step": 116 }, { "epoch": 10.727272727272727, "grad_norm": 0.6831199526786804, "learning_rate": 9.60561826557425e-05, "loss": 0.5858, "step": 118 }, { "epoch": 10.909090909090908, "grad_norm": 0.6889560222625732, "learning_rate": 9.580542287160348e-05, "loss": 0.6397, "step": 120 }, { "epoch": 11.0, "eval_loss": 1.0663317441940308, "eval_runtime": 7.4957, "eval_samples_per_second": 3.202, "eval_steps_per_second": 3.202, "step": 121 }, { "epoch": 11.090909090909092, "grad_norm": 0.8115834593772888, "learning_rate": 9.554728301876526e-05, "loss": 0.5702, "step": 122 }, { "epoch": 11.272727272727273, "grad_norm": 0.9677160382270813, "learning_rate": 9.528180468815155e-05, "loss": 0.5163, "step": 124 }, { "epoch": 11.454545454545455, "grad_norm": 0.838153600692749, "learning_rate": 9.50090306530454e-05, "loss": 0.5314, "step": 126 }, { "epoch": 11.636363636363637, "grad_norm": 0.7266411781311035, "learning_rate": 9.472900486219769e-05, "loss": 0.5657, "step": 128 }, { "epoch": 11.818181818181818, "grad_norm": 0.9129046201705933, 
"learning_rate": 9.444177243274618e-05, "loss": 0.5353, "step": 130 }, { "epoch": 12.0, "grad_norm": 0.9295918345451355, "learning_rate": 9.414737964294636e-05, "loss": 0.5092, "step": 132 }, { "epoch": 12.0, "eval_loss": 1.141348958015442, "eval_runtime": 7.4975, "eval_samples_per_second": 3.201, "eval_steps_per_second": 3.201, "step": 132 }, { "epoch": 12.181818181818182, "grad_norm": 0.8798962235450745, "learning_rate": 9.384587392471515e-05, "loss": 0.4856, "step": 134 }, { "epoch": 12.363636363636363, "grad_norm": 1.078805685043335, "learning_rate": 9.353730385598887e-05, "loss": 0.4332, "step": 136 }, { "epoch": 12.545454545454545, "grad_norm": 1.0153404474258423, "learning_rate": 9.322171915289635e-05, "loss": 0.4568, "step": 138 }, { "epoch": 12.727272727272727, "grad_norm": 1.0150026082992554, "learning_rate": 9.289917066174886e-05, "loss": 0.4417, "step": 140 }, { "epoch": 12.909090909090908, "grad_norm": 1.4472639560699463, "learning_rate": 9.256971035084785e-05, "loss": 0.5121, "step": 142 }, { "epoch": 13.0, "eval_loss": 1.2276443243026733, "eval_runtime": 7.512, "eval_samples_per_second": 3.195, "eval_steps_per_second": 3.195, "step": 143 }, { "epoch": 13.090909090909092, "grad_norm": 0.9721190929412842, "learning_rate": 9.223339130211192e-05, "loss": 0.361, "step": 144 }, { "epoch": 13.272727272727273, "grad_norm": 1.4202260971069336, "learning_rate": 9.189026770252436e-05, "loss": 0.4109, "step": 146 }, { "epoch": 13.454545454545455, "grad_norm": 1.0497146844863892, "learning_rate": 9.154039483540273e-05, "loss": 0.4459, "step": 148 }, { "epoch": 13.636363636363637, "grad_norm": 1.0282554626464844, "learning_rate": 9.118382907149165e-05, "loss": 0.3977, "step": 150 }, { "epoch": 13.818181818181818, "grad_norm": 1.5992000102996826, "learning_rate": 9.082062785988049e-05, "loss": 0.3675, "step": 152 }, { "epoch": 14.0, "grad_norm": 1.129107117652893, "learning_rate": 9.045084971874738e-05, "loss": 0.3324, "step": 154 }, { "epoch": 14.0, "eval_loss": 1.3167015314102173, "eval_runtime": 7.4961, "eval_samples_per_second": 3.202, "eval_steps_per_second": 3.202, "step": 154 }, { "epoch": 14.0, "step": 154, "total_flos": 3.334524867130163e+16, "train_loss": 0.8300043313534229, "train_runtime": 1413.2295, "train_samples_per_second": 3.113, "train_steps_per_second": 0.389 } ], "logging_steps": 2, "max_steps": 550, "num_input_tokens_seen": 0, "num_train_epochs": 50, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 7, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 3.334524867130163e+16, "train_batch_size": 1, "trial_name": null, "trial_params": null }