{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.887459807073955,
"eval_steps": 500,
"global_step": 190,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02572347266881029,
"grad_norm": 911.2012939453125,
"learning_rate": 5.000000000000001e-07,
"loss": 12.6483,
"num_input_tokens_seen": 6512,
"step": 1
},
{
"epoch": 0.05144694533762058,
"grad_norm": 878.3407592773438,
"learning_rate": 1.0000000000000002e-06,
"loss": 12.2961,
"num_input_tokens_seen": 12848,
"step": 2
},
{
"epoch": 0.07717041800643087,
"grad_norm": 854.1091918945312,
"learning_rate": 1.5e-06,
"loss": 11.9707,
"num_input_tokens_seen": 19248,
"step": 3
},
{
"epoch": 0.10289389067524116,
"grad_norm": 789.3748168945312,
"learning_rate": 2.0000000000000003e-06,
"loss": 11.1284,
"num_input_tokens_seen": 25344,
"step": 4
},
{
"epoch": 0.12861736334405144,
"grad_norm": 632.95947265625,
"learning_rate": 2.5e-06,
"loss": 8.3234,
"num_input_tokens_seen": 31968,
"step": 5
},
{
"epoch": 0.15434083601286175,
"grad_norm": 367.85150146484375,
"learning_rate": 3e-06,
"loss": 5.8729,
"num_input_tokens_seen": 38160,
"step": 6
},
{
"epoch": 0.18006430868167203,
"grad_norm": 377.6339416503906,
"learning_rate": 3.5e-06,
"loss": 4.8266,
"num_input_tokens_seen": 44144,
"step": 7
},
{
"epoch": 0.2057877813504823,
"grad_norm": 473.7559814453125,
"learning_rate": 4.000000000000001e-06,
"loss": 2.5017,
"num_input_tokens_seen": 50432,
"step": 8
},
{
"epoch": 0.2315112540192926,
"grad_norm": 143.07786560058594,
"learning_rate": 4.5e-06,
"loss": 0.5637,
"num_input_tokens_seen": 56928,
"step": 9
},
{
"epoch": 0.2572347266881029,
"grad_norm": 73.97982788085938,
"learning_rate": 5e-06,
"loss": 0.2757,
"num_input_tokens_seen": 63344,
"step": 10
},
{
"epoch": 0.2829581993569132,
"grad_norm": 151.71585083007812,
"learning_rate": 4.9996192378909785e-06,
"loss": 1.4131,
"num_input_tokens_seen": 69568,
"step": 11
},
{
"epoch": 0.3086816720257235,
"grad_norm": 29.409135818481445,
"learning_rate": 4.99847706754774e-06,
"loss": 0.2805,
"num_input_tokens_seen": 75824,
"step": 12
},
{
"epoch": 0.33440514469453375,
"grad_norm": 183.50286865234375,
"learning_rate": 4.9965738368864345e-06,
"loss": 1.1385,
"num_input_tokens_seen": 82160,
"step": 13
},
{
"epoch": 0.36012861736334406,
"grad_norm": 258.92144775390625,
"learning_rate": 4.993910125649561e-06,
"loss": 1.5128,
"num_input_tokens_seen": 88624,
"step": 14
},
{
"epoch": 0.3858520900321543,
"grad_norm": 44.229591369628906,
"learning_rate": 4.990486745229364e-06,
"loss": 0.2268,
"num_input_tokens_seen": 94800,
"step": 15
},
{
"epoch": 0.4115755627009646,
"grad_norm": 78.64834594726562,
"learning_rate": 4.986304738420684e-06,
"loss": 1.0743,
"num_input_tokens_seen": 101360,
"step": 16
},
{
"epoch": 0.43729903536977494,
"grad_norm": 84.1305923461914,
"learning_rate": 4.981365379103306e-06,
"loss": 0.5564,
"num_input_tokens_seen": 107488,
"step": 17
},
{
"epoch": 0.4630225080385852,
"grad_norm": 157.61395263671875,
"learning_rate": 4.975670171853926e-06,
"loss": 1.009,
"num_input_tokens_seen": 113920,
"step": 18
},
{
"epoch": 0.4887459807073955,
"grad_norm": 91.1875228881836,
"learning_rate": 4.9692208514878445e-06,
"loss": 0.7292,
"num_input_tokens_seen": 120352,
"step": 19
},
{
"epoch": 0.5144694533762058,
"grad_norm": 21.991859436035156,
"learning_rate": 4.962019382530521e-06,
"loss": 0.3204,
"num_input_tokens_seen": 126880,
"step": 20
},
{
"epoch": 0.5401929260450161,
"grad_norm": 34.253746032714844,
"learning_rate": 4.9540679586191605e-06,
"loss": 0.2741,
"num_input_tokens_seen": 133344,
"step": 21
},
{
"epoch": 0.5659163987138264,
"grad_norm": 13.861297607421875,
"learning_rate": 4.9453690018345144e-06,
"loss": 0.1732,
"num_input_tokens_seen": 139744,
"step": 22
},
{
"epoch": 0.5916398713826366,
"grad_norm": 58.75880813598633,
"learning_rate": 4.935925161963089e-06,
"loss": 0.4089,
"num_input_tokens_seen": 146144,
"step": 23
},
{
"epoch": 0.617363344051447,
"grad_norm": 60.11404037475586,
"learning_rate": 4.925739315689991e-06,
"loss": 0.4201,
"num_input_tokens_seen": 152240,
"step": 24
},
{
"epoch": 0.6430868167202572,
"grad_norm": 20.798980712890625,
"learning_rate": 4.914814565722671e-06,
"loss": 0.1772,
"num_input_tokens_seen": 158512,
"step": 25
},
{
"epoch": 0.6688102893890675,
"grad_norm": 68.97270965576172,
"learning_rate": 4.903154239845798e-06,
"loss": 0.5611,
"num_input_tokens_seen": 164784,
"step": 26
},
{
"epoch": 0.6945337620578779,
"grad_norm": 57.77274703979492,
"learning_rate": 4.890761889907589e-06,
"loss": 0.4709,
"num_input_tokens_seen": 171312,
"step": 27
},
{
"epoch": 0.7202572347266881,
"grad_norm": 48.957454681396484,
"learning_rate": 4.8776412907378845e-06,
"loss": 0.3355,
"num_input_tokens_seen": 177856,
"step": 28
},
{
"epoch": 0.7459807073954984,
"grad_norm": 6.471732139587402,
"learning_rate": 4.863796438998293e-06,
"loss": 0.1259,
"num_input_tokens_seen": 184368,
"step": 29
},
{
"epoch": 0.7717041800643086,
"grad_norm": 15.684314727783203,
"learning_rate": 4.849231551964771e-06,
"loss": 0.1584,
"num_input_tokens_seen": 190928,
"step": 30
},
{
"epoch": 0.797427652733119,
"grad_norm": 24.78313636779785,
"learning_rate": 4.833951066243004e-06,
"loss": 0.1765,
"num_input_tokens_seen": 197232,
"step": 31
},
{
"epoch": 0.8231511254019293,
"grad_norm": 17.641653060913086,
"learning_rate": 4.817959636416969e-06,
"loss": 0.1372,
"num_input_tokens_seen": 203584,
"step": 32
},
{
"epoch": 0.8488745980707395,
"grad_norm": 13.282624244689941,
"learning_rate": 4.801262133631101e-06,
"loss": 0.1321,
"num_input_tokens_seen": 210128,
"step": 33
},
{
"epoch": 0.8745980707395499,
"grad_norm": 26.005861282348633,
"learning_rate": 4.783863644106502e-06,
"loss": 0.2427,
"num_input_tokens_seen": 216512,
"step": 34
},
{
"epoch": 0.9003215434083601,
"grad_norm": 24.06787872314453,
"learning_rate": 4.765769467591626e-06,
"loss": 0.2354,
"num_input_tokens_seen": 222928,
"step": 35
},
{
"epoch": 0.9260450160771704,
"grad_norm": 8.713873863220215,
"learning_rate": 4.746985115747918e-06,
"loss": 0.0977,
"num_input_tokens_seen": 229232,
"step": 36
},
{
"epoch": 0.9517684887459807,
"grad_norm": 13.601832389831543,
"learning_rate": 4.72751631047092e-06,
"loss": 0.1405,
"num_input_tokens_seen": 235680,
"step": 37
},
{
"epoch": 0.977491961414791,
"grad_norm": 19.58551597595215,
"learning_rate": 4.707368982147318e-06,
"loss": 0.2396,
"num_input_tokens_seen": 242192,
"step": 38
},
{
"epoch": 1.0032154340836013,
"grad_norm": 13.862666130065918,
"learning_rate": 4.68654926784849e-06,
"loss": 0.1272,
"num_input_tokens_seen": 248672,
"step": 39
},
{
"epoch": 1.0289389067524115,
"grad_norm": 7.956608295440674,
"learning_rate": 4.665063509461098e-06,
"loss": 0.0815,
"num_input_tokens_seen": 255072,
"step": 40
},
{
"epoch": 1.0546623794212218,
"grad_norm": 6.680484294891357,
"learning_rate": 4.642918251755281e-06,
"loss": 0.0771,
"num_input_tokens_seen": 261584,
"step": 41
},
{
"epoch": 1.0803858520900322,
"grad_norm": 7.737441539764404,
"learning_rate": 4.620120240391065e-06,
"loss": 0.0821,
"num_input_tokens_seen": 268032,
"step": 42
},
{
"epoch": 1.1061093247588425,
"grad_norm": 7.45509672164917,
"learning_rate": 4.596676419863561e-06,
"loss": 0.0597,
"num_input_tokens_seen": 274560,
"step": 43
},
{
"epoch": 1.1318327974276527,
"grad_norm": 5.9655375480651855,
"learning_rate": 4.572593931387604e-06,
"loss": 0.0356,
"num_input_tokens_seen": 280960,
"step": 44
},
{
"epoch": 1.157556270096463,
"grad_norm": 5.330617427825928,
"learning_rate": 4.54788011072248e-06,
"loss": 0.0895,
"num_input_tokens_seen": 287104,
"step": 45
},
{
"epoch": 1.1832797427652733,
"grad_norm": 6.651185989379883,
"learning_rate": 4.522542485937369e-06,
"loss": 0.0402,
"num_input_tokens_seen": 293520,
"step": 46
},
{
"epoch": 1.2090032154340835,
"grad_norm": 6.889407157897949,
"learning_rate": 4.496588775118232e-06,
"loss": 0.0659,
"num_input_tokens_seen": 299936,
"step": 47
},
{
"epoch": 1.234726688102894,
"grad_norm": 7.155170440673828,
"learning_rate": 4.470026884016805e-06,
"loss": 0.091,
"num_input_tokens_seen": 305936,
"step": 48
},
{
"epoch": 1.2604501607717042,
"grad_norm": 5.898871421813965,
"learning_rate": 4.442864903642428e-06,
"loss": 0.038,
"num_input_tokens_seen": 312528,
"step": 49
},
{
"epoch": 1.2861736334405145,
"grad_norm": 11.158126831054688,
"learning_rate": 4.415111107797445e-06,
"loss": 0.0998,
"num_input_tokens_seen": 318752,
"step": 50
},
{
"epoch": 1.3118971061093248,
"grad_norm": 10.609006881713867,
"learning_rate": 4.386773950556931e-06,
"loss": 0.1125,
"num_input_tokens_seen": 325088,
"step": 51
},
{
"epoch": 1.337620578778135,
"grad_norm": 5.496508598327637,
"learning_rate": 4.357862063693486e-06,
"loss": 0.033,
"num_input_tokens_seen": 331424,
"step": 52
},
{
"epoch": 1.3633440514469453,
"grad_norm": 7.192482948303223,
"learning_rate": 4.328384254047927e-06,
"loss": 0.0688,
"num_input_tokens_seen": 337776,
"step": 53
},
{
"epoch": 1.3890675241157555,
"grad_norm": 5.988494873046875,
"learning_rate": 4.2983495008466285e-06,
"loss": 0.0433,
"num_input_tokens_seen": 344000,
"step": 54
},
{
"epoch": 1.414790996784566,
"grad_norm": 2.032310724258423,
"learning_rate": 4.267766952966369e-06,
"loss": 0.0116,
"num_input_tokens_seen": 350528,
"step": 55
},
{
"epoch": 1.4405144694533762,
"grad_norm": 4.31352424621582,
"learning_rate": 4.236645926147493e-06,
"loss": 0.0634,
"num_input_tokens_seen": 356896,
"step": 56
},
{
"epoch": 1.4662379421221865,
"grad_norm": 6.693724632263184,
"learning_rate": 4.204995900156247e-06,
"loss": 0.0729,
"num_input_tokens_seen": 363376,
"step": 57
},
{
"epoch": 1.4919614147909968,
"grad_norm": 7.857651710510254,
"learning_rate": 4.172826515897146e-06,
"loss": 0.1315,
"num_input_tokens_seen": 369680,
"step": 58
},
{
"epoch": 1.517684887459807,
"grad_norm": 4.782497882843018,
"learning_rate": 4.140147572476269e-06,
"loss": 0.05,
"num_input_tokens_seen": 375904,
"step": 59
},
{
"epoch": 1.5434083601286175,
"grad_norm": 4.245779514312744,
"learning_rate": 4.106969024216348e-06,
"loss": 0.0838,
"num_input_tokens_seen": 382304,
"step": 60
},
{
"epoch": 1.5691318327974275,
"grad_norm": 8.497410774230957,
"learning_rate": 4.073300977624594e-06,
"loss": 0.0577,
"num_input_tokens_seen": 388688,
"step": 61
},
{
"epoch": 1.594855305466238,
"grad_norm": 4.8486528396606445,
"learning_rate": 4.039153688314146e-06,
"loss": 0.0465,
"num_input_tokens_seen": 395152,
"step": 62
},
{
"epoch": 1.6205787781350482,
"grad_norm": 7.597763538360596,
"learning_rate": 4.0045375578801216e-06,
"loss": 0.0497,
"num_input_tokens_seen": 401728,
"step": 63
},
{
"epoch": 1.6463022508038585,
"grad_norm": 4.45956563949585,
"learning_rate": 3.969463130731183e-06,
"loss": 0.0559,
"num_input_tokens_seen": 407904,
"step": 64
},
{
"epoch": 1.6720257234726688,
"grad_norm": 4.355950355529785,
"learning_rate": 3.933941090877615e-06,
"loss": 0.0394,
"num_input_tokens_seen": 414240,
"step": 65
},
{
"epoch": 1.697749196141479,
"grad_norm": 5.178930759429932,
"learning_rate": 3.897982258676867e-06,
"loss": 0.0737,
"num_input_tokens_seen": 420448,
"step": 66
},
{
"epoch": 1.7234726688102895,
"grad_norm": 5.892956256866455,
"learning_rate": 3.861597587537568e-06,
"loss": 0.0466,
"num_input_tokens_seen": 426784,
"step": 67
},
{
"epoch": 1.7491961414790995,
"grad_norm": 5.221459865570068,
"learning_rate": 3.824798160583012e-06,
"loss": 0.0619,
"num_input_tokens_seen": 432816,
"step": 68
},
{
"epoch": 1.77491961414791,
"grad_norm": 6.393134117126465,
"learning_rate": 3.787595187275136e-06,
"loss": 0.0749,
"num_input_tokens_seen": 439232,
"step": 69
},
{
"epoch": 1.8006430868167203,
"grad_norm": 9.137306213378906,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0328,
"num_input_tokens_seen": 445792,
"step": 70
},
{
"epoch": 1.8263665594855305,
"grad_norm": 5.408595085144043,
"learning_rate": 3.7120240506158433e-06,
"loss": 0.0896,
"num_input_tokens_seen": 452400,
"step": 71
},
{
"epoch": 1.852090032154341,
"grad_norm": 8.002784729003906,
"learning_rate": 3.6736789069647273e-06,
"loss": 0.0489,
"num_input_tokens_seen": 458528,
"step": 72
},
{
"epoch": 1.877813504823151,
"grad_norm": 4.455572605133057,
"learning_rate": 3.634976249348867e-06,
"loss": 0.0305,
"num_input_tokens_seen": 464976,
"step": 73
},
{
"epoch": 1.9035369774919615,
"grad_norm": 5.060314178466797,
"learning_rate": 3.595927866972694e-06,
"loss": 0.0547,
"num_input_tokens_seen": 471440,
"step": 74
},
{
"epoch": 1.9292604501607717,
"grad_norm": 8.70103645324707,
"learning_rate": 3.556545654351749e-06,
"loss": 0.0554,
"num_input_tokens_seen": 477776,
"step": 75
},
{
"epoch": 1.954983922829582,
"grad_norm": 7.640180587768555,
"learning_rate": 3.516841607689501e-06,
"loss": 0.1169,
"num_input_tokens_seen": 484096,
"step": 76
},
{
"epoch": 1.9807073954983923,
"grad_norm": 4.052855968475342,
"learning_rate": 3.476827821223184e-06,
"loss": 0.0587,
"num_input_tokens_seen": 490176,
"step": 77
},
{
"epoch": 2.0064308681672025,
"grad_norm": 6.758493423461914,
"learning_rate": 3.436516483539781e-06,
"loss": 0.0624,
"num_input_tokens_seen": 496672,
"step": 78
},
{
"epoch": 2.032154340836013,
"grad_norm": 2.274886131286621,
"learning_rate": 3.39591987386325e-06,
"loss": 0.0152,
"num_input_tokens_seen": 503088,
"step": 79
},
{
"epoch": 2.057877813504823,
"grad_norm": 2.0651822090148926,
"learning_rate": 3.3550503583141726e-06,
"loss": 0.0224,
"num_input_tokens_seen": 509472,
"step": 80
},
{
"epoch": 2.0836012861736335,
"grad_norm": 1.8226125240325928,
"learning_rate": 3.313920386142892e-06,
"loss": 0.0084,
"num_input_tokens_seen": 515728,
"step": 81
},
{
"epoch": 2.1093247588424435,
"grad_norm": 4.027963638305664,
"learning_rate": 3.272542485937369e-06,
"loss": 0.0293,
"num_input_tokens_seen": 521904,
"step": 82
},
{
"epoch": 2.135048231511254,
"grad_norm": 3.684131145477295,
"learning_rate": 3.230929261806842e-06,
"loss": 0.0167,
"num_input_tokens_seen": 527952,
"step": 83
},
{
"epoch": 2.1607717041800645,
"grad_norm": 4.047924041748047,
"learning_rate": 3.189093389542498e-06,
"loss": 0.0094,
"num_input_tokens_seen": 534624,
"step": 84
},
{
"epoch": 2.1864951768488745,
"grad_norm": 6.1028828620910645,
"learning_rate": 3.147047612756302e-06,
"loss": 0.0551,
"num_input_tokens_seen": 541168,
"step": 85
},
{
"epoch": 2.212218649517685,
"grad_norm": 9.818526268005371,
"learning_rate": 3.1048047389991693e-06,
"loss": 0.0684,
"num_input_tokens_seen": 547488,
"step": 86
},
{
"epoch": 2.237942122186495,
"grad_norm": 9.057320594787598,
"learning_rate": 3.062377635859663e-06,
"loss": 0.0178,
"num_input_tokens_seen": 554272,
"step": 87
},
{
"epoch": 2.2636655948553055,
"grad_norm": 8.866120338439941,
"learning_rate": 3.019779227044398e-06,
"loss": 0.0515,
"num_input_tokens_seen": 560528,
"step": 88
},
{
"epoch": 2.289389067524116,
"grad_norm": 7.091884613037109,
"learning_rate": 2.9770224884413625e-06,
"loss": 0.0116,
"num_input_tokens_seen": 566784,
"step": 89
},
{
"epoch": 2.315112540192926,
"grad_norm": 2.1906943321228027,
"learning_rate": 2.9341204441673267e-06,
"loss": 0.001,
"num_input_tokens_seen": 573344,
"step": 90
},
{
"epoch": 2.3408360128617365,
"grad_norm": 5.1505866050720215,
"learning_rate": 2.8910861626005774e-06,
"loss": 0.0061,
"num_input_tokens_seen": 579776,
"step": 91
},
{
"epoch": 2.3665594855305465,
"grad_norm": 3.573850393295288,
"learning_rate": 2.847932752400164e-06,
"loss": 0.0099,
"num_input_tokens_seen": 586096,
"step": 92
},
{
"epoch": 2.392282958199357,
"grad_norm": 5.356033802032471,
"learning_rate": 2.804673358512869e-06,
"loss": 0.0246,
"num_input_tokens_seen": 592528,
"step": 93
},
{
"epoch": 2.418006430868167,
"grad_norm": 6.930913925170898,
"learning_rate": 2.761321158169134e-06,
"loss": 0.0235,
"num_input_tokens_seen": 598960,
"step": 94
},
{
"epoch": 2.4437299035369775,
"grad_norm": 6.397644519805908,
"learning_rate": 2.717889356869146e-06,
"loss": 0.0563,
"num_input_tokens_seen": 605232,
"step": 95
},
{
"epoch": 2.469453376205788,
"grad_norm": 7.781960487365723,
"learning_rate": 2.6743911843603134e-06,
"loss": 0.0436,
"num_input_tokens_seen": 611344,
"step": 96
},
{
"epoch": 2.495176848874598,
"grad_norm": 4.364964485168457,
"learning_rate": 2.6308398906073603e-06,
"loss": 0.0139,
"num_input_tokens_seen": 617712,
"step": 97
},
{
"epoch": 2.5209003215434085,
"grad_norm": 5.554233074188232,
"learning_rate": 2.587248741756253e-06,
"loss": 0.0301,
"num_input_tokens_seen": 624208,
"step": 98
},
{
"epoch": 2.5466237942122185,
"grad_norm": 4.78815221786499,
"learning_rate": 2.543631016093209e-06,
"loss": 0.0154,
"num_input_tokens_seen": 630496,
"step": 99
},
{
"epoch": 2.572347266881029,
"grad_norm": 3.197784900665283,
"learning_rate": 2.5e-06,
"loss": 0.0167,
"num_input_tokens_seen": 637040,
"step": 100
},
{
"epoch": 2.598070739549839,
"grad_norm": 3.381016254425049,
"learning_rate": 2.4563689839067913e-06,
"loss": 0.0171,
"num_input_tokens_seen": 643472,
"step": 101
},
{
"epoch": 2.6237942122186495,
"grad_norm": 1.942832112312317,
"learning_rate": 2.4127512582437486e-06,
"loss": 0.0219,
"num_input_tokens_seen": 649760,
"step": 102
},
{
"epoch": 2.64951768488746,
"grad_norm": 2.4761767387390137,
"learning_rate": 2.3691601093926406e-06,
"loss": 0.0108,
"num_input_tokens_seen": 656096,
"step": 103
},
{
"epoch": 2.67524115755627,
"grad_norm": 1.4578922986984253,
"learning_rate": 2.325608815639687e-06,
"loss": 0.0124,
"num_input_tokens_seen": 662624,
"step": 104
},
{
"epoch": 2.7009646302250805,
"grad_norm": 3.501628875732422,
"learning_rate": 2.2821106431308546e-06,
"loss": 0.0489,
"num_input_tokens_seen": 668768,
"step": 105
},
{
"epoch": 2.7266881028938905,
"grad_norm": 4.389317989349365,
"learning_rate": 2.238678841830867e-06,
"loss": 0.0131,
"num_input_tokens_seen": 674992,
"step": 106
},
{
"epoch": 2.752411575562701,
"grad_norm": 3.6161351203918457,
"learning_rate": 2.195326641487132e-06,
"loss": 0.0148,
"num_input_tokens_seen": 681120,
"step": 107
},
{
"epoch": 2.778135048231511,
"grad_norm": 4.236209869384766,
"learning_rate": 2.1520672475998374e-06,
"loss": 0.0279,
"num_input_tokens_seen": 687376,
"step": 108
},
{
"epoch": 2.8038585209003215,
"grad_norm": 2.5146102905273438,
"learning_rate": 2.1089138373994226e-06,
"loss": 0.0177,
"num_input_tokens_seen": 693984,
"step": 109
},
{
"epoch": 2.829581993569132,
"grad_norm": 2.784385919570923,
"learning_rate": 2.0658795558326745e-06,
"loss": 0.0362,
"num_input_tokens_seen": 700352,
"step": 110
},
{
"epoch": 2.855305466237942,
"grad_norm": 5.2366228103637695,
"learning_rate": 2.022977511558638e-06,
"loss": 0.038,
"num_input_tokens_seen": 706768,
"step": 111
},
{
"epoch": 2.8810289389067525,
"grad_norm": 1.9886460304260254,
"learning_rate": 1.9802207729556023e-06,
"loss": 0.0249,
"num_input_tokens_seen": 713248,
"step": 112
},
{
"epoch": 2.906752411575563,
"grad_norm": 1.715645670890808,
"learning_rate": 1.937622364140338e-06,
"loss": 0.0074,
"num_input_tokens_seen": 719568,
"step": 113
},
{
"epoch": 2.932475884244373,
"grad_norm": 3.5033886432647705,
"learning_rate": 1.895195261000831e-06,
"loss": 0.0147,
"num_input_tokens_seen": 725984,
"step": 114
},
{
"epoch": 2.958199356913183,
"grad_norm": 1.0496388673782349,
"learning_rate": 1.852952387243698e-06,
"loss": 0.0168,
"num_input_tokens_seen": 732224,
"step": 115
},
{
"epoch": 2.9839228295819935,
"grad_norm": 1.4470492601394653,
"learning_rate": 1.8109066104575023e-06,
"loss": 0.0083,
"num_input_tokens_seen": 738496,
"step": 116
},
{
"epoch": 3.009646302250804,
"grad_norm": 2.3363845348358154,
"learning_rate": 1.7690707381931585e-06,
"loss": 0.0086,
"num_input_tokens_seen": 744880,
"step": 117
},
{
"epoch": 3.035369774919614,
"grad_norm": 0.5077351331710815,
"learning_rate": 1.7274575140626318e-06,
"loss": 0.0025,
"num_input_tokens_seen": 751408,
"step": 118
},
{
"epoch": 3.0610932475884245,
"grad_norm": 0.9326338768005371,
"learning_rate": 1.686079613857109e-06,
"loss": 0.0067,
"num_input_tokens_seen": 757632,
"step": 119
},
{
"epoch": 3.0868167202572345,
"grad_norm": 0.40275102853775024,
"learning_rate": 1.6449496416858285e-06,
"loss": 0.002,
"num_input_tokens_seen": 763936,
"step": 120
},
{
"epoch": 3.112540192926045,
"grad_norm": 0.051462285220623016,
"learning_rate": 1.6040801261367494e-06,
"loss": 0.0003,
"num_input_tokens_seen": 770064,
"step": 121
},
{
"epoch": 3.1382636655948555,
"grad_norm": 0.8453549742698669,
"learning_rate": 1.56348351646022e-06,
"loss": 0.0012,
"num_input_tokens_seen": 776176,
"step": 122
},
{
"epoch": 3.1639871382636655,
"grad_norm": 0.02829030156135559,
"learning_rate": 1.5231721787768162e-06,
"loss": 0.0001,
"num_input_tokens_seen": 782464,
"step": 123
},
{
"epoch": 3.189710610932476,
"grad_norm": 7.223586082458496,
"learning_rate": 1.4831583923105e-06,
"loss": 0.0098,
"num_input_tokens_seen": 789072,
"step": 124
},
{
"epoch": 3.215434083601286,
"grad_norm": 0.9582700729370117,
"learning_rate": 1.443454345648252e-06,
"loss": 0.0029,
"num_input_tokens_seen": 795536,
"step": 125
},
{
"epoch": 3.2411575562700965,
"grad_norm": 0.002318341052159667,
"learning_rate": 1.4040721330273063e-06,
"loss": 0.0,
"num_input_tokens_seen": 801888,
"step": 126
},
{
"epoch": 3.266881028938907,
"grad_norm": 0.003536389209330082,
"learning_rate": 1.3650237506511333e-06,
"loss": 0.0,
"num_input_tokens_seen": 808432,
"step": 127
},
{
"epoch": 3.292604501607717,
"grad_norm": 0.07501762360334396,
"learning_rate": 1.3263210930352737e-06,
"loss": 0.0002,
"num_input_tokens_seen": 814896,
"step": 128
},
{
"epoch": 3.3183279742765275,
"grad_norm": 5.415798187255859,
"learning_rate": 1.2879759493841577e-06,
"loss": 0.0331,
"num_input_tokens_seen": 821200,
"step": 129
},
{
"epoch": 3.3440514469453375,
"grad_norm": 0.002228162717074156,
"learning_rate": 1.2500000000000007e-06,
"loss": 0.0,
"num_input_tokens_seen": 827584,
"step": 130
},
{
"epoch": 3.369774919614148,
"grad_norm": 1.7616236209869385,
"learning_rate": 1.2124048127248644e-06,
"loss": 0.0038,
"num_input_tokens_seen": 834048,
"step": 131
},
{
"epoch": 3.395498392282958,
"grad_norm": 0.000835901708342135,
"learning_rate": 1.1752018394169882e-06,
"loss": 0.0,
"num_input_tokens_seen": 840240,
"step": 132
},
{
"epoch": 3.4212218649517685,
"grad_norm": 2.558288812637329,
"learning_rate": 1.1384024124624324e-06,
"loss": 0.0051,
"num_input_tokens_seen": 846448,
"step": 133
},
{
"epoch": 3.446945337620579,
"grad_norm": 8.443009376525879,
"learning_rate": 1.1020177413231334e-06,
"loss": 0.0589,
"num_input_tokens_seen": 852928,
"step": 134
},
{
"epoch": 3.472668810289389,
"grad_norm": 0.6075427532196045,
"learning_rate": 1.0660589091223854e-06,
"loss": 0.0003,
"num_input_tokens_seen": 859312,
"step": 135
},
{
"epoch": 3.4983922829581995,
"grad_norm": 7.910192012786865,
"learning_rate": 1.0305368692688175e-06,
"loss": 0.0355,
"num_input_tokens_seen": 865440,
"step": 136
},
{
"epoch": 3.5241157556270095,
"grad_norm": 3.2053821086883545,
"learning_rate": 9.95462442119879e-07,
"loss": 0.0402,
"num_input_tokens_seen": 871968,
"step": 137
},
{
"epoch": 3.54983922829582,
"grad_norm": 0.0018598005408421159,
"learning_rate": 9.608463116858544e-07,
"loss": 0.0,
"num_input_tokens_seen": 878352,
"step": 138
},
{
"epoch": 3.57556270096463,
"grad_norm": 8.275130271911621,
"learning_rate": 9.266990223754069e-07,
"loss": 0.0083,
"num_input_tokens_seen": 884736,
"step": 139
},
{
"epoch": 3.6012861736334405,
"grad_norm": 0.0012662999797612429,
"learning_rate": 8.930309757836517e-07,
"loss": 0.0,
"num_input_tokens_seen": 891008,
"step": 140
},
{
"epoch": 3.627009646302251,
"grad_norm": 1.8417574167251587,
"learning_rate": 8.598524275237321e-07,
"loss": 0.0012,
"num_input_tokens_seen": 897120,
"step": 141
},
{
"epoch": 3.652733118971061,
"grad_norm": 3.729846239089966,
"learning_rate": 8.271734841028553e-07,
"loss": 0.0342,
"num_input_tokens_seen": 903536,
"step": 142
},
{
"epoch": 3.6784565916398715,
"grad_norm": 9.431204795837402,
"learning_rate": 7.950040998437541e-07,
"loss": 0.0723,
"num_input_tokens_seen": 909824,
"step": 143
},
{
"epoch": 3.7041800643086815,
"grad_norm": 0.0016427229857072234,
"learning_rate": 7.633540738525066e-07,
"loss": 0.0,
"num_input_tokens_seen": 916112,
"step": 144
},
{
"epoch": 3.729903536977492,
"grad_norm": 2.577866554260254,
"learning_rate": 7.322330470336314e-07,
"loss": 0.041,
"num_input_tokens_seen": 922592,
"step": 145
},
{
"epoch": 3.755627009646302,
"grad_norm": 0.032972197979688644,
"learning_rate": 7.016504991533727e-07,
"loss": 0.0,
"num_input_tokens_seen": 928640,
"step": 146
},
{
"epoch": 3.7813504823151125,
"grad_norm": 0.07500260323286057,
"learning_rate": 6.716157459520739e-07,
"loss": 0.0001,
"num_input_tokens_seen": 935488,
"step": 147
},
{
"epoch": 3.807073954983923,
"grad_norm": 3.048534870147705,
"learning_rate": 6.421379363065142e-07,
"loss": 0.022,
"num_input_tokens_seen": 941952,
"step": 148
},
{
"epoch": 3.832797427652733,
"grad_norm": 0.003397310385480523,
"learning_rate": 6.1322604944307e-07,
"loss": 0.0,
"num_input_tokens_seen": 948368,
"step": 149
},
{
"epoch": 3.8585209003215435,
"grad_norm": 4.262376308441162,
"learning_rate": 5.848888922025553e-07,
"loss": 0.0095,
"num_input_tokens_seen": 954752,
"step": 150
},
{
"epoch": 3.884244372990354,
"grad_norm": 0.2115989625453949,
"learning_rate": 5.571350963575728e-07,
"loss": 0.0002,
"num_input_tokens_seen": 961248,
"step": 151
},
{
"epoch": 3.909967845659164,
"grad_norm": 0.005494151264429092,
"learning_rate": 5.299731159831953e-07,
"loss": 0.0,
"num_input_tokens_seen": 967424,
"step": 152
},
{
"epoch": 3.935691318327974,
"grad_norm": 12.337247848510742,
"learning_rate": 5.034112248817685e-07,
"loss": 0.0274,
"num_input_tokens_seen": 973696,
"step": 153
},
{
"epoch": 3.9614147909967845,
"grad_norm": 3.028348684310913,
"learning_rate": 4.774575140626317e-07,
"loss": 0.0284,
"num_input_tokens_seen": 980144,
"step": 154
},
{
"epoch": 3.987138263665595,
"grad_norm": 0.03550104424357414,
"learning_rate": 4.5211988927752026e-07,
"loss": 0.0001,
"num_input_tokens_seen": 986352,
"step": 155
},
{
"epoch": 4.012861736334405,
"grad_norm": 0.006979313213378191,
"learning_rate": 4.27406068612396e-07,
"loss": 0.0,
"num_input_tokens_seen": 992880,
"step": 156
},
{
"epoch": 4.038585209003215,
"grad_norm": 0.04005662724375725,
"learning_rate": 4.033235801364402e-07,
"loss": 0.0001,
"num_input_tokens_seen": 999200,
"step": 157
},
{
"epoch": 4.064308681672026,
"grad_norm": 0.03479517623782158,
"learning_rate": 3.798797596089351e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1005568,
"step": 158
},
{
"epoch": 4.090032154340836,
"grad_norm": 1.3862966299057007,
"learning_rate": 3.5708174824471947e-07,
"loss": 0.0121,
"num_input_tokens_seen": 1011728,
"step": 159
},
{
"epoch": 4.115755627009646,
"grad_norm": 0.013909807428717613,
"learning_rate": 3.3493649053890325e-07,
"loss": 0.0,
"num_input_tokens_seen": 1018272,
"step": 160
},
{
"epoch": 4.141479099678457,
"grad_norm": 0.0408039316534996,
"learning_rate": 3.134507321515107e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1024688,
"step": 161
},
{
"epoch": 4.167202572347267,
"grad_norm": 0.031128501519560814,
"learning_rate": 2.9263101785268253e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1031152,
"step": 162
},
{
"epoch": 4.192926045016077,
"grad_norm": 0.12184783816337585,
"learning_rate": 2.7248368952908055e-07,
"loss": 0.0002,
"num_input_tokens_seen": 1037632,
"step": 163
},
{
"epoch": 4.218649517684887,
"grad_norm": 0.17244334518909454,
"learning_rate": 2.53014884252083e-07,
"loss": 0.0005,
"num_input_tokens_seen": 1043936,
"step": 164
},
{
"epoch": 4.244372990353698,
"grad_norm": 0.036631420254707336,
"learning_rate": 2.3423053240837518e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1050048,
"step": 165
},
{
"epoch": 4.270096463022508,
"grad_norm": 0.028202209621667862,
"learning_rate": 2.1613635589349756e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1056496,
"step": 166
},
{
"epoch": 4.295819935691318,
"grad_norm": 0.017439717426896095,
"learning_rate": 1.9873786636889908e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1062848,
"step": 167
},
{
"epoch": 4.321543408360129,
"grad_norm": 0.1556771695613861,
"learning_rate": 1.8204036358303173e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1069136,
"step": 168
},
{
"epoch": 4.347266881028939,
"grad_norm": 0.06420128047466278,
"learning_rate": 1.6604893375699594e-07,
"loss": 0.0001,
"num_input_tokens_seen": 1075328,
"step": 169
},
{
"epoch": 4.372990353697749,
"grad_norm": 0.04077797383069992,
"learning_rate": 1.507684480352292e-07,
"loss": 0.0002,
"num_input_tokens_seen": 1081696,
"step": 170
},
{
"epoch": 4.39871382636656,
"grad_norm": 1.3370263576507568,
"learning_rate": 1.362035610017079e-07,
"loss": 0.0026,
"num_input_tokens_seen": 1088112,
"step": 171
},
{
"epoch": 4.42443729903537,
"grad_norm": 1.2518365383148193,
"learning_rate": 1.223587092621162e-07,
"loss": 0.0032,
"num_input_tokens_seen": 1094512,
"step": 172
},
{
"epoch": 4.45016077170418,
"grad_norm": 0.1654355376958847,
"learning_rate": 1.0923811009241142e-07,
"loss": 0.0004,
"num_input_tokens_seen": 1101040,
"step": 173
},
{
"epoch": 4.47588424437299,
"grad_norm": 0.015155570581555367,
"learning_rate": 9.684576015420277e-08,
"loss": 0.0001,
"num_input_tokens_seen": 1107168,
"step": 174
},
{
"epoch": 4.501607717041801,
"grad_norm": 1.3546839952468872,
"learning_rate": 8.518543427732951e-08,
"loss": 0.0074,
"num_input_tokens_seen": 1113408,
"step": 175
},
{
"epoch": 4.527331189710611,
"grad_norm": 1.3428282737731934,
"learning_rate": 7.426068431000883e-08,
"loss": 0.0029,
"num_input_tokens_seen": 1119568,
"step": 176
},
{
"epoch": 4.553054662379421,
"grad_norm": 0.009344842284917831,
"learning_rate": 6.407483803691216e-08,
"loss": 0.0001,
"num_input_tokens_seen": 1126048,
"step": 177
},
{
"epoch": 4.578778135048232,
"grad_norm": 0.010992766357958317,
"learning_rate": 5.463099816548578e-08,
"loss": 0.0,
"num_input_tokens_seen": 1132400,
"step": 178
},
{
"epoch": 4.604501607717042,
"grad_norm": 2.1642343997955322,
"learning_rate": 4.593204138084006e-08,
"loss": 0.0082,
"num_input_tokens_seen": 1138832,
"step": 179
},
{
"epoch": 4.630225080385852,
"grad_norm": 0.013943604193627834,
"learning_rate": 3.798061746947995e-08,
"loss": 0.0001,
"num_input_tokens_seen": 1145344,
"step": 180
},
{
"epoch": 4.655948553054662,
"grad_norm": 0.0075085959397256374,
"learning_rate": 3.077914851215585e-08,
"loss": 0.0,
"num_input_tokens_seen": 1151536,
"step": 181
},
{
"epoch": 4.681672025723473,
"grad_norm": 0.034857023507356644,
"learning_rate": 2.4329828146074096e-08,
"loss": 0.0001,
"num_input_tokens_seen": 1157696,
"step": 182
},
{
"epoch": 4.707395498392283,
"grad_norm": 0.030652256682515144,
"learning_rate": 1.8634620896695044e-08,
"loss": 0.0001,
"num_input_tokens_seen": 1163712,
"step": 183
},
{
"epoch": 4.733118971061093,
"grad_norm": 1.9094609022140503,
"learning_rate": 1.3695261579316776e-08,
"loss": 0.0052,
"num_input_tokens_seen": 1169888,
"step": 184
},
{
"epoch": 4.758842443729904,
"grad_norm": 0.3100648820400238,
"learning_rate": 9.513254770636138e-09,
"loss": 0.0009,
"num_input_tokens_seen": 1176272,
"step": 185
},
{
"epoch": 4.784565916398714,
"grad_norm": 0.01326401811093092,
"learning_rate": 6.089874350439507e-09,
"loss": 0.0001,
"num_input_tokens_seen": 1182688,
"step": 186
},
{
"epoch": 4.810289389067524,
"grad_norm": 0.29082992672920227,
"learning_rate": 3.4261631135654174e-09,
"loss": 0.001,
"num_input_tokens_seen": 1189360,
"step": 187
},
{
"epoch": 4.836012861736334,
"grad_norm": 0.3995530903339386,
"learning_rate": 1.5229324522605949e-09,
"loss": 0.0013,
"num_input_tokens_seen": 1195632,
"step": 188
},
{
"epoch": 4.861736334405145,
"grad_norm": 0.06924965977668762,
"learning_rate": 3.8076210902182607e-10,
"loss": 0.0001,
"num_input_tokens_seen": 1202016,
"step": 189
},
{
"epoch": 4.887459807073955,
"grad_norm": 0.11802174150943756,
"learning_rate": 0.0,
"loss": 0.0004,
"num_input_tokens_seen": 1208400,
"step": 190
}
],
"logging_steps": 1,
"max_steps": 190,
"num_input_tokens_seen": 1208400,
"num_train_epochs": 5,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.441370708980531e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}