{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.0,
"eval_steps": 500,
"global_step": 16506,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09087604507451835,
"grad_norm": 0.4355389475822449,
"learning_rate": 0.0002,
"loss": 0.8037,
"step": 250
},
{
"epoch": 0.1817520901490367,
"grad_norm": 0.3333398103713989,
"learning_rate": 0.0002,
"loss": 0.472,
"step": 500
},
{
"epoch": 0.27262813522355506,
"grad_norm": 0.4328073561191559,
"learning_rate": 0.0002,
"loss": 0.3581,
"step": 750
},
{
"epoch": 0.3635041802980734,
"grad_norm": 0.5728178024291992,
"learning_rate": 0.0002,
"loss": 0.29,
"step": 1000
},
{
"epoch": 0.45438022537259176,
"grad_norm": 0.7130202054977417,
"learning_rate": 0.0002,
"loss": 0.2542,
"step": 1250
},
{
"epoch": 0.5452562704471101,
"grad_norm": 0.4695929288864136,
"learning_rate": 0.0002,
"loss": 0.2186,
"step": 1500
},
{
"epoch": 0.6361323155216285,
"grad_norm": 0.5198377966880798,
"learning_rate": 0.0002,
"loss": 0.2042,
"step": 1750
},
{
"epoch": 0.7270083605961468,
"grad_norm": 0.4845351278781891,
"learning_rate": 0.0002,
"loss": 0.1857,
"step": 2000
},
{
"epoch": 0.8178844056706652,
"grad_norm": 0.3002346456050873,
"learning_rate": 0.0002,
"loss": 0.1727,
"step": 2250
},
{
"epoch": 0.9087604507451835,
"grad_norm": 0.3961799740791321,
"learning_rate": 0.0002,
"loss": 0.1647,
"step": 2500
},
{
"epoch": 0.9996364958197019,
"grad_norm": 0.19754305481910706,
"learning_rate": 0.0002,
"loss": 0.1561,
"step": 2750
},
{
"epoch": 1.0905125408942202,
"grad_norm": 0.4301386773586273,
"learning_rate": 0.0002,
"loss": 0.1478,
"step": 3000
},
{
"epoch": 1.1813885859687385,
"grad_norm": 0.33132612705230713,
"learning_rate": 0.0002,
"loss": 0.1451,
"step": 3250
},
{
"epoch": 1.272264631043257,
"grad_norm": 0.2488761842250824,
"learning_rate": 0.0002,
"loss": 0.1445,
"step": 3500
},
{
"epoch": 1.3631406761177753,
"grad_norm": 0.16658537089824677,
"learning_rate": 0.0002,
"loss": 0.1403,
"step": 3750
},
{
"epoch": 1.4540167211922936,
"grad_norm": 0.1872565895318985,
"learning_rate": 0.0002,
"loss": 0.1365,
"step": 4000
},
{
"epoch": 1.5448927662668122,
"grad_norm": 0.1460885852575302,
"learning_rate": 0.0002,
"loss": 0.1344,
"step": 4250
},
{
"epoch": 1.6357688113413305,
"grad_norm": 0.35161587595939636,
"learning_rate": 0.0002,
"loss": 0.134,
"step": 4500
},
{
"epoch": 1.7266448564158487,
"grad_norm": 0.2876887917518616,
"learning_rate": 0.0002,
"loss": 0.1332,
"step": 4750
},
{
"epoch": 1.8175209014903673,
"grad_norm": 0.18391716480255127,
"learning_rate": 0.0002,
"loss": 0.1312,
"step": 5000
},
{
"epoch": 1.9083969465648853,
"grad_norm": 0.18932031095027924,
"learning_rate": 0.0002,
"loss": 0.1306,
"step": 5250
},
{
"epoch": 1.9992729916394039,
"grad_norm": 0.2454090714454651,
"learning_rate": 0.0002,
"loss": 0.1291,
"step": 5500
},
{
"epoch": 2.0901490367139224,
"grad_norm": 0.15890659391880035,
"learning_rate": 0.0002,
"loss": 0.1241,
"step": 5750
},
{
"epoch": 2.1810250817884405,
"grad_norm": 0.13456985354423523,
"learning_rate": 0.0002,
"loss": 0.1255,
"step": 6000
},
{
"epoch": 2.271901126862959,
"grad_norm": 0.18098081648349762,
"learning_rate": 0.0002,
"loss": 0.1249,
"step": 6250
},
{
"epoch": 2.362777171937477,
"grad_norm": 0.21501798927783966,
"learning_rate": 0.0002,
"loss": 0.1272,
"step": 6500
},
{
"epoch": 2.4536532170119956,
"grad_norm": 0.10344691574573517,
"learning_rate": 0.0002,
"loss": 0.1266,
"step": 6750
},
{
"epoch": 2.544529262086514,
"grad_norm": 0.16723701357841492,
"learning_rate": 0.0002,
"loss": 0.1265,
"step": 7000
},
{
"epoch": 2.635405307161032,
"grad_norm": 0.15142661333084106,
"learning_rate": 0.0002,
"loss": 0.1253,
"step": 7250
},
{
"epoch": 2.7262813522355507,
"grad_norm": 0.18276916444301605,
"learning_rate": 0.0002,
"loss": 0.1247,
"step": 7500
},
{
"epoch": 2.817157397310069,
"grad_norm": 0.10605284571647644,
"learning_rate": 0.0002,
"loss": 0.125,
"step": 7750
},
{
"epoch": 2.9080334423845873,
"grad_norm": 0.13421468436717987,
"learning_rate": 0.0002,
"loss": 0.1235,
"step": 8000
},
{
"epoch": 2.998909487459106,
"grad_norm": 0.117579385638237,
"learning_rate": 0.0002,
"loss": 0.1216,
"step": 8250
},
{
"epoch": 3.0897855325336243,
"grad_norm": 0.09406478703022003,
"learning_rate": 0.0002,
"loss": 0.1208,
"step": 8500
},
{
"epoch": 3.1806615776081424,
"grad_norm": 0.13346560299396515,
"learning_rate": 0.0002,
"loss": 0.1211,
"step": 8750
},
{
"epoch": 3.271537622682661,
"grad_norm": 0.2320273071527481,
"learning_rate": 0.0002,
"loss": 0.1199,
"step": 9000
},
{
"epoch": 3.3624136677571794,
"grad_norm": 0.1322121024131775,
"learning_rate": 0.0002,
"loss": 0.1206,
"step": 9250
},
{
"epoch": 3.4532897128316975,
"grad_norm": 0.12291130423545837,
"learning_rate": 0.0002,
"loss": 0.1203,
"step": 9500
},
{
"epoch": 3.544165757906216,
"grad_norm": 0.1050431877374649,
"learning_rate": 0.0002,
"loss": 0.1206,
"step": 9750
},
{
"epoch": 3.6350418029807345,
"grad_norm": 0.10830684006214142,
"learning_rate": 0.0002,
"loss": 0.1204,
"step": 10000
},
{
"epoch": 3.7259178480552526,
"grad_norm": 0.16537795960903168,
"learning_rate": 0.0002,
"loss": 0.1214,
"step": 10250
},
{
"epoch": 3.816793893129771,
"grad_norm": 0.13683146238327026,
"learning_rate": 0.0002,
"loss": 0.1231,
"step": 10500
},
{
"epoch": 3.907669938204289,
"grad_norm": 0.10453382879495621,
"learning_rate": 0.0002,
"loss": 0.1193,
"step": 10750
},
{
"epoch": 3.9985459832788077,
"grad_norm": 0.08612716197967529,
"learning_rate": 0.0002,
"loss": 0.1191,
"step": 11000
},
{
"epoch": 4.089422028353326,
"grad_norm": 0.08494025468826294,
"learning_rate": 0.0002,
"loss": 0.1171,
"step": 11250
},
{
"epoch": 4.180298073427845,
"grad_norm": 0.05780186131596565,
"learning_rate": 0.0002,
"loss": 0.1172,
"step": 11500
},
{
"epoch": 4.271174118502363,
"grad_norm": 0.10587610304355621,
"learning_rate": 0.0002,
"loss": 0.1189,
"step": 11750
},
{
"epoch": 4.362050163576881,
"grad_norm": 0.09237153828144073,
"learning_rate": 0.0002,
"loss": 0.1178,
"step": 12000
},
{
"epoch": 4.4529262086514,
"grad_norm": 0.22916856408119202,
"learning_rate": 0.0002,
"loss": 0.1191,
"step": 12250
},
{
"epoch": 4.543802253725918,
"grad_norm": 0.12149995565414429,
"learning_rate": 0.0002,
"loss": 0.118,
"step": 12500
},
{
"epoch": 4.634678298800436,
"grad_norm": 0.10446422547101974,
"learning_rate": 0.0002,
"loss": 0.1187,
"step": 12750
},
{
"epoch": 4.725554343874954,
"grad_norm": 0.09491357207298279,
"learning_rate": 0.0002,
"loss": 0.1181,
"step": 13000
},
{
"epoch": 4.816430388949473,
"grad_norm": 0.15105749666690826,
"learning_rate": 0.0002,
"loss": 0.1192,
"step": 13250
},
{
"epoch": 4.907306434023991,
"grad_norm": 0.11921977251768112,
"learning_rate": 0.0002,
"loss": 0.1168,
"step": 13500
},
{
"epoch": 4.99818247909851,
"grad_norm": 0.07625231146812439,
"learning_rate": 0.0002,
"loss": 0.1172,
"step": 13750
},
{
"epoch": 5.089058524173028,
"grad_norm": 0.09381510317325592,
"learning_rate": 0.0002,
"loss": 0.1151,
"step": 14000
},
{
"epoch": 5.179934569247546,
"grad_norm": 0.08833180367946625,
"learning_rate": 0.0002,
"loss": 0.1156,
"step": 14250
},
{
"epoch": 5.270810614322064,
"grad_norm": 0.08934313803911209,
"learning_rate": 0.0002,
"loss": 0.1164,
"step": 14500
},
{
"epoch": 5.361686659396583,
"grad_norm": 0.08423677831888199,
"learning_rate": 0.0002,
"loss": 0.1162,
"step": 14750
},
{
"epoch": 5.452562704471101,
"grad_norm": 0.08956829458475113,
"learning_rate": 0.0002,
"loss": 0.1147,
"step": 15000
},
{
"epoch": 5.543438749545619,
"grad_norm": 0.07213614135980606,
"learning_rate": 0.0002,
"loss": 0.1166,
"step": 15250
},
{
"epoch": 5.634314794620138,
"grad_norm": 0.24475565552711487,
"learning_rate": 0.0002,
"loss": 0.1172,
"step": 15500
},
{
"epoch": 5.7251908396946565,
"grad_norm": 0.10199423879384995,
"learning_rate": 0.0002,
"loss": 0.1171,
"step": 15750
},
{
"epoch": 5.8160668847691745,
"grad_norm": 0.2594470679759979,
"learning_rate": 0.0002,
"loss": 0.1165,
"step": 16000
},
{
"epoch": 5.9069429298436935,
"grad_norm": 0.08738715201616287,
"learning_rate": 0.0002,
"loss": 0.115,
"step": 16250
},
{
"epoch": 5.997818974918212,
"grad_norm": 0.07172822952270508,
"learning_rate": 0.0002,
"loss": 0.1153,
"step": 16500
}
],
"logging_steps": 250,
"max_steps": 16506,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 250,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.0312668928252314e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}