{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 76,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013157894736842105,
      "grad_norm": 13.238144898761828,
      "learning_rate": 1.25e-06,
      "loss": 0.3431,
      "step": 1
    },
    {
      "epoch": 0.02631578947368421,
      "grad_norm": 13.138533030251612,
      "learning_rate": 2.5e-06,
      "loss": 0.3429,
      "step": 2
    },
    {
      "epoch": 0.039473684210526314,
      "grad_norm": 11.211925298543088,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 0.2715,
      "step": 3
    },
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 9.07840716756589,
      "learning_rate": 5e-06,
      "loss": 0.2923,
      "step": 4
    },
    {
      "epoch": 0.06578947368421052,
      "grad_norm": 4.255454799591527,
      "learning_rate": 6.25e-06,
      "loss": 0.1766,
      "step": 5
    },
    {
      "epoch": 0.07894736842105263,
      "grad_norm": 4.899723911649479,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.1795,
      "step": 6
    },
    {
      "epoch": 0.09210526315789473,
      "grad_norm": 21.444343227297015,
      "learning_rate": 8.750000000000001e-06,
      "loss": 0.2065,
      "step": 7
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 4.9912226694910995,
      "learning_rate": 1e-05,
      "loss": 0.1955,
      "step": 8
    },
    {
      "epoch": 0.11842105263157894,
      "grad_norm": 6.015085827837624,
      "learning_rate": 9.994664874011864e-06,
      "loss": 0.2171,
      "step": 9
    },
    {
      "epoch": 0.13157894736842105,
      "grad_norm": 6.232109397110641,
      "learning_rate": 9.978670881475173e-06,
      "loss": 0.1812,
      "step": 10
    },
    {
      "epoch": 0.14473684210526316,
      "grad_norm": 13.260525050150829,
      "learning_rate": 9.952052154376027e-06,
      "loss": 0.2299,
      "step": 11
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 2.8474419609274584,
      "learning_rate": 9.91486549841951e-06,
      "loss": 0.1729,
      "step": 12
    },
    {
      "epoch": 0.17105263157894737,
      "grad_norm": 4.441367319332706,
      "learning_rate": 9.867190271803466e-06,
      "loss": 0.1394,
      "step": 13
    },
    {
      "epoch": 0.18421052631578946,
      "grad_norm": 3.9791362793155245,
      "learning_rate": 9.809128215864096e-06,
      "loss": 0.1596,
      "step": 14
    },
    {
      "epoch": 0.19736842105263158,
      "grad_norm": 5.398799668373648,
      "learning_rate": 9.74080323795483e-06,
      "loss": 0.1883,
      "step": 15
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 3.801676563771218,
      "learning_rate": 9.66236114702178e-06,
      "loss": 0.1542,
      "step": 16
    },
    {
      "epoch": 0.2236842105263158,
      "grad_norm": 1.925322887230293,
      "learning_rate": 9.573969342440107e-06,
      "loss": 0.1535,
      "step": 17
    },
    {
      "epoch": 0.23684210526315788,
      "grad_norm": 2.4885347468657666,
      "learning_rate": 9.475816456775313e-06,
      "loss": 0.1495,
      "step": 18
    },
    {
      "epoch": 0.25,
      "grad_norm": 1.7097680648804299,
      "learning_rate": 9.368111953231849e-06,
      "loss": 0.0999,
      "step": 19
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 1.7070432708248722,
      "learning_rate": 9.251085678648072e-06,
      "loss": 0.1556,
      "step": 20
    },
    {
      "epoch": 0.27631578947368424,
      "grad_norm": 2.2017885079132475,
      "learning_rate": 9.124987372991512e-06,
      "loss": 0.1323,
      "step": 21
    },
    {
      "epoch": 0.2894736842105263,
      "grad_norm": 2.2923056410651657,
      "learning_rate": 8.990086136401199e-06,
      "loss": 0.1442,
      "step": 22
    },
    {
      "epoch": 0.3026315789473684,
      "grad_norm": 1.3443143761442609,
      "learning_rate": 8.846669854914395e-06,
      "loss": 0.1067,
      "step": 23
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 2.499856741628817,
      "learning_rate": 8.695044586103297e-06,
      "loss": 0.1428,
      "step": 24
    },
    {
      "epoch": 0.32894736842105265,
      "grad_norm": 1.6830200645247368,
      "learning_rate": 8.535533905932739e-06,
      "loss": 0.1201,
      "step": 25
    },
    {
      "epoch": 0.34210526315789475,
      "grad_norm": 1.5831400874663497,
      "learning_rate": 8.368478218232787e-06,
      "loss": 0.114,
      "step": 26
    },
    {
      "epoch": 0.35526315789473684,
      "grad_norm": 1.6535836650289428,
      "learning_rate": 8.194234028259806e-06,
      "loss": 0.1209,
      "step": 27
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 1.4967093603851553,
      "learning_rate": 8.013173181896283e-06,
      "loss": 0.0847,
      "step": 28
    },
    {
      "epoch": 0.3815789473684211,
      "grad_norm": 1.6642707923715867,
      "learning_rate": 7.82568207211296e-06,
      "loss": 0.1259,
      "step": 29
    },
    {
      "epoch": 0.39473684210526316,
      "grad_norm": 1.1497880560263245,
      "learning_rate": 7.63216081438678e-06,
      "loss": 0.1029,
      "step": 30
    },
    {
      "epoch": 0.40789473684210525,
      "grad_norm": 1.3303308469541215,
      "learning_rate": 7.4330223928342814e-06,
      "loss": 0.1004,
      "step": 31
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 2.427667338258928,
      "learning_rate": 7.2286917788826926e-06,
      "loss": 0.0991,
      "step": 32
    },
    {
      "epoch": 0.4342105263157895,
      "grad_norm": 1.4112715962724514,
      "learning_rate": 7.019605024359475e-06,
      "loss": 0.1117,
      "step": 33
    },
    {
      "epoch": 0.4473684210526316,
      "grad_norm": 1.0945264209924628,
      "learning_rate": 6.806208330935766e-06,
      "loss": 0.0974,
      "step": 34
    },
    {
      "epoch": 0.4605263157894737,
      "grad_norm": 2.222867676262349,
      "learning_rate": 6.588957097909509e-06,
      "loss": 0.1114,
      "step": 35
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 1.445820032177189,
      "learning_rate": 6.368314950360416e-06,
      "loss": 0.1082,
      "step": 36
    },
    {
      "epoch": 0.4868421052631579,
      "grad_norm": 2.005804185500154,
      "learning_rate": 6.144752749750671e-06,
      "loss": 0.0932,
      "step": 37
    },
    {
      "epoch": 0.5,
      "grad_norm": 1.4446629324877371,
      "learning_rate": 5.918747589082853e-06,
      "loss": 0.11,
      "step": 38
    },
    {
      "epoch": 0.5131578947368421,
      "grad_norm": 1.2013902671499292,
      "learning_rate": 5.690781774759412e-06,
      "loss": 0.088,
      "step": 39
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.9671394427016465,
      "learning_rate": 5.46134179731651e-06,
      "loss": 0.0965,
      "step": 40
    },
    {
      "epoch": 0.5394736842105263,
      "grad_norm": 0.874352355591093,
      "learning_rate": 5.230917293228699e-06,
      "loss": 0.0827,
      "step": 41
    },
    {
      "epoch": 0.5526315789473685,
      "grad_norm": 1.0990734862760956,
      "learning_rate": 5e-06,
      "loss": 0.1026,
      "step": 42
    },
    {
      "epoch": 0.5657894736842105,
      "grad_norm": 1.3629123541548118,
      "learning_rate": 4.7690827067713035e-06,
      "loss": 0.1379,
      "step": 43
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 1.1191667397706833,
      "learning_rate": 4.53865820268349e-06,
      "loss": 0.1006,
      "step": 44
    },
    {
      "epoch": 0.5921052631578947,
      "grad_norm": 1.1613132302632512,
      "learning_rate": 4.309218225240591e-06,
      "loss": 0.1196,
      "step": 45
    },
    {
      "epoch": 0.6052631578947368,
      "grad_norm": 1.4178964529072136,
      "learning_rate": 4.081252410917148e-06,
      "loss": 0.1107,
      "step": 46
    },
    {
      "epoch": 0.618421052631579,
      "grad_norm": 0.7977784874888711,
      "learning_rate": 3.855247250249331e-06,
      "loss": 0.0797,
      "step": 47
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 1.0845045774450395,
      "learning_rate": 3.6316850496395863e-06,
      "loss": 0.1116,
      "step": 48
    },
    {
      "epoch": 0.6447368421052632,
      "grad_norm": 1.8478183045971928,
      "learning_rate": 3.4110429020904924e-06,
      "loss": 0.1146,
      "step": 49
    },
    {
      "epoch": 0.6578947368421053,
      "grad_norm": 0.7367795913317746,
      "learning_rate": 3.1937916690642356e-06,
      "loss": 0.0685,
      "step": 50
    },
    {
      "epoch": 0.6710526315789473,
      "grad_norm": 1.1081708398994705,
      "learning_rate": 2.980394975640526e-06,
      "loss": 0.0934,
      "step": 51
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 6.210726159509134,
      "learning_rate": 2.771308221117309e-06,
      "loss": 0.1012,
      "step": 52
    },
    {
      "epoch": 0.6973684210526315,
      "grad_norm": 0.987185487363248,
      "learning_rate": 2.5669776071657194e-06,
      "loss": 0.1173,
      "step": 53
    },
    {
      "epoch": 0.7105263157894737,
      "grad_norm": 1.0233786719046054,
      "learning_rate": 2.3678391856132203e-06,
      "loss": 0.1006,
      "step": 54
    },
    {
      "epoch": 0.7236842105263158,
      "grad_norm": 0.8805942584459667,
      "learning_rate": 2.174317927887041e-06,
      "loss": 0.0773,
      "step": 55
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 1.4063628412136746,
      "learning_rate": 1.9868268181037186e-06,
      "loss": 0.1285,
      "step": 56
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.7609777001959152,
      "learning_rate": 1.8057659717401948e-06,
      "loss": 0.0827,
      "step": 57
    },
    {
      "epoch": 0.7631578947368421,
      "grad_norm": 0.699472385650843,
      "learning_rate": 1.6315217817672142e-06,
      "loss": 0.0728,
      "step": 58
    },
    {
      "epoch": 0.7763157894736842,
      "grad_norm": 0.8086483434606366,
      "learning_rate": 1.4644660940672628e-06,
      "loss": 0.0814,
      "step": 59
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 0.7120681165430872,
      "learning_rate": 1.3049554138967052e-06,
      "loss": 0.0746,
      "step": 60
    },
    {
      "epoch": 0.8026315789473685,
      "grad_norm": 0.6530934488185232,
      "learning_rate": 1.1533301450856054e-06,
      "loss": 0.0759,
      "step": 61
    },
    {
      "epoch": 0.8157894736842105,
      "grad_norm": 0.6010174144424382,
      "learning_rate": 1.0099138635988026e-06,
      "loss": 0.0851,
      "step": 62
    },
    {
      "epoch": 0.8289473684210527,
      "grad_norm": 0.7632251026621609,
      "learning_rate": 8.750126270084891e-07,
      "loss": 0.133,
      "step": 63
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.9433341471827504,
      "learning_rate": 7.489143213519301e-07,
      "loss": 0.0887,
      "step": 64
    },
    {
      "epoch": 0.8552631578947368,
      "grad_norm": 0.6626834135219665,
      "learning_rate": 6.318880467681527e-07,
      "loss": 0.0612,
      "step": 65
    },
    {
      "epoch": 0.868421052631579,
      "grad_norm": 0.6498581718557976,
      "learning_rate": 5.241835432246888e-07,
      "loss": 0.0807,
      "step": 66
    },
    {
      "epoch": 0.881578947368421,
      "grad_norm": 0.7771761015493923,
      "learning_rate": 4.2603065755989493e-07,
      "loss": 0.1067,
      "step": 67
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 0.7594547340399601,
      "learning_rate": 3.3763885297822153e-07,
      "loss": 0.0803,
      "step": 68
    },
    {
      "epoch": 0.9078947368421053,
      "grad_norm": 0.980352554631326,
      "learning_rate": 2.5919676204517073e-07,
      "loss": 0.0956,
      "step": 69
    },
    {
      "epoch": 0.9210526315789473,
      "grad_norm": 0.8650676858446684,
      "learning_rate": 1.908717841359048e-07,
      "loss": 0.0798,
      "step": 70
    },
    {
      "epoch": 0.9342105263157895,
      "grad_norm": 0.6440476763274703,
      "learning_rate": 1.328097281965357e-07,
      "loss": 0.0724,
      "step": 71
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 0.989220896843026,
      "learning_rate": 8.513450158049109e-08,
      "loss": 0.1021,
      "step": 72
    },
    {
      "epoch": 0.9605263157894737,
      "grad_norm": 0.6855314313706689,
      "learning_rate": 4.794784562397459e-08,
      "loss": 0.0656,
      "step": 73
    },
    {
      "epoch": 0.9736842105263158,
      "grad_norm": 0.8621391032787515,
      "learning_rate": 2.1329118524827662e-08,
      "loss": 0.0734,
      "step": 74
    },
    {
      "epoch": 0.9868421052631579,
      "grad_norm": 0.7312318211226774,
      "learning_rate": 5.3351259881379016e-09,
      "loss": 0.0711,
      "step": 75
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.8656756829991957,
      "learning_rate": 0.0,
      "loss": 0.1306,
      "step": 76
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.09319383651018143,
      "eval_runtime": 140.3661,
      "eval_samples_per_second": 36.369,
      "eval_steps_per_second": 1.14,
      "step": 76
    },
    {
      "epoch": 1.0,
      "step": 76,
      "total_flos": 2.244031216091136e+16,
      "train_loss": 0.12603967764267796,
      "train_runtime": 1049.273,
      "train_samples_per_second": 9.243,
      "train_steps_per_second": 0.072
    }
  ],
"logging_steps": 1, |
|
"max_steps": 76, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 500, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 2.244031216091136e+16, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|