yuweiiizz committed on
Commit 3efbbeb
1 Parent(s): 0cebe08

Training in progress, step 4000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:11656bf7f033202b92b73fbd190a96edd871986eae5df5760f4085f5dda8ca05
+ oid sha256:0abaa86fc4833d57f7fe503c09526ace445919e1b76f14b35ac06c12e186254f
  size 966995080
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:65bb8e9be3f0e1854108e3633b2dea43fc25763c0c02b9d8dc515936b822151a
+ oid sha256:77a61b8fba58f2ecec15e248785b8d20ba3be4672b027d63e4ae27a50a6e05e6
  size 1925064044
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:27cdf2edd39f57a70573d9ff0027b58248741fcc4a77b968063bd6a9c61fd866
+ oid sha256:d4a887c0679a244fb3578da62fc4230274c5d38de547b25494a50298ffcd112e
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:17af6a83bb1cb19cd0edadcdd8667775ae13ecbc6438dd8bbc5fbd929a74874b
+ oid sha256:4a76e193687c482f6cc875caf45cbf094edc541bfbe3eb9f8259fd2d597d2f4e
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 19.074374661838764,
- "best_model_checkpoint": "./whisper-small-taiwanese-hanzi/checkpoint-3000",
- "epoch": 1.2,
+ "best_metric": 18.183709992924626,
+ "best_model_checkpoint": "./whisper-small-taiwanese-hanzi/checkpoint-4000",
+ "epoch": 1.6,
  "eval_steps": 1000,
- "global_step": 3000,
+ "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -874,6 +874,295 @@
  "eval_samples_per_second": 2.265,
  "eval_steps_per_second": 0.283,
  "step": 3000
+ },
+ {
+ "epoch": 1.21,
+ "grad_norm": 7.052708148956299,
+ "learning_rate": 4.388888888888889e-06,
+ "loss": 0.2062,
+ "step": 3025
+ },
+ {
+ "epoch": 1.22,
+ "grad_norm": 9.39709186553955,
+ "learning_rate": 4.333333333333334e-06,
+ "loss": 0.2342,
+ "step": 3050
+ },
+ {
+ "epoch": 1.23,
+ "grad_norm": 5.614500522613525,
+ "learning_rate": 4.277777777777778e-06,
+ "loss": 0.218,
+ "step": 3075
+ },
+ {
+ "epoch": 1.24,
+ "grad_norm": 7.228747367858887,
+ "learning_rate": 4.222222222222223e-06,
+ "loss": 0.2154,
+ "step": 3100
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 10.118727684020996,
+ "learning_rate": 4.166666666666667e-06,
+ "loss": 0.2519,
+ "step": 3125
+ },
+ {
+ "epoch": 1.26,
+ "grad_norm": 9.65976333618164,
+ "learning_rate": 4.111111111111111e-06,
+ "loss": 0.2336,
+ "step": 3150
+ },
+ {
+ "epoch": 1.27,
+ "grad_norm": 9.654402732849121,
+ "learning_rate": 4.055555555555556e-06,
+ "loss": 0.2474,
+ "step": 3175
+ },
+ {
+ "epoch": 1.28,
+ "grad_norm": 6.564414024353027,
+ "learning_rate": 4.000000000000001e-06,
+ "loss": 0.2329,
+ "step": 3200
+ },
+ {
+ "epoch": 1.29,
+ "grad_norm": 7.532024383544922,
+ "learning_rate": 3.944444444444445e-06,
+ "loss": 0.2135,
+ "step": 3225
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 5.012012004852295,
+ "learning_rate": 3.88888888888889e-06,
+ "loss": 0.2278,
+ "step": 3250
+ },
+ {
+ "epoch": 1.31,
+ "grad_norm": 7.632811069488525,
+ "learning_rate": 3.833333333333334e-06,
+ "loss": 0.2238,
+ "step": 3275
+ },
+ {
+ "epoch": 1.32,
+ "grad_norm": 6.163909912109375,
+ "learning_rate": 3.777777777777778e-06,
+ "loss": 0.2338,
+ "step": 3300
+ },
+ {
+ "epoch": 1.33,
+ "grad_norm": 7.474846839904785,
+ "learning_rate": 3.7222222222222225e-06,
+ "loss": 0.2217,
+ "step": 3325
+ },
+ {
+ "epoch": 1.34,
+ "grad_norm": 9.621731758117676,
+ "learning_rate": 3.6666666666666666e-06,
+ "loss": 0.226,
+ "step": 3350
+ },
+ {
+ "epoch": 1.35,
+ "grad_norm": 7.167582035064697,
+ "learning_rate": 3.6111111111111115e-06,
+ "loss": 0.2219,
+ "step": 3375
+ },
+ {
+ "epoch": 1.3599999999999999,
+ "grad_norm": 8.028946876525879,
+ "learning_rate": 3.555555555555556e-06,
+ "loss": 0.2243,
+ "step": 3400
+ },
+ {
+ "epoch": 1.37,
+ "grad_norm": 9.479345321655273,
+ "learning_rate": 3.5e-06,
+ "loss": 0.2486,
+ "step": 3425
+ },
+ {
+ "epoch": 1.38,
+ "grad_norm": 7.631903648376465,
+ "learning_rate": 3.444444444444445e-06,
+ "loss": 0.219,
+ "step": 3450
+ },
+ {
+ "epoch": 1.3900000000000001,
+ "grad_norm": 8.521742820739746,
+ "learning_rate": 3.3888888888888893e-06,
+ "loss": 0.2106,
+ "step": 3475
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 9.257588386535645,
+ "learning_rate": 3.3333333333333333e-06,
+ "loss": 0.225,
+ "step": 3500
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 10.871211051940918,
+ "learning_rate": 3.277777777777778e-06,
+ "loss": 0.2248,
+ "step": 3525
+ },
+ {
+ "epoch": 1.42,
+ "grad_norm": 5.058568954467773,
+ "learning_rate": 3.2222222222222227e-06,
+ "loss": 0.2315,
+ "step": 3550
+ },
+ {
+ "epoch": 1.43,
+ "grad_norm": 5.446198463439941,
+ "learning_rate": 3.1666666666666667e-06,
+ "loss": 0.2213,
+ "step": 3575
+ },
+ {
+ "epoch": 1.44,
+ "grad_norm": 7.046563148498535,
+ "learning_rate": 3.1111111111111116e-06,
+ "loss": 0.2029,
+ "step": 3600
+ },
+ {
+ "epoch": 1.45,
+ "grad_norm": 6.039666175842285,
+ "learning_rate": 3.055555555555556e-06,
+ "loss": 0.2046,
+ "step": 3625
+ },
+ {
+ "epoch": 1.46,
+ "grad_norm": 6.638487815856934,
+ "learning_rate": 3e-06,
+ "loss": 0.2218,
+ "step": 3650
+ },
+ {
+ "epoch": 1.47,
+ "grad_norm": 9.455551147460938,
+ "learning_rate": 2.944444444444445e-06,
+ "loss": 0.2252,
+ "step": 3675
+ },
+ {
+ "epoch": 1.48,
+ "grad_norm": 9.041964530944824,
+ "learning_rate": 2.888888888888889e-06,
+ "loss": 0.2082,
+ "step": 3700
+ },
+ {
+ "epoch": 1.49,
+ "grad_norm": 10.04900074005127,
+ "learning_rate": 2.8333333333333335e-06,
+ "loss": 0.2298,
+ "step": 3725
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 5.999593734741211,
+ "learning_rate": 2.7777777777777783e-06,
+ "loss": 0.2085,
+ "step": 3750
+ },
+ {
+ "epoch": 1.51,
+ "grad_norm": 7.189665794372559,
+ "learning_rate": 2.7222222222222224e-06,
+ "loss": 0.2016,
+ "step": 3775
+ },
+ {
+ "epoch": 1.52,
+ "grad_norm": 6.945767879486084,
+ "learning_rate": 2.666666666666667e-06,
+ "loss": 0.2196,
+ "step": 3800
+ },
+ {
+ "epoch": 1.53,
+ "grad_norm": 6.626684188842773,
+ "learning_rate": 2.6111111111111113e-06,
+ "loss": 0.1848,
+ "step": 3825
+ },
+ {
+ "epoch": 1.54,
+ "grad_norm": 8.809107780456543,
+ "learning_rate": 2.5555555555555557e-06,
+ "loss": 0.2567,
+ "step": 3850
+ },
+ {
+ "epoch": 1.55,
+ "grad_norm": 6.8933186531066895,
+ "learning_rate": 2.5e-06,
+ "loss": 0.2177,
+ "step": 3875
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 5.856971740722656,
+ "learning_rate": 2.4444444444444447e-06,
+ "loss": 0.214,
+ "step": 3900
+ },
+ {
+ "epoch": 1.5699999999999998,
+ "grad_norm": 6.439066410064697,
+ "learning_rate": 2.388888888888889e-06,
+ "loss": 0.2088,
+ "step": 3925
+ },
+ {
+ "epoch": 1.58,
+ "grad_norm": 6.070130348205566,
+ "learning_rate": 2.3333333333333336e-06,
+ "loss": 0.2085,
+ "step": 3950
+ },
+ {
+ "epoch": 1.5899999999999999,
+ "grad_norm": 7.12228536605835,
+ "learning_rate": 2.277777777777778e-06,
+ "loss": 0.1959,
+ "step": 3975
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 5.102252006530762,
+ "learning_rate": 2.222222222222222e-06,
+ "loss": 0.2183,
+ "step": 4000
+ },
+ {
+ "epoch": 1.6,
+ "eval_cer": 18.183709992924626,
+ "eval_loss": 0.3092849850654602,
+ "eval_runtime": 1753.7044,
+ "eval_samples_per_second": 2.244,
+ "eval_steps_per_second": 0.281,
+ "step": 4000
  }
  ],
  "logging_steps": 25,
@@ -881,7 +1170,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 1000,
- "total_flos": 1.385209921536e+19,
+ "total_flos": 1.846946562048e+19,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null