tyzhu committed
Commit bd65a20
1 Parent(s): 7e12f2f

End of training

Files changed (6):
  1. README.md +14 -2
  2. all_results.json +16 -0
  3. eval_results.json +10 -0
  4. tokenizer.json +1 -6
  5. train_results.json +9 -0
  6. trainer_state.json +295 -0
README.md CHANGED
@@ -3,11 +3,23 @@ license: other
 base_model: Qwen/Qwen1.5-4B
 tags:
 - generated_from_trainer
+datasets:
+- tyzhu/lmind_hotpot_train8000_eval7405_v1_qa
 metrics:
 - accuracy
 model-index:
 - name: lmind_hotpot_train8000_eval7405_v1_qa_Qwen_Qwen1.5-4B_lora2
-  results: []
+  results:
+  - task:
+      name: Causal Language Modeling
+      type: text-generation
+    dataset:
+      name: tyzhu/lmind_hotpot_train8000_eval7405_v1_qa
+      type: tyzhu/lmind_hotpot_train8000_eval7405_v1_qa
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.49263492063492065
 library_name: peft
 ---

@@ -16,7 +28,7 @@ should probably proofread and complete it, then remove this comment. -->

 # lmind_hotpot_train8000_eval7405_v1_qa_Qwen_Qwen1.5-4B_lora2

-This model is a fine-tuned version of [Qwen/Qwen1.5-4B](https://huggingface.co/Qwen/Qwen1.5-4B) on an unknown dataset.
+This model is a fine-tuned version of [Qwen/Qwen1.5-4B](https://huggingface.co/Qwen/Qwen1.5-4B) on the tyzhu/lmind_hotpot_train8000_eval7405_v1_qa dataset.
 It achieves the following results on the evaluation set:
 - Loss: 3.4933
 - Accuracy: 0.4926
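Since this repo ships a PEFT (LoRA) adapter rather than full weights, the usual way to use it is to load the base model and attach the adapter. A minimal sketch with the standard transformers/peft APIs; the adapter repo id is assumed from the model name above and the prompt is only an example:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Base model referenced in the model card.
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-4B")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-4B")

# Assumed adapter repo id, taken from the model-index name; adjust if it differs.
model = PeftModel.from_pretrained(
    base, "tyzhu/lmind_hotpot_train8000_eval7405_v1_qa_Qwen_Qwen1.5-4B_lora2"
)

inputs = tokenizer("Question: Who wrote Hamlet?\nAnswer:", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```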
all_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 10.0,
+    "eval_accuracy": 0.49263492063492065,
+    "eval_loss": 3.4933342933654785,
+    "eval_runtime": 8.3568,
+    "eval_samples": 500,
+    "eval_samples_per_second": 59.832,
+    "eval_steps_per_second": 7.539,
+    "perplexity": 32.89544812596355,
+    "total_flos": 6.467692909717094e+16,
+    "train_loss": 1.3154203582763673,
+    "train_runtime": 9112.5942,
+    "train_samples": 8000,
+    "train_samples_per_second": 8.779,
+    "train_steps_per_second": 0.274
+}
eval_results.json ADDED
@@ -0,0 +1,10 @@
+{
+    "epoch": 10.0,
+    "eval_accuracy": 0.49263492063492065,
+    "eval_loss": 3.4933342933654785,
+    "eval_runtime": 8.3568,
+    "eval_samples": 500,
+    "eval_samples_per_second": 59.832,
+    "eval_steps_per_second": 7.539,
+    "perplexity": 32.89544812596355
+}
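The reported perplexity is simply the exponential of the evaluation loss, so the two numbers above can be cross-checked directly (plain Python, no assumptions beyond the values in this file):

```python
import math

eval_loss = 3.4933342933654785
perplexity = math.exp(eval_loss)  # perplexity = exp(loss) for causal LM evaluation
print(perplexity)                 # ~32.895, matching the reported 32.89544812596355
```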
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 1024,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
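This change only removes the truncation setting (right-truncate to 1024 tokens) that had been baked into the serialized tokenizer; the vocabulary and added tokens are untouched. If the same behaviour is wanted at inference time, it can be requested per call. A small sketch, assuming the standard transformers tokenizer API; the input text is only a placeholder:

```python
from transformers import AutoTokenizer

# Load the tokenizer shipped with the base model (or this repo).
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-4B")

# Re-apply the truncation that tokenizer.json previously enforced by default.
enc = tokenizer("some long input text ...", truncation=True, max_length=1024)
print(len(enc["input_ids"]))
```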
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 10.0,
+    "total_flos": 6.467692909717094e+16,
+    "train_loss": 1.3154203582763673,
+    "train_runtime": 9112.5942,
+    "train_samples": 8000,
+    "train_samples_per_second": 8.779,
+    "train_steps_per_second": 0.274
+}
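These run-level numbers are internally consistent: 8,000 training samples over 10 epochs in ~9,113 s reproduces the reported throughput, and dividing the samples seen by the 2,500 optimizer steps recorded in trainer_state.json below implies roughly 32 samples per optimizer step (with train_batch_size 1, the rest presumably comes from gradient accumulation and/or multiple devices, which this commit does not record). A quick arithmetic check using only the values above:

```python
train_samples = 8000
epochs = 10
train_runtime = 9112.5942
steps = 2500  # global_step from trainer_state.json

print(train_samples * epochs / train_runtime)  # ~8.78 samples/s (reported 8.779)
print(steps / train_runtime)                   # ~0.274 steps/s  (reported 0.274)
print(train_samples * epochs / steps)          # 32.0 samples per optimizer step
```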
trainer_state.json ADDED
@@ -0,0 +1,295 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 10.0,
+  "eval_steps": 500,
+  "global_step": 2500,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.4,
+      "grad_norm": 0.5425021052360535,
+      "learning_rate": 0.0001,
+      "loss": 2.3548,
+      "step": 100
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 0.5427804589271545,
+      "learning_rate": 0.0001,
+      "loss": 2.2624,
+      "step": 200
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.5159047619047619,
+      "eval_loss": 2.3220136165618896,
+      "eval_runtime": 8.2469,
+      "eval_samples_per_second": 60.629,
+      "eval_steps_per_second": 7.639,
+      "step": 250
+    },
+    {
+      "epoch": 1.2,
+      "grad_norm": 0.7659817337989807,
+      "learning_rate": 0.0001,
+      "loss": 2.1894,
+      "step": 300
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 0.9417976140975952,
+      "learning_rate": 0.0001,
+      "loss": 2.1155,
+      "step": 400
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 0.8788428902626038,
+      "learning_rate": 0.0001,
+      "loss": 2.0942,
+      "step": 500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.5176190476190476,
+      "eval_loss": 2.32893443107605,
+      "eval_runtime": 6.9616,
+      "eval_samples_per_second": 71.822,
+      "eval_steps_per_second": 9.05,
+      "step": 500
+    },
+    {
+      "epoch": 2.4,
+      "grad_norm": 1.2184635400772095,
+      "learning_rate": 0.0001,
+      "loss": 1.8388,
+      "step": 600
+    },
+    {
+      "epoch": 2.8,
+      "grad_norm": 1.161513090133667,
+      "learning_rate": 0.0001,
+      "loss": 1.8479,
+      "step": 700
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.5147619047619048,
+      "eval_loss": 2.3996665477752686,
+      "eval_runtime": 8.8091,
+      "eval_samples_per_second": 56.76,
+      "eval_steps_per_second": 7.152,
+      "step": 750
+    },
+    {
+      "epoch": 3.2,
+      "grad_norm": 1.4641433954238892,
+      "learning_rate": 0.0001,
+      "loss": 1.7257,
+      "step": 800
+    },
+    {
+      "epoch": 3.6,
+      "grad_norm": 1.4882707595825195,
+      "learning_rate": 0.0001,
+      "loss": 1.5898,
+      "step": 900
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 1.650571584701538,
+      "learning_rate": 0.0001,
+      "loss": 1.6153,
+      "step": 1000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.5106984126984127,
+      "eval_loss": 2.506742000579834,
+      "eval_runtime": 9.1456,
+      "eval_samples_per_second": 54.671,
+      "eval_steps_per_second": 6.889,
+      "step": 1000
+    },
+    {
+      "epoch": 4.4,
+      "grad_norm": 1.7832984924316406,
+      "learning_rate": 0.0001,
+      "loss": 1.328,
+      "step": 1100
+    },
+    {
+      "epoch": 4.8,
+      "grad_norm": 2.0173535346984863,
+      "learning_rate": 0.0001,
+      "loss": 1.3618,
+      "step": 1200
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.5052063492063492,
+      "eval_loss": 2.6640872955322266,
+      "eval_runtime": 7.8899,
+      "eval_samples_per_second": 63.372,
+      "eval_steps_per_second": 7.985,
+      "step": 1250
+    },
+    {
+      "epoch": 5.2,
+      "grad_norm": 1.9236400127410889,
+      "learning_rate": 0.0001,
+      "loss": 1.2386,
+      "step": 1300
+    },
+    {
+      "epoch": 5.6,
+      "grad_norm": 1.9365407228469849,
+      "learning_rate": 0.0001,
+      "loss": 1.1149,
+      "step": 1400
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 2.087843656539917,
+      "learning_rate": 0.0001,
+      "loss": 1.1477,
+      "step": 1500
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.5015873015873016,
+      "eval_loss": 2.8410720825195312,
+      "eval_runtime": 7.5175,
+      "eval_samples_per_second": 66.512,
+      "eval_steps_per_second": 8.38,
+      "step": 1500
+    },
+    {
+      "epoch": 6.4,
+      "grad_norm": 2.397096872329712,
+      "learning_rate": 0.0001,
+      "loss": 0.8925,
+      "step": 1600
+    },
+    {
+      "epoch": 6.8,
+      "grad_norm": 2.441624641418457,
+      "learning_rate": 0.0001,
+      "loss": 0.9248,
+      "step": 1700
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.49777777777777776,
+      "eval_loss": 3.0245563983917236,
+      "eval_runtime": 8.2076,
+      "eval_samples_per_second": 60.919,
+      "eval_steps_per_second": 7.676,
+      "step": 1750
+    },
+    {
+      "epoch": 7.2,
+      "grad_norm": 2.44694185256958,
+      "learning_rate": 0.0001,
+      "loss": 0.821,
+      "step": 1800
+    },
+    {
+      "epoch": 7.6,
+      "grad_norm": 2.26471209526062,
+      "learning_rate": 0.0001,
+      "loss": 0.7379,
+      "step": 1900
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 2.3444228172302246,
+      "learning_rate": 0.0001,
+      "loss": 0.7705,
+      "step": 2000
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.4953968253968254,
+      "eval_loss": 3.2090303897857666,
+      "eval_runtime": 7.2355,
+      "eval_samples_per_second": 69.104,
+      "eval_steps_per_second": 8.707,
+      "step": 2000
+    },
+    {
+      "epoch": 8.4,
+      "grad_norm": 2.152717113494873,
+      "learning_rate": 0.0001,
+      "loss": 0.601,
+      "step": 2100
+    },
+    {
+      "epoch": 8.8,
+      "grad_norm": 2.0435783863067627,
+      "learning_rate": 0.0001,
+      "loss": 0.6344,
+      "step": 2200
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 0.4934920634920635,
+      "eval_loss": 3.339984893798828,
+      "eval_runtime": 7.5883,
+      "eval_samples_per_second": 65.891,
+      "eval_steps_per_second": 8.302,
+      "step": 2250
+    },
+    {
+      "epoch": 9.2,
+      "grad_norm": 1.7771090269088745,
+      "learning_rate": 0.0001,
+      "loss": 0.5798,
+      "step": 2300
+    },
+    {
+      "epoch": 9.6,
+      "grad_norm": 1.9982362985610962,
+      "learning_rate": 0.0001,
+      "loss": 0.5377,
+      "step": 2400
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 1.932521104812622,
+      "learning_rate": 0.0001,
+      "loss": 0.5612,
+      "step": 2500
+    },
+    {
+      "epoch": 10.0,
+      "eval_accuracy": 0.49263492063492065,
+      "eval_loss": 3.4933342933654785,
+      "eval_runtime": 7.1358,
+      "eval_samples_per_second": 70.069,
+      "eval_steps_per_second": 8.829,
+      "step": 2500
+    },
+    {
+      "epoch": 10.0,
+      "step": 2500,
+      "total_flos": 6.467692909717094e+16,
+      "train_loss": 1.3154203582763673,
+      "train_runtime": 9112.5942,
+      "train_samples_per_second": 8.779,
+      "train_steps_per_second": 0.274
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 2500,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 500,
+  "total_flos": 6.467692909717094e+16,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
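The log above shows training loss falling from ~2.35 to ~0.56 while eval loss climbs from ~2.32 to ~3.49 and eval accuracy drifts from a ~0.518 peak at epoch 2 down to ~0.493, so the adapter appears to overfit the 8,000 QA pairs after the first couple of epochs. A small sketch for pulling that evaluation curve out of trainer_state.json; the local file path is assumed:

```python
import json

# Assumed path to the file shown in this commit (e.g. inside a checkpoint dir).
with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation records are the log_history entries that carry "eval_loss".
evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(f"epoch {e['epoch']:>4}: eval_loss={e['eval_loss']:.4f}  "
          f"eval_accuracy={e['eval_accuracy']:.4f}")
```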