Technotech committed on
Commit d5cd382
1 Parent(s): 5d3e445

Upload 48 files

Files changed (48)
  1. README.md +18 -1
  2. adapter_config.json +21 -0
  3. adapter_model.bin +3 -0
  4. checkpoint-3400/README.md +20 -0
  5. checkpoint-3400/adapter_config.json +21 -0
  6. checkpoint-3400/adapter_model.bin +3 -0
  7. checkpoint-3400/optimizer.pt +3 -0
  8. checkpoint-3400/rng_state.pth +3 -0
  9. checkpoint-3400/scheduler.pt +3 -0
  10. checkpoint-3400/trainer_state.json +424 -0
  11. checkpoint-3400/training_args.bin +3 -0
  12. checkpoint-3600/README.md +20 -0
  13. checkpoint-3600/adapter_config.json +21 -0
  14. checkpoint-3600/adapter_model.bin +3 -0
  15. checkpoint-3600/optimizer.pt +3 -0
  16. checkpoint-3600/rng_state.pth +3 -0
  17. checkpoint-3600/scheduler.pt +3 -0
  18. checkpoint-3600/trainer_state.json +448 -0
  19. checkpoint-3600/training_args.bin +3 -0
  20. checkpoint-3800/README.md +20 -0
  21. checkpoint-3800/adapter_config.json +21 -0
  22. checkpoint-3800/adapter_model.bin +3 -0
  23. checkpoint-3800/optimizer.pt +3 -0
  24. checkpoint-3800/rng_state.pth +3 -0
  25. checkpoint-3800/scheduler.pt +3 -0
  26. checkpoint-3800/trainer_state.json +472 -0
  27. checkpoint-3800/training_args.bin +3 -0
  28. checkpoint-4000/README.md +20 -0
  29. checkpoint-4000/adapter_config.json +21 -0
  30. checkpoint-4000/adapter_model.bin +3 -0
  31. checkpoint-4000/optimizer.pt +3 -0
  32. checkpoint-4000/rng_state.pth +3 -0
  33. checkpoint-4000/scheduler.pt +3 -0
  34. checkpoint-4000/trainer_state.json +496 -0
  35. checkpoint-4000/training_args.bin +3 -0
  36. checkpoint-4200/README.md +20 -0
  37. checkpoint-4200/adapter_config.json +21 -0
  38. checkpoint-4200/adapter_model.bin +3 -0
  39. checkpoint-4200/optimizer.pt +3 -0
  40. checkpoint-4200/rng_state.pth +3 -0
  41. checkpoint-4200/scheduler.pt +3 -0
  42. checkpoint-4200/trainer_state.json +520 -0
  43. checkpoint-4200/training_args.bin +3 -0
  44. optimizer.pt +3 -0
  45. rng_state.pth +3 -0
  46. scheduler.pt +3 -0
  47. trainer_state.json +520 -0
  48. training_args.bin +3 -0
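
The listing above mirrors a standard `transformers` output directory: one top-level PEFT adapter plus five `Trainer` checkpoints (steps 3400–4200, saved every 200 steps). To fetch this exact commit locally, `huggingface_hub` can snapshot the revision. A minimal sketch — note the repo id below is a hypothetical placeholder, since only the commit hash `d5cd382` appears on this page:

```python
# Minimal sketch: download every file from this commit with huggingface_hub.
# NOTE: "Technotech/example-adapter" is a hypothetical repo id; the commit page
# shows only the revision hash, not the repository name.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="Technotech/example-adapter",  # hypothetical placeholder
    revision="d5cd382",                    # the commit shown above
)
print(local_dir)  # path to the cached snapshot containing all 48 files
```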
README.md CHANGED
@@ -1,3 +1,20 @@
  ---
- license: apache-2.0
+ library_name: peft
  ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
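
For reference, the flag list in the README corresponds to 4-bit fp4 quantization with float32 compute. A minimal sketch of the same settings expressed as a `transformers` `BitsAndBytesConfig` (assuming a transformers version that ships it, ≥ 4.30, with bitsandbytes installed); the field names map one-to-one onto the bullets above:

```python
# Sketch: the quantization config listed in the README as a BitsAndBytesConfig.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # load_in_4bit: True / load_in_8bit: False
    bnb_4bit_quant_type="fp4",             # bnb_4bit_quant_type: fp4
    bnb_4bit_use_double_quant=False,       # bnb_4bit_use_double_quant: False
    bnb_4bit_compute_dtype=torch.float32,  # bnb_4bit_compute_dtype: float32
    llm_int8_threshold=6.0,                # llm_int8_threshold: 6.0
)
```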
adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "/kaggle/working/ez-trainer/models/open_llama_3b_v2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
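
The adapter config describes a rank-8 LoRA on the attention query and value projections of OpenLLaMA 3B v2. Rebuilt with `peft`'s `LoraConfig`, a minimal sketch (fields map directly onto the JSON above):

```python
# Sketch: the same LoRA adapter settings expressed via peft's LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                                  # "r": 8
    lora_alpha=8,                         # "lora_alpha": 8
    lora_dropout=0.0,                     # "lora_dropout": 0.0
    bias="none",                          # "bias": "none"
    target_modules=["q_proj", "v_proj"],  # attention query/value projections
    task_type="CAUSAL_LM",                # "task_type": "CAUSAL_LM"
)
```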
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0d86f569a939cba8b99aaf66b94bee9b9b22b6f24d8ace5e340d6d5788f072c
+ size 10686701
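
The `.bin`/`.pt`/`.pth` entries in this commit are Git LFS pointer files: three lines giving the spec version, a sha256 oid, and the byte size of the real blob. A standard-library sketch that parses a pointer and checks a downloaded blob against it (the paths passed in are illustrative):

```python
# Sketch: parse a Git LFS pointer file and verify a blob against it.
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    # Each line is "key value", e.g. "oid sha256:b0d8..." and "size 10686701".
    fields = dict(line.split(" ", 1)
                  for line in Path(pointer_path).read_text().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"])}

def verify_blob(blob_path: str, pointer: dict) -> bool:
    data = Path(blob_path).read_bytes()
    return (len(data) == pointer["size"]
            and hashlib.sha256(data).hexdigest() == pointer["oid"])
```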
checkpoint-3400/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-3400/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "/kaggle/working/ez-trainer/models/open_llama_3b_v2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
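
Every checkpoint carries an identical copy of this adapter config, so any of them can be loaded directly onto the base model. A minimal inference sketch, assuming the Kaggle-local base path in the config corresponds to the public `openlm-research/open_llama_3b_v2` weights:

```python
# Sketch: attach a checkpoint's LoRA adapter to the base model for inference.
# Assumes the local path in adapter_config.json maps to the public hub repo.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("openlm-research/open_llama_3b_v2")
model = PeftModel.from_pretrained(base, "checkpoint-3400")  # any checkpoint dir
model.eval()
```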
checkpoint-3400/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb3bd196e5f56ee1c75a8f4041b13d8b1a8c53d81a5a8d7800d20c14a3c88733
+ size 10686701
checkpoint-3400/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18fa321f6b14fc3429d22f5ca9d9215f49d15e8bdfc878ebc8e80616e1ab8ee0
+ size 21389189
checkpoint-3400/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2278a87cdf86c3f9219223c847f6b27f6b7f15b8226b617f38936e8ff2cbcde
+ size 14575
checkpoint-3400/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24fa2cc341cf5db64be1ae6ed119c4e53feec7613872b3d7ca4b9822e43bab0e
+ size 627
checkpoint-3400/trainer_state.json ADDED
@@ -0,0 +1,424 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.33206367809356385,
+   "global_step": 3400,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.0, "learning_rate": 5e-05, "loss": 3.3163, "step": 50},
+     {"epoch": 0.01, "learning_rate": 0.0001, "loss": 2.491, "step": 100},
+     {"epoch": 0.01, "learning_rate": 0.0001, "loss": 2.0415, "step": 150},
+     {"epoch": 0.02, "learning_rate": 0.0001, "loss": 1.9681, "step": 200},
+     {"epoch": 0.02, "learning_rate": 0.0001, "loss": 1.8944, "step": 250},
+     {"epoch": 0.03, "learning_rate": 0.0001, "loss": 1.8218, "step": 300},
+     {"epoch": 0.03, "learning_rate": 0.0001, "loss": 1.7536, "step": 350},
+     {"epoch": 0.04, "learning_rate": 0.0001, "loss": 1.681, "step": 400},
+     {"epoch": 0.04, "learning_rate": 0.0001, "loss": 1.6851, "step": 450},
+     {"epoch": 0.05, "learning_rate": 0.0001, "loss": 1.6193, "step": 500},
+     {"epoch": 0.05, "learning_rate": 0.0001, "loss": 1.6291, "step": 550},
+     {"epoch": 0.06, "learning_rate": 0.0001, "loss": 1.6241, "step": 600},
+     {"epoch": 0.06, "learning_rate": 0.0001, "loss": 1.5997, "step": 650},
+     {"epoch": 0.07, "learning_rate": 0.0001, "loss": 1.5744, "step": 700},
+     {"epoch": 0.07, "learning_rate": 0.0001, "loss": 1.6018, "step": 750},
+     {"epoch": 0.08, "learning_rate": 0.0001, "loss": 1.6006, "step": 800},
+     {"epoch": 0.08, "learning_rate": 0.0001, "loss": 1.5671, "step": 850},
+     {"epoch": 0.09, "learning_rate": 0.0001, "loss": 1.5721, "step": 900},
+     {"epoch": 0.09, "learning_rate": 0.0001, "loss": 1.5573, "step": 950},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.5539, "step": 1000},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.542, "step": 1050},
+     {"epoch": 0.11, "learning_rate": 0.0001, "loss": 1.5165, "step": 1100},
+     {"epoch": 0.11, "learning_rate": 0.0001, "loss": 1.5797, "step": 1150},
+     {"epoch": 0.12, "learning_rate": 0.0001, "loss": 1.5609, "step": 1200},
+     {"epoch": 0.12, "learning_rate": 0.0001, "loss": 1.5435, "step": 1250},
+     {"epoch": 0.13, "learning_rate": 0.0001, "loss": 1.5645, "step": 1300},
+     {"epoch": 0.13, "learning_rate": 0.0001, "loss": 1.5258, "step": 1350},
+     {"epoch": 0.14, "learning_rate": 0.0001, "loss": 1.4773, "step": 1400},
+     {"epoch": 0.14, "learning_rate": 0.0001, "loss": 1.4825, "step": 1450},
+     {"epoch": 0.15, "learning_rate": 0.0001, "loss": 1.5124, "step": 1500},
+     {"epoch": 0.15, "learning_rate": 0.0001, "loss": 1.4963, "step": 1550},
+     {"epoch": 0.16, "learning_rate": 0.0001, "loss": 1.4861, "step": 1600},
+     {"epoch": 0.16, "learning_rate": 0.0001, "loss": 1.5275, "step": 1650},
+     {"epoch": 0.17, "learning_rate": 0.0001, "loss": 1.484, "step": 1700},
+     {"epoch": 0.17, "learning_rate": 0.0001, "loss": 1.5349, "step": 1750},
+     {"epoch": 0.18, "learning_rate": 0.0001, "loss": 1.4641, "step": 1800},
+     {"epoch": 0.18, "learning_rate": 0.0001, "loss": 1.4752, "step": 1850},
+     {"epoch": 0.19, "learning_rate": 0.0001, "loss": 1.4475, "step": 1900},
+     {"epoch": 0.19, "learning_rate": 0.0001, "loss": 1.5358, "step": 1950},
+     {"epoch": 0.2, "learning_rate": 0.0001, "loss": 1.4652, "step": 2000},
+     {"epoch": 0.2, "learning_rate": 0.0001, "loss": 1.419, "step": 2050},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4588, "step": 2100},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4593, "step": 2150},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4604, "step": 2200},
+     {"epoch": 0.22, "learning_rate": 0.0001, "loss": 1.4598, "step": 2250},
+     {"epoch": 0.22, "learning_rate": 0.0001, "loss": 1.482, "step": 2300},
+     {"epoch": 0.23, "learning_rate": 0.0001, "loss": 1.4749, "step": 2350},
+     {"epoch": 0.23, "learning_rate": 0.0001, "loss": 1.4718, "step": 2400},
+     {"epoch": 0.24, "learning_rate": 0.0001, "loss": 1.4343, "step": 2450},
+     {"epoch": 0.24, "learning_rate": 0.0001, "loss": 1.4861, "step": 2500},
+     {"epoch": 0.25, "learning_rate": 0.0001, "loss": 1.4717, "step": 2550},
+     {"epoch": 0.25, "learning_rate": 0.0001, "loss": 1.4599, "step": 2600},
+     {"epoch": 0.26, "learning_rate": 0.0001, "loss": 1.4746, "step": 2650},
+     {"epoch": 0.26, "learning_rate": 0.0001, "loss": 1.4517, "step": 2700},
+     {"epoch": 0.27, "learning_rate": 0.0001, "loss": 1.4267, "step": 2750},
+     {"epoch": 0.27, "learning_rate": 0.0001, "loss": 1.4303, "step": 2800},
+     {"epoch": 0.28, "learning_rate": 0.0001, "loss": 1.4146, "step": 2850},
+     {"epoch": 0.28, "learning_rate": 0.0001, "loss": 1.4245, "step": 2900},
+     {"epoch": 0.29, "learning_rate": 0.0001, "loss": 1.4356, "step": 2950},
+     {"epoch": 0.29, "learning_rate": 0.0001, "loss": 1.4183, "step": 3000},
+     {"epoch": 0.3, "learning_rate": 0.0001, "loss": 1.423, "step": 3050},
+     {"epoch": 0.3, "learning_rate": 0.0001, "loss": 1.4854, "step": 3100},
+     {"epoch": 0.31, "learning_rate": 0.0001, "loss": 1.4552, "step": 3150},
+     {"epoch": 0.31, "learning_rate": 0.0001, "loss": 1.456, "step": 3200},
+     {"epoch": 0.32, "learning_rate": 0.0001, "loss": 1.4202, "step": 3250},
+     {"epoch": 0.32, "learning_rate": 0.0001, "loss": 1.4025, "step": 3300},
+     {"epoch": 0.33, "learning_rate": 0.0001, "loss": 1.4134, "step": 3350},
+     {"epoch": 0.33, "learning_rate": 0.0001, "loss": 1.4282, "step": 3400}
+   ],
+   "max_steps": 20478,
+   "num_train_epochs": 2,
+   "total_flos": 5.1211908863232e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
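
`trainer_state.json` is plain JSON, so the loss curve above (3.32 at step 50 down to 1.43 at step 3400) can be pulled out with the standard library:

```python
# Sketch: extract the logged loss curve from a saved trainer_state.json.
import json

with open("checkpoint-3400/trainer_state.json") as f:
    state = json.load(f)

points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(points[0])   # (50, 3.3163)
print(points[-1])  # (3400, 1.4282)
```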
checkpoint-3400/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af5a5e7107b6cd66fb7591ec5f90dff0454583f7b3c471f207ac4b50ee600ca7
+ size 3899
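
Together, `optimizer.pt`, `scheduler.pt`, `rng_state.pth`, `trainer_state.json`, and `training_args.bin` are exactly what `transformers.Trainer` needs to resume a run. A minimal sketch: `training_args.bin` is a pickled `TrainingArguments` object saved with `torch.save`, so it can be inspected directly, and a `Trainer` rebuilt around the original model and dataset can pick up from the checkpoint directory:

```python
# Sketch: inspect the saved TrainingArguments and resume from the checkpoint.
import torch

args = torch.load("checkpoint-3400/training_args.bin", weights_only=False)
print(args.learning_rate, args.max_steps)

# With the original model and dataset wired back into a Trainer built from
# these args, training resumes where the checkpoint left off:
#   trainer.train(resume_from_checkpoint="checkpoint-3400")
```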
checkpoint-3600/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-3600/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "/kaggle/working/ez-trainer/models/open_llama_3b_v2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-3600/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:098858796e87f64ede10f2fd9bd0a14b7f45abf65cd79e71c59e56f1594e05d7
+ size 10686701
checkpoint-3600/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e778d9217527eaebbe8a6476ad6dee284a64a53f0b33e44d2851899528b3433f
+ size 21389189
checkpoint-3600/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2278a87cdf86c3f9219223c847f6b27f6b7f15b8226b617f38936e8ff2cbcde
+ size 14575
checkpoint-3600/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08bebec12b6f8f0fa31001d9b50358312390d03174e7a26774051200ad81f734
+ size 627
checkpoint-3600/trainer_state.json ADDED
@@ -0,0 +1,448 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.3515968356284793,
+   "global_step": 3600,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.0, "learning_rate": 5e-05, "loss": 3.3163, "step": 50},
+     {"epoch": 0.01, "learning_rate": 0.0001, "loss": 2.491, "step": 100},
+     {"epoch": 0.01, "learning_rate": 0.0001, "loss": 2.0415, "step": 150},
+     {"epoch": 0.02, "learning_rate": 0.0001, "loss": 1.9681, "step": 200},
+     {"epoch": 0.02, "learning_rate": 0.0001, "loss": 1.8944, "step": 250},
+     {"epoch": 0.03, "learning_rate": 0.0001, "loss": 1.8218, "step": 300},
+     {"epoch": 0.03, "learning_rate": 0.0001, "loss": 1.7536, "step": 350},
+     {"epoch": 0.04, "learning_rate": 0.0001, "loss": 1.681, "step": 400},
+     {"epoch": 0.04, "learning_rate": 0.0001, "loss": 1.6851, "step": 450},
+     {"epoch": 0.05, "learning_rate": 0.0001, "loss": 1.6193, "step": 500},
+     {"epoch": 0.05, "learning_rate": 0.0001, "loss": 1.6291, "step": 550},
+     {"epoch": 0.06, "learning_rate": 0.0001, "loss": 1.6241, "step": 600},
+     {"epoch": 0.06, "learning_rate": 0.0001, "loss": 1.5997, "step": 650},
+     {"epoch": 0.07, "learning_rate": 0.0001, "loss": 1.5744, "step": 700},
+     {"epoch": 0.07, "learning_rate": 0.0001, "loss": 1.6018, "step": 750},
+     {"epoch": 0.08, "learning_rate": 0.0001, "loss": 1.6006, "step": 800},
+     {"epoch": 0.08, "learning_rate": 0.0001, "loss": 1.5671, "step": 850},
+     {"epoch": 0.09, "learning_rate": 0.0001, "loss": 1.5721, "step": 900},
+     {"epoch": 0.09, "learning_rate": 0.0001, "loss": 1.5573, "step": 950},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.5539, "step": 1000},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.542, "step": 1050},
+     {"epoch": 0.11, "learning_rate": 0.0001, "loss": 1.5165, "step": 1100},
+     {"epoch": 0.11, "learning_rate": 0.0001, "loss": 1.5797, "step": 1150},
+     {"epoch": 0.12, "learning_rate": 0.0001, "loss": 1.5609, "step": 1200},
+     {"epoch": 0.12, "learning_rate": 0.0001, "loss": 1.5435, "step": 1250},
+     {"epoch": 0.13, "learning_rate": 0.0001, "loss": 1.5645, "step": 1300},
+     {"epoch": 0.13, "learning_rate": 0.0001, "loss": 1.5258, "step": 1350},
+     {"epoch": 0.14, "learning_rate": 0.0001, "loss": 1.4773, "step": 1400},
+     {"epoch": 0.14, "learning_rate": 0.0001, "loss": 1.4825, "step": 1450},
+     {"epoch": 0.15, "learning_rate": 0.0001, "loss": 1.5124, "step": 1500},
+     {"epoch": 0.15, "learning_rate": 0.0001, "loss": 1.4963, "step": 1550},
+     {"epoch": 0.16, "learning_rate": 0.0001, "loss": 1.4861, "step": 1600},
+     {"epoch": 0.16, "learning_rate": 0.0001, "loss": 1.5275, "step": 1650},
+     {"epoch": 0.17, "learning_rate": 0.0001, "loss": 1.484, "step": 1700},
+     {"epoch": 0.17, "learning_rate": 0.0001, "loss": 1.5349, "step": 1750},
+     {"epoch": 0.18, "learning_rate": 0.0001, "loss": 1.4641, "step": 1800},
+     {"epoch": 0.18, "learning_rate": 0.0001, "loss": 1.4752, "step": 1850},
+     {"epoch": 0.19, "learning_rate": 0.0001, "loss": 1.4475, "step": 1900},
+     {"epoch": 0.19, "learning_rate": 0.0001, "loss": 1.5358, "step": 1950},
+     {"epoch": 0.2, "learning_rate": 0.0001, "loss": 1.4652, "step": 2000},
+     {"epoch": 0.2, "learning_rate": 0.0001, "loss": 1.419, "step": 2050},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4588, "step": 2100},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4593, "step": 2150},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4604, "step": 2200},
+     {"epoch": 0.22, "learning_rate": 0.0001, "loss": 1.4598, "step": 2250},
+     {"epoch": 0.22, "learning_rate": 0.0001, "loss": 1.482, "step": 2300},
+     {"epoch": 0.23, "learning_rate": 0.0001, "loss": 1.4749, "step": 2350},
+     {"epoch": 0.23, "learning_rate": 0.0001, "loss": 1.4718, "step": 2400},
+     {"epoch": 0.24, "learning_rate": 0.0001, "loss": 1.4343, "step": 2450},
+     {"epoch": 0.24, "learning_rate": 0.0001, "loss": 1.4861, "step": 2500},
+     {"epoch": 0.25, "learning_rate": 0.0001, "loss": 1.4717, "step": 2550},
+     {"epoch": 0.25, "learning_rate": 0.0001, "loss": 1.4599, "step": 2600},
+     {"epoch": 0.26, "learning_rate": 0.0001, "loss": 1.4746, "step": 2650},
+     {"epoch": 0.26, "learning_rate": 0.0001, "loss": 1.4517, "step": 2700},
+     {"epoch": 0.27, "learning_rate": 0.0001, "loss": 1.4267, "step": 2750},
+     {"epoch": 0.27, "learning_rate": 0.0001, "loss": 1.4303, "step": 2800},
+     {"epoch": 0.28, "learning_rate": 0.0001, "loss": 1.4146, "step": 2850},
+     {"epoch": 0.28, "learning_rate": 0.0001, "loss": 1.4245, "step": 2900},
+     {"epoch": 0.29, "learning_rate": 0.0001, "loss": 1.4356, "step": 2950},
+     {"epoch": 0.29, "learning_rate": 0.0001, "loss": 1.4183, "step": 3000},
+     {"epoch": 0.3, "learning_rate": 0.0001, "loss": 1.423, "step": 3050},
+     {"epoch": 0.3, "learning_rate": 0.0001, "loss": 1.4854, "step": 3100},
+     {"epoch": 0.31, "learning_rate": 0.0001, "loss": 1.4552, "step": 3150},
+     {"epoch": 0.31, "learning_rate": 0.0001, "loss": 1.456, "step": 3200},
+     {"epoch": 0.32, "learning_rate": 0.0001, "loss": 1.4202, "step": 3250},
+     {"epoch": 0.32, "learning_rate": 0.0001, "loss": 1.4025, "step": 3300},
+     {"epoch": 0.33, "learning_rate": 0.0001, "loss": 1.4134, "step": 3350},
+     {"epoch": 0.33, "learning_rate": 0.0001, "loss": 1.4282, "step": 3400},
+     {"epoch": 0.34, "learning_rate": 0.0001, "loss": 1.4256, "step": 3450},
+     {"epoch": 0.34, "learning_rate": 0.0001, "loss": 1.4056, "step": 3500},
+     {"epoch": 0.35, "learning_rate": 0.0001, "loss": 1.3779, "step": 3550},
+     {"epoch": 0.35, "learning_rate": 0.0001, "loss": 1.3857, "step": 3600}
+   ],
+   "max_steps": 20478,
+   "num_train_epochs": 2,
+   "total_flos": 5.4220124910336e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-3600/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af5a5e7107b6cd66fb7591ec5f90dff0454583f7b3c471f207ac4b50ee600ca7
+ size 3899
checkpoint-3800/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-3800/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "/kaggle/working/ez-trainer/models/open_llama_3b_v2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-3800/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60dc40334601e9811def6fe32914a5ff3421fed446c6e6c0c32e4a4eef3777fe
+ size 10686701
checkpoint-3800/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2920519a7f1ae8d338fdf7f597214557b8e46795c9a8e26803e0cd09662c426a
+ size 21389189
checkpoint-3800/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2278a87cdf86c3f9219223c847f6b27f6b7f15b8226b617f38936e8ff2cbcde
+ size 14575
checkpoint-3800/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3123ba61d8dfe6c3f0e315673366423c43af4c672afd33ace336f5de34c3fa83
+ size 627
checkpoint-3800/trainer_state.json ADDED
@@ -0,0 +1,472 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.37112999316339484,
+   "global_step": 3800,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.0, "learning_rate": 5e-05, "loss": 3.3163, "step": 50},
+     {"epoch": 0.01, "learning_rate": 0.0001, "loss": 2.491, "step": 100},
+     {"epoch": 0.01, "learning_rate": 0.0001, "loss": 2.0415, "step": 150},
+     {"epoch": 0.02, "learning_rate": 0.0001, "loss": 1.9681, "step": 200},
+     {"epoch": 0.02, "learning_rate": 0.0001, "loss": 1.8944, "step": 250},
+     {"epoch": 0.03, "learning_rate": 0.0001, "loss": 1.8218, "step": 300},
+     {"epoch": 0.03, "learning_rate": 0.0001, "loss": 1.7536, "step": 350},
+     {"epoch": 0.04, "learning_rate": 0.0001, "loss": 1.681, "step": 400},
+     {"epoch": 0.04, "learning_rate": 0.0001, "loss": 1.6851, "step": 450},
+     {"epoch": 0.05, "learning_rate": 0.0001, "loss": 1.6193, "step": 500},
+     {"epoch": 0.05, "learning_rate": 0.0001, "loss": 1.6291, "step": 550},
+     {"epoch": 0.06, "learning_rate": 0.0001, "loss": 1.6241, "step": 600},
+     {"epoch": 0.06, "learning_rate": 0.0001, "loss": 1.5997, "step": 650},
+     {"epoch": 0.07, "learning_rate": 0.0001, "loss": 1.5744, "step": 700},
+     {"epoch": 0.07, "learning_rate": 0.0001, "loss": 1.6018, "step": 750},
+     {"epoch": 0.08, "learning_rate": 0.0001, "loss": 1.6006, "step": 800},
+     {"epoch": 0.08, "learning_rate": 0.0001, "loss": 1.5671, "step": 850},
+     {"epoch": 0.09, "learning_rate": 0.0001, "loss": 1.5721, "step": 900},
+     {"epoch": 0.09, "learning_rate": 0.0001, "loss": 1.5573, "step": 950},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.5539, "step": 1000},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.542, "step": 1050},
+     {"epoch": 0.11, "learning_rate": 0.0001, "loss": 1.5165, "step": 1100},
+     {"epoch": 0.11, "learning_rate": 0.0001, "loss": 1.5797, "step": 1150},
+     {"epoch": 0.12, "learning_rate": 0.0001, "loss": 1.5609, "step": 1200},
+     {"epoch": 0.12, "learning_rate": 0.0001, "loss": 1.5435, "step": 1250},
+     {"epoch": 0.13, "learning_rate": 0.0001, "loss": 1.5645, "step": 1300},
+     {"epoch": 0.13, "learning_rate": 0.0001, "loss": 1.5258, "step": 1350},
+     {"epoch": 0.14, "learning_rate": 0.0001, "loss": 1.4773, "step": 1400},
+     {"epoch": 0.14, "learning_rate": 0.0001, "loss": 1.4825, "step": 1450},
+     {"epoch": 0.15, "learning_rate": 0.0001, "loss": 1.5124, "step": 1500},
+     {"epoch": 0.15, "learning_rate": 0.0001, "loss": 1.4963, "step": 1550},
+     {"epoch": 0.16, "learning_rate": 0.0001, "loss": 1.4861, "step": 1600},
+     {"epoch": 0.16, "learning_rate": 0.0001, "loss": 1.5275, "step": 1650},
+     {"epoch": 0.17, "learning_rate": 0.0001, "loss": 1.484, "step": 1700},
+     {"epoch": 0.17, "learning_rate": 0.0001, "loss": 1.5349, "step": 1750},
+     {"epoch": 0.18, "learning_rate": 0.0001, "loss": 1.4641, "step": 1800},
+     {"epoch": 0.18, "learning_rate": 0.0001, "loss": 1.4752, "step": 1850},
+     {"epoch": 0.19, "learning_rate": 0.0001, "loss": 1.4475, "step": 1900},
+     {"epoch": 0.19, "learning_rate": 0.0001, "loss": 1.5358, "step": 1950},
+     {"epoch": 0.2, "learning_rate": 0.0001, "loss": 1.4652, "step": 2000},
+     {"epoch": 0.2, "learning_rate": 0.0001, "loss": 1.419, "step": 2050},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4588, "step": 2100},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4593, "step": 2150},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4604, "step": 2200},
+     {"epoch": 0.22, "learning_rate": 0.0001, "loss": 1.4598, "step": 2250},
+     {"epoch": 0.22, "learning_rate": 0.0001, "loss": 1.482, "step": 2300},
+     {"epoch": 0.23, "learning_rate": 0.0001, "loss": 1.4749, "step": 2350},
+     {"epoch": 0.23, "learning_rate": 0.0001, "loss": 1.4718, "step": 2400},
+     {"epoch": 0.24, "learning_rate": 0.0001, "loss": 1.4343, "step": 2450},
+     {"epoch": 0.24, "learning_rate": 0.0001, "loss": 1.4861, "step": 2500},
+     {"epoch": 0.25, "learning_rate": 0.0001, "loss": 1.4717, "step": 2550},
+     {"epoch": 0.25, "learning_rate": 0.0001, "loss": 1.4599, "step": 2600},
+     {"epoch": 0.26, "learning_rate": 0.0001, "loss": 1.4746, "step": 2650},
+     {"epoch": 0.26, "learning_rate": 0.0001, "loss": 1.4517, "step": 2700},
+     {"epoch": 0.27, "learning_rate": 0.0001, "loss": 1.4267, "step": 2750},
+     {"epoch": 0.27, "learning_rate": 0.0001, "loss": 1.4303, "step": 2800},
+     {"epoch": 0.28, "learning_rate": 0.0001, "loss": 1.4146, "step": 2850},
+     {"epoch": 0.28, "learning_rate": 0.0001, "loss": 1.4245, "step": 2900},
+     {"epoch": 0.29, "learning_rate": 0.0001, "loss": 1.4356, "step": 2950},
+     {"epoch": 0.29, "learning_rate": 0.0001, "loss": 1.4183, "step": 3000},
+     {"epoch": 0.3, "learning_rate": 0.0001, "loss": 1.423, "step": 3050},
+     {"epoch": 0.3, "learning_rate": 0.0001, "loss": 1.4854, "step": 3100},
+     {"epoch": 0.31, "learning_rate": 0.0001, "loss": 1.4552, "step": 3150},
+     {"epoch": 0.31, "learning_rate": 0.0001, "loss": 1.456, "step": 3200},
+     {"epoch": 0.32, "learning_rate": 0.0001, "loss": 1.4202, "step": 3250},
+     {"epoch": 0.32, "learning_rate": 0.0001, "loss": 1.4025, "step": 3300},
+     {"epoch": 0.33, "learning_rate": 0.0001, "loss": 1.4134, "step": 3350},
+     {"epoch": 0.33, "learning_rate": 0.0001, "loss": 1.4282, "step": 3400},
+     {"epoch": 0.34, "learning_rate": 0.0001, "loss": 1.4256, "step": 3450},
+     {"epoch": 0.34, "learning_rate": 0.0001, "loss": 1.4056, "step": 3500},
+     {"epoch": 0.35, "learning_rate": 0.0001, "loss": 1.3779, "step": 3550},
+     {"epoch": 0.35, "learning_rate": 0.0001, "loss": 1.3857, "step": 3600},
+     {"epoch": 0.36, "learning_rate": 0.0001, "loss": 1.3819, "step": 3650},
+     {"epoch": 0.36, "learning_rate": 0.0001, "loss": 1.4095, "step": 3700},
+     {"epoch": 0.37, "learning_rate": 0.0001, "loss": 1.4387, "step": 3750},
+     {"epoch": 0.37, "learning_rate": 0.0001, "loss": 1.3831, "step": 3800}
+   ],
+   "max_steps": 20478,
+   "num_train_epochs": 2,
+   "total_flos": 5.7305189584896e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-3800/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af5a5e7107b6cd66fb7591ec5f90dff0454583f7b3c471f207ac4b50ee600ca7
+ size 3899
checkpoint-4000/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-4000/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "/kaggle/working/ez-trainer/models/open_llama_3b_v2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9610d0a33a6c317dd31374ca995da107315a90919ff85385ba1111a3403c78c
+ size 10686701
checkpoint-4000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8dc56ded4a6eba13a2bccf868e84335966d264f395662db0f523ae880e00808
+ size 21389189
checkpoint-4000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2278a87cdf86c3f9219223c847f6b27f6b7f15b8226b617f38936e8ff2cbcde
+ size 14575
checkpoint-4000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb0bd3fc0bd7c8caa7ccf108049180d64c98a1bc3a04f138cbc446c268e7861d
+ size 627
checkpoint-4000/trainer_state.json ADDED
@@ -0,0 +1,496 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.39066315069831037,
+   "global_step": 4000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.0, "learning_rate": 5e-05, "loss": 3.3163, "step": 50},
+     {"epoch": 0.01, "learning_rate": 0.0001, "loss": 2.491, "step": 100},
+     {"epoch": 0.01, "learning_rate": 0.0001, "loss": 2.0415, "step": 150},
+     {"epoch": 0.02, "learning_rate": 0.0001, "loss": 1.9681, "step": 200},
+     {"epoch": 0.02, "learning_rate": 0.0001, "loss": 1.8944, "step": 250},
+     {"epoch": 0.03, "learning_rate": 0.0001, "loss": 1.8218, "step": 300},
+     {"epoch": 0.03, "learning_rate": 0.0001, "loss": 1.7536, "step": 350},
+     {"epoch": 0.04, "learning_rate": 0.0001, "loss": 1.681, "step": 400},
+     {"epoch": 0.04, "learning_rate": 0.0001, "loss": 1.6851, "step": 450},
+     {"epoch": 0.05, "learning_rate": 0.0001, "loss": 1.6193, "step": 500},
+     {"epoch": 0.05, "learning_rate": 0.0001, "loss": 1.6291, "step": 550},
+     {"epoch": 0.06, "learning_rate": 0.0001, "loss": 1.6241, "step": 600},
+     {"epoch": 0.06, "learning_rate": 0.0001, "loss": 1.5997, "step": 650},
+     {"epoch": 0.07, "learning_rate": 0.0001, "loss": 1.5744, "step": 700},
+     {"epoch": 0.07, "learning_rate": 0.0001, "loss": 1.6018, "step": 750},
+     {"epoch": 0.08, "learning_rate": 0.0001, "loss": 1.6006, "step": 800},
+     {"epoch": 0.08, "learning_rate": 0.0001, "loss": 1.5671, "step": 850},
+     {"epoch": 0.09, "learning_rate": 0.0001, "loss": 1.5721, "step": 900},
+     {"epoch": 0.09, "learning_rate": 0.0001, "loss": 1.5573, "step": 950},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.5539, "step": 1000},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.542, "step": 1050},
+     {"epoch": 0.11, "learning_rate": 0.0001, "loss": 1.5165, "step": 1100},
+     {"epoch": 0.11, "learning_rate": 0.0001, "loss": 1.5797, "step": 1150},
+     {"epoch": 0.12, "learning_rate": 0.0001, "loss": 1.5609, "step": 1200},
+     {"epoch": 0.12, "learning_rate": 0.0001, "loss": 1.5435, "step": 1250},
+     {"epoch": 0.13, "learning_rate": 0.0001, "loss": 1.5645, "step": 1300},
+     {"epoch": 0.13, "learning_rate": 0.0001, "loss": 1.5258, "step": 1350},
+     {"epoch": 0.14, "learning_rate": 0.0001, "loss": 1.4773, "step": 1400},
+     {"epoch": 0.14, "learning_rate": 0.0001, "loss": 1.4825, "step": 1450},
+     {"epoch": 0.15, "learning_rate": 0.0001, "loss": 1.5124, "step": 1500},
+     {"epoch": 0.15, "learning_rate": 0.0001, "loss": 1.4963, "step": 1550},
+     {"epoch": 0.16, "learning_rate": 0.0001, "loss": 1.4861, "step": 1600},
+     {"epoch": 0.16, "learning_rate": 0.0001, "loss": 1.5275, "step": 1650},
+     {"epoch": 0.17, "learning_rate": 0.0001, "loss": 1.484, "step": 1700},
+     {"epoch": 0.17, "learning_rate": 0.0001, "loss": 1.5349, "step": 1750},
+     {"epoch": 0.18, "learning_rate": 0.0001, "loss": 1.4641, "step": 1800},
+     {"epoch": 0.18, "learning_rate": 0.0001, "loss": 1.4752, "step": 1850},
+     {"epoch": 0.19, "learning_rate": 0.0001, "loss": 1.4475, "step": 1900},
+     {"epoch": 0.19, "learning_rate": 0.0001, "loss": 1.5358, "step": 1950},
+     {"epoch": 0.2, "learning_rate": 0.0001, "loss": 1.4652, "step": 2000},
+     {"epoch": 0.2, "learning_rate": 0.0001, "loss": 1.419, "step": 2050},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4588, "step": 2100},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4593, "step": 2150},
+     {"epoch": 0.21, "learning_rate": 0.0001, "loss": 1.4604, "step": 2200},
+     {"epoch": 0.22, "learning_rate": 0.0001, "loss": 1.4598, "step": 2250},
+     {"epoch": 0.22, "learning_rate": 0.0001, "loss": 1.482, "step": 2300},
+     {"epoch": 0.23, "learning_rate": 0.0001, "loss": 1.4749, "step": 2350},
+     {"epoch": 0.23, "learning_rate": 0.0001, "loss": 1.4718, "step": 2400},
+     {"epoch": 0.24, "learning_rate": 0.0001, "loss": 1.4343, "step": 2450},
+     {"epoch": 0.24, "learning_rate": 0.0001, "loss": 1.4861, "step": 2500},
+     {"epoch": 0.25, "learning_rate": 0.0001, "loss": 1.4717, "step": 2550},
+     {"epoch": 0.25, "learning_rate": 0.0001, "loss": 1.4599, "step": 2600},
+     {"epoch": 0.26, "learning_rate": 0.0001, "loss": 1.4746, "step": 2650},
+     {"epoch": 0.26, "learning_rate": 0.0001, "loss": 1.4517, "step": 2700},
+     {"epoch": 0.27, "learning_rate": 0.0001, "loss": 1.4267, "step": 2750},
+     {"epoch": 0.27, "learning_rate": 0.0001, "loss": 1.4303, "step": 2800},
+     {"epoch": 0.28, "learning_rate": 0.0001, "loss": 1.4146, "step": 2850},
+     {"epoch": 0.28, "learning_rate": 0.0001, "loss": 1.4245, "step": 2900},
+     {"epoch": 0.29, "learning_rate": 0.0001, "loss": 1.4356, "step": 2950},
+     {"epoch": 0.29, "learning_rate": 0.0001, "loss": 1.4183, "step": 3000},
+     {"epoch": 0.3, "learning_rate": 0.0001, "loss": 1.423, "step": 3050},
+     {"epoch": 0.3, "learning_rate": 0.0001, "loss": 1.4854, "step": 3100},
+     {"epoch": 0.31, "learning_rate": 0.0001, "loss": 1.4552, "step": 3150},
+     {"epoch": 0.31, "learning_rate": 0.0001, "loss": 1.456, "step": 3200},
+     {"epoch": 0.32, "learning_rate": 0.0001, "loss": 1.4202, "step": 3250},
+     {"epoch": 0.32, "learning_rate": 0.0001, "loss": 1.4025, "step": 3300},
+     {"epoch": 0.33, "learning_rate": 0.0001, "loss": 1.4134, "step": 3350},
+     {"epoch": 0.33, "learning_rate": 0.0001, "loss": 1.4282, "step": 3400},
+     {"epoch": 0.34, "learning_rate": 0.0001, "loss": 1.4256, "step": 3450},
+     {"epoch": 0.34, "learning_rate": 0.0001, "loss": 1.4056, "step": 3500},
+     {"epoch": 0.35, "learning_rate": 0.0001, "loss": 1.3779, "step": 3550},
+     {"epoch": 0.35, "learning_rate": 0.0001, "loss": 1.3857, "step": 3600},
+     {"epoch": 0.36, "learning_rate": 0.0001, "loss": 1.3819, "step": 3650},
+     {"epoch": 0.36, "learning_rate": 0.0001, "loss": 1.4095, "step": 3700},
+     {"epoch": 0.37, "learning_rate": 0.0001, "loss": 1.4387, "step": 3750},
+     {"epoch": 0.37, "learning_rate": 0.0001, "loss": 1.3831, "step": 3800},
+     {"epoch": 0.38, "learning_rate": 0.0001, "loss": 1.3913, "step": 3850},
+     {"epoch": 0.38, "learning_rate": 0.0001, "loss": 1.4007, "step": 3900},
+     {"epoch": 0.39, "learning_rate": 0.0001, "loss": 1.4038, "step": 3950},
+     {"epoch": 0.39, "learning_rate": 0.0001, "loss": 1.3926, "step": 4000}
+   ],
+   "max_steps": 20478,
+   "num_train_epochs": 2,
+   "total_flos": 6.0291907784448e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
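
The `total_flos` counter grows with every checkpoint, so differencing two snapshots gives an estimate of the compute per optimizer step over that interval:

```python
# Sketch: estimate per-step compute from two checkpoints' total_flos counters.
flos_3800 = 5.7305189584896e16   # checkpoint-3800/trainer_state.json
flos_4000 = 6.0291907784448e16   # checkpoint-4000/trainer_state.json
per_step = (flos_4000 - flos_3800) / (4000 - 3800)
print(f"{per_step:.3e} FLOPs/step")  # ~1.493e+13
```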
checkpoint-4000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af5a5e7107b6cd66fb7591ec5f90dff0454583f7b3c471f207ac4b50ee600ca7
+ size 3899
checkpoint-4200/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.5.0.dev0
checkpoint-4200/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "/kaggle/working/ez-trainer/models/open_llama_3b_v2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4200/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0d86f569a939cba8b99aaf66b94bee9b9b22b6f24d8ace5e340d6d5788f072c
+ size 10686701
checkpoint-4200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d127341f4d9d2b786efaca34c8e8b8f52a74a6cec74953621d484128bc2d1314
+ size 21389189
checkpoint-4200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2278a87cdf86c3f9219223c847f6b27f6b7f15b8226b617f38936e8ff2cbcde
+ size 14575
checkpoint-4200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:875747e57e538cfa807aea6e22367c4b153938ead47679a1c1dc3235d57041eb
+ size 627
checkpoint-4200/trainer_state.json ADDED
@@ -0,0 +1,520 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.4101963082332259,
+ "global_step": 4200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 5e-05,
+ "loss": 3.3163,
+ "step": 50
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 0.0001,
+ "loss": 2.491,
+ "step": 100
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 0.0001,
+ "loss": 2.0415,
+ "step": 150
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 0.0001,
+ "loss": 1.9681,
+ "step": 200
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 0.0001,
+ "loss": 1.8944,
+ "step": 250
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 0.0001,
+ "loss": 1.8218,
+ "step": 300
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 0.0001,
+ "loss": 1.7536,
+ "step": 350
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 0.0001,
+ "loss": 1.681,
+ "step": 400
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 0.0001,
+ "loss": 1.6851,
+ "step": 450
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.0001,
+ "loss": 1.6193,
+ "step": 500
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.0001,
+ "loss": 1.6291,
+ "step": 550
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 0.0001,
+ "loss": 1.6241,
+ "step": 600
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 0.0001,
+ "loss": 1.5997,
+ "step": 650
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.0001,
+ "loss": 1.5744,
+ "step": 700
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.0001,
+ "loss": 1.6018,
+ "step": 750
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 0.0001,
+ "loss": 1.6006,
+ "step": 800
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 0.0001,
+ "loss": 1.5671,
+ "step": 850
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.0001,
+ "loss": 1.5721,
+ "step": 900
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.0001,
+ "loss": 1.5573,
+ "step": 950
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.0001,
+ "loss": 1.5539,
+ "step": 1000
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.0001,
+ "loss": 1.542,
+ "step": 1050
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 0.0001,
+ "loss": 1.5165,
+ "step": 1100
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 0.0001,
+ "loss": 1.5797,
+ "step": 1150
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.0001,
+ "loss": 1.5609,
+ "step": 1200
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.0001,
+ "loss": 1.5435,
+ "step": 1250
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 0.0001,
+ "loss": 1.5645,
+ "step": 1300
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 0.0001,
+ "loss": 1.5258,
+ "step": 1350
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.0001,
+ "loss": 1.4773,
+ "step": 1400
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.0001,
+ "loss": 1.4825,
+ "step": 1450
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.0001,
+ "loss": 1.5124,
+ "step": 1500
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.0001,
+ "loss": 1.4963,
+ "step": 1550
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 0.0001,
+ "loss": 1.4861,
+ "step": 1600
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 0.0001,
+ "loss": 1.5275,
+ "step": 1650
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0001,
+ "loss": 1.484,
+ "step": 1700
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0001,
+ "loss": 1.5349,
+ "step": 1750
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 0.0001,
+ "loss": 1.4641,
+ "step": 1800
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 0.0001,
+ "loss": 1.4752,
+ "step": 1850
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.0001,
+ "loss": 1.4475,
+ "step": 1900
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.0001,
+ "loss": 1.5358,
+ "step": 1950
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.0001,
+ "loss": 1.4652,
+ "step": 2000
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.0001,
+ "loss": 1.419,
+ "step": 2050
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0001,
+ "loss": 1.4588,
+ "step": 2100
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0001,
+ "loss": 1.4593,
+ "step": 2150
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0001,
+ "loss": 1.4604,
+ "step": 2200
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.0001,
+ "loss": 1.4598,
+ "step": 2250
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.0001,
+ "loss": 1.482,
+ "step": 2300
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.0001,
+ "loss": 1.4749,
+ "step": 2350
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.0001,
+ "loss": 1.4718,
+ "step": 2400
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.0001,
+ "loss": 1.4343,
+ "step": 2450
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.0001,
+ "loss": 1.4861,
+ "step": 2500
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.0001,
+ "loss": 1.4717,
+ "step": 2550
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.0001,
+ "loss": 1.4599,
+ "step": 2600
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0001,
+ "loss": 1.4746,
+ "step": 2650
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0001,
+ "loss": 1.4517,
+ "step": 2700
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.0001,
+ "loss": 1.4267,
+ "step": 2750
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.0001,
+ "loss": 1.4303,
+ "step": 2800
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.0001,
+ "loss": 1.4146,
+ "step": 2850
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.0001,
+ "loss": 1.4245,
+ "step": 2900
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.0001,
+ "loss": 1.4356,
+ "step": 2950
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.0001,
+ "loss": 1.4183,
+ "step": 3000
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.0001,
+ "loss": 1.423,
+ "step": 3050
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.0001,
+ "loss": 1.4854,
+ "step": 3100
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.0001,
+ "loss": 1.4552,
+ "step": 3150
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.0001,
+ "loss": 1.456,
+ "step": 3200
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.0001,
+ "loss": 1.4202,
+ "step": 3250
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.0001,
+ "loss": 1.4025,
+ "step": 3300
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 0.0001,
+ "loss": 1.4134,
+ "step": 3350
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 0.0001,
+ "loss": 1.4282,
+ "step": 3400
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.0001,
+ "loss": 1.4256,
+ "step": 3450
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.0001,
+ "loss": 1.4056,
+ "step": 3500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.0001,
+ "loss": 1.3779,
+ "step": 3550
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.0001,
+ "loss": 1.3857,
+ "step": 3600
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.0001,
+ "loss": 1.3819,
+ "step": 3650
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.0001,
+ "loss": 1.4095,
+ "step": 3700
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.0001,
+ "loss": 1.4387,
+ "step": 3750
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.0001,
+ "loss": 1.3831,
+ "step": 3800
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.0001,
+ "loss": 1.3913,
+ "step": 3850
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.0001,
+ "loss": 1.4007,
+ "step": 3900
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.0001,
+ "loss": 1.4038,
+ "step": 3950
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.0001,
+ "loss": 1.3926,
+ "step": 4000
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 0.0001,
+ "loss": 1.36,
+ "step": 4050
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 0.0001,
+ "loss": 1.4146,
+ "step": 4100
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.0001,
+ "loss": 1.3626,
+ "step": 4150
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.0001,
+ "loss": 1.3562,
+ "step": 4200
+ }
+ ],
+ "max_steps": 20478,
+ "num_train_epochs": 2,
+ "total_flos": 6.3352674125568e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-4200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af5a5e7107b6cd66fb7591ec5f90dff0454583f7b3c471f207ac4b50ee600ca7
+ size 3899
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d127341f4d9d2b786efaca34c8e8b8f52a74a6cec74953621d484128bc2d1314
+ size 21389189
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2278a87cdf86c3f9219223c847f6b27f6b7f15b8226b617f38936e8ff2cbcde
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:875747e57e538cfa807aea6e22367c4b153938ead47679a1c1dc3235d57041eb
+ size 627
trainer_state.json ADDED
@@ -0,0 +1,520 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.4101963082332259,
+ "global_step": 4200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 5e-05,
+ "loss": 3.3163,
+ "step": 50
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 0.0001,
+ "loss": 2.491,
+ "step": 100
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 0.0001,
+ "loss": 2.0415,
+ "step": 150
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 0.0001,
+ "loss": 1.9681,
+ "step": 200
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 0.0001,
+ "loss": 1.8944,
+ "step": 250
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 0.0001,
+ "loss": 1.8218,
+ "step": 300
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 0.0001,
+ "loss": 1.7536,
+ "step": 350
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 0.0001,
+ "loss": 1.681,
+ "step": 400
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 0.0001,
+ "loss": 1.6851,
+ "step": 450
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.0001,
+ "loss": 1.6193,
+ "step": 500
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 0.0001,
+ "loss": 1.6291,
+ "step": 550
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 0.0001,
+ "loss": 1.6241,
+ "step": 600
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 0.0001,
+ "loss": 1.5997,
+ "step": 650
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.0001,
+ "loss": 1.5744,
+ "step": 700
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.0001,
+ "loss": 1.6018,
+ "step": 750
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 0.0001,
+ "loss": 1.6006,
+ "step": 800
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 0.0001,
+ "loss": 1.5671,
+ "step": 850
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.0001,
+ "loss": 1.5721,
+ "step": 900
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 0.0001,
+ "loss": 1.5573,
+ "step": 950
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.0001,
+ "loss": 1.5539,
+ "step": 1000
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.0001,
+ "loss": 1.542,
+ "step": 1050
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 0.0001,
+ "loss": 1.5165,
+ "step": 1100
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 0.0001,
+ "loss": 1.5797,
+ "step": 1150
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.0001,
+ "loss": 1.5609,
+ "step": 1200
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 0.0001,
+ "loss": 1.5435,
+ "step": 1250
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 0.0001,
+ "loss": 1.5645,
+ "step": 1300
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 0.0001,
+ "loss": 1.5258,
+ "step": 1350
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.0001,
+ "loss": 1.4773,
+ "step": 1400
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.0001,
+ "loss": 1.4825,
+ "step": 1450
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.0001,
+ "loss": 1.5124,
+ "step": 1500
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 0.0001,
+ "loss": 1.4963,
+ "step": 1550
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 0.0001,
+ "loss": 1.4861,
+ "step": 1600
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 0.0001,
+ "loss": 1.5275,
+ "step": 1650
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0001,
+ "loss": 1.484,
+ "step": 1700
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0001,
+ "loss": 1.5349,
+ "step": 1750
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 0.0001,
+ "loss": 1.4641,
+ "step": 1800
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 0.0001,
+ "loss": 1.4752,
+ "step": 1850
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.0001,
+ "loss": 1.4475,
+ "step": 1900
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 0.0001,
+ "loss": 1.5358,
+ "step": 1950
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.0001,
+ "loss": 1.4652,
+ "step": 2000
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 0.0001,
+ "loss": 1.419,
+ "step": 2050
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0001,
+ "loss": 1.4588,
+ "step": 2100
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0001,
+ "loss": 1.4593,
+ "step": 2150
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0001,
+ "loss": 1.4604,
+ "step": 2200
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.0001,
+ "loss": 1.4598,
+ "step": 2250
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 0.0001,
+ "loss": 1.482,
+ "step": 2300
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.0001,
+ "loss": 1.4749,
+ "step": 2350
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 0.0001,
+ "loss": 1.4718,
+ "step": 2400
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.0001,
+ "loss": 1.4343,
+ "step": 2450
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.0001,
+ "loss": 1.4861,
+ "step": 2500
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.0001,
+ "loss": 1.4717,
+ "step": 2550
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 0.0001,
+ "loss": 1.4599,
+ "step": 2600
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0001,
+ "loss": 1.4746,
+ "step": 2650
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 0.0001,
+ "loss": 1.4517,
+ "step": 2700
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.0001,
+ "loss": 1.4267,
+ "step": 2750
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 0.0001,
+ "loss": 1.4303,
+ "step": 2800
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.0001,
+ "loss": 1.4146,
+ "step": 2850
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.0001,
+ "loss": 1.4245,
+ "step": 2900
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.0001,
+ "loss": 1.4356,
+ "step": 2950
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.0001,
+ "loss": 1.4183,
+ "step": 3000
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.0001,
+ "loss": 1.423,
+ "step": 3050
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 0.0001,
+ "loss": 1.4854,
+ "step": 3100
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.0001,
+ "loss": 1.4552,
+ "step": 3150
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.0001,
+ "loss": 1.456,
+ "step": 3200
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.0001,
+ "loss": 1.4202,
+ "step": 3250
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 0.0001,
+ "loss": 1.4025,
+ "step": 3300
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 0.0001,
+ "loss": 1.4134,
+ "step": 3350
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 0.0001,
+ "loss": 1.4282,
+ "step": 3400
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.0001,
+ "loss": 1.4256,
+ "step": 3450
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 0.0001,
+ "loss": 1.4056,
+ "step": 3500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.0001,
+ "loss": 1.3779,
+ "step": 3550
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.0001,
+ "loss": 1.3857,
+ "step": 3600
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.0001,
+ "loss": 1.3819,
+ "step": 3650
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 0.0001,
+ "loss": 1.4095,
+ "step": 3700
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.0001,
+ "loss": 1.4387,
+ "step": 3750
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 0.0001,
+ "loss": 1.3831,
+ "step": 3800
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.0001,
+ "loss": 1.3913,
+ "step": 3850
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.0001,
+ "loss": 1.4007,
+ "step": 3900
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.0001,
+ "loss": 1.4038,
+ "step": 3950
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 0.0001,
+ "loss": 1.3926,
+ "step": 4000
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 0.0001,
+ "loss": 1.36,
+ "step": 4050
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 0.0001,
+ "loss": 1.4146,
+ "step": 4100
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.0001,
+ "loss": 1.3626,
+ "step": 4150
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 0.0001,
+ "loss": 1.3562,
+ "step": 4200
+ }
+ ],
+ "max_steps": 20478,
+ "num_train_epochs": 2,
+ "total_flos": 6.3352674125568e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af5a5e7107b6cd66fb7591ec5f90dff0454583f7b3c471f207ac4b50ee600ca7
+ size 3899