tyzhu committed
Commit 2a66303
1 Parent(s): e23bd90

End of training

Files changed (6)
  1. README.md +14 -2
  2. all_results.json +12 -12
  3. eval_results.json +7 -7
  4. tokenizer.json +1 -6
  5. train_results.json +6 -6
  6. trainer_state.json +277 -12
README.md CHANGED
@@ -3,11 +3,23 @@ license: other
 base_model: Qwen/Qwen1.5-4B
 tags:
 - generated_from_trainer
+datasets:
+- tyzhu/lmind_hotpot_train8000_eval7405_v1_qa
 metrics:
 - accuracy
 model-index:
 - name: lmind_hotpot_train8000_eval7405_v1_qa_Qwen_Qwen1.5-4B_lora2
-  results: []
+  results:
+  - task:
+      name: Causal Language Modeling
+      type: text-generation
+    dataset:
+      name: tyzhu/lmind_hotpot_train8000_eval7405_v1_qa
+      type: tyzhu/lmind_hotpot_train8000_eval7405_v1_qa
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.4907619047619048
 library_name: peft
 ---
 
@@ -16,7 +28,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # lmind_hotpot_train8000_eval7405_v1_qa_Qwen_Qwen1.5-4B_lora2
 
-This model is a fine-tuned version of [Qwen/Qwen1.5-4B](https://huggingface.co/Qwen/Qwen1.5-4B) on an unknown dataset.
+This model is a fine-tuned version of [Qwen/Qwen1.5-4B](https://huggingface.co/Qwen/Qwen1.5-4B) on the tyzhu/lmind_hotpot_train8000_eval7405_v1_qa dataset.
 It achieves the following results on the evaluation set:
 - Loss: 3.9177
 - Accuracy: 0.4908
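Since the card declares `library_name: peft` with `base_model: Qwen/Qwen1.5-4B`, the uploaded weights are a LoRA adapter rather than a full model. A minimal loading sketch, assuming the adapter follows the standard PEFT layout and that the repo id matches the model name above (both assumptions, not stated in this commit):

```python
# Sketch: attach this LoRA adapter to the Qwen1.5-4B base model with PEFT.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen1.5-4B"
# Assumed repo id, derived from the model name in the card.
adapter_id = "tyzhu/lmind_hotpot_train8000_eval7405_v1_qa_Qwen_Qwen1.5-4B_lora2"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, adapter_id)  # wraps the base model with the adapter weights

prompt = "Question: Who wrote The Hobbit?\nAnswer:"
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```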
all_results.json CHANGED
@@ -1,16 +1,16 @@
 {
-    "epoch": 10.0,
-    "eval_accuracy": 0.49263492063492065,
-    "eval_loss": 3.4933342933654785,
-    "eval_runtime": 8.3568,
+    "epoch": 20.0,
+    "eval_accuracy": 0.4907619047619048,
+    "eval_loss": 3.91774582862854,
+    "eval_runtime": 6.2195,
     "eval_samples": 500,
-    "eval_samples_per_second": 59.832,
-    "eval_steps_per_second": 7.539,
-    "perplexity": 32.89544812596355,
-    "total_flos": 6.467692909717094e+16,
-    "train_loss": 1.3154203582763673,
-    "train_runtime": 9112.5942,
+    "eval_samples_per_second": 80.392,
+    "eval_steps_per_second": 10.129,
+    "perplexity": 50.286961491778904,
+    "total_flos": 1.293538587312128e+17,
+    "train_loss": 0.20586764678955077,
+    "train_runtime": 5541.4031,
     "train_samples": 8000,
-    "train_samples_per_second": 8.779,
-    "train_steps_per_second": 0.274
+    "train_samples_per_second": 28.874,
+    "train_steps_per_second": 0.902
 }
eval_results.json CHANGED
@@ -1,10 +1,10 @@
 {
-    "epoch": 10.0,
-    "eval_accuracy": 0.49263492063492065,
-    "eval_loss": 3.4933342933654785,
-    "eval_runtime": 8.3568,
+    "epoch": 20.0,
+    "eval_accuracy": 0.4907619047619048,
+    "eval_loss": 3.91774582862854,
+    "eval_runtime": 6.2195,
     "eval_samples": 500,
-    "eval_samples_per_second": 59.832,
-    "eval_steps_per_second": 7.539,
-    "perplexity": 32.89544812596355
+    "eval_samples_per_second": 80.392,
+    "eval_steps_per_second": 10.129,
+    "perplexity": 50.286961491778904
 }
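The reported perplexity is simply the exponential of the evaluation loss, so the two new values can be cross-checked directly; a quick sketch:

```python
import math

eval_loss = 3.91774582862854
print(math.exp(eval_loss))  # ≈ 50.2869..., matching the "perplexity" field above
```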
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 1024,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
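The only change to tokenizer.json is that the previously saved truncation settings (right-side truncation at 1024 tokens) are now stored as null. In the `tokenizers` library these fields correspond to the truncation options set via `enable_truncation`; a rough sketch of the equivalent calls (the file path is illustrative):

```python
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")  # illustrative path

# Roughly what the old file encoded: truncate (from the right, the default) to 1024 tokens.
tok.enable_truncation(max_length=1024, stride=0, strategy="longest_first")

# The new file stores "truncation": null, i.e. truncation disabled.
tok.no_truncation()
```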
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch": 10.0,
-    "total_flos": 6.467692909717094e+16,
-    "train_loss": 1.3154203582763673,
-    "train_runtime": 9112.5942,
+    "epoch": 20.0,
+    "total_flos": 1.293538587312128e+17,
+    "train_loss": 0.20586764678955077,
+    "train_runtime": 5541.4031,
     "train_samples": 8000,
-    "train_samples_per_second": 8.779,
-    "train_steps_per_second": 0.274
+    "train_samples_per_second": 28.874,
+    "train_steps_per_second": 0.902
 }
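The new throughput figures are consistent with 8,000 training samples seen over 20 epochs within the reported runtime; a quick check:

```python
train_samples = 8000
epochs = 20.0
train_runtime = 5541.4031  # seconds
global_steps = 5000

print(train_samples * epochs / train_runtime)  # ≈ 28.87, matches "train_samples_per_second"
print(global_steps / train_runtime)            # ≈ 0.90, matches "train_steps_per_second"
```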
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 10.0,
+  "epoch": 20.0,
   "eval_steps": 500,
-  "global_step": 2500,
+  "global_step": 5000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -274,21 +274,286 @@
       "step": 2500
     },
     {
-      "epoch": 10.0,
-      "step": 2500,
-      "total_flos": 6.467692909717094e+16,
-      "train_loss": 1.3154203582763673,
-      "train_runtime": 9112.5942,
-      "train_samples_per_second": 8.779,
-      "train_steps_per_second": 0.274
+      "epoch": 10.4,
+      "grad_norm": 1.8493461608886719,
+      "learning_rate": 0.0001,
+      "loss": 0.4688,
+      "step": 2600
+    },
+    {
+      "epoch": 10.8,
+      "grad_norm": 2.332373857498169,
+      "learning_rate": 0.0001,
+      "loss": 0.4967,
+      "step": 2700
+    },
+    {
+      "epoch": 11.0,
+      "eval_accuracy": 0.49174603174603176,
+      "eval_loss": 3.579373598098755,
+      "eval_runtime": 5.4241,
+      "eval_samples_per_second": 92.181,
+      "eval_steps_per_second": 11.615,
+      "step": 2750
+    },
+    {
+      "epoch": 11.2,
+      "grad_norm": 1.2992867231369019,
+      "learning_rate": 0.0001,
+      "loss": 0.4714,
+      "step": 2800
+    },
+    {
+      "epoch": 11.6,
+      "grad_norm": 1.6464879512786865,
+      "learning_rate": 0.0001,
+      "loss": 0.4546,
+      "step": 2900
+    },
+    {
+      "epoch": 12.0,
+      "grad_norm": 1.847381830215454,
+      "learning_rate": 0.0001,
+      "loss": 0.4696,
+      "step": 3000
+    },
+    {
+      "epoch": 12.0,
+      "eval_accuracy": 0.4913968253968254,
+      "eval_loss": 3.6326358318328857,
+      "eval_runtime": 5.6473,
+      "eval_samples_per_second": 88.538,
+      "eval_steps_per_second": 11.156,
+      "step": 3000
+    },
+    {
+      "epoch": 12.4,
+      "grad_norm": 1.7568799257278442,
+      "learning_rate": 0.0001,
+      "loss": 0.4156,
+      "step": 3100
+    },
+    {
+      "epoch": 12.8,
+      "grad_norm": 1.6567174196243286,
+      "learning_rate": 0.0001,
+      "loss": 0.4399,
+      "step": 3200
+    },
+    {
+      "epoch": 13.0,
+      "eval_accuracy": 0.49196825396825394,
+      "eval_loss": 3.7408335208892822,
+      "eval_runtime": 5.63,
+      "eval_samples_per_second": 88.81,
+      "eval_steps_per_second": 11.19,
+      "step": 3250
+    },
+    {
+      "epoch": 13.2,
+      "grad_norm": 1.3751463890075684,
+      "learning_rate": 0.0001,
+      "loss": 0.4228,
+      "step": 3300
+    },
+    {
+      "epoch": 13.6,
+      "grad_norm": 1.4488946199417114,
+      "learning_rate": 0.0001,
+      "loss": 0.4132,
+      "step": 3400
+    },
+    {
+      "epoch": 14.0,
+      "grad_norm": 1.9310686588287354,
+      "learning_rate": 0.0001,
+      "loss": 0.4324,
+      "step": 3500
+    },
+    {
+      "epoch": 14.0,
+      "eval_accuracy": 0.49152380952380953,
+      "eval_loss": 3.7449769973754883,
+      "eval_runtime": 5.4609,
+      "eval_samples_per_second": 91.56,
+      "eval_steps_per_second": 11.537,
+      "step": 3500
+    },
+    {
+      "epoch": 14.4,
+      "grad_norm": 1.3325213193893433,
+      "learning_rate": 0.0001,
+      "loss": 0.3941,
+      "step": 3600
+    },
+    {
+      "epoch": 14.8,
+      "grad_norm": 2.006122589111328,
+      "learning_rate": 0.0001,
+      "loss": 0.4105,
+      "step": 3700
+    },
+    {
+      "epoch": 15.0,
+      "eval_accuracy": 0.4922222222222222,
+      "eval_loss": 3.830101490020752,
+      "eval_runtime": 5.8004,
+      "eval_samples_per_second": 86.2,
+      "eval_steps_per_second": 10.861,
+      "step": 3750
+    },
+    {
+      "epoch": 15.2,
+      "grad_norm": 1.9847638607025146,
+      "learning_rate": 0.0001,
+      "loss": 0.3968,
+      "step": 3800
+    },
+    {
+      "epoch": 15.6,
+      "grad_norm": 0.9312750697135925,
+      "learning_rate": 0.0001,
+      "loss": 0.3949,
+      "step": 3900
+    },
+    {
+      "epoch": 16.0,
+      "grad_norm": 1.4381016492843628,
+      "learning_rate": 0.0001,
+      "loss": 0.4081,
+      "step": 4000
+    },
+    {
+      "epoch": 16.0,
+      "eval_accuracy": 0.49206349206349204,
+      "eval_loss": 3.848762035369873,
+      "eval_runtime": 5.7303,
+      "eval_samples_per_second": 87.256,
+      "eval_steps_per_second": 10.994,
+      "step": 4000
+    },
+    {
+      "epoch": 16.4,
+      "grad_norm": 1.317929983139038,
+      "learning_rate": 0.0001,
+      "loss": 0.3752,
+      "step": 4100
+    },
+    {
+      "epoch": 16.8,
+      "grad_norm": 1.591886281967163,
+      "learning_rate": 0.0001,
+      "loss": 0.3939,
+      "step": 4200
+    },
+    {
+      "epoch": 17.0,
+      "eval_accuracy": 0.49133333333333334,
+      "eval_loss": 3.849243640899658,
+      "eval_runtime": 5.4459,
+      "eval_samples_per_second": 91.813,
+      "eval_steps_per_second": 11.568,
+      "step": 4250
+    },
+    {
+      "epoch": 17.2,
+      "grad_norm": 1.3771722316741943,
+      "learning_rate": 0.0001,
+      "loss": 0.3857,
+      "step": 4300
+    },
+    {
+      "epoch": 17.6,
+      "grad_norm": 1.066933035850525,
+      "learning_rate": 0.0001,
+      "loss": 0.3792,
+      "step": 4400
+    },
+    {
+      "epoch": 18.0,
+      "grad_norm": 1.6044634580612183,
+      "learning_rate": 0.0001,
+      "loss": 0.3924,
+      "step": 4500
+    },
+    {
+      "epoch": 18.0,
+      "eval_accuracy": 0.49152380952380953,
+      "eval_loss": 3.8751370906829834,
+      "eval_runtime": 5.872,
+      "eval_samples_per_second": 85.149,
+      "eval_steps_per_second": 10.729,
+      "step": 4500
+    },
+    {
+      "epoch": 18.4,
+      "grad_norm": 1.43901526927948,
+      "learning_rate": 0.0001,
+      "loss": 0.3675,
+      "step": 4600
+    },
+    {
+      "epoch": 18.8,
+      "grad_norm": 0.888443112373352,
+      "learning_rate": 0.0001,
+      "loss": 0.382,
+      "step": 4700
+    },
+    {
+      "epoch": 19.0,
+      "eval_accuracy": 0.490952380952381,
+      "eval_loss": 3.9336612224578857,
+      "eval_runtime": 5.6336,
+      "eval_samples_per_second": 88.754,
+      "eval_steps_per_second": 11.183,
+      "step": 4750
+    },
+    {
+      "epoch": 19.2,
+      "grad_norm": 0.9845600128173828,
+      "learning_rate": 0.0001,
+      "loss": 0.3715,
+      "step": 4800
+    },
+    {
+      "epoch": 19.6,
+      "grad_norm": 1.0316622257232666,
+      "learning_rate": 0.0001,
+      "loss": 0.3734,
+      "step": 4900
+    },
+    {
+      "epoch": 20.0,
+      "grad_norm": 0.970047116279602,
+      "learning_rate": 0.0001,
+      "loss": 0.3832,
+      "step": 5000
+    },
+    {
+      "epoch": 20.0,
+      "eval_accuracy": 0.4907619047619048,
+      "eval_loss": 3.91774582862854,
+      "eval_runtime": 5.9392,
+      "eval_samples_per_second": 84.186,
+      "eval_steps_per_second": 10.607,
+      "step": 5000
+    },
+    {
+      "epoch": 20.0,
+      "step": 5000,
+      "total_flos": 1.293538587312128e+17,
+      "train_loss": 0.20586764678955077,
+      "train_runtime": 5541.4031,
+      "train_samples_per_second": 28.874,
+      "train_steps_per_second": 0.902
     }
   ],
   "logging_steps": 100,
-  "max_steps": 2500,
+  "max_steps": 5000,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 10,
+  "num_train_epochs": 20,
   "save_steps": 500,
-  "total_flos": 6.467692909717094e+16,
+  "total_flos": 1.293538587312128e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null