tyzhu committed
Commit 41f356a
1 Parent(s): b469891

Training in progress, epoch 2, checkpoint

checkpoint-375/adapter_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "auto_mapping": null,
-  "base_model_name_or_path": "Qwen/Qwen1.5-4B",
+  "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
checkpoint-375/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d70c5026b0f396f0bf0b499426984aabfb325772baa90fe2a68ea978a5136bb
+size 143269386
checkpoint-375/added_tokens.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "<|endoftext|>": 151643,
-  "<|im_end|>": 151645,
-  "<|im_start|>": 151644
+  "</s>": 2,
+  "<s>": 1,
+  "<unk>": 0
 }
checkpoint-375/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:15b2e22e97f990c7c05718d1aaf09e33b5a2019bb3f2e6efd8bcda24dd887d95
-size 224537202
+oid sha256:fb0eb4b93ed3df3462c831eca3b384d067275876b9cb485f4286f8a7fe951108
+size 286585234
checkpoint-375/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1ffe0452198a1775d0b1a3be4eb867c6abd6124bbb5457137601b99a292ab513
+oid sha256:e25f60b6a2aedad76c8e0e0320dbef6efe914d871d8ee84d16d8972f0eca0949
 size 15024
checkpoint-375/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d18156f60ff9496f99d6e8597e2804556a6f961cdb63da148bfe79f0adea77e3
+oid sha256:3ef72b5f01911322eabcce5980ccae5f7294320fbb198cbfda8fafbcf2232505
 size 15024
checkpoint-375/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4dfcf6d49a39b317c7ee77599884c4bc614792c14d8c393cd27013ccb5871d02
+oid sha256:9332c9182a574d85ff435ce6cfe8a54edf04db9242321ac4961f00f6f40e511a
 size 15024
checkpoint-375/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6dfdefc8565bff7ff9ce3cd55b31cd1c475ceb10ead1886f615c4c0e60cfcdf7
+oid sha256:378da0be37d1c6f5098ff249d769f1474358c25c69cad26e5e1ce3ecf37cb115
 size 15024
checkpoint-375/special_tokens_map.json CHANGED
@@ -1,14 +1,6 @@
 {
-  "additional_special_tokens": [
-    "<|im_start|>",
-    "<|im_end|>"
-  ],
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": "<|endoftext|>"
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "</s>",
+  "unk_token": "<unk>"
 }
checkpoint-375/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
checkpoint-375/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
checkpoint-375/tokenizer_config.json CHANGED
@@ -1,24 +1,23 @@
 {
-  "add_prefix_space": false,
   "added_tokens_decoder": {
-    "151643": {
-      "content": "<|endoftext|>",
+    "0": {
+      "content": "<unk>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "151644": {
-      "content": "<|im_start|>",
+    "1": {
+      "content": "<s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "151645": {
-      "content": "<|im_end|>",
+    "2": {
+      "content": "</s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -26,18 +25,16 @@
       "special": true
     }
   },
-  "additional_special_tokens": [
-    "<|im_start|>",
-    "<|im_end|>"
-  ],
-  "bos_token": null,
-  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+  "additional_special_tokens": [],
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "<|endoftext|>",
-  "errors": "replace",
-  "model_max_length": 32768,
-  "pad_token": "<|endoftext|>",
-  "split_special_tokens": false,
-  "tokenizer_class": "Qwen2Tokenizer",
-  "unk_token": null
+  "eos_token": "</s>",
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "padding_side": "left",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": true
 }
checkpoint-375/trainer_state.json CHANGED
@@ -9,84 +9,67 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.5333333333333333,
-      "grad_norm": 0.271308958530426,
+      "epoch": 0.53,
       "learning_rate": 5e-05,
-      "loss": 1.8262,
+      "loss": 1.3988,
       "step": 100
     },
     {
-      "epoch": 0.9973333333333333,
-      "eval_accuracy": 0.6047174887892377,
-      "eval_loss": 1.7027028799057007,
-      "eval_runtime": 6.6338,
-      "eval_samples_per_second": 75.372,
-      "eval_steps_per_second": 9.497,
+      "epoch": 1.0,
+      "eval_accuracy": 0.6660784313725491,
+      "eval_loss": 1.2107256650924683,
+      "eval_runtime": 7.728,
+      "eval_samples_per_second": 64.7,
+      "eval_steps_per_second": 8.152,
       "step": 187
     },
     {
-      "epoch": 0.9973333333333333,
-      "eval_exact_match": 14.6,
-      "eval_f1": 23.665714285714298,
-      "eval_qa_bleu": 10.24028390049623,
-      "eval_qa_exact_match": 0.114,
-      "eval_recite_bleu": 12.525534079250063,
+      "epoch": 1.0,
+      "eval_exact_match": 19.4,
+      "eval_f1": 27.911959489312448,
+      "eval_qa_bleu": 2.842200543476061,
+      "eval_qa_exact_match": 0.152,
+      "eval_recite_bleu": 12.149078895446083,
       "eval_recite_exact_match": 0.0,
       "step": 187
     },
     {
-      "epoch": 1.0666666666666667,
-      "grad_norm": 0.301651269197464,
+      "epoch": 1.07,
       "learning_rate": 5e-05,
-      "loss": 1.7424,
+      "loss": 1.238,
       "step": 200
     },
     {
       "epoch": 1.6,
-      "grad_norm": 0.35949045419692993,
       "learning_rate": 5e-05,
-      "loss": 1.6978,
+      "loss": 1.1977,
       "step": 300
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.6068699551569506,
-      "eval_loss": 1.6845285892486572,
-      "eval_runtime": 5.7524,
-      "eval_samples_per_second": 86.92,
-      "eval_steps_per_second": 10.952,
+      "eval_accuracy": 0.6676078431372549,
+      "eval_loss": 1.1987448930740356,
+      "eval_runtime": 7.4628,
+      "eval_samples_per_second": 66.999,
+      "eval_steps_per_second": 8.442,
       "step": 375
     },
     {
       "epoch": 2.0,
-      "eval_exact_match": 14.6,
-      "eval_f1": 23.74190476190477,
-      "eval_qa_bleu": 9.70432698897927,
-      "eval_qa_exact_match": 0.114,
-      "eval_recite_bleu": 13.014822566317982,
-      "eval_recite_exact_match": 0.006,
+      "eval_exact_match": 21.4,
+      "eval_f1": 27.979969946667268,
+      "eval_qa_bleu": 3.4273838915653356,
+      "eval_qa_exact_match": 0.158,
+      "eval_recite_bleu": 11.474877690710539,
+      "eval_recite_exact_match": 0.0,
       "step": 375
     }
   ],
   "logging_steps": 100,
   "max_steps": 9350,
-  "num_input_tokens_seen": 0,
   "num_train_epochs": 50,
   "save_steps": 500,
-  "stateful_callbacks": {
-    "TrainerControl": {
-      "args": {
-        "should_epoch_stop": false,
-        "should_evaluate": false,
-        "should_log": false,
-        "should_save": true,
-        "should_training_stop": false
-      },
-      "attributes": {}
-    }
-  },
-  "total_flos": 3.777309007518106e+16,
-  "train_batch_size": 1,
+  "total_flos": 8.689846617689293e+16,
   "trial_name": null,
   "trial_params": null
 }
checkpoint-375/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:01f9f0dc44c57fb44afb03320f4d02bf48066009e3bbcd96d8e4377b0171c5a5
-size 5304
+oid sha256:ae9143de0ad8fd0f027f30f757d262a6e02edc4ceeaea2c21697f4f44b5b1eeb
+size 4728