AniketParab committed
Commit aaddf9a
1 Parent(s): f960d7b

AniketParab/kawasa_sample_llama2_fine_tuned/

README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-base_model: abhishek/llama-2-7b-hf-small-shards
+base_model: meta-llama/Llama-2-7b-chat-hf
 tags:
 - generated_from_trainer
 model-index:
@@ -12,7 +12,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # results
 
-This model is a fine-tuned version of [abhishek/llama-2-7b-hf-small-shards](https://huggingface.co/abhishek/llama-2-7b-hf-small-shards) on an unknown dataset.
+This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on an unknown dataset.
 
 ## Model description
 
@@ -48,7 +48,7 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- Transformers 4.32.1
+- Transformers 4.33.0
 - Pytorch 2.0.1+cu118
 - Datasets 2.14.4
 - Tokenizers 0.13.3
adapter_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "auto_mapping": null,
-  "base_model_name_or_path": "abhishek/llama-2-7b-hf-small-shards",
+  "base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aa3ad2771665f844ce76568f4ec086ea76b6149253b897ab8ea8bd6d46c68ace
+oid sha256:64f35136d53885a3026cb68a1b09fbdff939a6e4b50a89c85bd139708c9b08a4
 size 134263757
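
The .bin weights are stored through Git LFS, so the diff only touches the three-line pointer: spec version, the SHA-256 of the blob, and its byte size. A standard-library-only sketch of checking a downloaded file against the new pointer's oid and size:

```python
import hashlib
from pathlib import Path

def verify_lfs_blob(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against a Git LFS pointer's oid/size."""
    data = Path(path).read_bytes()
    return (len(data) == expected_size
            and hashlib.sha256(data).hexdigest() == expected_oid)

# Values taken from the updated pointer above.
ok = verify_lfs_blob(
    "adapter_model.bin",
    "64f35136d53885a3026cb68a1b09fbdff939a6e4b50a89c85bd139708c9b08a4",
    134263757,
)
```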
special_tokens_map.json CHANGED
@@ -2,14 +2,14 @@
   "bos_token": {
     "content": "<s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
     "content": "</s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
    "rstrip": false,
     "single_word": false
   },
@@ -17,7 +17,7 @@
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   }
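
Flipping `normalized` to `false` tells the tokenizer to match these special tokens verbatim on the raw input, before any normalization runs, which matches how the official Llama-2 tokenizers configure them. A sketch of the equivalent flags in the `tokenizers` library:

```python
from tokenizers import AddedToken

# normalized=False: the token is matched on the raw text rather than on the
# normalizer's output, so "<s>" and "</s>" are split out exactly as written.
bos = AddedToken("<s>", lstrip=False, rstrip=False,
                 single_word=False, normalized=False)
eos = AddedToken("</s>", lstrip=False, rstrip=False,
                 single_word=False, normalized=False)
```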
tokenizer.json CHANGED
@@ -1,6 +1,11 @@
 {
   "version": "1.0",
-  "truncation": null,
+  "truncation": {
+    "direction": "Right",
+    "max_length": 512,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
   "padding": null,
   "added_tokens": [
     {
@@ -18,7 +23,7 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true,
+      "normalized": false,
       "special": true
     },
     {
@@ -27,17 +32,8 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true,
+      "normalized": false,
       "special": true
-    },
-    {
-      "id": 32000,
-      "content": "<pad>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": true,
-      "special": false
     }
   ],
   "normalizer": {
tokenizer_config.json CHANGED
@@ -3,7 +3,7 @@
     "__type": "AddedToken",
     "content": "<s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
@@ -12,20 +12,21 @@
     "__type": "AddedToken",
     "content": "</s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "legacy": false,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": null,
+  "padding_side": "right",
   "sp_model_kwargs": {},
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": {
     "__type": "AddedToken",
     "content": "<unk>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
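
With `padding_side` now pinned to "right" in the config, the tokenizer pads on the right out of the box; `pad_token` is still null, though, so a common workaround when batching Llama-2 inputs is to reuse the EOS token. A minimal sketch, assuming the adapter repo id from this commit:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("AniketParab/kawasa_sample_llama2_fine_tuned")
assert tok.padding_side == "right"  # now set in tokenizer_config.json

# pad_token is still null in the config; reusing EOS for padding is a
# common workaround for Llama-2 when building batches.
tok.pad_token = tok.eos_token
batch = tok(["hello", "a longer example"], padding=True, return_tensors="pt")
```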
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:465982149d4076d009de4ca934edb02fcb2157087ab5ecced148e604448045af
+oid sha256:1f77268fbdac0f2ec2968b68d549ac0cfde7a2c77806ebe8f88ab075c603334f
 size 4027