PathFinderKR committed
Commit 5bfbd92
1 Parent(s): fcfe2d3

Upload LlamaForCausalLM

config.json CHANGED
@@ -23,7 +23,7 @@
   "rope_theta": 500000.0,
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
-  "transformers_version": "4.41.0",
+  "transformers_version": "4.41.2",
   "use_cache": true,
-  "vocab_size": 145793
+  "vocab_size": 128256
 }
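For context, vocab_size drops back to 128256, the stock Llama 3 vocabulary, so the embedding matrix lines up with the tokenizer again. A minimal sketch to sanity-check the uploaded config against the tokenizer (repo_id is a placeholder, not this repository's actual id):

```python
from transformers import AutoConfig, AutoTokenizer

repo_id = "user/model"  # placeholder: substitute this repository's id

config = AutoConfig.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

# After this commit, the config's vocab_size (128256) should match the
# tokenizer's total vocabulary, or generation can index out of range.
assert config.vocab_size == len(tokenizer), (config.vocab_size, len(tokenizer))
print(config.torch_dtype, config.transformers_version)
```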
generation_config.json CHANGED
@@ -1,12 +1,12 @@
 {
   "bos_token_id": 128000,
-  "do_sample": false,
+  "do_sample": true,
   "eos_token_id": [
     128001,
     128009
   ],
-  "max_length": 8192,
+  "max_length": 4096,
   "temperature": 0.6,
   "top_p": 0.9,
-  "transformers_version": "4.41.0"
+  "transformers_version": "4.41.2"
 }
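This change is more consequential than it looks: with do_sample flipped to true, the temperature and top_p fields actually take effect, whereas under do_sample: false they were ignored and decoding was greedy; max_length now caps prompt plus completion at 4096 tokens. A quick sketch to inspect the uploaded defaults (repo_id is again a placeholder):

```python
from transformers import GenerationConfig

repo_id = "user/model"  # placeholder: substitute this repository's id

gen = GenerationConfig.from_pretrained(repo_id)

# do_sample=True activates temperature=0.6 / top_p=0.9 nucleus sampling;
# max_length=4096 bounds prompt + generated tokens combined.
print(gen.do_sample, gen.temperature, gen.top_p, gen.max_length)
```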
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4fb12eba0f8137e4f73d943e5a206fc15fbc2d61714954c6030fd5cd31428b62
-size 4885463832
+oid sha256:409fed81ea945bdcc7297f987a95f95a5b057e98dfb41d8f9f19598db0bb463b
+size 4976698592
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1af891e3d4566ead818511d23bf2fd8dd12992da6b6c59c5a14c5c34223bf377
-size 4915916056
+oid sha256:1c5c1a762dfe72a51778f4c9b1fc0414c06b42a7d4a5ff849bb68fd490eca16a
+size 4999802616
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:520d8b48f3092798a976ba1bc18d9cda3a00a730af3ee77902a2e8164f67dad7
-size 4999819232
+oid sha256:1f86839802e851c1689340fd06b740273059ef3da108f02d1cac8f1de4b18add
+size 4915916080
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f87f023e5d0184c5cdc7ca363ce241fc707ba21794a8bc87ba4a308383ab81e4
-size 1546683160
+oid sha256:88d70d5950ba4a7d07a1525bb4a500eb02ce55fd78d2d88d0827d6ca582ab442
+size 1168138808
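Each .safetensors entry above is a Git LFS pointer file: oid is the sha256 of the shard's contents and size is its byte length, so every shard was re-uploaded in this commit. A small sketch to verify a downloaded shard against its pointer (assumes the shard file is already present locally):

```python
import hashlib

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """sha256 of the file contents, i.e. the oid recorded in the LFS pointer."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# e.g. lfs_oid("model-00001-of-00004.safetensors") should print
# 409fed81ea945bdcc7297f987a95f95a5b057e98dfb41d8f9f19598db0bb463b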
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 16347848704
+    "total_size": 16060522496
   },
   "weight_map": {
     "lm_head.weight": "model-00004-of-00004.safetensors",
@@ -104,11 +104,11 @@
     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
-    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
@@ -124,13 +124,13 @@
     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-    "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-    "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
-    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
-    "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
-    "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
@@ -232,8 +232,8 @@
     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
     "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
-    "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
-    "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
@@ -275,11 +275,11 @@
     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
-    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",