update special tokens #1
by RangiLyu · opened
- .gitattributes +0 -8
- README.md +4 -173
- config.json +7 -9
- configuration_internlm2.py → configuration_internlm.py +38 -54
- generation_config.json +3 -5
- model-00005-of-00008.safetensors +0 -3
- model-00006-of-00008.safetensors +0 -3
- model-00007-of-00008.safetensors +0 -3
- model-00008-of-00008.safetensors +0 -3
- model.safetensors.index.json +0 -234
- modeling_internlm2.py +363 -784
- model-00004-of-00008.safetensors → pytorch_model-00001-of-00008.bin +2 -2
- model-00001-of-00008.safetensors → pytorch_model-00002-of-00008.bin +2 -2
- model-00002-of-00008.safetensors → pytorch_model-00003-of-00008.bin +2 -2
- model-00003-of-00008.safetensors → pytorch_model-00004-of-00008.bin +2 -2
- pytorch_model-00005-of-00008.bin +3 -0
- pytorch_model-00006-of-00008.bin +3 -0
- pytorch_model-00007-of-00008.bin +3 -0
- pytorch_model-00008-of-00008.bin +3 -0
- pytorch_model.bin.index.json +3 -0
- special_tokens_map.json +4 -36
- tokenization_internlm2.py → tokenization_internlm.py +10 -6
- tokenization_internlm2_fast.py +0 -214
- tokenizer_config.json +3 -90
.gitattributes CHANGED

@@ -43,11 +43,3 @@ pytorch_model-00001-of-00008.bin filter=lfs diff=lfs merge=lfs -text
 pytorch_model-00002-of-00008.bin filter=lfs diff=lfs merge=lfs -text
 pytorch_model-00005-of-00008.bin filter=lfs diff=lfs merge=lfs -text
 pytorch_model.bin.index.json filter=lfs diff=lfs merge=lfs -text
-model-00001-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00002-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00003-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00004-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00005-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00006-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00007-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
-model-00008-of-00008.safetensors filter=lfs diff=lfs merge=lfs -text
README.md CHANGED

@@ -20,15 +20,10 @@ license: other
 
 [![evaluation](https://github.com/InternLM/InternLM/assets/22529082/f80a2a58-5ddf-471a-8da4-32ab65c8fd3b)](https://github.com/internLM/OpenCompass/)
 
-[💻Github Repo](https://github.com/InternLM/InternLM) • [🤔Reporting Issues](https://github.com/InternLM/InternLM/issues/new)
+[💻Github Repo](https://github.com/InternLM/InternLM) • [🤔Reporting Issues](https://github.com/InternLM/InternLM/issues/new)
 
 </div>
 
-<p align="center">
-    👋 join us on <a href="https://discord.gg/xa29JuW87d" target="_blank">Discord</a> and <a href="https://github.com/InternLM/InternLM/assets/25839884/a6aad896-7232-4220-ac84-9e070c2633ce" target="_blank">WeChat</a>
-</p>
-
-
 
 ## Introduction
 
@@ -46,7 +41,7 @@ InternLM2 has open-sourced a 7 billion parameter base model and a chat model tai
 
 ### Performance Evaluation
 
-We conducted a comprehensive evaluation of InternLM using the open-source evaluation tool [OpenCompass](https://github.com/internLM/OpenCompass/). The evaluation covered five dimensions of capabilities: disciplinary competence, language competence, knowledge competence, inference competence, and comprehension competence. Here are some of the evaluation results, and you can visit the [OpenCompass leaderboard](https://
+We conducted a comprehensive evaluation of InternLM using the open-source evaluation tool [OpenCompass](https://github.com/internLM/OpenCompass/). The evaluation covered five dimensions of capabilities: disciplinary competence, language competence, knowledge competence, inference competence, and comprehension competence. Here are some of the evaluation results, and you can visit the [OpenCompass leaderboard](https://opencompass.org.cn/rank) for more evaluation results.
 
 | Dataset\Models | InternLM2-7B | InternLM2-Chat-7B | InternLM2-20B | InternLM2-Chat-20B | ChatGPT | GPT-4 |
 | --- | --- | --- | --- | --- | --- | --- |
@@ -99,92 +94,10 @@ for response, history in model.stream_chat(tokenizer, "Hello", history=[]):
     length = len(response)
 ```
 
-## Deployment
-
-### LMDeploy
-
-LMDeploy is a toolkit for compressing, deploying, and serving LLM, developed by the MMRazor and MMDeploy teams.
-
-```bash
-pip install lmdeploy
-```
-
-You can run batch inference locally with the following python code:
-
-```python
-import lmdeploy
-pipe = lmdeploy.pipeline("internlm/internlm2-chat-7b")
-response = pipe(["Hi, pls intro yourself", "Shanghai is"])
-print(response)
-```
-
-Or you can launch an OpenAI compatible server with the following command:
-
-```bash
-lmdeploy serve api_server internlm/internlm2-chat-7b --model-name internlm2-chat-7b --server-port 23333
-```
-
-Then you can send a chat request to the server:
-
-```bash
-curl http://localhost:23333/v1/chat/completions \
-    -H "Content-Type: application/json" \
-    -d '{
-    "model": "internlm2-chat-7b",
-    "messages": [
-    {"role": "system", "content": "You are a helpful assistant."},
-    {"role": "user", "content": "Introduce deep learning to me."}
-    ]
-    }'
-```
-
-Find more details in the [LMDeploy documentation](https://lmdeploy.readthedocs.io/en/latest/)
-
-### vLLM
-
-Launch OpenAI compatible server with `vLLM>=0.3.2`:
-
-```bash
-pip install vllm
-```
-
-```bash
-python -m vllm.entrypoints.openai.api_server --model internlm/internlm2-chat-7b --served-model-name internlm2-chat-7b --trust-remote-code
-```
-
-Then you can send a chat request to the server:
-
-```bash
-curl http://localhost:8000/v1/chat/completions \
-    -H "Content-Type: application/json" \
-    -d '{
-    "model": "internlm2-chat-7b",
-    "messages": [
-    {"role": "system", "content": "You are a helpful assistant."},
-    {"role": "user", "content": "Introduce deep learning to me."}
-    ]
-    }'
-```
-
-Find more details in the [vLLM documentation](https://docs.vllm.ai/en/latest/index.html)
-
 ## Open Source License
 
 The code is licensed under Apache-2.0, while model weights are fully open for academic research and also allow **free** commercial usage. To apply for a commercial license, please fill in the [application form (English)](https://wj.qq.com/s2/12727483/5dba/)/[申请表(中文)](https://wj.qq.com/s2/12725412/f7c1/). For other questions or collaborations, please contact <internlm@pjlab.org.cn>.
 
-## Citation
-
-```
-@misc{cai2024internlm2,
-    title={InternLM2 Technical Report},
-    author={Zheng Cai and Maosong Cao and Haojiong Chen and Kai Chen and Keyu Chen and Xin Chen and Xun Chen and Zehui Chen and Zhi Chen and Pei Chu and Xiaoyi Dong and Haodong Duan and Qi Fan and Zhaoye Fei and Yang Gao and Jiaye Ge and Chenya Gu and Yuzhe Gu and Tao Gui and Aijia Guo and Qipeng Guo and Conghui He and Yingfan Hu and Ting Huang and Tao Jiang and Penglong Jiao and Zhenjiang Jin and Zhikai Lei and Jiaxing Li and Jingwen Li and Linyang Li and Shuaibin Li and Wei Li and Yining Li and Hongwei Liu and Jiangning Liu and Jiawei Hong and Kaiwen Liu and Kuikun Liu and Xiaoran Liu and Chengqi Lv and Haijun Lv and Kai Lv and Li Ma and Runyuan Ma and Zerun Ma and Wenchang Ning and Linke Ouyang and Jiantao Qiu and Yuan Qu and Fukai Shang and Yunfan Shao and Demin Song and Zifan Song and Zhihao Sui and Peng Sun and Yu Sun and Huanze Tang and Bin Wang and Guoteng Wang and Jiaqi Wang and Jiayu Wang and Rui Wang and Yudong Wang and Ziyi Wang and Xingjian Wei and Qizhen Weng and Fan Wu and Yingtong Xiong and Chao Xu and Ruiliang Xu and Hang Yan and Yirong Yan and Xiaogui Yang and Haochen Ye and Huaiyuan Ying and Jia Yu and Jing Yu and Yuhang Zang and Chuyu Zhang and Li Zhang and Pan Zhang and Peng Zhang and Ruijie Zhang and Shuo Zhang and Songyang Zhang and Wenjian Zhang and Wenwei Zhang and Xingcheng Zhang and Xinyue Zhang and Hui Zhao and Qian Zhao and Xiaomeng Zhao and Fengzhe Zhou and Zaida Zhou and Jingming Zhuo and Yicheng Zou and Xipeng Qiu and Yu Qiao and Dahua Lin},
-    year={2024},
-    eprint={2403.17297},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}
-```
-
 ## 简介
 
 InternLM2 ,即书生·浦语大模型第二代,开源了面向实用场景的70亿参数基础模型与对话模型 (InternLM2-Chat-7B)。模型具有以下特点:
@@ -198,7 +111,7 @@ InternLM2 ,即书生·浦语大模型第二代,开源了面向实用场景
 
 ### 性能评测
 
-我们使用开源评测工具 [OpenCompass](https://github.com/internLM/OpenCompass/) 从学科综合能力、语言能力、知识能力、推理能力、理解能力五大能力维度对InternLM开展全面评测,部分评测结果如下表所示,欢迎访问[ OpenCompass 榜单 ](https://
+我们使用开源评测工具 [OpenCompass](https://github.com/internLM/OpenCompass/) 从学科综合能力、语言能力、知识能力、推理能力、理解能力五大能力维度对InternLM开展全面评测,部分评测结果如下表所示,欢迎访问[ OpenCompass 榜单 ](https://opencompass.org.cn/rank)获取更多的评测结果。
 
 | 评测集 | InternLM2-7B | InternLM2-Chat-7B | InternLM2-20B | InternLM2-Chat-20B | ChatGPT | GPT-4 |
 | --- | --- | --- | --- | --- | --- | --- |
@@ -250,88 +163,6 @@ for response, history in model.stream_chat(tokenizer, "你好", history=[]):
     length = len(response)
 ```
 
-## 部署
-
-### LMDeploy
-
-LMDeploy 由 MMDeploy 和 MMRazor 团队联合开发,是涵盖了 LLM 任务的全套轻量化、部署和服务解决方案。
-
-```bash
-pip install lmdeploy
-```
-
-你可以使用以下 python 代码进行本地批量推理:
-
-```python
-import lmdeploy
-pipe = lmdeploy.pipeline("internlm/internlm2-chat-7b")
-response = pipe(["Hi, pls intro yourself", "Shanghai is"])
-print(response)
-```
-
-或者你可以使用以下命令启动兼容 OpenAI API 的服务:
-
-```bash
-lmdeploy serve api_server internlm/internlm2-chat-7b --server-port 23333
-```
-
-然后你可以向服务端发起一个聊天请求:
-
-```bash
-curl http://localhost:23333/v1/chat/completions \
-    -H "Content-Type: application/json" \
-    -d '{
-    "model": "internlm2-chat-7b",
-    "messages": [
-    {"role": "system", "content": "你是个友善的AI助手。"},
-    {"role": "user", "content": "介绍一下深度学习。"}
-    ]
-    }'
-```
-
-更多信息请查看 [LMDeploy 文档](https://lmdeploy.readthedocs.io/en/latest/)
-
-### vLLM
-
-使用`vLLM>=0.3.2`启动兼容 OpenAI API 的服务:
-
-```bash
-pip install vllm
-```
-
-```bash
-python -m vllm.entrypoints.openai.api_server --model internlm/internlm2-chat-7b --trust-remote-code
-```
-
-然后你可以向服务端发起一个聊天请求:
-
-```bash
-curl http://localhost:8000/v1/chat/completions \
-    -H "Content-Type: application/json" \
-    -d '{
-    "model": "internlm2-chat-7b",
-    "messages": [
-    {"role": "system", "content": "你是个友善的AI助手。"},
-    {"role": "user", "content": "介绍一下深度学习。"}
-    ]
-    }'
-```
-
-更多信息请查看 [vLLM 文档](https://docs.vllm.ai/en/latest/index.html)
-
 ## 开源许可证
 
-本仓库的代码依照 Apache-2.0 协议开源。模型权重对学术研究完全开放,也可申请免费的商业使用授权([申请表](https://wj.qq.com/s2/12725412/f7c1/))。其他问题与合作请联系 <internlm@pjlab.org.cn>。
-
-## 引用
-
-```
-@misc{cai2024internlm2,
-    title={InternLM2 Technical Report},
-    author={Zheng Cai and Maosong Cao and Haojiong Chen and Kai Chen and Keyu Chen and Xin Chen and Xun Chen and Zehui Chen and Zhi Chen and Pei Chu and Xiaoyi Dong and Haodong Duan and Qi Fan and Zhaoye Fei and Yang Gao and Jiaye Ge and Chenya Gu and Yuzhe Gu and Tao Gui and Aijia Guo and Qipeng Guo and Conghui He and Yingfan Hu and Ting Huang and Tao Jiang and Penglong Jiao and Zhenjiang Jin and Zhikai Lei and Jiaxing Li and Jingwen Li and Linyang Li and Shuaibin Li and Wei Li and Yining Li and Hongwei Liu and Jiangning Liu and Jiawei Hong and Kaiwen Liu and Kuikun Liu and Xiaoran Liu and Chengqi Lv and Haijun Lv and Kai Lv and Li Ma and Runyuan Ma and Zerun Ma and Wenchang Ning and Linke Ouyang and Jiantao Qiu and Yuan Qu and Fukai Shang and Yunfan Shao and Demin Song and Zifan Song and Zhihao Sui and Peng Sun and Yu Sun and Huanze Tang and Bin Wang and Guoteng Wang and Jiaqi Wang and Jiayu Wang and Rui Wang and Yudong Wang and Ziyi Wang and Xingjian Wei and Qizhen Weng and Fan Wu and Yingtong Xiong and Chao Xu and Ruiliang Xu and Hang Yan and Yirong Yan and Xiaogui Yang and Haochen Ye and Huaiyuan Ying and Jia Yu and Jing Yu and Yuhang Zang and Chuyu Zhang and Li Zhang and Pan Zhang and Peng Zhang and Ruijie Zhang and Shuo Zhang and Songyang Zhang and Wenjian Zhang and Wenwei Zhang and Xingcheng Zhang and Xinyue Zhang and Hui Zhao and Qian Zhao and Xiaomeng Zhao and Fengzhe Zhou and Zaida Zhou and Jingming Zhuo and Yicheng Zou and Xipeng Qiu and Yu Qiao and Dahua Lin},
-    year={2024},
-    eprint={2403.17297},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}
-```
+本仓库的代码依照 Apache-2.0 协议开源。模型权重对学术研究完全开放,也可申请免费的商业使用授权([申请表](https://wj.qq.com/s2/12725412/f7c1/))。其他问题与合作请联系 <internlm@pjlab.org.cn>。
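The kept README context above ends with the `stream_chat` loop; for completeness, here is a minimal sketch of how that loop is typically driven. It assumes a CUDA device, `trust_remote_code=True`, and that `stream_chat` behaves as the README shows; it is illustrative and not part of this diff.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "internlm/internlm2-chat-7b", torch_dtype=torch.float16, trust_remote_code=True
).cuda().eval()

# stream_chat yields (partial_response, history) pairs, so printing the tail of
# each partial response streams the reply incrementally, as in the README loop.
length = 0
for response, history in model.stream_chat(tokenizer, "Hello", history=[]):
    print(response[length:], end="", flush=True)
    length = len(response)
```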
config.json CHANGED

@@ -2,9 +2,8 @@
   "architectures": [
     "InternLM2ForCausalLM"
   ],
-  "attn_implementation": "eager",
   "auto_map": {
-    "AutoConfig": "
+    "AutoConfig": "configuration_internlm.InternLMConfig",
     "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM",
     "AutoModel": "modeling_internlm2.InternLM2ForCausalLM"
   },
@@ -16,21 +15,20 @@
   "initializer_range": 0.02,
   "intermediate_size": 14336,
   "max_position_embeddings": 32768,
-  "model_type": "
+  "model_type": "internlm",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
   "num_key_value_heads": 8,
   "pad_token_id": 2,
   "rms_norm_eps": 1e-05,
   "rope_scaling": {
-    "
-    "
+    "factor": 1.0,
+    "type": "dynamic"
   },
   "rope_theta": 1000000,
   "tie_word_embeddings": false,
-  "torch_dtype": "
-  "transformers_version": "4.
+  "torch_dtype": "float16",
+  "transformers_version": "4.33.2",
   "use_cache": true,
-  "vocab_size": 92544
-  "pretraining_tp": 1
+  "vocab_size": 92544
 }
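The `auto_map` entry now points `AutoConfig` at `configuration_internlm.InternLMConfig`. A small sketch of how the updated config resolves when loaded with remote code trusted (illustrative only, not part of this diff):

```python
from transformers import AutoConfig

# trust_remote_code=True makes AutoConfig follow config.json's auto_map entry
# ("configuration_internlm.InternLMConfig") instead of a built-in config class.
config = AutoConfig.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)

print(config.model_type)    # "internlm" after this change
print(config.rope_scaling)  # {"factor": 1.0, "type": "dynamic"}
print(config.torch_dtype)   # torch.float16
```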
configuration_internlm2.py → configuration_internlm.py RENAMED

@@ -1,7 +1,10 @@
 # coding=utf-8
-# Copyright (c)
+# Copyright (c) InternLM. All rights reserved.
 #
-# This code is based on
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,22 +17,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""
+""" InternLM model configuration"""
 
 from transformers.configuration_utils import PretrainedConfig
 from transformers.utils import logging
 
 logger = logging.get_logger(__name__)
 
-
+INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
 
 
-
-class InternLM2Config(PretrainedConfig):
+class InternLMConfig(PretrainedConfig):
     r"""
-    This is the configuration class to store the configuration of a [`
-    an
-    configuration with the defaults will yield a similar configuration to that of the
+    This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
+    an InternLM model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the InternLM-7B.
 
     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
     documentation from [`PretrainedConfig`] for more information.
@@ -37,16 +39,16 @@ class InternLM2Config(PretrainedConfig):
 
     Args:
         vocab_size (`int`, *optional*, defaults to 32000):
-            Vocabulary size of the
-            `inputs_ids` passed when calling [`
+            Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`InternLMModel`]
         hidden_size (`int`, *optional*, defaults to 4096):
             Dimension of the hidden representations.
         intermediate_size (`int`, *optional*, defaults to 11008):
             Dimension of the MLP representations.
         num_hidden_layers (`int`, *optional*, defaults to 32):
-            Number of hidden layers in the Transformer
+            Number of hidden layers in the Transformer encoder.
         num_attention_heads (`int`, *optional*, defaults to 32):
-            Number of attention heads for each attention layer in the Transformer
+            Number of attention heads for each attention layer in the Transformer encoder.
         num_key_value_heads (`int`, *optional*):
             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
@@ -58,42 +60,33 @@ class InternLM2Config(PretrainedConfig):
         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
             The non-linear activation function (function or string) in the decoder.
         max_position_embeddings (`int`, *optional*, defaults to 2048):
-            The maximum sequence length that this model might ever be used with.
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
         initializer_range (`float`, *optional*, defaults to 0.02):
             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-        rms_norm_eps (`float`, *optional*, defaults to 1e-
+        rms_norm_eps (`float`, *optional*, defaults to 1e-12):
             The epsilon used by the rms normalization layers.
         use_cache (`bool`, *optional*, defaults to `True`):
             Whether or not the model should return the last key/values attentions (not used by all models). Only
             relevant if `config.is_decoder=True`.
-
-            Padding token id.
-        bos_token_id (`int`, *optional*, defaults to 1):
-            Beginning of stream token id.
-        eos_token_id (`int`, *optional*, defaults to 2):
-            End of stream token id.
-        pretraining_tp (`int`, *optional*, defaults to 1):
-            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
-            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism)
-            to understand more about it. This value is necessary to ensure exact reproducibility
-            of the pretraining results. Please refer to [this
-            issue](https://github.com/pytorch/pytorch/issues/76232).
-        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
             Whether to tie weight embeddings
-
-
-
-
-
-
-
-
+        Example:
+
+        ```python
+        >>> from transformers import InternLMModel, InternLMConfig
+
+        >>> # Initializing a InternLM internlm-7b style configuration
+        >>> configuration = InternLMConfig()
+
+        >>> # Initializing a model from the internlm-7b style configuration
+        >>> model = InternLMModel(configuration)

+        >>> # Accessing the model configuration
+        >>> configuration = model.config
+        ```"""
+    model_type = "internlm"
     _auto_class = "AutoConfig"
-    model_type = "internlm2"
-    keys_to_ignore_at_inference = ["past_key_values"]
 
     def __init__(  # pylint: disable=W0102
         self,
@@ -111,12 +104,11 @@ class InternLM2Config(PretrainedConfig):
         pad_token_id=0,
         bos_token_id=1,
         eos_token_id=2,
-        pretraining_tp=1,
         tie_word_embeddings=False,
         bias=True,
         rope_theta=10000,
         rope_scaling=None,
-        attn_implementation=
+        attn_implementation="eager",
         **kwargs,
     ):
         self.vocab_size = vocab_size
@@ -134,15 +126,14 @@ class InternLM2Config(PretrainedConfig):
         self.hidden_act = hidden_act
         self.initializer_range = initializer_range
         self.rms_norm_eps = rms_norm_eps
-        self.pretraining_tp = pretraining_tp
         self.use_cache = use_cache
         self.rope_theta = rope_theta
         self.rope_scaling = rope_scaling
         self._rope_scaling_validation()
+
         self.attn_implementation = attn_implementation
         if self.attn_implementation is None:
             self.attn_implementation = "eager"
-
         super().__init__(
             pad_token_id=pad_token_id,
             bos_token_id=bos_token_id,
@@ -169,12 +160,5 @@ class InternLM2Config(PretrainedConfig):
             raise ValueError(
                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
             )
-        if (
-
-            or not isinstance(rope_scaling_factor, (float, int))
-            or rope_scaling_factor < 1.0
-        ):
-            raise ValueError(
-                f"`rope_scaling`'s factor field must be a number >= 1, got {rope_scaling_factor} "
-                f"of type {type(rope_scaling_factor)}"
-            )
+        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+            raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
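A sketch of what the tightened `_rope_scaling_validation` now accepts, assuming `configuration_internlm.py` from this PR is importable locally and that the configuration's other arguments keep their defaults. Note the new check requires `factor` to be a `float`, so an integer value would be rejected:

```python
from configuration_internlm import InternLMConfig  # the file renamed in this PR

# Accepted: type in {"linear", "dynamic"} and factor a float >= 1.0, matching
# the {"factor": 1.0, "type": "dynamic"} block now present in config.json.
cfg = InternLMConfig(rope_scaling={"type": "dynamic", "factor": 1.0})
print(cfg.rope_scaling)

# Rejected by the new check: factor given as an int rather than a float.
try:
    InternLMConfig(rope_scaling={"type": "dynamic", "factor": 2})
except ValueError as err:
    print(err)  # `rope_scaling`'s factor field must be a float >= 1, got 2
```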
generation_config.json CHANGED

@@ -1,9 +1,7 @@
 {
+  "_from_model_config": true,
   "bos_token_id": 1,
-  "eos_token_id": [
-    2,
-    92542
-  ],
+  "eos_token_id": 2,
   "pad_token_id": 2,
-  "transformers_version": "4.
+  "transformers_version": "4.33.2"
 }
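The old file listed two stop ids (2 and 92542, presumably the chat end-of-turn special token) while the new file keeps a single `eos_token_id`. A hedged sketch of both forms using the standard `GenerationConfig` API (illustrative, not part of this diff):

```python
from transformers import GenerationConfig

# After this change: a single end-of-sequence id, as in the new file.
gen_cfg = GenerationConfig(bos_token_id=1, eos_token_id=2, pad_token_id=2)

# transformers also accepts a list of ids; this reproduces the old behaviour,
# where generation stopped on either token id 2 or token id 92542.
gen_cfg_multi = GenerationConfig(bos_token_id=1, eos_token_id=[2, 92542], pad_token_id=2)
print(gen_cfg_multi.eos_token_id)  # [2, 92542]
```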
model-00005-of-00008.safetensors DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4a28f7db7ba9fc0991e3552a156e11b942494e945c4c134003aad40bb4e49ed6
-size 1979780456

model-00006-of-00008.safetensors DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d330d795037a2db1c7a15a991c88ba6bf2bc2890e36210571d7499d1ae6dfd51
-size 1946242728

model-00007-of-00008.safetensors DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c6c65720f77de3aec168dff5b0f11e779f4c9059f2fdb04c67bae4321f5efd6a
-size 1979780456

model-00008-of-00008.safetensors DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:eb488551289c4630b7b1fb015acf2ff52a206383c96370a7664b42253a007eae
-size 1748035640
model.safetensors.index.json DELETED

@@ -1,234 +0,0 @@
-{
-  "metadata": {
-    "total_size": 15475417088
-  },
-  "weight_map": {
-    "model.layers.0.attention.wo.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.attention.wqkv.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.attention_norm.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.feed_forward.w1.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.feed_forward.w2.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.feed_forward.w3.weight": "model-00001-of-00008.safetensors",
-    "model.layers.0.ffn_norm.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.attention.wo.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.attention.wqkv.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.attention_norm.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.feed_forward.w1.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.feed_forward.w2.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.feed_forward.w3.weight": "model-00001-of-00008.safetensors",
-    "model.layers.1.ffn_norm.weight": "model-00001-of-00008.safetensors",
-    "model.layers.10.attention.wo.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.attention.wqkv.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.attention_norm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
-    "model.layers.10.ffn_norm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.attention.wo.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.attention.wqkv.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.attention_norm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.11.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
-    "model.layers.11.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
-    "model.layers.11.ffn_norm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.12.attention.wo.weight": "model-00004-of-00008.safetensors",
-    "model.layers.12.attention.wqkv.weight": "model-00004-of-00008.safetensors",
-    "model.layers.12.attention_norm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.12.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
-    "model.layers.12.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
-    "model.layers.12.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
-    "model.layers.12.ffn_norm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.attention.wo.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.attention.wqkv.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.attention_norm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
-    "model.layers.13.ffn_norm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.attention.wo.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.attention.wqkv.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.attention_norm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
-    "model.layers.14.ffn_norm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.attention.wo.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.attention.wqkv.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.attention_norm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.feed_forward.w1.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.feed_forward.w2.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.feed_forward.w3.weight": "model-00004-of-00008.safetensors",
-    "model.layers.15.ffn_norm.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.attention.wo.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.attention.wqkv.weight": "model-00004-of-00008.safetensors",
-    "model.layers.16.attention_norm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.16.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
-    "model.layers.16.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
-    "model.layers.16.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
-    "model.layers.16.ffn_norm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.attention.wo.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.attention.wqkv.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.attention_norm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
-    "model.layers.17.ffn_norm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.attention.wo.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.attention.wqkv.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.attention_norm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
-    "model.layers.18.ffn_norm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.attention.wo.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.attention.wqkv.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.attention_norm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.feed_forward.w2.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
-    "model.layers.19.ffn_norm.weight": "model-00005-of-00008.safetensors",
-    "model.layers.2.attention.wo.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.attention.wqkv.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.attention_norm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.2.feed_forward.w1.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
-    "model.layers.2.feed_forward.w3.weight": "model-00001-of-00008.safetensors",
-    "model.layers.2.ffn_norm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.20.attention.wo.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.attention.wqkv.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.attention_norm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.20.feed_forward.w1.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
-    "model.layers.20.feed_forward.w3.weight": "model-00005-of-00008.safetensors",
-    "model.layers.20.ffn_norm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.21.attention.wo.weight": "model-00006-of-00008.safetensors",
-    "model.layers.21.attention.wqkv.weight": "model-00006-of-00008.safetensors",
-    "model.layers.21.attention_norm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.21.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
-    "model.layers.21.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
-    "model.layers.21.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
-    "model.layers.21.ffn_norm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.attention.wo.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.attention.wqkv.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.attention_norm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
-    "model.layers.22.ffn_norm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.attention.wo.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.attention.wqkv.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.attention_norm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
-    "model.layers.23.ffn_norm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.attention.wo.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.attention.wqkv.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.attention_norm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.feed_forward.w1.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.feed_forward.w2.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.feed_forward.w3.weight": "model-00006-of-00008.safetensors",
-    "model.layers.24.ffn_norm.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.attention.wo.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.attention.wqkv.weight": "model-00006-of-00008.safetensors",
-    "model.layers.25.attention_norm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.25.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
-    "model.layers.25.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
-    "model.layers.25.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
-    "model.layers.25.ffn_norm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.attention.wo.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.attention.wqkv.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.attention_norm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
-    "model.layers.26.ffn_norm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.attention.wo.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.attention.wqkv.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.attention_norm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
-    "model.layers.27.ffn_norm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.attention.wo.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.attention.wqkv.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.attention_norm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.feed_forward.w2.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
-    "model.layers.28.ffn_norm.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.attention.wo.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.attention.wqkv.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.attention_norm.weight": "model-00008-of-00008.safetensors",
-    "model.layers.29.feed_forward.w1.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
-    "model.layers.29.feed_forward.w3.weight": "model-00007-of-00008.safetensors",
-    "model.layers.29.ffn_norm.weight": "model-00008-of-00008.safetensors",
-    "model.layers.3.attention.wo.weight": "model-00002-of-00008.safetensors",
-    "model.layers.3.attention.wqkv.weight": "model-00002-of-00008.safetensors",
-    "model.layers.3.attention_norm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.3.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
-    "model.layers.3.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
-    "model.layers.3.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
-    "model.layers.3.ffn_norm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.30.attention.wo.weight": "model-00008-of-00008.safetensors",
-    "model.layers.30.attention.wqkv.weight": "model-00008-of-00008.safetensors",
-    "model.layers.30.attention_norm.weight": "model-00008-of-00008.safetensors",
-    "model.layers.30.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
-    "model.layers.30.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
-    "model.layers.30.feed_forward.w3.weight": "model-00008-of-00008.safetensors",
-    "model.layers.30.ffn_norm.weight": "model-00008-of-00008.safetensors",
-    "model.layers.31.attention.wo.weight": "model-00008-of-00008.safetensors",
-    "model.layers.31.attention.wqkv.weight": "model-00008-of-00008.safetensors",
-    "model.layers.31.attention_norm.weight": "model-00008-of-00008.safetensors",
-    "model.layers.31.feed_forward.w1.weight": "model-00008-of-00008.safetensors",
-    "model.layers.31.feed_forward.w2.weight": "model-00008-of-00008.safetensors",
-    "model.layers.31.feed_forward.w3.weight": "model-00008-of-00008.safetensors",
-    "model.layers.31.ffn_norm.weight": "model-00008-of-00008.safetensors",
-    "model.layers.4.attention.wo.weight": "model-00002-of-00008.safetensors",
-    "model.layers.4.attention.wqkv.weight": "model-00002-of-00008.safetensors",
-    "model.layers.4.attention_norm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.4.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
-    "model.layers.4.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
-    "model.layers.4.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
-    "model.layers.4.ffn_norm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.5.attention.wo.weight": "model-00002-of-00008.safetensors",
-    "model.layers.5.attention.wqkv.weight": "model-00002-of-00008.safetensors",
-    "model.layers.5.attention_norm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.5.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
-    "model.layers.5.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
-    "model.layers.5.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
-    "model.layers.5.ffn_norm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.6.attention.wo.weight": "model-00002-of-00008.safetensors",
-    "model.layers.6.attention.wqkv.weight": "model-00002-of-00008.safetensors",
-    "model.layers.6.attention_norm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.6.feed_forward.w1.weight": "model-00002-of-00008.safetensors",
-    "model.layers.6.feed_forward.w2.weight": "model-00002-of-00008.safetensors",
-    "model.layers.6.feed_forward.w3.weight": "model-00002-of-00008.safetensors",
-    "model.layers.6.ffn_norm.weight": "model-00002-of-00008.safetensors",
-    "model.layers.7.attention.wo.weight": "model-00002-of-00008.safetensors",
-    "model.layers.7.attention.wqkv.weight": "model-00002-of-00008.safetensors",
-    "model.layers.7.attention_norm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.7.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
-    "model.layers.7.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
-    "model.layers.7.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
-    "model.layers.7.ffn_norm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.8.attention.wo.weight": "model-00003-of-00008.safetensors",
-    "model.layers.8.attention.wqkv.weight": "model-00003-of-00008.safetensors",
-    "model.layers.8.attention_norm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.8.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
-    "model.layers.8.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
-    "model.layers.8.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
-    "model.layers.8.ffn_norm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.9.attention.wo.weight": "model-00003-of-00008.safetensors",
-    "model.layers.9.attention.wqkv.weight": "model-00003-of-00008.safetensors",
-    "model.layers.9.attention_norm.weight": "model-00003-of-00008.safetensors",
-    "model.layers.9.feed_forward.w1.weight": "model-00003-of-00008.safetensors",
-    "model.layers.9.feed_forward.w2.weight": "model-00003-of-00008.safetensors",
-    "model.layers.9.feed_forward.w3.weight": "model-00003-of-00008.safetensors",
-    "model.layers.9.ffn_norm.weight": "model-00003-of-00008.safetensors",
-    "model.norm.weight": "model-00008-of-00008.safetensors",
-    "model.tok_embeddings.weight": "model-00001-of-00008.safetensors",
-    "output.weight": "model-00008-of-00008.safetensors"
-  }
-}
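The deleted safetensors index above and the newly added `pytorch_model.bin.index.json` share the same shape: a `metadata.total_size` plus a `weight_map` from parameter name to shard file. A minimal sketch of reading such an index once the git-lfs pointer has been resolved (illustrative, not part of this diff):

```python
import json

# Works for either model.safetensors.index.json (deleted here) or the new
# pytorch_model.bin.index.json, once the LFS pointer has been pulled.
with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])           # total bytes across all shards
shards = sorted(set(index["weight_map"].values()))
print(len(shards))                               # 8 shard files for this checkpoint
print(index["weight_map"]["model.norm.weight"])  # shard holding a given tensor
```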
modeling_internlm2.py
CHANGED
@@ -13,10 +13,11 @@
|
|
13 |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
# See the License for the specific language governing permissions and
|
15 |
# limitations under the License.
|
16 |
-
"""PyTorch InternLM2 model."""
|
17 |
import math
|
18 |
import queue
|
19 |
import threading
|
|
|
20 |
from typing import List, Optional, Tuple, Union
|
21 |
|
22 |
import torch
|
@@ -26,54 +27,49 @@ from einops import rearrange
|
|
26 |
from torch import nn
|
27 |
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
28 |
from transformers.activations import ACT2FN
|
29 |
-
from transformers.cache_utils import Cache, DynamicCache, StaticCache
|
30 |
-
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
|
31 |
from transformers.modeling_outputs import (
|
32 |
BaseModelOutputWithPast,
|
33 |
CausalLMOutputWithPast,
|
34 |
-
QuestionAnsweringModelOutput,
|
35 |
SequenceClassifierOutputWithPast,
|
36 |
-
TokenClassifierOutput,
|
37 |
)
|
38 |
from transformers.modeling_utils import PreTrainedModel
|
39 |
-
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
|
40 |
from transformers.utils import (
|
41 |
add_start_docstrings,
|
42 |
add_start_docstrings_to_model_forward,
|
43 |
-
is_flash_attn_greater_or_equal_2_10,
|
44 |
logging,
|
45 |
replace_return_docstrings,
|
46 |
)
|
47 |
|
48 |
try:
|
49 |
from transformers.generation.streamers import BaseStreamer
|
50 |
-
except
|
51 |
BaseStreamer = None
|
52 |
|
53 |
-
from .
|
54 |
-
|
55 |
-
|
56 |
-
try:
|
57 |
-
from flash_attn import flash_attn_func, flash_attn_varlen_func
|
58 |
-
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
|
59 |
-
except:
|
60 |
-
pass
|
61 |
-
|
62 |
-
try:
|
63 |
-
support_bf16_triu = torch.__version__ >= "2.1.0"
|
64 |
-
except Exception:
|
65 |
-
support_bf16_triu = False
|
66 |
|
67 |
logger = logging.get_logger(__name__)
|
68 |
|
69 |
_CONFIG_FOR_DOC = "InternLM2Config"
|
70 |
|
71 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
72 |
def _get_unpad_data(attention_mask):
|
73 |
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
74 |
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
75 |
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
76 |
-
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
|
77 |
return (
|
78 |
indices,
|
79 |
cu_seqlens,
|
@@ -81,10 +77,45 @@ def _get_unpad_data(attention_mask):
|
|
81 |
)
|
82 |
|
83 |
|
84 |
-
|
85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
86 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
87 |
def __init__(self, hidden_size, eps=1e-6):
|
|
|
|
|
|
|
88 |
super().__init__()
|
89 |
self.weight = nn.Parameter(torch.ones(hidden_size))
|
90 |
self.variance_epsilon = eps
|
@@ -97,68 +128,93 @@ class InternLM2RMSNorm(nn.Module):
|
|
97 |
return self.weight * hidden_states.to(input_dtype)
|
98 |
|
99 |
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
class InternLM2RotaryEmbedding(nn.Module):
|
104 |
-
|
105 |
-
|
106 |
-
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
|
107 |
super().__init__()
|
108 |
-
|
109 |
self.dim = dim
|
110 |
self.max_position_embeddings = max_position_embeddings
|
111 |
self.base = base
|
112 |
-
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2
|
113 |
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
114 |
-
# For BC we register cos and sin cached
|
115 |
-
self.max_seq_len_cached = max_position_embeddings
|
116 |
|
117 |
-
|
118 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
119 |
# x: [bs, num_attention_heads, seq_len, head_size]
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
|
128 |
-
emb = torch.cat((freqs, freqs), dim=-1)
|
129 |
-
cos = emb.cos()
|
130 |
-
sin = emb.sin()
|
131 |
-
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
|
132 |
|
133 |
|
|
|
134 |
class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
|
135 |
"""InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
|
136 |
|
137 |
-
def
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
|
|
|
|
|
|
142 |
|
|
|
|
|
|
|
|
|
|
|
143 |
|
|
|
|
|
144 |
class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
|
145 |
"""InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
|
146 |
-
Credits to the Reddit users /u/bloc97 and /u/emozilla
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
147 |
|
148 |
-
def forward(self, x, position_ids):
|
149 |
-
# difference to the original RoPE: inv_freq is recomputed when the sequence length > original length
|
150 |
-
seq_len = torch.max(position_ids) + 1
|
151 |
if seq_len > self.max_position_embeddings:
|
152 |
base = self.base * (
|
153 |
(self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
|
154 |
) ** (self.dim / (self.dim - 2))
|
155 |
-
inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2
|
156 |
-
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
|
|
|
|
157 |
|
158 |
-
|
159 |
-
|
|
|
|
|
|
|
160 |
|
161 |
|
|
|
162 |
def rotate_half(x):
|
163 |
"""Rotates half the hidden dims of the input."""
|
164 |
x1 = x[..., : x.shape[-1] // 2]
|
@@ -166,36 +222,17 @@ def rotate_half(x):
|
|
166 |
return torch.cat((-x2, x1), dim=-1)
|
167 |
|
168 |
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
k (`torch.Tensor`): The key tensor.
|
175 |
-
cos (`torch.Tensor`): The cosine part of the rotary embedding.
|
176 |
-
sin (`torch.Tensor`): The sine part of the rotary embedding.
|
177 |
-
position_ids (`torch.Tensor`, *optional*):
|
178 |
-
Deprecated and unused.
|
179 |
-
unsqueeze_dim (`int`, *optional*, defaults to 1):
|
180 |
-
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
|
181 |
-
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
|
182 |
-
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
|
183 |
-
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
|
184 |
-
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
|
185 |
-
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
|
186 |
-
Returns:
|
187 |
-
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
188 |
-
"""
|
189 |
-
cos = cos.unsqueeze(unsqueeze_dim)
|
190 |
-
sin = sin.unsqueeze(unsqueeze_dim)
|
191 |
q_embed = (q * cos) + (rotate_half(q) * sin)
|
192 |
k_embed = (k * cos) + (rotate_half(k) * sin)
|
193 |
return q_embed, k_embed
|
194 |
|
195 |
|
196 |
class InternLM2MLP(nn.Module):
|
197 |
-
"""MLP for InternLM2 model."""
|
198 |
-
|
199 |
def __init__(self, config):
|
200 |
super().__init__()
|
201 |
self.config = config
|
@@ -212,6 +249,7 @@ class InternLM2MLP(nn.Module):
|
|
212 |
return down_proj
|
213 |
|
214 |
|
|
|
215 |
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
216 |
"""
|
217 |
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
@@ -224,27 +262,19 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
|
224 |
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
225 |
|
226 |
|
|
|
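A small sanity-check sketch (assumed toy shapes, standalone) showing that `repeat_kv` matches `torch.repeat_interleave` along the key/value head dimension, which is what grouped-query attention relies on:

```python
import torch

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # Insert a repeat axis, broadcast it, then fold it into the head axis.
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

kv = torch.randn(2, 4, 5, 16)                       # (batch, kv_heads, seq_len, head_dim)
out = repeat_kv(kv, n_rep=3)                        # -> (2, 12, 5, 16)
ref = torch.repeat_interleave(kv, repeats=3, dim=1)
assert torch.equal(out, ref)
```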
227 |
class InternLM2Attention(nn.Module):
|
228 |
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
229 |
|
230 |
-
def __init__(self, config: InternLM2Config, layer_idx: Optional[int] = None):
|
231 |
super().__init__()
|
232 |
self.config = config
|
233 |
-
self.layer_idx = layer_idx
|
234 |
-
if layer_idx is None:
|
235 |
-
logger.warning_once(
|
236 |
-
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
|
237 |
-
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
|
238 |
-
"when creating this class."
|
239 |
-
)
|
240 |
-
|
241 |
self.hidden_size = config.hidden_size
|
242 |
self.num_heads = config.num_attention_heads
|
243 |
self.head_dim = self.hidden_size // self.num_heads
|
244 |
self.num_key_value_heads = config.num_key_value_heads
|
245 |
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
246 |
self.max_position_embeddings = config.max_position_embeddings
|
247 |
-
self.rope_theta = config.rope_theta
|
248 |
self.is_causal = True
|
249 |
|
250 |
if (self.head_dim * self.num_heads) != self.hidden_size:
|
@@ -258,8 +288,8 @@ class InternLM2Attention(nn.Module):
|
|
258 |
(self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
|
259 |
bias=config.bias,
|
260 |
)
|
261 |
-
self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
|
262 |
|
|
|
263 |
self._init_rope()
|
264 |
|
265 |
def _init_rope(self):
|
@@ -267,49 +297,51 @@ class InternLM2Attention(nn.Module):
|
|
267 |
self.rotary_emb = InternLM2RotaryEmbedding(
|
268 |
self.head_dim,
|
269 |
max_position_embeddings=self.max_position_embeddings,
|
270 |
-
base=self.rope_theta,
|
271 |
)
|
272 |
else:
|
273 |
scaling_type = self.config.rope_scaling["type"]
|
274 |
scaling_factor = self.config.rope_scaling["factor"]
|
275 |
-
if scaling_type == "
|
276 |
-
self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
|
277 |
self.head_dim,
|
278 |
max_position_embeddings=self.max_position_embeddings,
|
|
|
279 |
scaling_factor=scaling_factor,
|
280 |
-
base=self.rope_theta,
|
281 |
)
|
282 |
-
elif scaling_type == "
|
283 |
-
self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
|
284 |
self.head_dim,
|
285 |
max_position_embeddings=self.max_position_embeddings,
|
|
|
286 |
scaling_factor=scaling_factor,
|
287 |
-
base=self.rope_theta,
|
288 |
)
|
289 |
else:
|
290 |
-
raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
|
|
|
|
|
|
|
|
|
291 |
|
292 |
def forward(
|
293 |
self,
|
294 |
hidden_states: torch.Tensor,
|
295 |
attention_mask: Optional[torch.Tensor] = None,
|
296 |
position_ids: Optional[torch.LongTensor] = None,
|
297 |
-
past_key_value: Optional[Cache] = None,
|
298 |
output_attentions: bool = False,
|
299 |
-
use_cache: bool = False,
|
300 |
-
|
301 |
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
|
|
|
|
|
|
|
|
|
|
|
|
302 |
bsz, q_len, _ = hidden_states.size()
|
303 |
|
304 |
-
|
305 |
-
# split qkv_states by tp size
|
306 |
-
key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
|
307 |
-
qkv_slices = self.wqkv.weight.split(key_value_slicing, dim=0)
|
308 |
-
qkv_states = torch.cat(
|
309 |
-
[F.linear(hidden_states, qkv_slice) for qkv_slice in qkv_slices], dim=-1 # pylint: disable=E1102
|
310 |
-
)
|
311 |
-
else:
|
312 |
-
qkv_states = self.wqkv(hidden_states)
|
313 |
|
314 |
qkv_states = rearrange(
|
315 |
qkv_states,
|
@@ -319,26 +351,44 @@ class InternLM2Attention(nn.Module):
|
|
319 |
)
|
320 |
|
321 |
query_states = qkv_states[..., : self.num_key_value_groups, :]
|
322 |
-
query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
|
323 |
-
key_states = qkv_states[..., -2, :]
|
324 |
-
value_states = qkv_states[..., -1, :]
|
325 |
|
326 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
327 |
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
328 |
|
329 |
if past_key_value is not None:
|
330 |
-
# sin and cos are specific to RoPE models; cache_position needed for the static cache
|
331 |
-
|
332 |
-
|
|
|
|
|
333 |
|
334 |
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
335 |
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
336 |
|
337 |
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
338 |
|
339 |
-
if
|
340 |
-
|
341 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
342 |
|
343 |
# upcast attention to fp32
|
344 |
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
@@ -351,20 +401,9 @@ class InternLM2Attention(nn.Module):
|
|
351 |
)
|
352 |
|
353 |
attn_output = attn_output.transpose(1, 2).contiguous()
|
354 |
-
|
355 |
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
356 |
|
357 |
-
|
358 |
-
attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
|
359 |
-
o_proj_slices = self.wo.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
|
360 |
-
attn_output = sum(
|
361 |
-
[
|
362 |
-
F.linear(attn_output[i], o_proj_slices[i]) # pylint: disable=E1102
|
363 |
-
for i in range(self.config.pretraining_tp)
|
364 |
-
]
|
365 |
-
)
|
366 |
-
else:
|
367 |
-
attn_output = self.wo(attn_output)
|
368 |
|
369 |
if not output_attentions:
|
370 |
attn_weights = None
|
@@ -372,6 +411,7 @@ class InternLM2Attention(nn.Module):
|
|
372 |
return attn_output, attn_weights, past_key_value
|
373 |
|
374 |
|
|
|
375 |
class InternLM2FlashAttention2(InternLM2Attention):
|
376 |
"""
|
377 |
InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays
|
@@ -379,34 +419,26 @@ class InternLM2FlashAttention2(InternLM2Attention):
|
|
379 |
flash attention and deal with padding tokens in case the input contains any of them.
|
380 |
"""
|
381 |
|
382 |
-
def __init__(self, *args, **kwargs):
|
383 |
-
super().__init__(*args, **kwargs)
|
384 |
-
|
385 |
-
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
|
386 |
-
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement,
|
387 |
-
# that was made default for flash_attn>=2.1. This attribute is used to handle this difference.
|
388 |
-
# Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
|
389 |
-
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1)
|
390 |
-
# produces a wrong mask (top-left).
|
391 |
-
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
|
392 |
-
|
393 |
def forward(
|
394 |
self,
|
395 |
hidden_states: torch.Tensor,
|
396 |
attention_mask: Optional[torch.LongTensor] = None,
|
397 |
position_ids: Optional[torch.LongTensor] = None,
|
398 |
-
past_key_value: Optional[Cache] = None,
|
399 |
output_attentions: bool = False,
|
400 |
use_cache: bool = False,
|
401 |
-
|
402 |
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
403 |
-
|
404 |
-
|
405 |
-
|
406 |
-
"
|
407 |
-
"
|
408 |
)
|
409 |
|
|
|
|
|
|
|
410 |
output_attentions = False
|
411 |
|
412 |
bsz, q_len, _ = hidden_states.size()
|
@@ -429,61 +461,37 @@ class InternLM2FlashAttention2(InternLM2Attention):
|
|
429 |
key_states = key_states.transpose(1, 2)
|
430 |
value_states = value_states.transpose(1, 2)
|
431 |
|
432 |
-
|
433 |
-
|
|
|
|
|
|
|
|
|
|
|
434 |
|
435 |
if past_key_value is not None:
|
436 |
-
# sin and cos are specific to RoPE models; cache_position needed for the static cache
|
437 |
-
|
438 |
-
|
|
|
|
|
439 |
|
440 |
-
# TODO: These transpose are quite inefficient but Flash Attention requires the layout
|
441 |
-
# [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
|
442 |
-
# to be able to avoid many of these transpose/reshape/view.
|
443 |
query_states = query_states.transpose(1, 2)
|
444 |
key_states = key_states.transpose(1, 2)
|
445 |
value_states = value_states.transpose(1, 2)
|
446 |
|
447 |
-
|
448 |
-
dropout_rate = 0.0
|
449 |
-
|
450 |
-
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
|
451 |
-
# therefore the input hidden states gets silently casted in float32. Hence, we need
|
452 |
-
# cast them back in the correct dtype just to be sure everything works as expected.
|
453 |
-
# This might slowdown training & inference so it is recommended to not cast the LayerNorms
|
454 |
-
# in fp32. (InternLM2RMSNorm handles it correctly)
|
455 |
-
|
456 |
-
input_dtype = query_states.dtype
|
457 |
-
if input_dtype == torch.float32:
|
458 |
-
if torch.is_autocast_enabled():
|
459 |
-
target_dtype = torch.get_autocast_gpu_dtype()
|
460 |
-
# Handle the case where the model is quantized
|
461 |
-
elif hasattr(self.config, "_pre_quantization_dtype"):
|
462 |
-
target_dtype = self.config._pre_quantization_dtype
|
463 |
-
else:
|
464 |
-
target_dtype = self.wqkv.weight.dtype
|
465 |
-
|
466 |
-
logger.warning_once(
|
467 |
-
f"The input hidden states seems to be silently casted in float32, this might be related to"
|
468 |
-
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
|
469 |
-
f" {target_dtype}."
|
470 |
-
)
|
471 |
-
|
472 |
-
query_states = query_states.to(target_dtype)
|
473 |
-
key_states = key_states.to(target_dtype)
|
474 |
-
value_states = value_states.to(target_dtype)
|
475 |
|
476 |
attn_output = self._flash_attention_forward(
|
477 |
query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
|
478 |
)
|
479 |
-
|
480 |
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
|
481 |
attn_output = self.wo(attn_output)
|
482 |
|
483 |
if not output_attentions:
|
484 |
attn_weights = None
|
485 |
|
486 |
-
return attn_output, attn_weights, past_key_value
|
487 |
|
488 |
def _flash_attention_forward(
|
489 |
self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
|
@@ -502,29 +510,23 @@ class InternLM2FlashAttention2(InternLM2Attention):
|
|
502 |
attention_mask (`torch.Tensor`):
|
503 |
The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
|
504 |
position of padding tokens and 1 for the position of non-padding tokens.
|
505 |
-
dropout (`float`):
|
506 |
Attention dropout
|
507 |
softmax_scale (`float`, *optional*):
|
508 |
The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
|
509 |
"""
|
510 |
-
if not self._flash_attn_uses_top_left_mask:
|
511 |
-
causal = self.is_causal
|
512 |
-
else:
|
513 |
-
# TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1.
|
514 |
-
# For details, please see the comment in InternLM2FlashAttention2 __init__.
|
515 |
-
causal = self.is_causal and query_length != 1
|
516 |
-
|
517 |
# Contains at least one padding token in the sequence
|
|
|
518 |
if attention_mask is not None:
|
519 |
batch_size = query_states.shape[0]
|
520 |
-
query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
|
521 |
query_states, key_states, value_states, attention_mask, query_length
|
522 |
)
|
523 |
|
524 |
cu_seqlens_q, cu_seqlens_k = cu_seq_lens
|
525 |
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
|
526 |
|
527 |
-
attn_output_unpad = flash_attn_varlen_func(
|
528 |
query_states,
|
529 |
key_states,
|
530 |
value_states,
|
@@ -537,26 +539,27 @@ class InternLM2FlashAttention2(InternLM2Attention):
|
|
537 |
causal=causal,
|
538 |
)
|
539 |
|
540 |
-
attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
|
541 |
else:
|
542 |
-
attn_output = flash_attn_func(
|
543 |
query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
|
544 |
)
|
545 |
|
546 |
return attn_output
|
547 |
|
548 |
-
def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
|
549 |
indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
|
550 |
batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
|
551 |
|
552 |
-
key_layer = index_first_axis(
|
553 |
key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
|
554 |
)
|
555 |
-
value_layer = index_first_axis(
|
556 |
value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
|
557 |
)
|
|
|
558 |
if query_length == kv_seq_len:
|
559 |
-
query_layer = index_first_axis(
|
560 |
query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
|
561 |
)
|
562 |
cu_seqlens_q = cu_seqlens_k
|
@@ -572,139 +575,29 @@ class InternLM2FlashAttention2(InternLM2Attention):
|
|
572 |
else:
|
573 |
# The -q_len: slice assumes left padding.
|
574 |
attention_mask = attention_mask[:, -query_length:]
|
575 |
-
query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(
|
576 |
-
query_layer, attention_mask
|
577 |
-
)
|
578 |
|
579 |
return (
|
580 |
query_layer,
|
581 |
key_layer,
|
582 |
value_layer,
|
583 |
-
indices_q,
|
584 |
(cu_seqlens_q, cu_seqlens_k),
|
585 |
(max_seqlen_in_batch_q, max_seqlen_in_batch_k),
|
586 |
)
|
587 |
|
588 |
-
|
589 |
-
# Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->InternLM2
|
590 |
-
class InternLM2SdpaAttention(InternLM2Attention):
|
591 |
-
"""
|
592 |
-
InternLM2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
|
593 |
-
`InternLM2Attention` as the weights of the module stays untouched. The only changes are on the forward pass
|
594 |
-
to adapt to SDPA API.
|
595 |
-
"""
|
596 |
-
|
597 |
-
# Adapted from InternLM2Attention.forward
|
598 |
-
def forward(
|
599 |
-
self,
|
600 |
-
hidden_states: torch.Tensor,
|
601 |
-
attention_mask: Optional[torch.Tensor] = None,
|
602 |
-
position_ids: Optional[torch.LongTensor] = None,
|
603 |
-
past_key_value: Optional[Cache] = None,
|
604 |
-
output_attentions: bool = False,
|
605 |
-
use_cache: bool = False,
|
606 |
-
cache_position: Optional[torch.LongTensor] = None,
|
607 |
-
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
608 |
-
if output_attentions:
|
609 |
-
# TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"`
|
610 |
-
# once this is implemented.
|
611 |
-
logger.warning_once(
|
612 |
-
"InternLM2Model uses InternLM2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` "
|
613 |
-
"does not support `output_attentions=True`. Falling back to the manual attention implementation, "
|
614 |
-
"but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. "
|
615 |
-
'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
|
616 |
-
)
|
617 |
-
return super().forward(
|
618 |
-
hidden_states=hidden_states,
|
619 |
-
attention_mask=attention_mask,
|
620 |
-
position_ids=position_ids,
|
621 |
-
past_key_value=past_key_value,
|
622 |
-
output_attentions=output_attentions,
|
623 |
-
use_cache=use_cache,
|
624 |
-
cache_position=cache_position,
|
625 |
-
)
|
626 |
-
|
627 |
-
bsz, q_len, _ = hidden_states.size()
|
628 |
-
|
629 |
-
qkv_states = self.wqkv(hidden_states)
|
630 |
-
|
631 |
-
qkv_states = rearrange(
|
632 |
-
qkv_states,
|
633 |
-
"b q (h gs d) -> b q h gs d",
|
634 |
-
gs=2 + self.num_key_value_groups,
|
635 |
-
d=self.head_dim,
|
636 |
-
)
|
637 |
-
|
638 |
-
query_states = qkv_states[..., : self.num_key_value_groups, :]
|
639 |
-
query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
|
640 |
-
key_states = qkv_states[..., -2, :]
|
641 |
-
value_states = qkv_states[..., -1, :]
|
642 |
-
|
643 |
-
query_states = query_states.transpose(1, 2)
|
644 |
-
key_states = key_states.transpose(1, 2)
|
645 |
-
value_states = value_states.transpose(1, 2)
|
646 |
-
|
647 |
-
cos, sin = self.rotary_emb(value_states, position_ids)
|
648 |
-
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
649 |
-
|
650 |
-
if past_key_value is not None:
|
651 |
-
# sin and cos are specific to RoPE models; cache_position needed for the static cache
|
652 |
-
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
653 |
-
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
654 |
-
|
655 |
-
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
656 |
-
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
657 |
-
|
658 |
-
causal_mask = attention_mask
|
659 |
-
if attention_mask is not None:
|
660 |
-
causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
|
661 |
-
|
662 |
-
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with
|
663 |
-
# custom attn_mask, Reference: https://github.com/pytorch/pytorch/issues/112577.
|
664 |
-
if query_states.device.type == "cuda" and causal_mask is not None:
|
665 |
-
query_states = query_states.contiguous()
|
666 |
-
key_states = key_states.contiguous()
|
667 |
-
value_states = value_states.contiguous()
|
668 |
-
|
669 |
-
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of
|
670 |
-
# an inline conditional assignment in SDPA to support both torch.compile's dynamic shapes and full graph
|
671 |
-
# options. An inline conditional prevents dynamic shapes from compiling.
|
672 |
-
is_causal = bool(causal_mask is None and q_len > 1)
|
673 |
-
|
674 |
-
attn_output = torch.nn.functional.scaled_dot_product_attention( # pylint: disable=E1102
|
675 |
-
query_states,
|
676 |
-
key_states,
|
677 |
-
value_states,
|
678 |
-
attn_mask=causal_mask,
|
679 |
-
dropout_p=0.0,
|
680 |
-
is_causal=is_causal,
|
681 |
-
)
|
682 |
-
|
683 |
-
attn_output = attn_output.transpose(1, 2).contiguous()
|
684 |
-
attn_output = attn_output.view(bsz, q_len, self.hidden_size)
|
685 |
-
|
686 |
-
attn_output = self.wo(attn_output)
|
687 |
-
|
688 |
-
return attn_output, None, past_key_value
|
689 |
-
|
690 |
-
|
691 |
INTERNLM2_ATTENTION_CLASSES = {
|
692 |
"eager": InternLM2Attention,
|
693 |
"flash_attention_2": InternLM2FlashAttention2,
|
694 |
-
"sdpa": InternLM2SdpaAttention,
|
695 |
}
|
696 |
|
697 |
-
|
698 |
-
# Modified from transformers.models.llama.modeling_llama.LlamaDecoderLayer with Llama->InternLM2
|
699 |
class InternLM2DecoderLayer(nn.Module):
|
700 |
-
|
701 |
-
|
702 |
-
def __init__(self, config: InternLM2Config, layer_idx: int):
|
703 |
super().__init__()
|
704 |
self.hidden_size = config.hidden_size
|
705 |
-
self.layer_idx = layer_idx
|
706 |
|
707 |
-
self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config, layer_idx=layer_idx)
|
708 |
|
709 |
self.feed_forward = InternLM2MLP(config)
|
710 |
self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
@@ -715,10 +608,10 @@ class InternLM2DecoderLayer(nn.Module):
|
|
715 |
hidden_states: torch.Tensor,
|
716 |
attention_mask: Optional[torch.Tensor] = None,
|
717 |
position_ids: Optional[torch.LongTensor] = None,
|
718 |
-
past_key_value: Optional[Cache] = None,
|
719 |
output_attentions: Optional[bool] = False,
|
720 |
use_cache: Optional[bool] = False,
|
721 |
-
|
722 |
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
723 |
"""
|
724 |
Args:
|
@@ -734,6 +627,12 @@ class InternLM2DecoderLayer(nn.Module):
|
|
734 |
(see `past_key_values`).
|
735 |
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
736 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
737 |
residual = hidden_states
|
738 |
|
739 |
hidden_states = self.attention_norm(hidden_states)
|
@@ -746,7 +645,7 @@ class InternLM2DecoderLayer(nn.Module):
|
|
746 |
past_key_value=past_key_value,
|
747 |
output_attentions=output_attentions,
|
748 |
use_cache=use_cache,
|
749 |
-
|
750 |
)
|
751 |
hidden_states = residual + hidden_states
|
752 |
|
@@ -790,20 +689,11 @@ InternLM2_START_DOCSTRING = r"""
|
|
790 |
InternLM2_START_DOCSTRING,
|
791 |
)
|
792 |
class InternLM2PreTrainedModel(PreTrainedModel):
|
793 |
-
"""
|
794 |
-
InternLM2 pretrained model's base class.
|
795 |
-
"""
|
796 |
-
|
797 |
config_class = InternLM2Config
|
798 |
base_model_prefix = "model"
|
799 |
supports_gradient_checkpointing = True
|
800 |
_no_split_modules = ["InternLM2DecoderLayer"]
|
801 |
-
_skip_keys_device_placement =
|
802 |
-
_supports_flash_attn_2 = True
|
803 |
-
_supports_sdpa = True
|
804 |
-
_supports_cache_class = True
|
805 |
-
_supports_quantized_cache = True
|
806 |
-
_supports_static_cache = True
|
807 |
|
808 |
def _init_weights(self, module):
|
809 |
std = self.config.initializer_range
|
@@ -852,19 +742,14 @@ InternLM2_INPUTS_DOCSTRING = r"""
|
|
852 |
config.n_positions - 1]`.
|
853 |
|
854 |
[What are position IDs?](../glossary#position-ids)
|
855 |
-
past_key_values (`
|
856 |
-
|
857 |
-
|
858 |
-
|
859 |
-
|
860 |
-
Two formats are allowed:
|
861 |
-
- a [`~cache_utils.Cache`] instance;
|
862 |
-
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
863 |
-
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
|
864 |
-
cache format.
|
865 |
|
866 |
-
|
867 |
-
|
868 |
|
869 |
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
|
870 |
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
|
@@ -884,14 +769,10 @@ InternLM2_INPUTS_DOCSTRING = r"""
|
|
884 |
more detail.
|
885 |
return_dict (`bool`, *optional*):
|
886 |
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
887 |
-
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
|
888 |
-
Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
|
889 |
-
this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
|
890 |
-
the complete sequence length.
|
891 |
"""
|
892 |
|
893 |
|
894 |
-
# Modified from transformers.
|
895 |
@add_start_docstrings(
|
896 |
"The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
|
897 |
InternLM2_START_DOCSTRING,
|
@@ -914,9 +795,7 @@ class InternLM2Model(InternLM2PreTrainedModel):
|
|
914 |
|
915 |
self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
916 |
|
917 |
-
self.layers = nn.ModuleList(
|
918 |
-
[InternLM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
919 |
-
)
|
920 |
self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
921 |
|
922 |
self.gradient_checkpointing = False
|
@@ -929,96 +808,142 @@ class InternLM2Model(InternLM2PreTrainedModel):
|
|
929 |
def set_input_embeddings(self, value):
|
930 |
self.tok_embeddings = value
|
931 |
|
|
|
932 |
@add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
|
933 |
def forward(
|
934 |
self,
|
935 |
input_ids: torch.LongTensor = None,
|
936 |
attention_mask: Optional[torch.Tensor] = None,
|
937 |
position_ids: Optional[torch.LongTensor] = None,
|
938 |
-
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
939 |
inputs_embeds: Optional[torch.FloatTensor] = None,
|
940 |
use_cache: Optional[bool] = None,
|
941 |
output_attentions: Optional[bool] = None,
|
942 |
output_hidden_states: Optional[bool] = None,
|
943 |
return_dict: Optional[bool] = None,
|
944 |
-
cache_position: Optional[torch.LongTensor] = None,
|
945 |
) -> Union[Tuple, BaseModelOutputWithPast]:
|
946 |
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
947 |
output_hidden_states = (
|
948 |
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
949 |
)
|
950 |
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
|
|
951 |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
952 |
|
953 |
-
if
|
954 |
-
|
955 |
-
|
956 |
-
|
|
|
957 |
|
958 |
-
if
|
959 |
-
|
960 |
-
|
|
|
961 |
)
|
962 |
-
|
963 |
|
964 |
if inputs_embeds is None:
|
965 |
inputs_embeds = self.tok_embeddings(input_ids)
|
966 |
|
967 |
-
|
968 |
-
|
969 |
-
|
970 |
-
|
971 |
-
|
972 |
-
|
973 |
-
|
974 |
-
|
975 |
-
|
|
|
976 |
)
|
977 |
-
if position_ids is None:
|
978 |
-
position_ids = cache_position.unsqueeze(0)
|
979 |
-
|
980 |
-
causal_mask = self._update_causal_mask(
|
981 |
-
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
|
982 |
-
)
|
983 |
|
984 |
# embed positions
|
985 |
hidden_states = inputs_embeds
|
986 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
987 |
# decoder layers
|
988 |
all_hidden_states = () if output_hidden_states else None
|
989 |
all_self_attns = () if output_attentions else None
|
990 |
-
next_decoder_cache = None
|
991 |
|
992 |
-
for decoder_layer in self.layers:
|
993 |
if output_hidden_states:
|
994 |
all_hidden_states += (hidden_states,)
|
995 |
|
|
|
|
|
996 |
if self.gradient_checkpointing and self.training:
|
997 |
-
|
998 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
999 |
hidden_states,
|
1000 |
-
|
1001 |
position_ids,
|
1002 |
-
|
1003 |
-
output_attentions,
|
1004 |
-
use_cache,
|
1005 |
-
cache_position,
|
1006 |
)
|
1007 |
else:
|
1008 |
layer_outputs = decoder_layer(
|
1009 |
hidden_states,
|
1010 |
-
attention_mask=causal_mask,
|
1011 |
position_ids=position_ids,
|
1012 |
-
past_key_value=past_key_values,
|
1013 |
output_attentions=output_attentions,
|
1014 |
use_cache=use_cache,
|
1015 |
-
cache_position=cache_position,
|
1016 |
)
|
1017 |
|
1018 |
hidden_states = layer_outputs[0]
|
1019 |
|
1020 |
if use_cache:
|
1021 |
-
next_decoder_cache = layer_outputs[2 if output_attentions else 1]
|
1022 |
|
1023 |
if output_attentions:
|
1024 |
all_self_attns += (layer_outputs[1],)
|
@@ -1030,9 +955,6 @@ class InternLM2Model(InternLM2PreTrainedModel):
|
|
1030 |
all_hidden_states += (hidden_states,)
|
1031 |
|
1032 |
next_cache = next_decoder_cache if use_cache else None
|
1033 |
-
if return_legacy_cache:
|
1034 |
-
next_cache = next_cache.to_legacy_cache()
|
1035 |
-
|
1036 |
if not return_dict:
|
1037 |
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
|
1038 |
return BaseModelOutputWithPast(
|
@@ -1042,95 +964,11 @@ class InternLM2Model(InternLM2PreTrainedModel):
|
|
1042 |
attentions=all_self_attns,
|
1043 |
)
|
1044 |
|
1045 |
-
def _update_causal_mask(
|
1046 |
-
self,
|
1047 |
-
attention_mask: torch.Tensor,
|
1048 |
-
input_tensor: torch.Tensor,
|
1049 |
-
cache_position: torch.Tensor,
|
1050 |
-
past_key_values: Cache,
|
1051 |
-
output_attentions: bool,
|
1052 |
-
):
|
1053 |
-
# TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length
|
1054 |
-
# even when the static KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at
|
1055 |
-
# each decode steps due to the dynamic shapes. (`recording cudagraph tree for symint key 13`, etc.), which is
|
1056 |
-
# VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using `fullgraph=True`.
|
1057 |
-
# See more context in https://github.com/huggingface/transformers/pull/29114
|
1058 |
-
|
1059 |
-
if self.config.attn_implementation == "flash_attention_2":
|
1060 |
-
if attention_mask is not None and 0.0 in attention_mask:
|
1061 |
-
return attention_mask
|
1062 |
-
return None
|
1063 |
-
|
1064 |
-
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
|
1065 |
-
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
|
1066 |
-
# to infer the attention mask.
|
1067 |
-
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
1068 |
-
using_static_cache = isinstance(past_key_values, StaticCache)
|
1069 |
-
|
1070 |
-
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
|
1071 |
-
if self.config.attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
|
1072 |
-
if AttentionMaskConverter._ignore_causal_mask_sdpa(
|
1073 |
-
attention_mask,
|
1074 |
-
inputs_embeds=input_tensor,
|
1075 |
-
past_key_values_length=past_seen_tokens,
|
1076 |
-
is_training=self.training,
|
1077 |
-
):
|
1078 |
-
return None
|
1079 |
-
|
1080 |
-
dtype, device = input_tensor.dtype, input_tensor.device
|
1081 |
-
min_dtype = torch.finfo(dtype).min
|
1082 |
-
sequence_length = input_tensor.shape[1]
|
1083 |
-
if using_static_cache:
|
1084 |
-
target_length = past_key_values.get_max_length()
|
1085 |
-
else:
|
1086 |
-
target_length = (
|
1087 |
-
attention_mask.shape[-1]
|
1088 |
-
if isinstance(attention_mask, torch.Tensor)
|
1089 |
-
else past_seen_tokens + sequence_length + 1
|
1090 |
-
)
|
1091 |
|
1092 |
-
|
1093 |
-
# in this case we assume that the mask comes already in inverted form and requires no inversion or slicing
|
1094 |
-
if attention_mask.max() != 0:
|
1095 |
-
raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0`")
|
1096 |
-
causal_mask = attention_mask
|
1097 |
-
else:
|
1098 |
-
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
|
1099 |
-
if sequence_length != 1:
|
1100 |
-
if support_bf16_triu or dtype == torch.float32:
|
1101 |
-
causal_mask = torch.triu(causal_mask, diagonal=1)
|
1102 |
-
else:
|
1103 |
-
triu_mask = torch.triu(torch.ones(causal_mask.size(), device=device), diagonal=1).bool()
|
1104 |
-
causal_mask.masked_fill_(~triu_mask, 0)
|
1105 |
-
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
|
1106 |
-
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
|
1107 |
-
if attention_mask is not None:
|
1108 |
-
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
|
1109 |
-
mask_length = attention_mask.shape[-1]
|
1110 |
-
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
|
1111 |
-
padding_mask = padding_mask == 0
|
1112 |
-
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
|
1113 |
-
padding_mask, min_dtype
|
1114 |
-
)
|
1115 |
-
if (
|
1116 |
-
self.config.attn_implementation == "sdpa"
|
1117 |
-
and attention_mask is not None
|
1118 |
-
and attention_mask.device.type == "cuda"
|
1119 |
-
and not output_attentions
|
1120 |
-
):
|
1121 |
-
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
|
1122 |
-
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
|
1123 |
-
# Details: https://github.com/pytorch/pytorch/issues/110213
|
1124 |
-
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) # pylint: disable=E1120
|
1125 |
-
|
1126 |
-
return causal_mask
|
1127 |
-
|
1128 |
-
|
1129 |
-
# Modified from transformers.models.llama.modeling_llama.LlamaForCausalLM
|
1130 |
class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
1131 |
-
"""Causal language model (CLM) for InternLM2."""
|
1132 |
-
|
1133 |
_auto_class = "AutoModelForCausalLM"
|
|
|
1134 |
_tied_weights_keys = ["output.weight"]
|
1135 |
|
1136 |
def __init__(self, config):
|
@@ -1167,14 +1005,13 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1167 |
input_ids: torch.LongTensor = None,
|
1168 |
attention_mask: Optional[torch.Tensor] = None,
|
1169 |
position_ids: Optional[torch.LongTensor] = None,
|
1170 |
-
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
1171 |
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1172 |
labels: Optional[torch.LongTensor] = None,
|
1173 |
use_cache: Optional[bool] = None,
|
1174 |
output_attentions: Optional[bool] = None,
|
1175 |
output_hidden_states: Optional[bool] = None,
|
1176 |
return_dict: Optional[bool] = None,
|
1177 |
-
cache_position: Optional[torch.LongTensor] = None,
|
1178 |
) -> Union[Tuple, CausalLMOutputWithPast]:
|
1179 |
r"""
|
1180 |
Args:
|
@@ -1190,8 +1027,8 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1190 |
```python
|
1191 |
>>> from transformers import AutoTokenizer, InternLM2ForCausalLM
|
1192 |
|
1193 |
-
>>> model = InternLM2ForCausalLM.from_pretrained(
|
1194 |
-
>>> tokenizer = AutoTokenizer.from_pretrained(
|
1195 |
|
1196 |
>>> prompt = "Hey, are you conscious? Can you talk to me?"
|
1197 |
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
@@ -1219,19 +1056,10 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1219 |
output_attentions=output_attentions,
|
1220 |
output_hidden_states=output_hidden_states,
|
1221 |
return_dict=return_dict,
|
1222 |
-
cache_position=cache_position,
|
1223 |
)
|
1224 |
|
1225 |
hidden_states = outputs[0]
|
1226 |
-
|
1227 |
-
output_slices = self.output.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
|
1228 |
-
logits = [
|
1229 |
-
F.linear(hidden_states, output_slices[i]) # pylint: disable=not-callable
|
1230 |
-
for i in range(self.config.pretraining_tp)
|
1231 |
-
]
|
1232 |
-
logits = torch.cat(logits, dim=-1)
|
1233 |
-
else:
|
1234 |
-
logits = self.output(hidden_states)
|
1235 |
logits = logits.float()
|
1236 |
|
1237 |
loss = None
|
@@ -1260,48 +1088,19 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1260 |
)
|
1261 |
|
1262 |
def prepare_inputs_for_generation(
|
1263 |
-
self,
|
1264 |
-
input_ids,
|
1265 |
-
past_key_values=None,
|
1266 |
-
attention_mask=None,
|
1267 |
-
inputs_embeds=None,
|
1268 |
-
cache_position=None,
|
1269 |
-
use_cache=True,
|
1270 |
-
**kwargs,
|
1271 |
):
|
1272 |
-
past_length = 0
|
1273 |
if past_key_values is not None:
|
1274 |
-
|
1275 |
-
|
1276 |
-
|
1277 |
-
|
1278 |
-
|
1279 |
-
else None
|
1280 |
-
)
|
1281 |
-
cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length)
|
1282 |
-
# TODO joao: remove this `else` after `generate` prioritizes `Cache` objects
|
1283 |
else:
|
1284 |
-
|
1285 |
-
|
1286 |
-
|
1287 |
-
|
1288 |
-
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
|
1289 |
-
# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as input)
|
1290 |
-
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
|
1291 |
-
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
1292 |
-
# 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
|
1293 |
-
# input_ids based on the past_length.
|
1294 |
-
elif past_length < input_ids.shape[1]:
|
1295 |
-
input_ids = input_ids[:, past_length:]
|
1296 |
-
# 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
|
1297 |
-
|
1298 |
-
# If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
|
1299 |
-
if (
|
1300 |
-
max_cache_length is not None
|
1301 |
-
and attention_mask is not None
|
1302 |
-
and cache_length + input_ids.shape[1] > max_cache_length
|
1303 |
-
):
|
1304 |
-
attention_mask = attention_mask[:, -max_cache_length:] # pylint: disable=E1130
|
1305 |
|
1306 |
position_ids = kwargs.get("position_ids", None)
|
1307 |
if attention_mask is not None and position_ids is None:
|
@@ -1315,24 +1114,13 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1315 |
if inputs_embeds is not None and past_key_values is None:
|
1316 |
model_inputs = {"inputs_embeds": inputs_embeds}
|
1317 |
else:
|
1318 |
-
|
1319 |
-
# recompiles graphs as the stride of the inputs is a guard.
|
1320 |
-
# Ref: https://github.com/huggingface/transformers/pull/29114
|
1321 |
-
# TODO: use `next_tokens` directly instead.
|
1322 |
-
model_inputs = {"input_ids": input_ids.contiguous()}
|
1323 |
-
|
1324 |
-
input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
|
1325 |
-
if cache_position is None:
|
1326 |
-
cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
|
1327 |
-
elif use_cache:
|
1328 |
-
cache_position = cache_position[-input_length:]
|
1329 |
|
1330 |
model_inputs.update(
|
1331 |
{
|
1332 |
"position_ids": position_ids,
|
1333 |
-
"cache_position": cache_position,
|
1334 |
"past_key_values": past_key_values,
|
1335 |
-
"use_cache": use_cache,
|
1336 |
"attention_mask": attention_mask,
|
1337 |
}
|
1338 |
)
|
@@ -1347,18 +1135,15 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1347 |
)
|
1348 |
return reordered_past
|
1349 |
|
1350 |
-
def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] =
|
1351 |
-
|
1352 |
-
history = []
|
1353 |
-
if tokenizer.add_bos_token:
|
1354 |
-
prompt = ""
|
1355 |
-
else:
|
1356 |
-
prompt = tokenizer.bos_token
|
1357 |
if meta_instruction:
|
1358 |
-
prompt += f"""
|
|
|
|
|
1359 |
for record in history:
|
1360 |
-
prompt += f"""
|
1361 |
-
prompt += f"""
|
1362 |
return tokenizer([prompt], return_tensors="pt")
|
1363 |
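As a rough illustration of the prompt layout `build_inputs` assembles (a standalone sketch assuming the ChatML-style `<|im_start|>` / `<|im_end|>` markers; the `<|im_end|>` check appears in `ChatStreamer` further down, but the exact template strings are the ones elided in the diff above):

```python
def build_chat_prompt(query, history, meta_instruction=""):
    # Mirrors the f-string concatenation in build_inputs: one system block,
    # one user/assistant pair per history record, then the new query.
    prompt = ""
    if meta_instruction:
        prompt += f"<|im_start|>system\n{meta_instruction}<|im_end|>\n"
    for user_turn, assistant_turn in history:
        prompt += (
            f"<|im_start|>user\n{user_turn}<|im_end|>\n"
            f"<|im_start|>assistant\n{assistant_turn}<|im_end|>\n"
        )
    prompt += f"<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"
    return prompt

print(build_chat_prompt("Hi!", [("Hello", "Hello! How can I help?")], "You are InternLM."))
```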
|
1364 |
@torch.no_grad()
|
@@ -1366,25 +1151,21 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1366 |
self,
|
1367 |
tokenizer,
|
1368 |
query: str,
|
1369 |
-
history: List[Tuple[str, str]] = None,
|
1370 |
streamer: Optional[BaseStreamer] = None,
|
1371 |
max_new_tokens: int = 1024,
|
1372 |
do_sample: bool = True,
|
1373 |
temperature: float = 0.8,
|
1374 |
top_p: float = 0.8,
|
1375 |
meta_instruction: str = "You are an AI assistant whose name is InternLM (书生·浦语).\n"
|
1376 |
-
"- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory "
|
1377 |
-
"(
|
1378 |
-
"- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such "
|
1379 |
-
"as English and 中文.",
|
1380 |
**kwargs,
|
1381 |
):
|
1382 |
-
if history is None:
|
1383 |
-
history = []
|
1384 |
inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
|
1385 |
inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
|
1386 |
# also add end-of-assistant token in eos token id to avoid unnecessary generation
|
1387 |
-
eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(["
|
1388 |
outputs = self.generate(
|
1389 |
**inputs,
|
1390 |
streamer=streamer,
|
@@ -1397,7 +1178,7 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1397 |
)
|
1398 |
outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]) :]
|
1399 |
response = tokenizer.decode(outputs, skip_special_tokens=True)
|
1400 |
-
response = response.split("
|
1401 |
history = history + [(query, response)]
|
1402 |
return response, history
|
1403 |
|
@@ -1406,15 +1187,13 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1406 |
self,
|
1407 |
tokenizer,
|
1408 |
query: str,
|
1409 |
-
history: List[Tuple[str, str]] = None,
|
1410 |
max_new_tokens: int = 1024,
|
1411 |
do_sample: bool = True,
|
1412 |
temperature: float = 0.8,
|
1413 |
top_p: float = 0.8,
|
1414 |
**kwargs,
|
1415 |
):
|
1416 |
-
if history is None:
|
1417 |
-
history = []
|
1418 |
"""
|
1419 |
Return a generator in format: (response, history)
|
1420 |
Eg.
|
@@ -1430,10 +1209,6 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1430 |
response_queue = queue.Queue(maxsize=20)
|
1431 |
|
1432 |
class ChatStreamer(BaseStreamer):
|
1433 |
-
"""
|
1434 |
-
Streamer used in generate to print words one by one.
|
1435 |
-
"""
|
1436 |
-
|
1437 |
def __init__(self, tokenizer) -> None:
|
1438 |
super().__init__()
|
1439 |
self.tokenizer = tokenizer
|
@@ -1441,7 +1216,6 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1441 |
self.query = query
|
1442 |
self.history = history
|
1443 |
self.response = ""
|
1444 |
-
self.cache = []
|
1445 |
self.received_inputs = False
|
1446 |
self.queue.put((self.response, history + [(self.query, self.response)]))
|
1447 |
|
@@ -1456,15 +1230,11 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1456 |
self.received_inputs = True
|
1457 |
return
|
1458 |
|
1459 |
-
self.
|
1460 |
-
token
|
1461 |
-
if token.strip() != "<|im_end|>":
|
1462 |
self.response = self.response + token
|
1463 |
history = self.history + [(self.query, self.response)]
|
1464 |
self.queue.put((self.response, history))
|
1465 |
-
self.cache = []
|
1466 |
-
else:
|
1467 |
-
self.end()
|
1468 |
|
1469 |
def end(self):
|
1470 |
self.queue.put(None)
|
@@ -1494,13 +1264,13 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1494 |
return consumer()
|
1495 |
|
1496 |
|
1497 |
-
# Copied from transformers.
|
1498 |
@add_start_docstrings(
|
1499 |
"""
|
1500 |
The InternLM2 Model transformer with a sequence classification head on top (linear layer).
|
1501 |
|
1502 |
-
[`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
|
1503 |
-
(e.g. GPT-2) do.
|
1504 |
|
1505 |
Since it does classification on the last token, it requires to know the position of the last token. If a
|
1506 |
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
@@ -1511,8 +1281,6 @@ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
|
|
1511 |
InternLM2_START_DOCSTRING,
|
1512 |
)
|
1513 |
class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
|
1514 |
-
"""Sequence Classification Head for InternLM2 Model."""
|
1515 |
-
|
1516 |
def __init__(self, config):
|
1517 |
super().__init__(config)
|
1518 |
self.num_labels = config.num_labels
|
@@ -1534,7 +1302,7 @@ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
|
|
1534 |
input_ids: torch.LongTensor = None,
|
1535 |
attention_mask: Optional[torch.Tensor] = None,
|
1536 |
position_ids: Optional[torch.LongTensor] = None,
|
1537 |
-
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
1538 |
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1539 |
labels: Optional[torch.LongTensor] = None,
|
1540 |
use_cache: Optional[bool] = None,
|
@@ -1575,10 +1343,9 @@ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
|
|
1575 |
sequence_lengths = -1
|
1576 |
else:
|
1577 |
if input_ids is not None:
|
1578 |
-
|
1579 |
-
|
1580 |
-
|
1581 |
-
sequence_lengths = sequence_lengths.to(logits.device)
|
1582 |
else:
|
1583 |
sequence_lengths = -1
|
1584 |
|
@@ -1590,7 +1357,7 @@ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
|
|
1590 |
if self.config.problem_type is None:
|
1591 |
if self.num_labels == 1:
|
1592 |
self.config.problem_type = "regression"
|
1593 |
-
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
1594 |
self.config.problem_type = "single_label_classification"
|
1595 |
else:
|
1596 |
self.config.problem_type = "multi_label_classification"
|
@@ -1618,191 +1385,3 @@ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
|
|
1618 |
hidden_states=transformer_outputs.hidden_states,
|
1619 |
attentions=transformer_outputs.attentions,
|
1620 |
)
|
1621 |
-
|
1622 |
-
|
1623 |
-
# Copied from transformers.models.llama.modeling_llama.LlamaForQuestionAnswering with Llama->InternLM2
|
1624 |
-
@add_start_docstrings(
|
1625 |
-
"""
|
1626 |
-
The InternLM2 Model transformer with a span classification head on top for extractive question-answering tasks like
|
1627 |
-
SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
|
1628 |
-
""",
|
1629 |
-
InternLM2_START_DOCSTRING,
|
1630 |
-
)
|
1631 |
-
class InternLM2ForQuestionAnswering(InternLM2PreTrainedModel):
|
1632 |
-
"""Question Answering model for InternLM2."""
|
1633 |
-
|
1634 |
-
base_model_prefix = "transformer"
|
1635 |
-
|
1636 |
-
def __init__(self, config):
|
1637 |
-
super().__init__(config)
|
1638 |
-
self.transformer = InternLM2Model(config)
|
1639 |
-
self.qa_outputs = nn.Linear(config.hidden_size, 2)
|
1640 |
-
|
1641 |
-
# Initialize weights and apply final processing
|
1642 |
-
self.post_init()
|
1643 |
-
|
1644 |
-
def get_input_embeddings(self):
|
1645 |
-
return self.transformer.tok_embeddings
|
1646 |
-
|
1647 |
-
def set_input_embeddings(self, value):
|
1648 |
-
self.transformer.tok_embeddings = value
|
1649 |
-
|
1650 |
-
@add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
|
1651 |
-
def forward(
|
1652 |
-
self,
|
1653 |
-
input_ids: Optional[torch.LongTensor] = None,
|
1654 |
-
attention_mask: Optional[torch.FloatTensor] = None,
|
1655 |
-
position_ids: Optional[torch.LongTensor] = None,
|
1656 |
-
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
1657 |
-
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1658 |
-
start_positions: Optional[torch.LongTensor] = None,
|
1659 |
-
end_positions: Optional[torch.LongTensor] = None,
|
1660 |
-
output_attentions: Optional[bool] = None,
|
1661 |
-
output_hidden_states: Optional[bool] = None,
|
1662 |
-
return_dict: Optional[bool] = None,
|
1663 |
-
) -> Union[Tuple, QuestionAnsweringModelOutput]:
|
1664 |
-
r"""
|
1665 |
-
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1666 |
-
Labels for position (index) of the start of the labelled span for computing the token classification loss.
|
1667 |
-
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
|
1668 |
-
are not taken into account for computing the loss.
|
1669 |
-
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1670 |
-
Labels for position (index) of the end of the labelled span for computing the token classification loss.
|
1671 |
-
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
|
1672 |
-
are not taken into account for computing the loss.
|
1673 |
-
"""
|
1674 |
-
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1675 |
-
|
1676 |
-
outputs = self.transformer(
|
1677 |
-
input_ids,
|
1678 |
-
attention_mask=attention_mask,
|
1679 |
-
position_ids=position_ids,
|
1680 |
-
past_key_values=past_key_values,
|
1681 |
-
inputs_embeds=inputs_embeds,
|
1682 |
-
output_attentions=output_attentions,
|
1683 |
-
output_hidden_states=output_hidden_states,
|
1684 |
-
return_dict=return_dict,
|
1685 |
-
)
|
1686 |
-
|
1687 |
-
sequence_output = outputs[0]
|
1688 |
-
|
1689 |
-
logits = self.qa_outputs(sequence_output)
|
1690 |
-
start_logits, end_logits = logits.split(1, dim=-1)
|
1691 |
-
start_logits = start_logits.squeeze(-1).contiguous()
|
1692 |
-
end_logits = end_logits.squeeze(-1).contiguous()
|
1693 |
-
|
1694 |
-
total_loss = None
|
1695 |
-
if start_positions is not None and end_positions is not None:
|
1696 |
-
# If we are on multi-GPU, split add a dimension
|
1697 |
-
if len(start_positions.size()) > 1:
|
1698 |
-
start_positions = start_positions.squeeze(-1).to(start_logits.device)
|
1699 |
-
if len(end_positions.size()) > 1:
|
1700 |
-
end_positions = end_positions.squeeze(-1).to(end_logits.device)
|
1701 |
-
# sometimes the start/end positions are outside our model inputs, we ignore these terms
|
1702 |
-
ignored_index = start_logits.size(1)
|
1703 |
-
start_positions = start_positions.clamp(0, ignored_index)
|
1704 |
-
end_positions = end_positions.clamp(0, ignored_index)
|
1705 |
-
|
1706 |
-
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
|
1707 |
-
start_loss = loss_fct(start_logits, start_positions)
|
1708 |
-
end_loss = loss_fct(end_logits, end_positions)
|
1709 |
-
total_loss = (start_loss + end_loss) / 2
|
1710 |
-
|
1711 |
-
if not return_dict:
|
1712 |
-
output = (start_logits, end_logits) + outputs[2:]
|
1713 |
-
return ((total_loss,) + output) if total_loss is not None else output
|
1714 |
-
|
1715 |
-
return QuestionAnsweringModelOutput(
|
1716 |
-
loss=total_loss,
|
1717 |
-
start_logits=start_logits,
|
1718 |
-
end_logits=end_logits,
|
1719 |
-
hidden_states=outputs.hidden_states,
|
1720 |
-
attentions=outputs.attentions,
|
1721 |
-
)
|
1722 |
-
|
1723 |
-
|
1724 |
-
# Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->InternLM2
|
1725 |
-
@add_start_docstrings(
|
1726 |
-
"""
|
1727 |
-
The InternLM2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
|
1728 |
-
output) e.g. for Named-Entity-Recognition (NER) tasks.
|
1729 |
-
""",
|
1730 |
-
InternLM2_START_DOCSTRING,
|
1731 |
-
)
|
1732 |
-
class InternLM2ForTokenClassification(InternLM2PreTrainedModel):
|
1733 |
-
"""Token classification model for InternLM2."""
|
1734 |
-
|
1735 |
-
def __init__(self, config):
|
1736 |
-
super().__init__(config)
|
1737 |
-
self.num_labels = config.num_labels
|
1738 |
-
self.model = InternLM2Model(config)
|
1739 |
-
if getattr(config, "classifier_dropout", None) is not None:
|
1740 |
-
classifier_dropout = config.classifier_dropout
|
1741 |
-
elif getattr(config, "hidden_dropout", None) is not None:
|
1742 |
-
classifier_dropout = config.hidden_dropout
|
1743 |
-
else:
|
1744 |
-
classifier_dropout = 0.1
|
1745 |
-
self.dropout = nn.Dropout(classifier_dropout)
|
1746 |
-
self.score = nn.Linear(config.hidden_size, config.num_labels)
|
1747 |
-
|
1748 |
-
# Initialize weights and apply final processing
|
1749 |
-
self.post_init()
|
1750 |
-
|
1751 |
-
def get_input_embeddings(self):
|
1752 |
-
return self.model.tok_embeddings
|
1753 |
-
|
1754 |
-
def set_input_embeddings(self, value):
|
1755 |
-
self.model.tok_embeddings = value
|
1756 |
-
|
1757 |
-
@add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
|
1758 |
-
def forward(
|
1759 |
-
self,
|
1760 |
-
input_ids: torch.LongTensor = None,
|
1761 |
-
attention_mask: Optional[torch.Tensor] = None,
|
1762 |
-
position_ids: Optional[torch.LongTensor] = None,
|
1763 |
-
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1764 |
-
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1765 |
-
labels: Optional[torch.LongTensor] = None,
|
1766 |
-
use_cache: Optional[bool] = None,
|
1767 |
-
output_attentions: Optional[bool] = None,
|
1768 |
-
output_hidden_states: Optional[bool] = None,
|
1769 |
-
return_dict: Optional[bool] = None,
|
1770 |
-
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
|
1771 |
-
r"""
|
1772 |
-
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1773 |
-
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
1774 |
-
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
1775 |
-
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
1776 |
-
"""
|
1777 |
-
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1778 |
-
|
1779 |
-
outputs = self.model(
|
1780 |
-
input_ids,
|
1781 |
-
attention_mask=attention_mask,
|
1782 |
-
position_ids=position_ids,
|
1783 |
-
past_key_values=past_key_values,
|
1784 |
-
inputs_embeds=inputs_embeds,
|
1785 |
-
use_cache=use_cache,
|
1786 |
-
output_attentions=output_attentions,
|
1787 |
-
output_hidden_states=output_hidden_states,
|
1788 |
-
return_dict=return_dict,
|
1789 |
-
)
|
1790 |
-
sequence_output = outputs[0]
|
1791 |
-
sequence_output = self.dropout(sequence_output)
|
1792 |
-
logits = self.score(sequence_output)
|
1793 |
-
|
1794 |
-
loss = None
|
1795 |
-
if labels is not None:
|
1796 |
-
loss_fct = CrossEntropyLoss()
|
1797 |
-
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
|
1798 |
-
|
1799 |
-
if not return_dict:
|
1800 |
-
output = (logits,) + outputs[2:]
|
1801 |
-
return ((loss,) + output) if loss is not None else output
|
1802 |
-
|
1803 |
-
return TokenClassifierOutput(
|
1804 |
-
loss=loss,
|
1805 |
-
logits=logits,
|
1806 |
-
hidden_states=outputs.hidden_states,
|
1807 |
-
attentions=outputs.attentions,
|
1808 |
-
)
|
|
|
13 |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
# See the License for the specific language governing permissions and
|
15 |
# limitations under the License.
|
16 |
+
""" PyTorch InternLM2 model."""
|
17 |
import math
|
18 |
import queue
|
19 |
import threading
|
20 |
+
import warnings
|
21 |
from typing import List, Optional, Tuple, Union
|
22 |
|
23 |
import torch
|
|
|
27 |
from torch import nn
|
28 |
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
29 |
from transformers.activations import ACT2FN
|
|
|
|
|
30 |
from transformers.modeling_outputs import (
|
31 |
BaseModelOutputWithPast,
|
32 |
CausalLMOutputWithPast,
|
|
|
33 |
SequenceClassifierOutputWithPast,
|
|
|
34 |
)
|
35 |
from transformers.modeling_utils import PreTrainedModel
|
|
|
36 |
from transformers.utils import (
|
37 |
add_start_docstrings,
|
38 |
add_start_docstrings_to_model_forward,
|
|
|
39 |
logging,
|
40 |
replace_return_docstrings,
|
41 |
)
|
42 |
|
43 |
try:
|
44 |
from transformers.generation.streamers import BaseStreamer
|
45 |
+
except: # noqa # pylint: disable=bare-except
|
46 |
BaseStreamer = None
|
47 |
|
48 |
+
from .configuration_internlm import InternLMConfig as InternLM2Config
|
|
|
|
49 |
|
50 |
logger = logging.get_logger(__name__)
|
51 |
|
52 |
_CONFIG_FOR_DOC = "InternLM2Config"
|
53 |
|
54 |
+
flash_attn_func, flash_attn_varlen_func = None, None
|
55 |
+
pad_input, index_first_axis, unpad_input = None, None, None
|
56 |
+
def _import_flash_attn():
|
57 |
+
global flash_attn_func, flash_attn_varlen_func
|
58 |
+
global pad_input, index_first_axis, unpad_input
|
59 |
+
try:
|
60 |
+
from flash_attn import flash_attn_func as _flash_attn_func, flash_attn_varlen_func as _flash_attn_varlen_func
|
61 |
+
from flash_attn.bert_padding import pad_input as _pad_input, index_first_axis as _index_first_axis, unpad_input as _unpad_input
|
62 |
+
flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
|
63 |
+
pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
|
64 |
+
except ImportError:
|
65 |
+
raise ImportError("flash_attn is not installed.")
|
66 |
+
|
67 |
+
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
|
68 |
def _get_unpad_data(attention_mask):
|
69 |
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
70 |
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
71 |
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
72 |
+
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
|
73 |
return (
|
74 |
indices,
|
75 |
cu_seqlens,
|
|
|
77 |
)
|
78 |
|
79 |
|
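As a quick illustration (a standalone sketch with a made-up padding mask, not part of the diff), this is the kind of output `_get_unpad_data` produces for a variable-length batch:

```python
import torch
import torch.nn.functional as F

def _get_unpad_data(attention_mask):
    # Per-sequence valid-token counts, flat indices of valid positions,
    # and cumulative sequence lengths padded with a leading zero.
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch

# Two sequences, lengths 2 and 3
mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
indices, cu_seqlens, max_len = _get_unpad_data(mask)
print(indices.tolist())     # [0, 1, 3, 4, 5]
print(cu_seqlens.tolist())  # [0, 2, 5]
print(max_len)              # 3
```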
80  + # Copied from transformers.models.bart.modeling_bart._make_causal_mask
81  + def _make_causal_mask(
82  +     input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
83  + ):
84  +     """
85  +     Make causal mask used for bi-directional self-attention.
86  +     """
87  +     bsz, tgt_len = input_ids_shape
88  +     mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
89  +     mask_cond = torch.arange(mask.size(-1), device=device)
90  +     mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
91  +     mask = mask.to(dtype)

93  +     if past_key_values_length > 0:
94  +         mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
95  +     return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)

98  + # Copied from transformers.models.bart.modeling_bart._expand_mask
99  + def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
100 +     """
101 +     Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
102 +     """
103 +     bsz, src_len = mask.size()
104 +     tgt_len = tgt_len if tgt_len is not None else src_len

106 +     expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

108 +     inverted_mask = 1.0 - expanded_mask

110 +     return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
113 + # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
114 + class InternLM2RMSNorm(nn.Module):
115       def __init__(self, hidden_size, eps=1e-6):
116 +         """
117 +         InternLM2RMSNorm is equivalent to T5LayerNorm
118 +         """
119           super().__init__()
120           self.weight = nn.Parameter(torch.ones(hidden_size))
121           self.variance_epsilon = eps
128           return self.weight * hidden_states.to(input_dtype)
131 + # Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
132   class InternLM2RotaryEmbedding(nn.Module):
133 +     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
134           super().__init__()

136           self.dim = dim
137           self.max_position_embeddings = max_position_embeddings
138           self.base = base
139 +         inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
140           self.register_buffer("inv_freq", inv_freq, persistent=False)

142 +         # Build here to make `torch.jit.trace` work.
143 +         self._set_cos_sin_cache(
144 +             seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
145 +         )

147 +     def _set_cos_sin_cache(self, seq_len, device, dtype):
148 +         self.max_seq_len_cached = seq_len
149 +         t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

151 +         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
152 +         # Different from paper, but it uses a different permutation in order to obtain the same calculation
153 +         emb = torch.cat((freqs, freqs), dim=-1)
154 +         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
155 +         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

157 +     def forward(self, x, seq_len=None):
158           # x: [bs, num_attention_heads, seq_len, head_size]
159 +         if seq_len > self.max_seq_len_cached:
160 +             self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)

162 +         return (
163 +             self.cos_cached[:seq_len].to(dtype=x.dtype),
164 +             self.sin_cached[:seq_len].to(dtype=x.dtype),
165 +         )

168 + # Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
169   class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
170       """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

172 +     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
173 +         self.scaling_factor = scaling_factor
174 +         super().__init__(dim, max_position_embeddings, base, device)

176 +     def _set_cos_sin_cache(self, seq_len, device, dtype):
177 +         self.max_seq_len_cached = seq_len
178 +         t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
179 +         t = t / self.scaling_factor

181 +         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
182 +         # Different from paper, but it uses a different permutation in order to obtain the same calculation
183 +         emb = torch.cat((freqs, freqs), dim=-1)
184 +         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
185 +         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

188 + # Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
189   class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
190       """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
191 +     Credits to the Reddit users /u/bloc97 and /u/emozilla.
192 +     """

194 +     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
195 +         self.scaling_factor = scaling_factor
196 +         super().__init__(dim, max_position_embeddings, base, device)

198 +     def _set_cos_sin_cache(self, seq_len, device, dtype):
199 +         self.max_seq_len_cached = seq_len

201           if seq_len > self.max_position_embeddings:
202               base = self.base * (
203                   (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
204               ) ** (self.dim / (self.dim - 2))
205 +             inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
206 +             self.register_buffer("inv_freq", inv_freq, persistent=False)

208 +         t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

210 +         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
211 +         # Different from paper, but it uses a different permutation in order to obtain the same calculation
212 +         emb = torch.cat((freqs, freqs), dim=-1)
213 +         self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
214 +         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
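As a side note on the dynamic NTK branch above: when the requested sequence length exceeds `max_position_embeddings`, the rotary base is rescaled before the inverse frequencies are rebuilt. A minimal sketch of just that rescaling, with `dim`, `base`, window size, and scaling factor chosen only for illustration:

```python
import torch

dim = 128                       # per-head dimension (assumed for the example)
base = 10000.0
max_position_embeddings = 2048
scaling_factor = 1.0
seq_len = 8192                  # requested context, longer than the trained window

if seq_len > max_position_embeddings:
    # Same expression as in _set_cos_sin_cache above: grow the base so the
    # low-frequency components stretch to cover the longer context.
    base = base * (
        (scaling_factor * seq_len / max_position_embeddings) - (scaling_factor - 1)
    ) ** (dim / (dim - 2))

inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
print(base)   # noticeably larger than 10000 once seq_len is 4x the trained window
```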
217 + # Copied from transformers.model.llama.modeling_llama.rotate_half
218   def rotate_half(x):
219       """Rotates half the hidden dims of the input."""
220       x1 = x[..., : x.shape[-1] // 2]
222       return torch.cat((-x2, x1), dim=-1)

225 + # Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
226 + def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
227 +     """Applies Rotary Position Embedding to the query and key tensors."""
228 +     cos = cos[position_ids].unsqueeze(unsqueeze_dim)
229 +     sin = sin[position_ids].unsqueeze(unsqueeze_dim)
230       q_embed = (q * cos) + (rotate_half(q) * sin)
231       k_embed = (k * cos) + (rotate_half(k) * sin)
232       return q_embed, k_embed

235   class InternLM2MLP(nn.Module):
236       def __init__(self, config):
237           super().__init__()
238           self.config = config
249           return down_proj

252 + # Copied from transformers.model.llama.modeling_llama.repeat_kv
253   def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
254       """
255       This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
262       return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
265 + # Modified from transformers.model.llama.modeling_llama.LlamaAttention
266   class InternLM2Attention(nn.Module):
267       """Multi-headed attention from 'Attention Is All You Need' paper"""

269 +     def __init__(self, config: InternLM2Config):
270           super().__init__()
271           self.config = config
272           self.hidden_size = config.hidden_size
273           self.num_heads = config.num_attention_heads
274           self.head_dim = self.hidden_size // self.num_heads
275           self.num_key_value_heads = config.num_key_value_heads
276           self.num_key_value_groups = self.num_heads // self.num_key_value_heads
277           self.max_position_embeddings = config.max_position_embeddings
278           self.is_causal = True

280           if (self.head_dim * self.num_heads) != self.hidden_size:
288               (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
289               bias=config.bias,
290           )

292 +         self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
293           self._init_rope()

295       def _init_rope(self):
297               self.rotary_emb = InternLM2RotaryEmbedding(
298                   self.head_dim,
299                   max_position_embeddings=self.max_position_embeddings,
300 +                 base=self.config.rope_theta,
301               )
302           else:
303               scaling_type = self.config.rope_scaling["type"]
304               scaling_factor = self.config.rope_scaling["factor"]
305 +             if scaling_type == "dynamic":
306 +                 self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
307                       self.head_dim,
308                       max_position_embeddings=self.max_position_embeddings,
309 +                     base=self.config.rope_theta,
310                       scaling_factor=scaling_factor,
311                   )
312 +             elif scaling_type == "linear":
313 +                 self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
314                       self.head_dim,
315                       max_position_embeddings=self.max_position_embeddings,
316 +                     base=self.config.rope_theta,
317                       scaling_factor=scaling_factor,
318                   )
319               else:
320 +                 raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.")
321 +         return self.rotary_emb

323 +     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
324 +         return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

326       def forward(
327           self,
328           hidden_states: torch.Tensor,
329           attention_mask: Optional[torch.Tensor] = None,
330           position_ids: Optional[torch.LongTensor] = None,
331 +         past_key_value: Optional[Tuple[torch.Tensor]] = None,
332           output_attentions: bool = False,
333 +         use_cache: bool = False,
334 +         **kwargs,
335       ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
336 +         if "padding_mask" in kwargs:
337 +             warnings.warn(
338 +                 "Passing `padding_mask` is deprecated and will be removed in v4.37. "
339 +                 "Please make sure use `attention_mask` instead.`"
340 +             )

342           bsz, q_len, _ = hidden_states.size()

344 +         qkv_states = self.wqkv(hidden_states)

346           qkv_states = rearrange(
347               qkv_states,
351           )

353           query_states = qkv_states[..., : self.num_key_value_groups, :]
354 +         query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
355 +         key_states = qkv_states[..., -2, :]
356 +         value_states = qkv_states[..., -1, :]

358 +         query_states = query_states.transpose(1, 2)
359 +         key_states = key_states.transpose(1, 2)
360 +         value_states = value_states.transpose(1, 2)

362 +         kv_seq_len = key_states.shape[-2]
363 +         if past_key_value is not None:
364 +             kv_seq_len += past_key_value[0].shape[-2]
365 +         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
366           query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

368           if past_key_value is not None:
369 +             # reuse k, v, self_attention
370 +             key_states = torch.cat([past_key_value[0], key_states], dim=2)
371 +             value_states = torch.cat([past_key_value[1], value_states], dim=2)

373 +         past_key_value = (key_states, value_states) if use_cache else None

375           key_states = repeat_kv(key_states, self.num_key_value_groups)
376           value_states = repeat_kv(value_states, self.num_key_value_groups)

378           attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

380 +         if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
381 +             raise ValueError(
382 +                 f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
383 +                 f" {attn_weights.size()}"
384 +             )

386 +         if attention_mask is not None:
387 +             if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
388 +                 raise ValueError(
389 +                     f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
390 +                 )
391 +             attn_weights = attn_weights + attention_mask

393           # upcast attention to fp32
394           attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
401           )

403           attn_output = attn_output.transpose(1, 2).contiguous()
404           attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

406 +         attn_output = self.wo(attn_output)

408           if not output_attentions:
409               attn_weights = None

411           return attn_output, attn_weights, past_key_value
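For orientation, the packed `wqkv` projection above groups each key/value head with its query heads and is then split by slicing. A standalone sketch of the same split, with illustrative sizes; `einops.rearrange` is assumed available (the modeling code above also calls `rearrange`), and the exact grouping pattern is an assumption mirroring the collapsed lines of the hunk:

```python
import torch
from einops import rearrange

# Illustrative sizes, not the real config values.
bsz, q_len, head_dim = 2, 5, 64
num_heads, num_kv_heads = 8, 2
num_key_value_groups = num_heads // num_kv_heads      # query heads per kv head

# wqkv output: per kv head, `num_key_value_groups` query heads plus one key and one value head.
wqkv_out = torch.randn(bsz, q_len, (num_heads + 2 * num_kv_heads) * head_dim)

qkv = rearrange(
    wqkv_out,
    "b q (h gs d) -> b q h gs d",
    gs=2 + num_key_value_groups,   # assumed grouping, matching the slicing below
    d=head_dim,
)
q = rearrange(qkv[..., :num_key_value_groups, :], "b q h gs d -> b q (h gs) d")
k = qkv[..., -2, :]
v = qkv[..., -1, :]
print(q.shape, k.shape, v.shape)   # (2, 5, 8, 64) (2, 5, 2, 64) (2, 5, 2, 64)
```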
414 + # Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
415   class InternLM2FlashAttention2(InternLM2Attention):
416       """
417       InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays
419       flash attention and deal with padding tokens in case the input contains any of them.
420       """

422       def forward(
423           self,
424           hidden_states: torch.Tensor,
425           attention_mask: Optional[torch.LongTensor] = None,
426           position_ids: Optional[torch.LongTensor] = None,
427 +         past_key_value: Optional[Tuple[torch.Tensor]] = None,
428           output_attentions: bool = False,
429           use_cache: bool = False,
430 +         **kwargs,
431       ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
432 +         # InternLM2FlashAttention2 attention does not support output_attentions
433 +         if "padding_mask" in kwargs:
434 +             warnings.warn(
435 +                 "Passing `padding_mask` is deprecated and will be removed in v4.37. "
436 +                 "Please make sure use `attention_mask` instead.`"
437               )

439 +             # overwrite attention_mask with padding_mask
440 +             attention_mask = kwargs.pop("padding_mask")

442           output_attentions = False

444           bsz, q_len, _ = hidden_states.size()
461           key_states = key_states.transpose(1, 2)
462           value_states = value_states.transpose(1, 2)

464 +         kv_seq_len = key_states.shape[-2]
465 +         if past_key_value is not None:
466 +             kv_seq_len += past_key_value[0].shape[-2]

468 +         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

470 +         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

472           if past_key_value is not None:
473 +             # reuse k, v, self_attention
474 +             key_states = torch.cat([past_key_value[0], key_states], dim=2)
475 +             value_states = torch.cat([past_key_value[1], value_states], dim=2)

477 +         past_key_value = (key_states, value_states) if use_cache else None

479           query_states = query_states.transpose(1, 2)
480           key_states = key_states.transpose(1, 2)
481           value_states = value_states.transpose(1, 2)

483 +         dropout_rate = 0.0 if not self.training else self.attention_dropout

485           attn_output = self._flash_attention_forward(
486               query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
487           )
488           attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
489           attn_output = self.wo(attn_output)

491           if not output_attentions:
492               attn_weights = None

494 +         return attn_output, attn_weights, past_key_value

496       def _flash_attention_forward(
497           self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
510           attention_mask (`torch.Tensor`):
511               The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
512               position of padding tokens and 1 for the position of non-padding tokens.
513 +         dropout (`int`, *optional*):
514               Attention dropout
515           softmax_scale (`float`, *optional*):
516               The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
517           """
518           # Contains at least one padding token in the sequence
519 +         causal = self.is_causal and query_length != 1
520           if attention_mask is not None:
521               batch_size = query_states.shape[0]
522 +             query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
523                   query_states, key_states, value_states, attention_mask, query_length
524               )

526               cu_seqlens_q, cu_seqlens_k = cu_seq_lens
527               max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

529 +             attn_output_unpad = flash_attn_varlen_func(
530                   query_states,
531                   key_states,
532                   value_states,
539                   causal=causal,
540               )

542 +             attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
543           else:
544 +             attn_output = flash_attn_func(
545                   query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
546               )

548           return attn_output

550 +     def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
551           indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
552           batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

554 +         key_layer = index_first_axis(
555               key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
556           )
557 +         value_layer = index_first_axis(
558               value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
559           )

561           if query_length == kv_seq_len:
562 +             query_layer = index_first_axis(
563                   query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
564               )
565               cu_seqlens_q = cu_seqlens_k
575           else:
576               # The -q_len: slice assumes left padding.
577               attention_mask = attention_mask[:, -query_length:]
578 +             query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

580           return (
581               query_layer,
582               key_layer,
583               value_layer,
584 +             indices_q.to(torch.int64),
585               (cu_seqlens_q, cu_seqlens_k),
586               (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
587           )
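The dictionary that follows maps `config.attn_implementation` to one of the two attention classes above, so switching between the eager path and the flash-attention path is a config-level choice. A hedged usage sketch; the repo id is a placeholder, and it assumes the checkpoint's custom config exposes `attn_implementation`, as the lookup below implies:

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo = "internlm/internlm2-chat-7b"   # placeholder repo id

config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
config.attn_implementation = "flash_attention_2"   # or "eager" when flash_attn is not installed

model = AutoModelForCausalLM.from_pretrained(
    repo, config=config, torch_dtype=torch.float16, trust_remote_code=True
)
```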
589   INTERNLM2_ATTENTION_CLASSES = {
590       "eager": InternLM2Attention,
591       "flash_attention_2": InternLM2FlashAttention2,
592   }

594 + # Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
595   class InternLM2DecoderLayer(nn.Module):
596 +     def __init__(self, config: InternLM2Config):
597           super().__init__()
598           self.hidden_size = config.hidden_size

600 +         self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)

602           self.feed_forward = InternLM2MLP(config)
603           self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
608           hidden_states: torch.Tensor,
609           attention_mask: Optional[torch.Tensor] = None,
610           position_ids: Optional[torch.LongTensor] = None,
611 +         past_key_value: Optional[Tuple[torch.Tensor]] = None,
612           output_attentions: Optional[bool] = False,
613           use_cache: Optional[bool] = False,
614 +         **kwargs,
615       ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
616           """
617           Args:
627               (see `past_key_values`).
628           past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
629           """
630 +         if "padding_mask" in kwargs:
631 +             warnings.warn(
632 +                 "Passing `padding_mask` is deprecated and will be removed in v4.37. "
633 +                 "Please make sure use `attention_mask` instead.`"
634 +             )

636           residual = hidden_states

638           hidden_states = self.attention_norm(hidden_states)
645               past_key_value=past_key_value,
646               output_attentions=output_attentions,
647               use_cache=use_cache,
648 +             **kwargs,
649           )
650           hidden_states = residual + hidden_states
689       InternLM2_START_DOCSTRING,
690   )
691   class InternLM2PreTrainedModel(PreTrainedModel):
692       config_class = InternLM2Config
693       base_model_prefix = "model"
694       supports_gradient_checkpointing = True
695       _no_split_modules = ["InternLM2DecoderLayer"]
696 +     _skip_keys_device_placement = "past_key_values"

698       def _init_weights(self, module):
699           std = self.config.initializer_range
742               config.n_positions - 1]`.

744               [What are position IDs?](../glossary#position-ids)
745 +         past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
746 +             when `config.use_cache=True`):
747 +             Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
748 +             `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
749 +             `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.

751 +             Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
752 +             blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

754               If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
755               have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
769               more detail.
770           return_dict (`bool`, *optional*):
771               Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
772       """
775 + # Modified from transformers.model.llama.modeling_llama.LlamaModel
776   @add_start_docstrings(
777       "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
778       InternLM2_START_DOCSTRING,
796           self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)

798 +         self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
799           self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

801           self.gradient_checkpointing = False
808       def set_input_embeddings(self, value):
809           self.tok_embeddings = value

811 +     def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
812 +         # create causal mask
813 +         # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
814 +         combined_attention_mask = None
815 +         if input_shape[-1] > 1:
816 +             combined_attention_mask = _make_causal_mask(
817 +                 input_shape,
818 +                 inputs_embeds.dtype,
819 +                 device=inputs_embeds.device,
820 +                 past_key_values_length=past_key_values_length,
821 +             )

823 +         if attention_mask is not None:
824 +             # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
825 +             expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
826 +                 inputs_embeds.device
827 +             )
828 +             combined_attention_mask = (
829 +                 expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
830 +             )

832 +         return combined_attention_mask

834       @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
835       def forward(
836           self,
837           input_ids: torch.LongTensor = None,
838           attention_mask: Optional[torch.Tensor] = None,
839           position_ids: Optional[torch.LongTensor] = None,
840 +         past_key_values: Optional[List[torch.FloatTensor]] = None,
841           inputs_embeds: Optional[torch.FloatTensor] = None,
842           use_cache: Optional[bool] = None,
843           output_attentions: Optional[bool] = None,
844           output_hidden_states: Optional[bool] = None,
845           return_dict: Optional[bool] = None,
846       ) -> Union[Tuple, BaseModelOutputWithPast]:
847           output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
848           output_hidden_states = (
849               output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
850           )
851           use_cache = use_cache if use_cache is not None else self.config.use_cache

853           return_dict = return_dict if return_dict is not None else self.config.use_return_dict

855 +         if self.config.attn_implementation == "flash_attention_2":
856 +             _import_flash_attn()

858 +         # retrieve input_ids and inputs_embeds
859 +         if input_ids is not None and inputs_embeds is not None:
860 +             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
861 +         elif input_ids is not None:
862 +             batch_size, seq_length = input_ids.shape[:2]
863 +         elif inputs_embeds is not None:
864 +             batch_size, seq_length = inputs_embeds.shape[:2]
865 +         else:
866 +             raise ValueError("You have to specify either input_ids or inputs_embeds")

868 +         seq_length_with_past = seq_length
869 +         past_key_values_length = 0
870 +         if past_key_values is not None:
871 +             past_key_values_length = past_key_values[0][0].shape[2]
872 +             seq_length_with_past = seq_length_with_past + past_key_values_length

874 +         if position_ids is None:
875 +             device = input_ids.device if input_ids is not None else inputs_embeds.device
876 +             position_ids = torch.arange(
877 +                 past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
878               )
879 +             position_ids = position_ids.unsqueeze(0)

881           if inputs_embeds is None:
882               inputs_embeds = self.tok_embeddings(input_ids)

884 +         if self.config.attn_implementation == "flash_attention_2":
885 +             # 2d mask is passed through the layers
886 +             attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
887 +         else:
888 +             if attention_mask is None:
889 +                 attention_mask = torch.ones(
890 +                     (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
891 +                 )
892 +             attention_mask = self._prepare_decoder_attention_mask(
893 +                 attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
894               )

896           # embed positions
897           hidden_states = inputs_embeds

899 +         if self.gradient_checkpointing and self.training:
900 +             if use_cache:
901 +                 logger.warning_once(
902 +                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
903 +                 )
904 +                 use_cache = False

906           # decoder layers
907           all_hidden_states = () if output_hidden_states else None
908           all_self_attns = () if output_attentions else None
909 +         next_decoder_cache = () if use_cache else None

911 +         for idx, decoder_layer in enumerate(self.layers):
912               if output_hidden_states:
913                   all_hidden_states += (hidden_states,)

915 +             past_key_value = past_key_values[idx] if past_key_values is not None else None

917               if self.gradient_checkpointing and self.training:

919 +                 def create_custom_forward(module):
920 +                     def custom_forward(*inputs):
921 +                         # None for past_key_value
922 +                         return module(*inputs, output_attentions, None)

924 +                     return custom_forward

926 +                 layer_outputs = torch.utils.checkpoint.checkpoint(
927 +                     create_custom_forward(decoder_layer),
928                       hidden_states,
929 +                     attention_mask,
930                       position_ids,
931 +                     None,
932                   )
933               else:
934                   layer_outputs = decoder_layer(
935                       hidden_states,
936 +                     attention_mask=attention_mask,
937                       position_ids=position_ids,
938 +                     past_key_value=past_key_value,
939                       output_attentions=output_attentions,
940                       use_cache=use_cache,
941                   )

943               hidden_states = layer_outputs[0]

945               if use_cache:
946 +                 next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

948               if output_attentions:
949                   all_self_attns += (layer_outputs[1],)
955               all_hidden_states += (hidden_states,)

957           next_cache = next_decoder_cache if use_cache else None
958           if not return_dict:
959               return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
960           return BaseModelOutputWithPast(
964               attentions=all_self_attns,
965           )
968  + # Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
969    class InternLM2ForCausalLM(InternLM2PreTrainedModel):
970        _auto_class = "AutoModelForCausalLM"

972        _tied_weights_keys = ["output.weight"]

974        def __init__(self, config):
1005           input_ids: torch.LongTensor = None,
1006           attention_mask: Optional[torch.Tensor] = None,
1007           position_ids: Optional[torch.LongTensor] = None,
1008 +         past_key_values: Optional[List[torch.FloatTensor]] = None,
1009           inputs_embeds: Optional[torch.FloatTensor] = None,
1010           labels: Optional[torch.LongTensor] = None,
1011           use_cache: Optional[bool] = None,
1012           output_attentions: Optional[bool] = None,
1013           output_hidden_states: Optional[bool] = None,
1014           return_dict: Optional[bool] = None,
1015       ) -> Union[Tuple, CausalLMOutputWithPast]:
1016           r"""
1017           Args:
1027           ```python
1028           >>> from transformers import AutoTokenizer, InternLM2ForCausalLM

1030 +         >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1031 +         >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

1033           >>> prompt = "Hey, are you conscious? Can you talk to me?"
1034           >>> inputs = tokenizer(prompt, return_tensors="pt")
1056               output_attentions=output_attentions,
1057               output_hidden_states=output_hidden_states,
1058               return_dict=return_dict,
1059           )

1061           hidden_states = outputs[0]
1062 +         logits = self.output(hidden_states)
1063           logits = logits.float()

1065           loss = None
1088           )

1090       def prepare_inputs_for_generation(
1091 +         self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1092       ):
1093           if past_key_values is not None:
1094 +             past_length = past_key_values[0][0].shape[2]

1096 +             # Some generation methods already pass only the last input ID
1097 +             if input_ids.shape[1] > past_length:
1098 +                 remove_prefix_length = past_length
1099               else:
1100 +                 # Default to old behavior: keep only final ID
1101 +                 remove_prefix_length = input_ids.shape[1] - 1

1103 +             input_ids = input_ids[:, remove_prefix_length:]

1105           position_ids = kwargs.get("position_ids", None)
1106           if attention_mask is not None and position_ids is None:
1114           if inputs_embeds is not None and past_key_values is None:
1115               model_inputs = {"inputs_embeds": inputs_embeds}
1116           else:
1117 +             model_inputs = {"input_ids": input_ids}

1119           model_inputs.update(
1120               {
1121                   "position_ids": position_ids,
1122                   "past_key_values": past_key_values,
1123 +                 "use_cache": kwargs.get("use_cache"),
1124                   "attention_mask": attention_mask,
1125               }
1126           )
1135           )
1136           return reordered_past

1138 +     def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=""):
1139 +         prompt = ""
1140           if meta_instruction:
1141 +             prompt += f"""<s>[UNUSED_TOKEN_146]system\n{meta_instruction}[UNUSED_TOKEN_145]\n"""
1142 +         else:
1143 +             prompt += "<s>"
1144           for record in history:
1145 +             prompt += f"""[UNUSED_TOKEN_146]user\n{record[0]}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n{record[1]}[UNUSED_TOKEN_145]\n"""
1146 +         prompt += f"""[UNUSED_TOKEN_146]user\n{query}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n"""
1147           return tokenizer([prompt], return_tensors="pt")

1149       @torch.no_grad()
1151           self,
1152           tokenizer,
1153           query: str,
1154 +         history: List[Tuple[str, str]] = [],
1155           streamer: Optional[BaseStreamer] = None,
1156           max_new_tokens: int = 1024,
1157           do_sample: bool = True,
1158           temperature: float = 0.8,
1159           top_p: float = 0.8,
1160           meta_instruction: str = "You are an AI assistant whose name is InternLM (书生·浦语).\n"
1161 +         "- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n"
1162 +         "- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.",
1163           **kwargs,
1164       ):
1165           inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
1166           inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
1167           # also add end-of-assistant token in eos token id to avoid unnecessary generation
1168 +         eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(["[UNUSED_TOKEN_145]"])[0]]
1169           outputs = self.generate(
1170               **inputs,
1171               streamer=streamer,
1178           )
1179           outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]) :]
1180           response = tokenizer.decode(outputs, skip_special_tokens=True)
1181 +         response = response.split("[UNUSED_TOKEN_145]")[0]
1182           history = history + [(query, response)]
1183           return response, history
1187           self,
1188           tokenizer,
1189           query: str,
1190 +         history: List[Tuple[str, str]] = [],
1191           max_new_tokens: int = 1024,
1192           do_sample: bool = True,
1193           temperature: float = 0.8,
1194           top_p: float = 0.8,
1195           **kwargs,
1196       ):
1197           """
1198           Return a generator in format: (response, history)
1199           Eg.
1209           response_queue = queue.Queue(maxsize=20)

1211           class ChatStreamer(BaseStreamer):
1212               def __init__(self, tokenizer) -> None:
1213                   super().__init__()
1214                   self.tokenizer = tokenizer
1216                   self.query = query
1217                   self.history = history
1218                   self.response = ""
1219                   self.received_inputs = False
1220                   self.queue.put((self.response, history + [(self.query, self.response)]))
1230                       self.received_inputs = True
1231                       return

1233 +                 token = self.tokenizer.decode([value[-1]], skip_special_tokens=True)
1234 +                 if token.strip() != "[UNUSED_TOKEN_145]":
1235                       self.response = self.response + token
1236                       history = self.history + [(self.query, self.response)]
1237                       self.queue.put((self.response, history))

1239               def end(self):
1240                   self.queue.put(None)
1264           return consumer()
+
# Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
|
1268 |
@add_start_docstrings(
|
1269 |
"""
|
1270 |
The InternLM2 Model transformer with a sequence classification head on top (linear layer).
|
1271 |
|
1272 |
+
[`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
|
1273 |
+
as other causal models (e.g. GPT-2) do.
|
1274 |
|
1275 |
Since it does classification on the last token, it requires to know the position of the last token. If a
|
1276 |
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
|
|
1281 |
InternLM2_START_DOCSTRING,
|
1282 |
)
|
1283 |
class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
|
|
|
|
|
1284 |
def __init__(self, config):
|
1285 |
super().__init__(config)
|
1286 |
self.num_labels = config.num_labels
|
|
|
1302 |
input_ids: torch.LongTensor = None,
|
1303 |
attention_mask: Optional[torch.Tensor] = None,
|
1304 |
position_ids: Optional[torch.LongTensor] = None,
|
1305 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1306 |
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1307 |
labels: Optional[torch.LongTensor] = None,
|
1308 |
use_cache: Optional[bool] = None,
|
|
|
1343 |
sequence_lengths = -1
|
1344 |
else:
|
1345 |
if input_ids is not None:
|
1346 |
+
sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
|
1347 |
+
logits.device
|
1348 |
+
)
|
|
|
1349 |
else:
|
1350 |
sequence_lengths = -1
|
1351 |
|
|
|
1357 |
if self.config.problem_type is None:
|
1358 |
if self.num_labels == 1:
|
1359 |
self.config.problem_type = "regression"
|
1360 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
1361 |
self.config.problem_type = "single_label_classification"
|
1362 |
else:
|
1363 |
self.config.problem_type = "multi_label_classification"
|
|
|
1385 |
hidden_states=transformer_outputs.hidden_states,
|
1386 |
attentions=transformer_outputs.attentions,
|
1387 |
)
|
|
|
model-00004-of-00008.safetensors → pytorch_model-00001-of-00008.bin
RENAMED
@@ -1,3 +1,3 @@
1   version https://git-lfs.github.com/spec/v1
2 - oid sha256:
3 - size
2 + oid sha256:fdd03c1ea040b17ce992eb303d0824f5694932902d0c3fa57b5a8d1df2dc8082
3 + size 1949342053

model-00001-of-00008.safetensors → pytorch_model-00002-of-00008.bin
RENAMED
@@ -1,3 +1,3 @@
1   version https://git-lfs.github.com/spec/v1
2 - oid sha256:
3 - size
2 + oid sha256:377d66327bcc2dc4b3e5f4e90f24b428c55832ece07194990248647ea65c2db5
3 + size 1946249825

model-00002-of-00008.safetensors → pytorch_model-00003-of-00008.bin
RENAMED
@@ -1,3 +1,3 @@
1   version https://git-lfs.github.com/spec/v1
2 - oid sha256:
3 - size
2 + oid sha256:98aa01b7ffd7901699b4588b5e6b8f753e66c1874f8b855c36b23de2c95ae9d8
3 + size 1979786923

model-00003-of-00008.safetensors → pytorch_model-00004-of-00008.bin
RENAMED
@@ -1,3 +1,3 @@
1   version https://git-lfs.github.com/spec/v1
2 - oid sha256:
3 - size
2 + oid sha256:9362abdb2afd077e579a7860ad570769255c2d64af2b0831413578c77a44d779
3 + size 1946249889

pytorch_model-00005-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
1 + version https://git-lfs.github.com/spec/v1
2 + oid sha256:56761cf09eb95e676bce5c1dc53fd4f8c317775d146fe2313dcb7b81c48cfa39
3 + size 1979786987

pytorch_model-00006-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
1 + version https://git-lfs.github.com/spec/v1
2 + oid sha256:7a825cc1e85ffc038efaed5a06ae398f424916bdd768bcf62d7fb4bc552d71ef
3 + size 1946249889

pytorch_model-00007-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
1 + version https://git-lfs.github.com/spec/v1
2 + oid sha256:224105eee8f19409994aea890cc6cc9da16ef4278ad831fdd4e9fcf86f925256
3 + size 1979786987

pytorch_model-00008-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
1 + version https://git-lfs.github.com/spec/v1
2 + oid sha256:0ed276d23e8a4ab9ec5bafe491cb7147b4b51232a66ce0b9f304bc8a5430d82f
3 + size 1748039973

pytorch_model.bin.index.json
ADDED
@@ -0,0 +1,3 @@
1 + version https://git-lfs.github.com/spec/v1
2 + oid sha256:a8a1efb6998624330a0564f9bba63eb8ccae0ad54a6d0176c64f2eb30721f2b5
3 + size 18179
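This revision moves the weights back from sharded `.safetensors` files to `pytorch_model-*.bin` shards tracked through the Git LFS pointers shown above. Loading is unchanged from the user's side; a hedged sketch, with the repo id as a placeholder:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "internlm/internlm-chat-7b"   # placeholder; substitute the repo this PR belongs to

tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.float16,   # the eight ~2 GB shards are fetched via LFS and assembled automatically
    trust_remote_code=True,
).eval()
```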
special_tokens_map.json
CHANGED
@@ -1,38 +1,6 @@
1    {
2  - "
3  -
4  -
5  -
6  - "<|action_end|>",
7  - "<|interpreter|>",
8  - "<|plugin|>"
9  - ],
10 - "bos_token": {
11 - "content": "<s>",
12 - "lstrip": false,
13 - "normalized": false,
14 - "rstrip": false,
15 - "single_word": false
16 - },
17 - "eos_token": {
18 - "content": "</s>",
19 - "lstrip": false,
20 - "normalized": false,
21 - "rstrip": false,
22 - "single_word": false
23 - },
24 - "pad_token": {
25 - "content": "</s>",
26 - "lstrip": false,
27 - "normalized": false,
28 - "rstrip": false,
29 - "single_word": false
30 - },
31 - "unk_token": {
32 - "content": "<unk>",
33 - "lstrip": false,
34 - "normalized": false,
35 - "rstrip": false,
36 - "single_word": false
37 - }
38   }

1    {
2  + "bos_token": "<s>",
3  + "eos_token": "</s>",
4  + "pad_token": "</s>",
5  + "unk_token": "<unk>"
6    }
tokenization_internlm2.py → tokenization_internlm.py
RENAMED
@@ -1,7 +1,10 @@
1    # coding=utf-8
2  - # Copyright (c)
3    #
4  - # This code is based on
5    #
6    # Licensed under the Apache License, Version 2.0 (the "License");
7    # you may not use this file except in compliance with the License.
@@ -15,7 +18,7 @@
15   # See the License for the specific language governing permissions and
16   # limitations under the License.

18 - """Tokenization classes for
19   import os
20   from shutil import copyfile
21   from typing import Any, Dict, List, Optional, Tuple
@@ -31,10 +34,9 @@ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
31   PRETRAINED_VOCAB_FILES_MAP = {}

34 -
35 - class InternLM2Tokenizer(PreTrainedTokenizer):
36   """
37 - Construct a

39   Args:
40       vocab_file (`str`):
@@ -77,6 +79,8 @@ class InternLM2Tokenizer(PreTrainedTokenizer):
77       **kwargs,
78   )

80   @property
81   def no_prefix_space_tokens(self):
82       if self._no_prefix_space_tokens is None:

1    # coding=utf-8
2  + # Copyright (c) InternLM. All rights reserved.
3    #
4  + # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5  + # and OPT implementations in this library. It has been modified from its
6  + # original forms to accommodate minor architectural differences compared
7  + # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8    #
9    # Licensed under the Apache License, Version 2.0 (the "License");
10   # you may not use this file except in compliance with the License.
18   # See the License for the specific language governing permissions and
19   # limitations under the License.

21 + """Tokenization classes for IntermLM."""
22   import os
23   from shutil import copyfile
24   from typing import Any, Dict, List, Optional, Tuple
34   PRETRAINED_VOCAB_FILES_MAP = {}

37 + class InternLMTokenizer(PreTrainedTokenizer):
38   """
39 + Construct a InternLM tokenizer. Based on byte-level Byte-Pair-Encoding.

41   Args:
42       vocab_file (`str`):
79       **kwargs,
80   )

82 + """ Initialization"""
83 +
84   @property
85   def no_prefix_space_tokens(self):
86       if self._no_prefix_space_tokens is None:
tokenization_internlm2_fast.py
DELETED
@@ -1,214 +0,0 @@
# coding=utf-8
# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on transformers/src/transformers/models/llama/tokenization_llama_fast.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tokenization Fast class for InternLM."""
import os
from shutil import copyfile
from typing import Any, Dict, Optional, Tuple

from tokenizers import processors, decoders, Tokenizer, normalizers
from tokenizers.models import BPE

from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from transformers.utils import logging

from transformers.convert_slow_tokenizer import (
    SLOW_TO_FAST_CONVERTERS,
    SpmConverter,
    SentencePieceExtractor,
)

from .tokenization_internlm2 import InternLM2Tokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}

# Modified from transformers.convert_slow_tokenizer.LlamaConverter
class InternLM2Converter(SpmConverter):
    handle_byte_fallback = True

    def vocab(self, proto):
        vocab = [
            ("<unk>", 0.0),
            ("<s>", 0.0),
            ("</s>", 0.0),
        ]
        vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
        return vocab

    def unk_id(self, proto):
        unk_id = 0
        return unk_id

    def decoder(self, replacement, add_prefix_space):
        decoders_sequence = [
            decoders.Replace("▁", " "),
            decoders.ByteFallback(),
            decoders.Fuse(),
        ]
        if self.proto.normalizer_spec.add_dummy_prefix:
            decoders_sequence.append(decoders.Strip(content=" ", left=1))
        return decoders.Sequence(decoders_sequence)

    def tokenizer(self, proto):
        model_type = proto.trainer_spec.model_type
        vocab_scores = self.vocab(proto)
        # special tokens
        added_tokens = self.original_tokenizer.added_tokens_decoder
        for i in range(len(vocab_scores)):
            piece, score = vocab_scores[i]
            if i in added_tokens:
                vocab_scores[i] = (added_tokens[i].content, score)
        if model_type == 1:
            raise RuntimeError("InternLM2 is supposed to be a BPE model!")

        elif model_type == 2:
            _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores)
            bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
            tokenizer = Tokenizer(
                BPE(bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=True)
            )
            tokenizer.add_special_tokens(
                [ added_token for index, added_token in added_tokens.items()]
            )
        else:
            raise Exception(
                "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
            )

        return tokenizer

    def normalizer(self, proto):
        normalizers_list = []
        if proto.normalizer_spec.add_dummy_prefix:
            normalizers_list.append(normalizers.Prepend(prepend="▁"))
        normalizers_list.append(normalizers.Replace(pattern=" ", content="▁"))
        return normalizers.Sequence(normalizers_list)

    def pre_tokenizer(self, replacement, add_prefix_space):
        return None

SLOW_TO_FAST_CONVERTERS["InternLM2Tokenizer"] = InternLM2Converter


# Modified from transformers.model.llama.tokenization_llama_fast.LlamaTokenizerFast -> InternLM2TokenizerFast
class InternLM2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = InternLM2Tokenizer
    padding_side = "left"
    model_input_names = ["input_ids", "attention_mask"]
    _auto_class = "AutoTokenizer"

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="</s>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        decode_with_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file=vocab_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            sp_model_kwargs=sp_model_kwargs,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            decode_with_prefix_space=decode_with_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        self._add_bos_token = add_bos_token
        self._add_eos_token = add_eos_token
        self.update_post_processor()
        self.vocab_file = vocab_file

    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    def update_post_processor(self):
        """
        Updates the underlying post processor with the current `bos_token` and `eos_token`.
        """
        bos = self.bos_token
        bos_token_id = self.bos_token_id
        if bos is None and self.add_bos_token:
            raise ValueError("add_bos_token = True but bos_token = None")

        eos = self.eos_token
        eos_token_id = self.eos_token_id
        if eos is None and self.add_eos_token:
            raise ValueError("add_eos_token = True but eos_token = None")

        single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
        pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"

        special_tokens = []
        if self.add_bos_token:
            special_tokens.append((bos, bos_token_id))
        if self.add_eos_token:
            special_tokens.append((eos, eos_token_id))
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=single, pair=pair, special_tokens=special_tokens
        )

    @property
    def add_eos_token(self):
        return self._add_eos_token

    @property
    def add_bos_token(self):
        return self._add_bos_token

    @add_eos_token.setter
    def add_eos_token(self, value):
        self._add_eos_token = value
        self.update_post_processor()

    @add_bos_token.setter
    def add_bos_token(self, value):
        self._add_bos_token = value
        self.update_post_processor()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
tokenizer_config.json
CHANGED
@@ -1,102 +1,15 @@
1    {
2  - "add_bos_token": true,
3  - "add_eos_token": false,
4  - "added_tokens_decoder": {
5  - "0": {
6  - "content": "<unk>",
7  - "lstrip": false,
8  - "normalized": false,
9  - "rstrip": false,
10 - "single_word": false,
11 - "special": true
12 - },
13 - "1": {
14 - "content": "<s>",
15 - "lstrip": false,
16 - "normalized": false,
17 - "rstrip": false,
18 - "single_word": false,
19 - "special": true
20 - },
21 - "2": {
22 - "content": "</s>",
23 - "lstrip": false,
24 - "normalized": false,
25 - "rstrip": false,
26 - "single_word": false,
27 - "special": true
28 - },
29 - "92538": {
30 - "content": "<|plugin|>",
31 - "lstrip": false,
32 - "normalized": false,
33 - "rstrip": false,
34 - "single_word": false,
35 - "special": true
36 - },
37 - "92539": {
38 - "content": "<|interpreter|>",
39 - "lstrip": false,
40 - "normalized": false,
41 - "rstrip": false,
42 - "single_word": false,
43 - "special": true
44 - },
45 - "92540": {
46 - "content": "<|action_end|>",
47 - "lstrip": false,
48 - "normalized": false,
49 - "rstrip": false,
50 - "single_word": false,
51 - "special": true
52 - },
53 - "92541": {
54 - "content": "<|action_start|>",
55 - "lstrip": false,
56 - "normalized": false,
57 - "rstrip": false,
58 - "single_word": false,
59 - "special": true
60 - },
61 - "92542": {
62 - "content": "<|im_end|>",
63 - "lstrip": false,
64 - "normalized": false,
65 - "rstrip": false,
66 - "single_word": false,
67 - "special": true
68 - },
69 - "92543": {
70 - "content": "<|im_start|>",
71 - "lstrip": false,
72 - "normalized": false,
73 - "rstrip": false,
74 - "single_word": false,
75 - "special": true
76 - }
77 - },
78 - "additional_special_tokens": [
79 - "<|im_start|>",
80 - "<|im_end|>",
81 - "<|action_start|>",
82 - "<|action_end|>",
83 - "<|interpreter|>",
84 - "<|plugin|>"
85 - ],
86   "auto_map": {
87   "AutoTokenizer": [
88 - "
89 -
90   ]
91   },
92   "bos_token": "<s>",
93 - "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
94   "clean_up_tokenization_spaces": false,
95 - "decode_with_prefix_space": false,
96   "eos_token": "</s>",
97   "model_max_length": 1000000000000000019884624838656,
98   "pad_token": "</s>",
99 - "
100 - "tokenizer_class": "InternLM2Tokenizer",
101   "unk_token": "<unk>"
102   }

1    {
2    "auto_map": {
3    "AutoTokenizer": [
4  + "tokenization_internlm.InternLMTokenizer",
5  + null
6    ]
7    },
8    "bos_token": "<s>",
9    "clean_up_tokenization_spaces": false,
10   "eos_token": "</s>",
11   "model_max_length": 1000000000000000019884624838656,
12   "pad_token": "</s>",
13 + "tokenizer_class": "InternLMTokenizer",
14   "unk_token": "<unk>"
15   }
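After this change the tokenizer config points only at the slow `InternLMTokenizer` (the fast-tokenizer entry, the `<|im_start|>`/`<|im_end|>`-style added tokens, and the `chat_template` are gone), so downstream code should rely on the basic special tokens listed above. A quick hedged check, with the repo id again a placeholder:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)  # placeholder repo id

print(type(tokenizer).__name__)   # expected: InternLMTokenizer (slow tokenizer only)
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token, tokenizer.unk_token)
# <s> </s> </s> <unk>
```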