czczup committed
Commit b7f394a
1 Parent(s): ce3f67a

Upload folder using huggingface_hub

config.json CHANGED
@@ -1,13 +1,9 @@
 {
   "_commit_hash": null,
-  "_name_or_path": "OpenGVLab/Mini-InternVL-Chat-V1-5",
+  "_name_or_path": "OpenGVLab/Mini-InternVL-Chat-2B-V1-5",
   "architectures": [
     "InternVLChatModel"
   ],
-  "auto_map": {
-    "AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
-    "AutoModel": "modeling_internvl_chat.InternVLChatModel"
-  },
   "downsample_ratio": 0.5,
   "dynamic_image_size": true,
   "force_image_size": 448,
@@ -77,7 +73,10 @@
   "return_dict": true,
   "return_dict_in_generate": false,
   "rms_norm_eps": 1e-05,
-  "rope_scaling": null,
+  "rope_scaling": {
+    "factor": 3.0,
+    "type": "dynamic"
+  },
   "rope_theta": 1000000,
   "sep_token_id": null,
   "suppress_tokens": null,
@@ -91,7 +90,7 @@
   "top_p": 1.0,
   "torch_dtype": "bfloat16",
   "torchscript": false,
-  "transformers_version": "4.37.2",
+  "transformers_version": "4.36.2",
   "typical_p": 1.0,
   "use_bfloat16": false,
   "use_cache": false,
@@ -188,7 +187,7 @@
   "top_p": 1.0,
   "torch_dtype": "float32",
   "torchscript": false,
-  "transformers_version": "4.37.2",
+  "transformers_version": "4.36.2",
   "typical_p": 1.0,
   "use_bfloat16": false,
   "use_flash_attn": true
generation_config.json CHANGED
@@ -1,4 +1,4 @@
 {
   "_from_model_config": true,
-  "transformers_version": "4.37.2"
+  "transformers_version": "4.36.2"
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:508428116bf0dd1e782f9738ed8dd1742e91bd61ab46eac47c5a77c8c2154f1f
+oid sha256:4d5f814d2759a5de0e628ef0003c45a68fa4b6183ff905cc905c3d7ca3831805
 size 4411571040
modeling_internlm2.py CHANGED
@@ -49,6 +49,21 @@ _CONFIG_FOR_DOC = 'InternLM2Config'
 flash_attn_func, flash_attn_varlen_func = None, None
 pad_input, index_first_axis, unpad_input = None, None, None
 
+try:
+    from flash_attn import flash_attn_func as _flash_attn_func
+    from flash_attn import \
+        flash_attn_varlen_func as _flash_attn_varlen_func
+    from flash_attn.bert_padding import \
+        index_first_axis as _index_first_axis
+    from flash_attn.bert_padding import pad_input as _pad_input
+    from flash_attn.bert_padding import unpad_input as _unpad_input
+
+    flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
+    pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
+    has_flash_attn = True
+except:
+    has_flash_attn = False
+
 
 def _import_flash_attn():
     global flash_attn_func, flash_attn_varlen_func
@@ -855,7 +870,7 @@ class InternLM2Model(InternLM2PreTrainedModel):
 
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
-        if self.config.attn_implementation == 'flash_attention_2':
+        if self.config.attn_implementation == 'flash_attention_2' and has_flash_attn:
            _import_flash_attn()
 
        # retrieve input_ids and inputs_embeds
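
The pattern this commit adds to modeling_internlm2.py is a module-level optional-dependency probe: flash_attn is imported once inside try/except, and the resulting has_flash_attn flag gates the flash_attention_2 branch so the model degrades to the eager attention path instead of raising when the package is missing. A minimal standalone sketch of the same idea; pick_attn_implementation is an illustrative helper, not part of the repo's API:

import importlib.util

# Probe once at import time. Note the commit's try/except variant also
# catches broken installs (e.g. CUDA/ABI mismatches), not just absence.
has_flash_attn = importlib.util.find_spec('flash_attn') is not None

def pick_attn_implementation(requested: str) -> str:
    """Fall back to eager attention when flash_attn cannot be used."""
    if requested == 'flash_attention_2' and not has_flash_attn:
        return 'eager'
    return requested

print(pick_attn_implementation('flash_attention_2'))
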
modeling_internvl_chat.py CHANGED
@@ -246,7 +246,7 @@ class InternVLChatModel(PreTrainedModel):
 
         queries = []
         image_bs = pixel_values.shape[0]
-        print(f'dynamic ViT batch size: {image_bs}, image_counts: {image_counts}')
+        # print(f'dynamic ViT batch size: {image_bs}, image_counts: {image_counts}')
         for idx, image_count in enumerate(image_counts):
             image_token = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * image_count + IMG_END_TOKEN
             question = image_token + '\n' + questions[idx]
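
The only change to modeling_internvl_chat.py is muting a debug print in the batched-chat path. For reference, an alternative (not what this commit does) is to route such messages through Python's logging module so they can be re-enabled by log level rather than by editing code; the values below are hypothetical stand-ins:

import logging

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)  # set to DEBUG to see the message

image_bs, image_counts = 7, [3, 4]  # hypothetical values for illustration
logger.debug('dynamic ViT batch size: %d, image_counts: %s',
             image_bs, image_counts)
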
tokenizer_config.json CHANGED
@@ -171,8 +171,8 @@
   "bos_token": "<s>",
   "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "</s>",
-  "model_max_length": 5120,
+  "eos_token": "<|im_end|>",
+  "model_max_length": 8192,
   "pad_token": "</s>",
   "tokenizer_class": "InternLM2Tokenizer",
   "unk_token": "<unk>"