crumb committed
Commit
cc554fd
1 Parent(s): 7d1e3d5

Upload model

config.json ADDED
{
  "activation_function": "silu",
  "architectures": [
    "PlusModelForCausalLM"
  ],
  "auto_map": {
    "AutoConfig": "configuration_nano.NanoConfig",
    "AutoModelForCausalLM": "modeling_nano.PlusModelForCausalLM"
  },
  "bos_token_id": 1,
  "combined_qkv": true,
  "eos_token_id": 2,
  "expanded_lm_head_size": 8192,
  "expanded_wte_size": 8192,
  "experimental_full_adaption_rank": null,
  "ffn": "parallel",
  "full_adaptation_has_pre_proj": false,
  "full_adaptation_type": "no",
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "kv_hidden_size": null,
  "layer_norm_epsilon": 1e-06,
  "layernorm": "llamarmsnorm",
  "lm_head_bias": false,
  "lm_head_projection_bias": false,
  "max_position_embeddings": 2048,
  "model_type": "nano",
  "num_attention_heads": 12,
  "num_hidden_layers": 10,
  "pre_proj_dim": null,
  "rope_scaling": null,
  "rope_theta": 10000,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.36.2",
  "use_bias": false,
  "use_cache": true,
  "vocab_size": 32000
}
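
A quick way to sanity-check this config is to load it through the auto_map. A minimal sketch, assuming a placeholder repo id (this page does not state one); trust_remote_code is required because the config class lives in configuration_nano.py:

from transformers import AutoConfig

# "<user>/nano" is a placeholder repo id, not taken from this commit
config = AutoConfig.from_pretrained("<user>/nano", trust_remote_code=True)
print(config.model_type)         # "nano"
print(config.num_hidden_layers)  # 10
print(config.expanded_wte_size)  # 8192 (wide embedding projected down to hidden_size=768)
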
configuration_nano.py ADDED
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class NanoConfig(PretrainedConfig):
    model_type = "nano"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "max_position_embeddings": "max_position_embeddings",
        "num_attention_heads": "num_attention_heads",
        "num_hidden_layers": "num_hidden_layers",
    }

    def __init__(
        self,
        vocab_size=32000,
        max_position_embeddings=2048,
        expanded_wte_size=None,
        expanded_lm_head_size=None,
        hidden_size=768,
        kv_hidden_size=None,  # set this to use cross-attention
        num_hidden_layers=10,
        num_attention_heads=12,
        intermediate_size=None,  # defaults to 4 * hidden_size
        activation_function="silu",
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        combined_qkv=True,
        use_bias=False,
        lm_head_projection_bias=False,
        lm_head_bias=False,
        layernorm="llamarmsnorm",  # "layernorm" or "llamarmsnorm"
        rope_scaling=None,
        rope_theta=10000,
        ffn="llama-like",  # "llama-like" or "parallel"
        experimental_full_adaption_rank=None,  # e.g. 8
        full_adaptation_has_pre_proj=True,
        pre_proj_dim=1536,
        full_adaptation_type="no",  # "lora", "no", "linear", "linear-r", "linear-ra"
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.pre_proj_dim = pre_proj_dim
        self.full_adaptation_has_pre_proj = full_adaptation_has_pre_proj
        self.full_adaptation_type = full_adaptation_type
        self.tie_word_embeddings = tie_word_embeddings
        self.experimental_full_adaption_rank = experimental_full_adaption_rank
        self.ffn = ffn
        self.rope_theta = rope_theta
        self.layernorm = layernorm
        self.rope_scaling = rope_scaling
        self.lm_head_projection_bias = lm_head_projection_bias
        self.kv_hidden_size = kv_hidden_size
        self.lm_head_bias = lm_head_bias
        self.use_bias = use_bias
        self.expanded_wte_size = expanded_wte_size
        self.expanded_lm_head_size = expanded_lm_head_size
        self.combined_qkv = combined_qkv
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = (
            intermediate_size if intermediate_size is not None else hidden_size * 4
        )
        self.activation_function = activation_function
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
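
The only derived field is intermediate_size, which falls back to 4 * hidden_size when unset. A minimal sketch of instantiating the class directly (assumes configuration_nano.py is importable from the working directory):

from configuration_nano import NanoConfig

cfg = NanoConfig()  # all defaults
assert cfg.intermediate_size == 4 * cfg.hidden_size  # 3072 == 4 * 768

cfg2 = NanoConfig(intermediate_size=2048)
assert cfg2.intermediate_size == 2048  # an explicit value wins
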
generation_config.json ADDED
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.36.2"
}
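
These defaults were derived from config.json ("_from_model_config"), so generate() will use bos/eos ids 1 and 2 unless overridden. A sketch, again with a placeholder repo id:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("<user>/nano")  # placeholder repo id
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 1 2
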
model.safetensors ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:c5ca3f770208ce4f3cd1e358482ca79f38018aedb49a65f091b9794594f44bda
size 738239160
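
This entry is a Git LFS pointer, not the weights themselves; the oid is the SHA-256 of the actual file. A sketch for verifying a downloaded copy against the pointer:

import hashlib

# hash the downloaded file in 1 MiB chunks and compare with the pointer's oid
h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest() == "c5ca3f770208ce4f3cd1e358482ca79f38018aedb49a65f091b9794594f44bda")
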
modeling_nano.py ADDED
import torch
import torch.nn as nn
import torch.nn.functional as F

from typing import Optional, Tuple, Union

from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.models.llama.modeling_llama import (
    LlamaDynamicNTKScalingRotaryEmbedding,
    LlamaLinearScalingRotaryEmbedding,
    LlamaRMSNorm,
    LlamaRotaryEmbedding,
)
from transformers.utils import logging

from .configuration_nano import NanoConfig

logger = logging.get_logger(__name__)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class NanoAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.head_dim = config.hidden_size // config.num_attention_heads
        assert (
            self.head_dim * config.num_attention_heads == config.hidden_size
        ), "hidden_size must be divisible by num_attention_heads"
        self.use_bias = config.use_bias

        if not config.combined_qkv or config.kv_hidden_size is not None:
            self.query = nn.Linear(
                config.hidden_size, config.hidden_size, bias=self.use_bias
            )
            self.key = nn.Linear(
                config.kv_hidden_size or config.hidden_size,
                config.hidden_size,
                bias=self.use_bias,
            )
            self.value = nn.Linear(
                config.kv_hidden_size or config.hidden_size,
                config.hidden_size,
                bias=self.use_bias,
            )
        else:
            self.qkv = nn.Linear(
                config.hidden_size, config.hidden_size * 3, bias=self.use_bias
            )
        self.out = nn.Linear(config.hidden_size, config.hidden_size, bias=self.use_bias)
        self._init_rope()

    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = LlamaRotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.config.max_position_embeddings,
                base=self.config.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            scaling_factor = self.config.rope_scaling["factor"]
            if scaling_type == "linear":
                self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.config.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.config.rope_theta,
                )
            elif scaling_type == "dynamic":
                self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    # was `self.max_position_embeddings`, which does not exist on this module
                    max_position_embeddings=self.config.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.config.rope_theta,
                )
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def forward(self, x0, x1=None, causal=False, mask=None, position_ids=None, use_cache=True, layer_past=None):
        batch_size = x0.size(0)

        def split_heads(x):
            return x.view(
                batch_size, -1, self.config.num_attention_heads, self.head_dim
            ).transpose(1, 2)

        # match the condition used in __init__, so configs with kv_hidden_size
        # (cross-attention) also take the separate-projection path
        if not self.config.combined_qkv or self.config.kv_hidden_size is not None:
            q = split_heads(self.query(x0))
            k = split_heads(self.key(x1 if x1 is not None else x0))
            v = split_heads(self.value(x1 if x1 is not None else x0))
        else:
            q, k, v = self.qkv(x0).chunk(3, dim=-1)
            q = split_heads(q)
            k = split_heads(k)
            v = split_heads(v)

        kv_seq_len = k.shape[-2]
        if layer_past is not None:
            kv_seq_len += layer_past[0].shape[-2]
        cos, sin = self.rotary_emb(v, seq_len=kv_seq_len)
        if self.config.experimental_full_adaption_rank is not None:
            # with Split, each original token occupies several consecutive
            # positions, so repeat the position ids to match
            position_ids = position_ids.repeat_interleave(
                x0.shape[1] // position_ids.shape[-1], dim=1
            )
        # rotate only the new q/k, then append the cached (already rotated)
        # keys/values; concatenating before the rotation would rotate cached
        # keys a second time and mismatch position_ids
        q, k = apply_rotary_pos_emb(q, k, cos, sin, position_ids)

        if layer_past is not None:
            past_key, past_value = layer_past
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)

        present = (k, v) if use_cache else None

        # note: `mask` is currently not forwarded to SDPA. with a single query
        # token (cached decoding) the query must attend to the whole prefix,
        # so the causal flag is only set for multi-token inputs.
        attn_output = F.scaled_dot_product_attention(
            q, k, v, attn_mask=None, dropout_p=0.0,
            is_causal=causal and q.shape[-2] > 1,
        )
        attn_output = (
            attn_output.transpose(1, 2)
            .contiguous()
            .view(batch_size, -1, self.config.hidden_size)
        )
        return self.out(attn_output), present


class NanoGLU(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.gate_proj = nn.Linear(
            config.hidden_size, config.intermediate_size, bias=False
        )
        self.up_proj = nn.Linear(
            config.hidden_size, config.intermediate_size, bias=False
        )
        self.down_proj = nn.Linear(
            config.intermediate_size, config.hidden_size, bias=False
        )
        self.act_fn = ACT2FN[config.activation_function]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


class NanoBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attn = NanoAttention(config)
        self.ffn = NanoGLU(config)

        ln_class = LlamaRMSNorm if config.layernorm == "llamarmsnorm" else nn.LayerNorm
        self.ln1 = ln_class(config.hidden_size, eps=config.layer_norm_epsilon)
        self.ln2 = ln_class(config.hidden_size, eps=config.layer_norm_epsilon)

    def forward(self, x, mask=None, position_ids=None, use_cache=True, layer_past=None):
        # the config default spells this "llama-like"; accept both spellings
        if self.config.ffn in ("llama-like", "llamalike"):
            residual = x
            x = self.ln1(x)
            attn_out, present = self.attn(x, causal=True, mask=mask, position_ids=position_ids, use_cache=use_cache, layer_past=layer_past)
            x = residual + attn_out

            residual = x
            x = self.ln2(x)
            x = self.ffn(x)
            x = residual + x
        else:  # ffn == "parallel": attention and FFN read from the same input
            attn_in = self.ln1(x)
            ffn_in = self.ln2(x)

            attn_out, present = self.attn(attn_in, causal=True, mask=mask, position_ids=position_ids, use_cache=use_cache, layer_past=layer_past)
            ffn_out = self.ffn(ffn_in)

            x = x + attn_out + ffn_out

        if not use_cache:
            present = None
        return (x, present)


class NanoPreTrainedModel(PreTrainedModel):
    config_class = NanoConfig
    base_model_prefix = "transformer"
    is_parallelizable = False
    supports_gradient_checkpointing = True
    _no_split_modules = ["NanoBlock"]
    _skip_keys_device_placement = "past_key_values"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, NanoModel):
            module.gradient_checkpointing = value


class Split(nn.Module):
    """Splits each embedding vector into `splits` consecutive sub-tokens."""

    def __init__(self, splits):
        super().__init__()
        self.splits = splits

    def forward(self, x):
        bs, tokens, _ = x.shape
        return x.view(bs, tokens, self.splits, -1).reshape(bs, tokens * self.splits, -1)


class Recombine(nn.Module):
    """Inverse of Split: merges each group of `splits` sub-tokens back into one vector."""

    def __init__(self, splits):
        super().__init__()
        self.splits = splits

    def forward(self, x):
        bs = x.shape[0]
        tokens = x.shape[1] // self.splits
        return x.view(bs, tokens, -1)


class Residual(nn.Module):
    def __init__(self, module, a=None):
        super().__init__()
        self.module = module
        self.a = nn.Parameter(torch.tensor(a, dtype=torch.bfloat16)) if a is not None else None

    def forward(self, x):
        return self.module(x) * (self.a if self.a is not None else 1) + x


class LoRA(nn.Module):
    def __init__(self, d, r, a=1):
        super().__init__()
        self.fn_i = nn.Linear(d, r)
        self.fn_o = nn.Linear(r, d)
        self.a = nn.Parameter(torch.tensor(a, dtype=self.fn_i.weight.dtype))

    def forward(self, x):
        return self.fn_o(self.fn_i(x)) * self.a + x

    def get_delta_w(self):
        return torch.mm(self.fn_o.weight, self.fn_i.weight) * self.a


class NanoModel(NanoPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        ln_class = LlamaRMSNorm if config.layernorm == "llamarmsnorm" else nn.LayerNorm

        if config.experimental_full_adaption_rank is None:
            if config.expanded_wte_size is not None:
                self.wte = nn.Sequential(
                    nn.Embedding(config.vocab_size, config.expanded_wte_size),
                    nn.Linear(config.expanded_wte_size, config.hidden_size),
                )
            else:
                self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
        else:
            assert config.expanded_wte_size is not None, (
                "experimental full adaptation of token embeddings requires expanded_wte_size to be set"
            )
            self.d_0 = (
                config.pre_proj_dim
                if config.full_adaptation_has_pre_proj
                else config.expanded_wte_size
            )
            self.wte = nn.Sequential(
                nn.Embedding(config.vocab_size, config.expanded_wte_size),
                (
                    nn.Linear(config.expanded_wte_size, config.pre_proj_dim)
                    if config.full_adaptation_has_pre_proj
                    else nn.Identity()
                ),
                (
                    LoRA(self.d_0, config.experimental_full_adaption_rank)
                    if config.full_adaptation_type == "lora"
                    else nn.Linear(self.d_0, self.d_0)
                    if config.full_adaptation_type == "linear"
                    else Residual(nn.Linear(self.d_0, self.d_0))
                    if config.full_adaptation_type == "linear-r"
                    else Residual(nn.Linear(self.d_0, self.d_0), 1)
                    if config.full_adaptation_type == "linear-ra"
                    else nn.Identity()
                ),
                Split(self.d_0 // config.hidden_size),
            )
        self.h = nn.ModuleList(
            [NanoBlock(config) for _ in range(config.num_hidden_layers)]
        )
        self.ln_f = ln_class(config.hidden_size, eps=config.layer_norm_epsilon)
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self):
        return self.wte[0] if self.config.expanded_wte_size is not None else self.wte

    def set_input_embeddings(self, new_embeddings):
        if self.config.expanded_wte_size is not None:
            self.wte[0] = new_embeddings
        else:
            self.wte = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        # not every argument is used; the signature follows modeling_gpt2,
        # which this file is adapted from
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time"
            )
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(
                past_length,
                input_shape[-1] + past_length,
                dtype=torch.long,
                device=device,
            )
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = attention_mask[:, None, None, :]
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        if self.config.add_cross_attention and encoder_hidden_states is not None:
            (
                encoder_batch_size,
                encoder_sequence_length,
                _,
            ) = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        hidden_states = inputs_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        # the sequence length may have been expanded by Split, so take it from
        # hidden_states rather than from input_shape
        output_shape = (-1, hidden_states.shape[1], hidden_states.size(-1))

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # attention probabilities are not materialized by SDPA, so
        # output_attentions is effectively ignored and None is returned
        presents = () if use_cache else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                if layer_past is not None:
                    layer_past = tuple(
                        past_state.to(hidden_states.device)
                        for past_state in layer_past
                    )
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            outputs = block(hidden_states, mask=attention_mask, position_ids=position_ids, use_cache=use_cache, layer_past=layer_past)
            hidden_states = outputs[0]
            if use_cache:
                presents = presents + (outputs[1],)

        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(output_shape)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=None,
            cross_attentions=None,
        )


class NanoModelForCausalLM(NanoPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = NanoModel(config)
        if config.experimental_full_adaption_rank is None or config.full_adaptation_type == "no":
            if config.expanded_lm_head_size is not None:
                self.lm_head = nn.Sequential(
                    nn.Linear(
                        config.hidden_size, config.expanded_lm_head_size, bias=config.lm_head_projection_bias
                    ),
                    nn.Linear(
                        config.expanded_lm_head_size, config.vocab_size, bias=config.lm_head_bias
                    ),
                )
            else:
                self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
        else:
            d_0 = config.pre_proj_dim if config.full_adaptation_has_pre_proj else config.expanded_lm_head_size
            self.lm_head = nn.Sequential(
                Recombine(d_0 // config.hidden_size),
                nn.Linear(d_0, config.expanded_lm_head_size) if config.full_adaptation_has_pre_proj else nn.Identity(),
                (
                    LoRA(config.expanded_lm_head_size, config.experimental_full_adaption_rank)
                    if config.full_adaptation_type == "lora"
                    else nn.Linear(config.expanded_lm_head_size, config.expanded_lm_head_size)
                    if config.full_adaptation_type == "linear"
                    else Residual(nn.Linear(config.expanded_lm_head_size, config.expanded_lm_head_size))
                    if config.full_adaptation_type == "linear-r"
                    else Residual(nn.Linear(config.expanded_lm_head_size, config.expanded_lm_head_size), 1)
                    if config.full_adaptation_type == "linear-ra"
                    else nn.Identity()
                ),
                nn.Linear(config.expanded_lm_head_size, config.vocab_size),
            )
        self.model_parallel = False
        self.device_map = None
        self.post_init()

    def get_output_embeddings(self):
        if self.config.experimental_full_adaption_rank is None and self.config.expanded_lm_head_size is None:
            return self.lm_head
        return self.lm_head[-1]

    def set_output_embeddings(self, new_embeddings):
        # mirror get_output_embeddings: only replace the final projection when
        # the head is a Sequential
        if isinstance(self.lm_head, nn.Sequential):
            self.lm_head[-1] = new_embeddings
        else:
            self.lm_head = new_embeddings

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
    ):
        token_type_ids = kwargs.get("token_type_ids", None)
        # only use the last token of input_ids if a cache is passed
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None

        # if `inputs_embeds` are passed, only use them in the first generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "position_ids": position_ids,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids,
            }
        )
        return model_inputs

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # move labels to the logits device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
            )

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            cross_attentions=transformer_outputs.cross_attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(
                past_state.index_select(0, beam_idx.to(past_state.device))
                for past_state in layer_past
            )
            for layer_past in past_key_values
        )


class VTMModelForCausalLM(NanoModelForCausalLM):
    _tied_weights_keys = ["lm_head.3.weight"]

    def __init__(self, config):
        super().__init__(config)


class VTMPreProjModelForCausalLM(NanoModelForCausalLM):
    _tied_weights_keys = ["lm_head.3.weight"]

    def __init__(self, config):
        super().__init__(config)


class PlusModelForCausalLM(NanoModelForCausalLM):
    _tied_weights_keys = ["lm_head.1.weight"]

    def __init__(self, config):
        super().__init__(config)
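
End to end, AutoModelForCausalLM resolves to PlusModelForCausalLM through the auto_map in config.json. A minimal usage sketch, assuming a placeholder repo id and that a tokenizer is published alongside these files (no tokenizer appears in this commit):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "<user>/nano"  # placeholder; this commit does not name the repo or include a tokenizer
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, trust_remote_code=True
)  # resolves to PlusModelForCausalLM via auto_map

inputs = tok("Hello", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=20)
print(tok.decode(out[0], skip_special_tokens=True))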