import torch
import torch.nn as nn
import torch.utils.checkpoint

from videosys.models.modules.normalization import LlamaRMSNorm


class Attention(nn.Module):
    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = False,
        qk_norm: bool = False,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
        norm_layer: nn.Module = LlamaRMSNorm,
        enable_flash_attn: bool = False,
        rope=None,
        qk_norm_legacy: bool = False,
    ) -> None:
        super().__init__()
        assert dim % num_heads == 0, "dim should be divisible by num_heads"
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim**-0.5
        self.enable_flash_attn = enable_flash_attn

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.qk_norm_legacy = qk_norm_legacy
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.rope = False
        if rope is not None:
            self.rope = True
            self.rotary_emb = rope

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        B, N, C = x.shape

        # Use the flash-attention kernel only when the sequence length exceeds
        # the batch size; otherwise fall back to the plain PyTorch path below.
        enable_flash_attn = self.enable_flash_attn and (N > B)
        qkv = self.qkv(x)
        qkv_shape = (B, N, 3, self.num_heads, self.head_dim)

        qkv = qkv.view(qkv_shape).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # each is (B, num_heads, N, head_dim)
        if self.qk_norm_legacy:
            # Legacy ordering: rotary embedding first, then QK normalization.
            if self.rope:
                q = self.rotary_emb(q)
                k = self.rotary_emb(k)
            q, k = self.q_norm(q), self.k_norm(k)
        else:
            # Default ordering: QK normalization first, then rotary embedding.
            q, k = self.q_norm(q), self.k_norm(k)
            if self.rope:
                q = self.rotary_emb(q)
                k = self.rotary_emb(k)

        if enable_flash_attn:
            from flash_attn import flash_attn_func

            # flash_attn_func expects (B, N, num_heads, head_dim).
            q = q.permute(0, 2, 1, 3)
            k = k.permute(0, 2, 1, 3)
            v = v.permute(0, 2, 1, 3)
            x = flash_attn_func(
                q,
                k,
                v,
                dropout_p=self.attn_drop.p if self.training else 0.0,
                softmax_scale=self.scale,
            )
        else:
            dtype = q.dtype
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)  # (B, num_heads, N, N)
            attn = attn.to(torch.float32)  # softmax in fp32 for stability
            attn = attn.softmax(dim=-1)
            attn = attn.to(dtype)
            attn = self.attn_drop(attn)
            x = attn @ v

        x_output_shape = (B, N, C)
        if not enable_flash_attn:
            x = x.transpose(1, 2)
        x = x.reshape(x_output_shape)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class MultiHeadCrossAttention(nn.Module):
    def __init__(self, d_model, num_heads, attn_drop=0.0, proj_drop=0.0):
        super().__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"

        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads

        self.q_linear = nn.Linear(d_model, d_model)
        self.kv_linear = nn.Linear(d_model, d_model * 2)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(d_model, d_model)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, cond, mask=None):
        # x: query tokens of shape (B, N, C); cond: conditioning tokens;
        # mask: per-sample lengths of the conditioning sequences.
        B, N, C = x.shape

        # Flatten the batch into one long sequence so xformers can handle
        # variable-length conditioning with a block-diagonal attention bias.
        q = self.q_linear(x).view(1, -1, self.num_heads, self.head_dim)
        kv = self.kv_linear(cond).view(1, -1, 2, self.num_heads, self.head_dim)
        k, v = kv.unbind(2)

        attn_bias = None

        import xformers.ops

        if mask is not None:
            # Each sample's N queries attend only to its own conditioning tokens.
            attn_bias = xformers.ops.fmha.BlockDiagonalMask.from_seqlens([N] * B, mask)
        x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias)

        x = x.view(B, -1, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
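

# A minimal usage sketch (illustrative, not part of the library API): it runs
# Attention on its pure-PyTorch fallback path, so neither flash-attn nor
# xformers is required. The shapes below are arbitrary assumptions.
if __name__ == "__main__":
    attn = Attention(dim=64, num_heads=8, enable_flash_attn=False)
    tokens = torch.randn(2, 16, 64)  # (batch, tokens, channels)
    out = attn(tokens)
    print(out.shape)  # expected: torch.Size([2, 16, 64])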