| """ | |
| Torch 2.0 Optimized Resampler - Compatible with InstantID weights | |
| """ | |
| import math | |
| import torch | |
| import torch.nn as nn | |
| import torch.nn.functional as F | |


def FeedForward(dim, mult=4):
    inner_dim = int(dim * mult)
    return nn.Sequential(
        nn.LayerNorm(dim),
        nn.Linear(dim, inner_dim, bias=False),
        nn.GELU(),
        nn.Linear(inner_dim, dim, bias=False),
    )


def reshape_tensor(x, heads):
    """Split (bs, length, heads * dim_head) into per-head layout (bs, heads, length, dim_head)."""
    bs, length, width = x.shape
    x = x.view(bs, length, heads, -1)
    x = x.transpose(1, 2)
    x = x.reshape(bs, heads, length, -1)
    return x


class PerceiverAttentionTorch2(nn.Module):
    """Perceiver attention with torch 2.0 optimizations."""

    def __init__(self, *, dim, dim_head=64, heads=8):
        super().__init__()
        self.scale = dim_head**-0.5
        self.dim_head = dim_head
        self.heads = heads
        inner_dim = dim_head * heads

        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)

        # Queries come from the latents; keys/values from the inputs + latents.
        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

        # True on torch >= 2.0, where the fused SDPA kernel is available.
        self.use_torch2 = hasattr(F, "scaled_dot_product_attention")

    def forward(self, x, latents):
        x = self.norm1(x)
        latents = self.norm2(latents)

        b, l, _ = latents.shape

        q = self.to_q(latents)
        # Keys/values attend over the input tokens and the latents jointly.
        kv_input = torch.cat((x, latents), dim=-2)
        k, v = self.to_kv(kv_input).chunk(2, dim=-1)

        q = reshape_tensor(q, self.heads)
        k = reshape_tensor(k, self.heads)
        v = reshape_tensor(v, self.heads)

        if self.use_torch2:
            # SDPA already scales by 1/sqrt(dim_head) (== self.scale); passing
            # scale= explicitly requires torch >= 2.1, so rely on the default here.
            out = F.scaled_dot_product_attention(
                q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False
            )
        else:
            # Manual fallback: split the 1/sqrt(dim_head) factor across q and k
            # and softmax in float32 for numerical stability.
            scale = 1 / math.sqrt(math.sqrt(self.dim_head))
            weight = (q * scale) @ (k * scale).transpose(-2, -1)
            weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
            out = weight @ v

        out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
        return self.to_out(out)
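
# NOTE: the parameter names above (norm1, norm2, to_q, to_kv, to_out) are intended
# to match the attention block of the original InstantID/IP-Adapter Resampler, so
# pretrained InstantID state dicts should load without key remapping.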


class ResamplerCompatible(nn.Module):
    """Resampler compatible with InstantID pretrained weights."""

    def __init__(self, dim=1024, depth=8, dim_head=64, heads=16, num_queries=8,
                 embedding_dim=768, output_dim=1024, ff_mult=4):
        super().__init__()
        # Learned latent queries, shared across the batch.
        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
        self.proj_in = nn.Linear(embedding_dim, dim)
        self.proj_out = nn.Linear(dim, output_dim)
        self.norm_out = nn.LayerNorm(output_dim)

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                PerceiverAttentionTorch2(dim=dim, dim_head=dim_head, heads=heads),
                FeedForward(dim=dim, mult=ff_mult),
            ]))

    def forward(self, x):
        latents = self.latents.repeat(x.size(0), 1, 1)
        x = self.proj_in(x)
        for attn, ff in self.layers:
            latents = attn(x, latents) + latents
            latents = ff(latents) + latents
        latents = self.proj_out(latents)
        return self.norm_out(latents)
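
# Shape walkthrough with the defaults above (dim=1024, num_queries=8,
# embedding_dim=768, output_dim=1024):
#   x:       (batch, n_tokens, 768)  --proj_in-->  (batch, n_tokens, 1024)
#   latents: (1, 8, 1024) broadcast to (batch, 8, 1024)
#   depth x (attention + feed-forward, each with a residual connection)
#   output:  --proj_out, norm_out-->  (batch, 8, 1024)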


def create_compatible_resampler(num_queries=4, embedding_dim=512, output_dim=2048,
                                device="cuda", dtype=torch.float16, quality_mode="balanced"):
    """Create a Resampler compatible with InstantID weights.

    Note: quality_mode is currently unused.
    """
    resampler = ResamplerCompatible(
        dim=1024, depth=8, dim_head=64, heads=16, num_queries=num_queries,
        embedding_dim=embedding_dim, output_dim=output_dim, ff_mult=4
    )
    return resampler.to(device, dtype=dtype)


Resampler = ResamplerCompatible

print("[OK] Compatible Resampler with Torch 2.0 loaded")