no need to do a projection to combine attention heads for T2T's initial one-headed attention layers

Author: Phil Wang
Date:   2021-02-24 12:23:39 -08:00
parent a82894846d
commit 6760d554aa
2 changed files with 5 additions and 2 deletions

@@ -3,7 +3,7 @@ from setuptools import setup, find_packages
 setup(
   name = 'vit-pytorch',
   packages = find_packages(exclude=['examples']),
-  version = '0.7.3',
+  version = '0.7.4',
   license='MIT',
   description = 'Vision Transformer (ViT) - Pytorch',
   author = 'Phil Wang',

@@ -37,14 +37,17 @@ class Attention(nn.Module):
     def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
         super().__init__()
         inner_dim = dim_head * heads
+        project_out = not (heads == 1 and dim_head == dim)
+
         self.heads = heads
         self.scale = dim_head ** -0.5
 
         self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
+
         self.to_out = nn.Sequential(
             nn.Linear(inner_dim, dim),
             nn.Dropout(dropout)
-        )
+        ) if project_out else nn.Identity()
 
     def forward(self, x, mask = None):
         b, n, _, h = *x.shape, self.heads
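
For context on the change: with a single head whose dim_head equals dim, the concatenated head output already has width dim, so the Linear in to_out would only be a redundant dim-to-dim map; the new project_out flag swaps it for nn.Identity(). Below is a minimal, self-contained sketch of the idea (assuming torch and einops, which the repo already depends on); the forward pass follows the standard scaled-dot-product pattern and drops the mask argument for brevity, so it is not a verbatim copy of the library's module.

# minimal sketch, not the library's exact module: mask handling omitted
import torch
from torch import nn
from einops import rearrange

class Attention(nn.Module):
    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
        super().__init__()
        inner_dim = dim_head * heads
        # a projection is only needed when the concatenated heads
        # do not already match the model dimension
        project_out = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head ** -0.5

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        ) if project_out else nn.Identity()

    def forward(self, x):
        h = self.heads
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)

        dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
        attn = dots.softmax(dim = -1)

        out = torch.matmul(attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)

# with one head and dim_head == dim, to_out is an Identity,
# yet the output shape matches the multi-head case
single_head = Attention(dim = 64, heads = 1, dim_head = 64)
multi_head  = Attention(dim = 64, heads = 8, dim_head = 64)
x = torch.randn(2, 16, 64)
assert isinstance(single_head.to_out, nn.Identity)
assert single_head(x).shape == multi_head(x).shape == (2, 16, 64)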