norm not needed when reusing attention in lookvit

commit ec6c48b8ff
parent 547bf94d07
Author: Phil Wang
Date:   2024-07-19 10:00:03 -07:00

2 changed files with 2 additions and 2 deletions

setup.py

@@ -6,7 +6,7 @@ with open('README.md') as f:
 setup(
   name = 'vit-pytorch',
   packages = find_packages(exclude=['examples']),
-  version = '1.7.1',
+  version = '1.7.2',
   license='MIT',
   description = 'Vision Transformer (ViT) - Pytorch',
   long_description=long_description,

vit_pytorch/look_vit.py

@@ -77,7 +77,7 @@ class Attention(Module):
         self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads)
-        self.norm = LayerNorm(dim)
+        self.norm = LayerNorm(dim) if not reuse_attention else nn.Identity()
         self.attend = nn.Softmax(dim = -1)
         self.dropout = nn.Dropout(dropout)
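
For context only, a minimal self-contained sketch of the pattern this change touches. This is not the vit-pytorch LookViT source; the class name ReuseAttentionSketch and the reuse_attn forward argument are hypothetical. It illustrates how a layer constructed with reuse_attention = True consumes a precomputed attention matrix instead of deriving one from its pre-normed input, which is why the commit swaps the LayerNorm for nn.Identity in that case.

import torch
from torch import nn
from einops.layers.torch import Rearrange

class ReuseAttentionSketch(nn.Module):  # hypothetical name, illustration only
    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., reuse_attention = False):
        super().__init__()
        inner_dim = heads * dim_head
        self.scale = dim_head ** -0.5
        self.reuse_attention = reuse_attention

        self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads)
        self.merge_heads = Rearrange('b h n d -> b n (h d)')

        # the change in this commit: no pre-norm when the attention matrix is reused
        self.norm = nn.LayerNorm(dim) if not reuse_attention else nn.Identity()
        self.attend = nn.Softmax(dim = -1)
        self.dropout = nn.Dropout(dropout)

        # query / key projections only exist when attention is computed from scratch
        self.to_q = nn.Linear(dim, inner_dim, bias = False) if not reuse_attention else None
        self.to_k = nn.Linear(dim, inner_dim, bias = False) if not reuse_attention else None
        self.to_v = nn.Linear(dim, inner_dim, bias = False)
        self.to_out = nn.Linear(inner_dim, dim, bias = False)

    def forward(self, x, reuse_attn = None):
        x = self.norm(x)
        v = self.split_heads(self.to_v(x))

        if self.reuse_attention:
            # precomputed attention weights of shape (batch, heads, n, n)
            assert reuse_attn is not None
            attn = reuse_attn
        else:
            q = self.split_heads(self.to_q(x))
            k = self.split_heads(self.to_k(x))
            sim = torch.einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
            attn = self.attend(sim)

        attn = self.dropout(attn)
        out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
        return self.to_out(self.merge_heads(out))

Usage, under the same assumptions: attention weights produced by a regular layer (reuse_attention = False) would be passed as reuse_attn to a layer built with reuse_attention = True, so the second layer only projects values and applies the shared weights.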