From ee5e4e9929086fba200b79f84ae7eff9c1bd5ab1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Long=20M=2E=20L=C6=B0u?= <55311435+minhlong94@users.noreply.github.com>
Date: Sat, 21 Nov 2020 18:23:04 +0700
Subject: [PATCH] Update vit_pytorch.py

---
 vit_pytorch/vit_pytorch.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vit_pytorch/vit_pytorch.py b/vit_pytorch/vit_pytorch.py
index 1520fe3..3bc2280 100644
--- a/vit_pytorch/vit_pytorch.py
+++ b/vit_pytorch/vit_pytorch.py
@@ -85,10 +85,10 @@ class Transformer(nn.Module):
 class ViT(nn.Module):
     def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dropout = 0., emb_dropout = 0.):
         super().__init__()
-        assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
+        assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
         num_patches = (image_size // patch_size) ** 2
         patch_dim = channels * patch_size ** 2
-        assert num_patches > MIN_NUM_PATCHES, f'your number of patches ({num_patches}) is way too small for attention to be effective. try decreasing your patch size'
+        assert num_patches > MIN_NUM_PATCHES, f'Your number of patches ({num_patches}) is too small for attention to be effective (at least 16 required). Try decreasing your patch size.'
         self.patch_size = patch_size
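
A minimal sketch of when the reworded assertions fire, assuming MIN_NUM_PATCHES is 16 (as the new message states) and that the package exposes ViT at the top level; the argument values below are purely illustrative.

    import torch
    from vit_pytorch import ViT  # assumes the top-level package export

    # num_patches = (image_size // patch_size) ** 2
    # 64 // 32 = 2, so num_patches = 4, which is not > MIN_NUM_PATCHES (16):
    # the second assert fires and prints the reworded message.
    try:
        ViT(image_size = 64, patch_size = 32, num_classes = 10,
            dim = 128, depth = 2, heads = 4, mlp_dim = 256)
    except AssertionError as err:
        print(err)

    # 256 // 32 = 8, so num_patches = 64 > 16: construction succeeds.
    model = ViT(image_size = 256, patch_size = 32, num_classes = 10,
                dim = 128, depth = 2, heads = 4, mlp_dim = 256)
    preds = model(torch.randn(1, 3, 256, 256))  # logits of shape (1, 10)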