refine repo structure

nttstar
2020-11-06 13:59:21 +08:00
parent 9fc3cc9c0b
commit b774d6a1b7
309 changed files with 24974 additions and 34253 deletions


@@ -32,7 +32,9 @@ class MarginSoftmax(nn.Module):
     def forward(self, cosine, label):
         index = torch.where(label != -1)[0]
-        m_hot = torch.zeros(index.size()[0], cosine.size()[1], device=cosine.device)
+        m_hot = torch.zeros(index.size()[0],
+                            cosine.size()[1],
+                            device=cosine.device)
         m_hot.scatter_(1, label[index, None], self.m)
         cosine[index] -= m_hot
         ret = cosine * self.s
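The hunk above only rewraps arguments; the logic is an additive-margin softmax: subtract the margin m from the target-class cosine of each valid sample, then scale everything by s. A minimal standalone sketch of that step, using the s=64.0, m=0.4 values that appear later in this diff (toy shapes, not the repo's full module):

```python
import torch

s, m = 64.0, 0.4
cosine = torch.rand(4, 10) * 2 - 1     # toy cosine logits in [-1, 1]
label = torch.tensor([3, -1, 7, 0])    # -1 marks samples with no local class

index = torch.where(label != -1)[0]
m_hot = torch.zeros(index.size()[0], cosine.size()[1])
m_hot.scatter_(1, label[index, None], m)  # write m into each target column
cosine[index] -= m_hot                    # penalize only the target logit
ret = cosine * s                          # rescale for a usable softmax range
```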
@@ -41,16 +43,24 @@ class MarginSoftmax(nn.Module):
 # .......
 def main(local_rank, world_size, init_method='tcp://127.0.0.1:23499'):
-    dist.init_process_group(backend='nccl', init_method=init_method, rank=local_rank, world_size=world_size)
+    dist.init_process_group(backend='nccl',
+                            init_method=init_method,
+                            rank=local_rank,
+                            world_size=world_size)
     cfg.local_rank = local_rank
     torch.cuda.set_device(local_rank)
     cfg.rank = dist.get_rank()
     cfg.world_size = world_size
     trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
-    train_sampler = torch.utils.data.distributed.DistributedSampler(trainset, shuffle=True)
+    train_sampler = torch.utils.data.distributed.DistributedSampler(
+        trainset, shuffle=True)
     train_loader = DataLoaderX(local_rank=local_rank,
-        dataset=trainset, batch_size=cfg.batch_size, sampler=train_sampler,
-        num_workers=0, pin_memory=True, drop_last=False)
+                               dataset=trainset,
+                               batch_size=cfg.batch_size,
+                               sampler=train_sampler,
+                               num_workers=0,
+                               pin_memory=True,
+                               drop_last=False)
     backbone = backbones.iresnet100(False).to(local_rank)
     backbone.train()
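For reference, main() expects one process per GPU, all joining the same TCP rendezvous. A hypothetical launcher (not part of this commit), assuming main is importable:

```python
import torch
import torch.multiprocessing as mp

if __name__ == '__main__':
    world_size = torch.cuda.device_count()
    # mp.spawn passes the process index as the first argument, which
    # main() uses as local_rank; init_method keeps its default.
    mp.spawn(main, args=(world_size,), nprocs=world_size)
```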
@@ -61,28 +71,31 @@ def main(local_rank, world_size, init_method='tcp://127.0.0.1:23499'):
     # DDP
     backbone = torch.nn.parallel.DistributedDataParallel(
-        module=backbone,
-        broadcast_buffers=False,
-        device_ids=[dist.get_rank()])
+        module=backbone, broadcast_buffers=False, device_ids=[dist.get_rank()])
     backbone.train()
     # Memory classifier
-    dist_sample_classifer = DistSampleClassifier(
-        rank=dist.get_rank(),
-        local_rank=local_rank,
-        world_size=world_size)
+    dist_sample_classifer = DistSampleClassifier(rank=dist.get_rank(),
+                                                 local_rank=local_rank,
+                                                 world_size=world_size)
     # Margin softmax
     margin_softmax = MarginSoftmax(s=64.0, m=0.4)
     # Optimizer for backbone and classifier
-    optimizer = SGD([
-        {'params': backbone.parameters()},
-        {'params': dist_sample_classifer.parameters()}
-    ], lr=0.1, momentum=0.9, weight_decay=cfg.weight_decay, rescale=world_size)
+    optimizer = SGD([{
+        'params': backbone.parameters()
+    }, {
+        'params': dist_sample_classifer.parameters()
+    }],
+                    lr=0.1,
+                    momentum=0.9,
+                    weight_decay=cfg.weight_decay,
+                    rescale=world_size)
     # Lr scheduler
-    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=cfg.lr_func)
+    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
+                                                  lr_lambda=cfg.lr_func)
     n_epochs = cfg.num_epoch
     start_epoch = 0
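cfg.lr_func is defined elsewhere in the repo; LambdaLR only requires some epoch -> multiplier callable, since scheduler.step() runs once per epoch below. An illustrative step-decay lambda (an assumption for illustration, not the repo's actual schedule):

```python
def lr_step_func(epoch):
    # Multiply the base lr (0.1) by 0.1 at each milestone epoch passed.
    return 0.1 ** len([e for e in [8, 14, 20] if e <= epoch])
```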
@@ -94,12 +107,16 @@ def main(local_rank, world_size, init_method='tcp://127.0.0.1:23499'):
         train_sampler.set_epoch(epoch)
         for step, (img, label) in enumerate(train_loader):
             start = time.time()
-            total_label, norm_weight = dist_sample_classifer.prepare(label, optimizer)
+            total_label, norm_weight = dist_sample_classifer.prepare(
+                label, optimizer)
             features = F.normalize(backbone(img))
             # Features all-gather
-            total_features = torch.zeros(features.size()[0] * world_size, cfg.embedding_size, device=local_rank)
-            dist.all_gather(list(total_features.chunk(world_size, dim=0)), features.data)
+            total_features = torch.zeros(features.size()[0] * world_size,
+                                         cfg.embedding_size,
+                                         device=local_rank)
+            dist.all_gather(list(total_features.chunk(world_size, dim=0)),
+                            features.data)
             total_features.requires_grad = True
             # Calculate logits
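A note on the all-gather above: chunk() returns views into total_features, so dist.all_gather writes every rank's embeddings into one contiguous buffer in place; requires_grad is then enabled on the gathered copy so the logit computation can backpropagate into it. A self-contained sketch with a hypothetical helper name:

```python
import torch
import torch.distributed as dist

def gather_features(features, world_size, embedding_size, device):
    total = torch.zeros(features.size(0) * world_size,
                        embedding_size, device=device)
    # chunk() yields world_size views into `total`; all_gather fills them.
    dist.all_gather(list(total.chunk(world_size, dim=0)), features.data)
    total.requires_grad = True  # gradients accrue on the gathered copy
    return total
```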
@@ -121,7 +138,9 @@ def main(local_rank, world_size, init_method='tcp://127.0.0.1:23499'):
             # Get one-hot
             grad = logits_exp
             index = torch.where(total_label != -1)[0]
-            one_hot = torch.zeros(index.size()[0], grad.size()[1], device=grad.device)
+            one_hot = torch.zeros(index.size()[0],
+                                  grad.size()[1],
+                                  device=grad.device)
             one_hot.scatter_(1, total_label[index, None], 1)
             # Calculate loss
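The surrounding hunk builds the softmax cross-entropy gradient by hand: with grad initialized to the softmax probabilities (logits_exp), subtracting the one-hot target gives d(loss)/d(logit) = softmax - one_hot. A quick autograd check of that identity on toy shapes (not repo code):

```python
import torch
import torch.nn.functional as F

logits = torch.randn(4, 10, requires_grad=True)
label = torch.randint(0, 10, (4,))
F.cross_entropy(logits, label, reduction='sum').backward()
manual = F.softmax(logits, dim=1) - F.one_hot(label, 10).float()
assert torch.allclose(logits.grad, manual, atol=1e-6)
```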
@@ -140,7 +159,8 @@ def main(local_rank, world_size, init_method='tcp://127.0.0.1:23499'):
             x_grad = torch.zeros_like(features)
             # Feature gradient reduce-scatter
-            dist.reduce_scatter(x_grad, list(total_features.grad.chunk(world_size, dim=0)))
+            dist.reduce_scatter(
+                x_grad, list(total_features.grad.chunk(world_size, dim=0)))
             # Backward backbone
             features.backward(x_grad)
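reduce_scatter is the adjoint of the earlier all_gather: every rank holds a gradient for the full gathered batch, and each rank receives the element-wise sum of the slice covering its own samples, which is then pushed through the local backbone. A sketch with a hypothetical helper name:

```python
import torch
import torch.distributed as dist

def backward_features(features, total_features, world_size):
    x_grad = torch.zeros_like(features)
    # Sum each rank's slice of the gathered gradient across all ranks.
    dist.reduce_scatter(
        x_grad, list(total_features.grad.chunk(world_size, dim=0)))
    features.backward(x_grad)  # continue backprop through the backbone
```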
@@ -152,8 +172,10 @@ def main(local_rank, world_size, init_method='tcp://127.0.0.1:23499'):
             if cfg.rank == 0:
                 writer.add_scalar('loss', loss_v, global_step)
-                print("Speed %d samples/sec Loss %.4f Epoch: %d Global Step: %d" %
-                      ((cfg.batch_size / (time.time() - start) * world_size), loss_v, epoch, global_step))
+                print(
+                    "Speed %d samples/sec Loss %.4f Epoch: %d Global Step: %d"
+                    % ((cfg.batch_size / (time.time() - start) * world_size),
+                       loss_v, epoch, global_step))
             global_step += 1
         scheduler.step()
@@ -161,7 +183,8 @@ def main(local_rank, world_size, init_method='tcp://127.0.0.1:23499'):
         import os
         if not os.path.exists('models'):
             os.makedirs('models')
-        torch.save(backbone.module.state_dict(), "models/" + str(epoch) + 'backbone.pth')
+        torch.save(backbone.module.state_dict(),
+                   "models/" + str(epoch) + 'backbone.pth')
     dist.destroy_process_group()
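Since the checkpoint above unwraps DDP via backbone.module, it loads directly into a bare model at evaluation time. A hypothetical loading snippet (file name pattern taken from the save call above, e.g. the epoch-19 file):

```python
import torch

model = backbones.iresnet100(False)          # same constructor as above
state = torch.load('models/19backbone.pth', map_location='cpu')
model.load_state_dict(state)
model.eval()
```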