"""
This is an implementation of VGG net.
Reference
---------
..[1]Simonyan, Karen, and Andrew Zisserman. "Very deep convolutional networks for large-scale image recognition." arXiv preprint arXiv:1409.1556 (2014).
..[2]Original implementation: https://github.com/kuangliu/pytorch-cifar
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
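# Layer configurations from [1]: each integer is the output-channel count of a
# 3x3 convolution block, and 'M' marks a 2x2 max-pooling layer that halves the
# spatial resolution.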
cfg = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
    """VGG network with batch normalization for CIFAR-10 classification.

    Parameters
    ----------
    vgg_name : str
        One of 'VGG11', 'VGG13', 'VGG16', or 'VGG19'; selects a layer
        configuration from ``cfg``.
    """
def __init__(self, vgg_name):
super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        # For 32x32 inputs, the five 2x2 max-pools reduce the feature map to
        # 512 x 1 x 1, so a single 512 -> 10 linear layer suffices as the classifier.
        self.classifier = nn.Linear(512, 10)
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
        # AvgPool2d with kernel size 1 and stride 1 is a no-op, kept to match
        # the reference implementation [2].
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
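

# Illustrative sanity check (a hypothetical helper, not part of the reference
# code): run a random CIFAR-10-sized batch through VGG11 and confirm that one
# logit per class comes out.
def _example_forward():
    model = VGG('VGG11')
    x = torch.randn(2, 3, 32, 32)  # two RGB 32x32 images, as in CIFAR-10
    out = model(x)
    assert out.shape == (2, 10)  # batch size 2, 10 class logits each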
def test(model, device, test_loader):
    """Evaluate the model on the test set and print average loss and accuracy.

    Parameters
    ----------
    model : nn.Module
        The network to evaluate.
    device : torch.device
        Device on which evaluation runs.
    test_loader : torch.utils.data.DataLoader
        Loader yielding (data, target) batches of test examples.
    """
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
            # Sum the per-example losses so that dividing by the dataset size
            # below yields the true average loss per example.
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)  # index of the max logit, i.e. the predicted class
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def train(model, device, train_loader, optimizer, epoch):
    """Train the model for one epoch, printing the loss every 10 batches.

    Parameters
    ----------
    model : nn.Module
        The network to train.
    device : torch.device
        Device on which training runs.
    train_loader : torch.utils.data.DataLoader
        Loader yielding (data, target) batches of training examples.
    optimizer : torch.optim.Optimizer
        Optimizer that updates the model parameters.
    epoch : int
        Current epoch number, used only for logging.
    """
model.train()
    # No manual learning-rate schedule is needed here, since Adam adapts
    # per-parameter step sizes on its own.
for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
        loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
        if batch_idx % 10 == 0:
            # F.cross_entropy already averages over the batch, so the loss is
            # reported as-is rather than divided by the batch size.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
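

# A minimal driver sketch, shown for illustration: the function name `main`,
# the data path './data', and all hyperparameters below are assumptions, not
# values taken from the reference code.
def main():
    import torch.optim as optim
    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Normalize with commonly used channel-wise CIFAR-10 statistics.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2470, 0.2435, 0.2616)),
    ])
    train_loader = DataLoader(
        datasets.CIFAR10('./data', train=True, download=True, transform=transform),
        batch_size=128, shuffle=True)
    test_loader = DataLoader(
        datasets.CIFAR10('./data', train=False, transform=transform),
        batch_size=256)

    model = VGG('VGG16').to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    for epoch in range(1, 11):  # 10 epochs, chosen for illustration
        train(model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)


if __name__ == '__main__':
    main()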