PyTorch Lightning 1.1: research: CIFAR10 (ResNeXt-29)
Author: ClassCat Co., Ltd. Sales Information
Date: 02/22/2021 (1.1.x)
* This page reports the results of experiments carried out with reference to the following resources:
* Feel free to link to this page, but we would appreciate a note to sales-info@classcat.com.
research: CIFAR10 (ResNeXt-29)
Results
150 epochs: ReduceLROnPlateau
- ResNeXt-29 (2x64d) – {'test_acc': 0.9379000067710876, 'test_loss': 0.20406362414360046} – Wall time: 3h 45min 5s
Code
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
'''Grouped convolution block.'''
expansion = 2
def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
super(Block, self).__init__()
group_width = cardinality * bottleneck_width
self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(group_width)
self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn2 = nn.BatchNorm2d(group_width)
self.conv3 = nn.Conv2d(group_width, self.expansion*group_width, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*group_width)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*group_width:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*group_width, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*group_width)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNeXt(nn.Module):
def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
super(ResNeXt, self).__init__()
self.cardinality = cardinality
self.bottleneck_width = bottleneck_width
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(num_blocks[0], 1)
self.layer2 = self._make_layer(num_blocks[1], 2)
self.layer3 = self._make_layer(num_blocks[2], 2)
# self.layer4 = self._make_layer(num_blocks[3], 2)
self.linear = nn.Linear(cardinality*bottleneck_width*8, num_classes)
def _make_layer(self, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, stride))
self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        # Double bottleneck_width after each stage.
self.bottleneck_width *= 2
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
# out = self.layer4(out)
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNeXt29_2x64d():
return ResNeXt(num_blocks=[3,3,3], cardinality=2, bottleneck_width=64)
def ResNeXt29_4x64d():
return ResNeXt(num_blocks=[3,3,3], cardinality=4, bottleneck_width=64)
def ResNeXt29_8x64d():
return ResNeXt(num_blocks=[3,3,3], cardinality=8, bottleneck_width=64)
def ResNeXt29_32x4d():
return ResNeXt(num_blocks=[3,3,3], cardinality=32, bottleneck_width=4)
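These factory functions follow the paper's CxWd naming convention (cardinality x bottleneck width). The "29" presumably counts weighted layers: 3 stages x 3 blocks x 3 convolutions per Block, plus the stem convolution and the final linear layer (3 x 3 x 3 + 2 = 29).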
net = ResNeXt29_2x64d()
print(net)
x = torch.randn(1, 3, 32, 32)
y = net(x)
print(y.size())
ResNeXt(
(conv1): Conv2d(3, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(layer1): Sequential(
(0): Block(
(conv1): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2, bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential(
(0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Block(
(conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2, bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
(2): Block(
(conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2, bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
)
(layer2): Sequential(
(0): Block(
(conv1): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=2, bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential(
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Block(
(conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2, bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
(2): Block(
(conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2, bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
)
(layer3): Sequential(
(0): Block(
(conv1): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=2, bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential(
(0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): Block(
(conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2, bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
(2): Block(
(conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2, bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
)
(linear): Linear(in_features=1024, out_features=10, bias=True)
)
torch.Size([1, 10])
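The 1x10 output confirms the forward pass. To see where the head's in_features = cardinality * bottleneck_width * 8 (= 2 * 64 * 8 = 1024) comes from, the per-stage shapes can be traced; a minimal sketch reusing the net and x built above:

# Channels double at each stage while layer2/layer3 halve the spatial size;
# F.avg_pool2d(out, 8) then collapses the final 8x8 map to 1x1.
out = F.relu(net.bn1(net.conv1(x)))
for name in ['layer1', 'layer2', 'layer3']:
    out = getattr(net, name)(out)
    print(name, tuple(out.shape))
# layer1 (1, 256, 32, 32) -> layer2 (1, 512, 16, 16) -> layer3 (1, 1024, 8, 8)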
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
device(type='cuda')
from torchsummary import summary
summary(ResNeXt29_2x64d().to('cuda'), (3, 32, 32))
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 64, 32, 32] 192
BatchNorm2d-2 [-1, 64, 32, 32] 128
Conv2d-3 [-1, 128, 32, 32] 8,192
BatchNorm2d-4 [-1, 128, 32, 32] 256
Conv2d-5 [-1, 128, 32, 32] 73,728
BatchNorm2d-6 [-1, 128, 32, 32] 256
Conv2d-7 [-1, 256, 32, 32] 32,768
BatchNorm2d-8 [-1, 256, 32, 32] 512
Conv2d-9 [-1, 256, 32, 32] 16,384
BatchNorm2d-10 [-1, 256, 32, 32] 512
Block-11 [-1, 256, 32, 32] 0
Conv2d-12 [-1, 128, 32, 32] 32,768
BatchNorm2d-13 [-1, 128, 32, 32] 256
Conv2d-14 [-1, 128, 32, 32] 73,728
BatchNorm2d-15 [-1, 128, 32, 32] 256
Conv2d-16 [-1, 256, 32, 32] 32,768
BatchNorm2d-17 [-1, 256, 32, 32] 512
Block-18 [-1, 256, 32, 32] 0
Conv2d-19 [-1, 128, 32, 32] 32,768
BatchNorm2d-20 [-1, 128, 32, 32] 256
Conv2d-21 [-1, 128, 32, 32] 73,728
BatchNorm2d-22 [-1, 128, 32, 32] 256
Conv2d-23 [-1, 256, 32, 32] 32,768
BatchNorm2d-24 [-1, 256, 32, 32] 512
Block-25 [-1, 256, 32, 32] 0
Conv2d-26 [-1, 256, 32, 32] 65,536
BatchNorm2d-27 [-1, 256, 32, 32] 512
Conv2d-28 [-1, 256, 16, 16] 294,912
BatchNorm2d-29 [-1, 256, 16, 16] 512
Conv2d-30 [-1, 512, 16, 16] 131,072
BatchNorm2d-31 [-1, 512, 16, 16] 1,024
Conv2d-32 [-1, 512, 16, 16] 131,072
BatchNorm2d-33 [-1, 512, 16, 16] 1,024
Block-34 [-1, 512, 16, 16] 0
Conv2d-35 [-1, 256, 16, 16] 131,072
BatchNorm2d-36 [-1, 256, 16, 16] 512
Conv2d-37 [-1, 256, 16, 16] 294,912
BatchNorm2d-38 [-1, 256, 16, 16] 512
Conv2d-39 [-1, 512, 16, 16] 131,072
BatchNorm2d-40 [-1, 512, 16, 16] 1,024
Block-41 [-1, 512, 16, 16] 0
Conv2d-42 [-1, 256, 16, 16] 131,072
BatchNorm2d-43 [-1, 256, 16, 16] 512
Conv2d-44 [-1, 256, 16, 16] 294,912
BatchNorm2d-45 [-1, 256, 16, 16] 512
Conv2d-46 [-1, 512, 16, 16] 131,072
BatchNorm2d-47 [-1, 512, 16, 16] 1,024
Block-48 [-1, 512, 16, 16] 0
Conv2d-49 [-1, 512, 16, 16] 262,144
BatchNorm2d-50 [-1, 512, 16, 16] 1,024
Conv2d-51 [-1, 512, 8, 8] 1,179,648
BatchNorm2d-52 [-1, 512, 8, 8] 1,024
Conv2d-53 [-1, 1024, 8, 8] 524,288
BatchNorm2d-54 [-1, 1024, 8, 8] 2,048
Conv2d-55 [-1, 1024, 8, 8] 524,288
BatchNorm2d-56 [-1, 1024, 8, 8] 2,048
Block-57 [-1, 1024, 8, 8] 0
Conv2d-58 [-1, 512, 8, 8] 524,288
BatchNorm2d-59 [-1, 512, 8, 8] 1,024
Conv2d-60 [-1, 512, 8, 8] 1,179,648
BatchNorm2d-61 [-1, 512, 8, 8] 1,024
Conv2d-62 [-1, 1024, 8, 8] 524,288
BatchNorm2d-63 [-1, 1024, 8, 8] 2,048
Block-64 [-1, 1024, 8, 8] 0
Conv2d-65 [-1, 512, 8, 8] 524,288
BatchNorm2d-66 [-1, 512, 8, 8] 1,024
Conv2d-67 [-1, 512, 8, 8] 1,179,648
BatchNorm2d-68 [-1, 512, 8, 8] 1,024
Conv2d-69 [-1, 1024, 8, 8] 524,288
BatchNorm2d-70 [-1, 1024, 8, 8] 2,048
Block-71 [-1, 1024, 8, 8] 0
Linear-72 [-1, 10] 10,250
================================================================
Total params: 9,128,778
Trainable params: 9,128,778
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.01
Forward/backward pass size (MB): 65.00
Params size (MB): 34.82
Estimated Total Size (MB): 99.84
----------------------------------------------------------------
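As a cross-check on the table above, a grouped convolution holds (in_channels / groups) * out_channels * k * k weights, which is how cardinality trades parameters for width. For example, Conv2d-5 is the first Block's grouped 3x3 convolution (128 -> 128 channels, groups=2, no bias):

# (in_ch // groups) * out_ch * k * k for the grouped 3x3 conv of the first Block
in_ch, out_ch, k, groups = 128, 128, 3, 2
print((in_ch // groups) * out_ch * k * k)   # 73728 -- matches "Conv2d-5 ... 73,728"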
ReduceLROnPlateau scheduler
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import OneCycleLR, CyclicLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau
from torch.optim.swa_utils import AveragedModel, update_bn

import torchvision

import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, GPUStatsMonitor, EarlyStopping
from pytorch_lightning.metrics.functional import accuracy

from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
pl.seed_everything(7);
batch_size = 50
train_transforms = torchvision.transforms.Compose([
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
cifar10_normalization(),
])
test_transforms = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
cifar10_normalization(),
])
cifar10_dm = CIFAR10DataModule(
batch_size=batch_size,
train_transforms=train_transforms,
test_transforms=test_transforms,
val_transforms=test_transforms,
)
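cifar10_normalization() from pl_bolts supplies the standard per-channel CIFAR10 statistics. If pl_bolts is unavailable, a plain torchvision Normalize can stand in; the constants below are the commonly used CIFAR10 mean/std and are an assumption about the pl_bolts values, not taken from this report:

# Assumed stand-in for cifar10_normalization() (hypothetical constants)
normalize = torchvision.transforms.Normalize(
    mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
    std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
)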
class LitCifar10(pl.LightningModule):
def __init__(self, lr=0.05, factor=0.8):
super().__init__()
self.save_hyperparameters()
self.model = ResNeXt29_2x64d()
def forward(self, x):
out = self.model(x)
return F.log_softmax(out, dim=1)
def training_step(self, batch, batch_idx):
x, y = batch
logits = F.log_softmax(self.model(x), dim=1)
loss = F.nll_loss(logits, y)
self.log('train_loss', loss)
return loss
def evaluate(self, batch, stage=None):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
if stage:
self.log(f'{stage}_loss', loss, prog_bar=True)
self.log(f'{stage}_acc', acc, prog_bar=True)
def validation_step(self, batch, batch_idx):
self.evaluate(batch, 'val')
def test_step(self, batch, batch_idx):
self.evaluate(batch, 'test')
def configure_optimizers(self):
        # A disabled Adam alternative; the run reported here used SGD with momentum:
        # optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.lr, weight_decay=0, eps=1e-3)
        optimizer = torch.optim.SGD(self.parameters(), lr=self.hparams.lr, momentum=0.9, weight_decay=5e-4)
return {
'optimizer': optimizer,
'lr_scheduler': ReduceLROnPlateau(optimizer, 'max', patience=4, factor=self.hparams.factor, verbose=True, threshold=0.0001, threshold_mode='abs', cooldown=1, min_lr=1e-5),
'monitor': 'val_acc'
}
    # Disabled alternative kept from earlier experiments (rename to
    # configure_optimizers to enable): per-step LR schedulers.
    def xconfigure_optimizers(self):
optimizer = torch.optim.SGD(self.parameters(), lr=self.hparams.lr, momentum=0.9, weight_decay=5e-4)
steps_per_epoch = 45000 // batch_size
scheduler_dict = {
#'scheduler': ExponentialLR(optimizer, gamma=0.1),
#'interval': 'epoch',
'scheduler': OneCycleLR(optimizer, max_lr=0.1, pct_start=0.2, epochs=self.trainer.max_epochs, steps_per_epoch=steps_per_epoch),
#'scheduler': CyclicLR(optimizer, base_lr=0.001, max_lr=0.1, step_size_up=steps_per_epoch*2, mode="triangular2"),
#'scheduler': CyclicLR(optimizer, base_lr=0.001, max_lr=0.1, step_size_up=steps_per_epoch, mode="exp_range", gamma=0.85),
#'scheduler': CosineAnnealingLR(optimizer, T_max=200),
'interval': 'step',
}
return {'optimizer': optimizer, 'lr_scheduler': scheduler_dict}
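To make the plateau settings concrete: with mode 'max', patience=4, threshold_mode='abs' and cooldown=1, the learning rate is multiplied by factor=0.8 once val_acc has failed to improve by more than 0.0001 for five consecutive epochs, then held for one cooldown epoch, which is consistent with the at-least-six-epoch gaps between the reductions logged below. A standalone sketch of the mechanics with a toy optimizer and fabricated metric values (not the training run itself), reusing the imports above:

# Toy demo: the metric plateaus, so the LR drops after the patience window.
opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.05)
sched = ReduceLROnPlateau(opt, 'max', patience=4, factor=0.8,
                          threshold=0.0001, threshold_mode='abs', cooldown=1)
for epoch in range(8):
    sched.step(0.90)  # val_acc never improves
    print(epoch, opt.param_groups[0]['lr'])
# prints lr=0.05 for epochs 0-4, then 0.04 (= 0.05 * 0.8) from epoch 5

The disabled xconfigure_optimizers keeps a per-step OneCycleLR alternative; since it is stepped every batch ('interval': 'step'), its schedule can be previewed offline the same way:

# Preview of the unused OneCycleLR alternative: warm up over the first 20% of
# steps to max_lr=0.1, then anneal (150 epochs * 900 steps/epoch).
opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.05)
sched = OneCycleLR(opt, max_lr=0.1, pct_start=0.2, epochs=150, steps_per_epoch=900)
lrs = [opt.param_groups[0]['lr']]
for _ in range(150 * 900 - 1):
    opt.step()
    sched.step()
    lrs.append(opt.param_groups[0]['lr'])
print(lrs[0], max(lrs), lrs[-1])  # small warmup LR, ~0.1 peak, tiny final LR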
%%time
model = LitCifar10(lr=0.05, factor=0.8)
model.datamodule = cifar10_dm
trainer = pl.Trainer(
gpus=1,
max_epochs=150,
auto_scale_batch_size=True,
    auto_lr_find=True,
progress_bar_refresh_rate=100,
logger=pl.loggers.TensorBoardLogger('tblogs/', name='resnext29_2x64d'),
callbacks=[LearningRateMonitor(logging_interval='step')],
)
trainer.fit(model, cifar10_dm)
trainer.test(model, datamodule=cifar10_dm);
GPU available: True, used: True
TPU available: None, using: 0 TPU cores
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to /content/cifar-10-python.tar.gz
170500096/? [00:20<00:00, 33247174.37it/s]
Extracting /content/cifar-10-python.tar.gz to /content
Files already downloaded and verified
| Name | Type | Params
----------------------------------
0 | model | ResNeXt | 9.1 M
----------------------------------
9.1 M Trainable params
0 Non-trainable params
9.1 M Total params
36.515 Total estimated model params size (MB)
(...)
Epoch 23: reducing learning rate of group 0 to 4.0000e-02.
Epoch 40: reducing learning rate of group 0 to 3.2000e-02.
Epoch 50: reducing learning rate of group 0 to 2.5600e-02.
Epoch 62: reducing learning rate of group 0 to 2.0480e-02.
Epoch 68: reducing learning rate of group 0 to 1.6384e-02.
Epoch 76: reducing learning rate of group 0 to 1.3107e-02.
Epoch 83: reducing learning rate of group 0 to 1.0486e-02.
Epoch 89: reducing learning rate of group 0 to 8.3886e-03.
Epoch 97: reducing learning rate of group 0 to 6.7109e-03.
Epoch 110: reducing learning rate of group 0 to 5.3687e-03.
Epoch 128: reducing learning rate of group 0 to 4.2950e-03.
Epoch 134: reducing learning rate of group 0 to 3.4360e-03.
Epoch 140: reducing learning rate of group 0 to 2.7488e-03.
Epoch 146: reducing learning rate of group 0 to 2.1990e-03.
(...)
--------------------------------------------------------------------------------
DATALOADER:0 TEST RESULTS
{'test_acc': 0.9379000067710876, 'test_loss': 0.20406362414360046}
--------------------------------------------------------------------------------
CPU times: user 2h 28min 40s, sys: 1h 10min 53s, total: 3h 39min 34s
Wall time: 3h 45min 5s
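One caveat, based on our reading of the Lightning 1.1 API rather than anything in this report: auto_scale_batch_size and auto_lr_find only take effect through a tuning pass, which this run never performed, so lr=0.05 and batch_size=50 were used exactly as given. The tuning pass would look like:

# Not executed in this experiment; shown only to indicate how the two
# auto_* Trainer flags would be activated in Lightning 1.1.
trainer.tune(model, datamodule=cifar10_dm)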



That is all.