|
from torch import nn |
|
|
|
|
|
class DropoutNet(nn.Module):
    """Five-stage convolutional classifier with per-stage 2D dropout.

    Each stage is Conv2d(kernel 5, stride 1, padding 2) -> BatchNorm2d ->
    ReLU -> Dropout2d(p=0.1).  With kernel 5 / padding 2 / stride 1 the
    spatial size is preserved, so a (N, 1, 28, 28) input stays 28x28 through
    every stage and the flattened feature vector has 256 * 28 * 28 elements
    (this is what the first Linear layer hard-codes).  The classifier head
    narrows 256 -> 128 -> 64 -> 32 -> 16 with ReLU between layers and emits
    4 raw class scores (logits, no final activation).
    """

    @staticmethod
    def _conv_stage(in_channels, out_channels):
        """Build one spatial-size-preserving Conv->BN->ReLU->Dropout2d stage."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout2d(0.1),
        )

    def __init__(self):
        super().__init__()
        # Channel progression across the five stages: 1 -> 16 -> 32 -> 64 -> 128 -> 256.
        self.layer1 = self._conv_stage(1, 16)
        self.layer2 = self._conv_stage(16, 32)
        self.layer3 = self._conv_stage(32, 64)
        self.layer4 = self._conv_stage(64, 128)
        self.layer5 = self._conv_stage(128, 256)

        # Classifier head: flattened conv features down to 4 logits.
        # Built pairwise so the Sequential indices (fc.0, fc.2, ...) match
        # the conventional Linear/ReLU alternation.
        widths = [256 * 28 * 28, 256, 128, 64, 32, 16]
        head = []
        for width_in, width_out in zip(widths, widths[1:]):
            head.append(nn.Linear(width_in, width_out))
            head.append(nn.ReLU())
        head.append(nn.Linear(16, 4))
        self.fc = nn.Sequential(*head)

    def forward(self, x):
        """Run the five conv stages, flatten per sample, and return (N, 4) logits."""
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4, self.layer5):
            x = stage(x)
        # Flatten everything but the batch dimension before the dense head.
        return self.fc(x.view(x.size(0), -1))
|
|