import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
import matplotlib.pyplot as plt

class CvBlock(nn.Module):
    '''(Conv2d => BN => ReLU) x 2'''
    def __init__(self, in_ch, out_ch):
        super(CvBlock, self).__init__()
        self.convblock = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.convblock(x)


class InputCvBlock(nn.Module):
    '''(Conv with num_in_frames groups => BN => ReLU) + (Conv => BN => ReLU)'''
    def __init__(self, num_in_frames, out_ch):
        super(InputCvBlock, self).__init__()
        self.interm_ch = 30
        self.convblock = nn.Sequential(
            # Each input frame carries 3 RGB channels plus 1 noise-map channel,
            # hence num_in_frames*(3+1) input channels, processed per frame via groups.
            nn.Conv2d(num_in_frames*(3+1), num_in_frames*self.interm_ch,
                      kernel_size=3, padding=1, groups=num_in_frames, bias=False),
            nn.BatchNorm2d(num_in_frames*self.interm_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(num_in_frames*self.interm_ch, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.convblock(x)
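
# Shape sketch (illustrative, not part of the original module): with the default
# three frames, the block expects 3*(3+1)=12 input channels, i.e. (frame, noise_map)
# pairs concatenated along dim=1:
#   blk = InputCvBlock(num_in_frames=3, out_ch=32)
#   y = blk(torch.randn(1, 12, 64, 64))   # y.shape == (1, 32, 64, 64)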


class InputCvBlock_1(nn.Module):
    '''(Conv with num_in_frames groups => BN => ReLU) + (Conv => BN => ReLU)'''
    def __init__(self, num_in_frames, out_ch):
        super(InputCvBlock_1, self).__init__()
        self.interm_ch = 30
        self.convblock = nn.Sequential(
            nn.Conv2d(num_in_frames*(3+1), num_in_frames*self.interm_ch,
                      kernel_size=3, padding=1, groups=num_in_frames, bias=False),
            nn.BatchNorm2d(num_in_frames*self.interm_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(num_in_frames*self.interm_ch, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.convblock(x)


class DownBlock(nn.Module):
    '''Downscale + (Conv2d => BN => ReLU)*2'''
    def __init__(self, in_ch, out_ch):
        super(DownBlock, self).__init__()
        self.convblock = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            CvBlock(out_ch, out_ch)
        )

    def forward(self, x):
        return self.convblock(x)
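
# Shape sketch (illustrative): the stride-2 convolution halves both spatial
# dimensions before the CvBlock refines at the lower resolution:
#   down = DownBlock(in_ch=32, out_ch=64)
#   y = down(torch.randn(1, 32, 64, 64))   # y.shape == (1, 64, 32, 32)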


class DownBlock_1(nn.Module):
    '''NAFBlock x 2 + Downscale + (Conv2d => BN => ReLU)*2'''
    def __init__(self, in_ch, out_ch):
        super(DownBlock_1, self).__init__()
        self.convblock = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            CvBlock(out_ch, out_ch)
        )

        # Two NAFBlocks (defined below) refine features at the input resolution
        # before the downscaling convolution.
        self.NAF1 = NAFBlock(in_ch)
        self.NAF2 = NAFBlock(in_ch)

    def forward(self, x):
        x = self.NAF1(x)
        x = self.NAF2(x)
        return self.convblock(x)


class UpBlock(nn.Module):
    '''(Conv2d => BN => ReLU)*2 + Upscale'''
    def __init__(self, in_ch, out_ch):
        super(UpBlock, self).__init__()
        self.convblock = nn.Sequential(
            CvBlock(in_ch, in_ch),
            nn.Conv2d(in_ch, out_ch*4, kernel_size=3, padding=1, bias=False),
            nn.PixelShuffle(2)
        )

    def forward(self, x):
        return self.convblock(x)
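
# Shape sketch (illustrative): the convolution expands to out_ch*4 channels and
# PixelShuffle(2) trades those channels for a 2x spatial upscale:
#   up = UpBlock(in_ch=64, out_ch=32)
#   y = up(torch.randn(1, 64, 32, 32))   # y.shape == (1, 32, 64, 64)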


class UpBlock_1(nn.Module):
    '''NAFBlock x 2 + (Conv2d => BN => ReLU)*2 + Upscale'''
    def __init__(self, in_ch, out_ch):
        super(UpBlock_1, self).__init__()
        self.convblock = nn.Sequential(
            CvBlock(in_ch, in_ch),
            nn.Conv2d(in_ch, out_ch*4, kernel_size=3, padding=1, bias=False),
            nn.PixelShuffle(2)
        )

        self.NAF1 = NAFBlock(in_ch)
        self.NAF2 = NAFBlock(in_ch)

    def forward(self, x):
        x = self.NAF1(x)
        x = self.NAF2(x)
        return self.convblock(x)


class OutputCvBlock(nn.Module):
    '''Conv2d => BN => ReLU => Conv2d'''
    def __init__(self, in_ch, out_ch):
        super(OutputCvBlock, self).__init__()
        self.convblock = nn.Sequential(
            nn.Conv2d(in_ch, in_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, bias=False)
        )

    def forward(self, x):
        return self.convblock(x)


class OutputCvBlock_1(nn.Module):
    '''Conv2d => BN => ReLU => Conv2d'''
    def __init__(self, in_ch, out_ch):
        super(OutputCvBlock_1, self).__init__()
        self.convblock = nn.Sequential(
            nn.Conv2d(in_ch, in_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, bias=False)
        )

    def forward(self, x):
        return self.convblock(x)


class SimpleGate(nn.Module):
    '''Splits the channels in half and multiplies the two halves elementwise.'''
    def forward(self, x):
        x1, x2 = x.chunk(2, dim=1)
        return x1 * x2
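
# Usage sketch (illustrative): chunk(2, dim=1) splits the channel dimension in
# half, so the output has half as many channels as the input:
#   gate = SimpleGate()
#   y = gate(torch.randn(2, 64, 16, 16))   # y.shape == (2, 32, 16, 16)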


class LayerNormFunction(torch.autograd.Function):
    '''LayerNorm over the channel dimension of [N, C, H, W] tensors,
    with a hand-written backward pass.'''

    @staticmethod
    def forward(ctx, x, weight, bias, eps):
        ctx.eps = eps
        N, C, H, W = x.size()
        mu = x.mean(1, keepdim=True)
        var = (x - mu).pow(2).mean(1, keepdim=True)
        y = (x - mu) / (var + eps).sqrt()
        ctx.save_for_backward(y, var, weight)
        y = weight.view(1, C, 1, 1) * y + bias.view(1, C, 1, 1)
        return y

    @staticmethod
    def backward(ctx, grad_output):
        eps = ctx.eps
        N, C, H, W = grad_output.size()
        y, var, weight = ctx.saved_tensors  # saved_variables is deprecated
        g = grad_output * weight.view(1, C, 1, 1)
        mean_g = g.mean(dim=1, keepdim=True)
        mean_gy = (g * y).mean(dim=1, keepdim=True)
        gx = 1. / torch.sqrt(var + eps) * (g - y * mean_gy - mean_g)
        return gx, (grad_output * y).sum(dim=3).sum(dim=2).sum(dim=0), \
            grad_output.sum(dim=3).sum(dim=2).sum(dim=0), None


class LayerNorm2d(nn.Module):

    def __init__(self, channels, eps=1e-6):
        super(LayerNorm2d, self).__init__()
        self.register_parameter('weight', nn.Parameter(torch.ones(channels)))
        self.register_parameter('bias', nn.Parameter(torch.zeros(channels)))
        self.eps = eps

    def forward(self, x):
        return LayerNormFunction.apply(x, self.weight, self.bias, self.eps)
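
# Equivalence sketch (illustrative): LayerNorm2d normalizes across channels at
# each spatial position, matching nn.LayerNorm applied channels-last:
#   ln = LayerNorm2d(64)
#   x = torch.randn(2, 64, 8, 8)
#   ref = torch.nn.functional.layer_norm(
#       x.permute(0, 2, 3, 1), (64,), ln.weight, ln.bias, ln.eps
#   ).permute(0, 3, 1, 2)
#   torch.allclose(ln(x), ref, atol=1e-5)   # expected: True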


class NAFBlock(nn.Module):
    def __init__(self, c, DW_Expand=2, FFN_Expand=2, drop_out_rate=0.):
        super().__init__()
        dw_channel = c * DW_Expand
        self.conv1 = nn.Conv2d(in_channels=c, out_channels=dw_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
        self.conv2 = nn.Conv2d(in_channels=dw_channel, out_channels=dw_channel, kernel_size=3, padding=1, stride=1, groups=dw_channel, bias=True)
        self.conv3 = nn.Conv2d(in_channels=dw_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)

        # Simplified Channel Attention: global average pooling followed by a 1x1 conv
        self.sca = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels=dw_channel // 2, out_channels=dw_channel // 2, kernel_size=1, padding=0, stride=1, groups=1, bias=True),
        )

        self.sg = SimpleGate()

        ffn_channel = FFN_Expand * c
        self.conv4 = nn.Conv2d(in_channels=c, out_channels=ffn_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
        self.conv5 = nn.Conv2d(in_channels=ffn_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)

        self.norm1 = LayerNorm2d(c)
        self.norm2 = LayerNorm2d(c)

        self.dropout1 = nn.Dropout(drop_out_rate) if drop_out_rate > 0. else nn.Identity()
        self.dropout2 = nn.Dropout(drop_out_rate) if drop_out_rate > 0. else nn.Identity()

        # Learnable residual scales, initialized to zero
        self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
        self.gamma = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)

    def forward(self, inp):
        x = inp

        x = self.norm1(x)

        x = self.conv1(x)
        x = self.conv2(x)
        x = self.sg(x)
        x = x * self.sca(x)
        x = self.conv3(x)

        x = self.dropout1(x)

        y = inp + x * self.beta

        x = self.conv4(self.norm2(y))
        x = self.sg(x)
        x = self.conv5(x)

        x = self.dropout2(x)

        return y + x * self.gamma
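
# Sanity-check sketch (illustrative): NAFBlock is shape-preserving, and because
# beta and gamma start at zero, a freshly constructed block is the identity:
#   blk = NAFBlock(64)
#   x = torch.randn(1, 64, 16, 16)
#   torch.allclose(blk(x), x)   # expected: True at initialization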


class DenBlock(nn.Module):
    """ Definition of the denoising block of FastDVDnet.
    Inputs of constructor:
        num_input_frames: int. number of input frames
    Inputs of forward():
        xn: input frames of dim [N, C, H, W], (C=3 RGB)
        noise_map: array with noise map of dim [N, 1, H, W]
    """

    def __init__(self, num_input_frames=3):
        super(DenBlock, self).__init__()
        self.chs_lyr0 = 32
        self.chs_lyr1 = 64
        self.chs_lyr2 = 128

        self.inc = InputCvBlock(num_in_frames=num_input_frames, out_ch=self.chs_lyr0)
        self.downc0 = DownBlock(in_ch=self.chs_lyr0, out_ch=self.chs_lyr1)
        self.downc1 = DownBlock(in_ch=self.chs_lyr1, out_ch=self.chs_lyr2)
        self.upc2 = UpBlock(in_ch=self.chs_lyr2, out_ch=self.chs_lyr1)
        self.upc1 = UpBlock(in_ch=self.chs_lyr1, out_ch=self.chs_lyr0)
        self.outc = OutputCvBlock(in_ch=self.chs_lyr0, out_ch=3)

    def forward(self, in0, in1, in2, noise_map):
        '''Args:
            inX: Tensor, [N, C, H, W] in the [0., 1.] range
            noise_map: Tensor [N, 1, H, W] in the [0., 1.] range
        '''
        # Input convolution block
        x0 = self.inc(torch.cat((in0, noise_map, in1, noise_map, in2, noise_map), dim=1))

        # Downsampling
        x1 = self.downc0(x0)
        x2 = self.downc1(x1)

        # Upsampling with skip connections
        x2 = self.upc2(x2)
        x1 = self.upc1(x1 + x2)

        x = self.outc(x0 + x1)

        # Residual: the network predicts the noise, subtracted from the central frame
        x = in1 - x

        return x
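
# Usage sketch (illustrative): three consecutive frames plus a per-pixel noise
# map; the block denoises the central frame in1. H and W must be divisible by 4
# for the two downsampling stages:
#   den = DenBlock(num_input_frames=3)
#   f0, f1, f2 = (torch.rand(1, 3, 64, 64) for _ in range(3))
#   sigma = torch.full((1, 1, 64, 64), 25.0 / 255.0)
#   out = den(f0, f1, f2, sigma)   # out.shape == (1, 3, 64, 64)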


class DenBlock_1(nn.Module):
    """ Definition of the denoising block of FastDVDnet, augmented with NAFBlocks.
    Inputs of constructor:
        num_input_frames: int. number of input frames
    Inputs of forward():
        xn: input frames of dim [N, C, H, W], (C=3 RGB)
        noise_map: array with noise map of dim [N, 1, H, W]
    """

    def __init__(self, num_input_frames=3):
        super(DenBlock_1, self).__init__()
        self.chs_lyr0 = 32
        self.chs_lyr1 = 64
        self.chs_lyr2 = 128

        self.inc = InputCvBlock_1(num_in_frames=num_input_frames, out_ch=self.chs_lyr0)
        self.downc0 = DownBlock_1(in_ch=self.chs_lyr0, out_ch=self.chs_lyr1)
        self.downc1 = DownBlock_1(in_ch=self.chs_lyr1, out_ch=self.chs_lyr2)
        self.upc2 = UpBlock_1(in_ch=self.chs_lyr2, out_ch=self.chs_lyr1)
        self.upc1 = UpBlock_1(in_ch=self.chs_lyr1, out_ch=self.chs_lyr0)
        self.outc = OutputCvBlock_1(in_ch=self.chs_lyr0, out_ch=3)

    def forward(self, in0, in1, in2, noise_map):
        '''Args:
            inX: Tensor, [N, C, H, W] in the [0., 1.] range
            noise_map: Tensor [N, 1, H, W] in the [0., 1.] range
        '''
        # Input convolution block
        x0 = self.inc(torch.cat((in0, noise_map, in1, noise_map, in2, noise_map), dim=1))

        # Downsampling
        x1 = self.downc0(x0)
        x2 = self.downc1(x1)

        # Upsampling with skip connections
        x2 = self.upc2(x2)
        x1 = self.upc1(x1 + x2)

        x = self.outc(x0 + x1)

        # Residual: the network predicts the noise, subtracted from the central frame
        x = in1 - x

        return x
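

# Minimal smoke test (a sketch, not part of the original code): runs both the
# plain and the NAF-augmented denoising blocks on random inputs and checks that
# the output keeps the shape of the central frame.
if __name__ == '__main__':
    frames = [torch.rand(1, 3, 64, 64) for _ in range(3)]
    noise_map = torch.full((1, 1, 64, 64), 25.0 / 255.0)
    for block in (DenBlock(), DenBlock_1()):
        block.eval()
        with torch.no_grad():
            out = block(*frames, noise_map)
        assert out.shape == (1, 3, 64, 64)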