# Source code for neural_de.external.prenet.networks

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class PReNet(nn.Module):
    """Progressive Recurrent Network (PReNet) for image restoration.

    Appears to implement the PReNet deraining architecture ("6 stages in
    the paper" per the original comments — presumably Ren et al., CVPR
    2019; TODO confirm). Each stage fuses the original input with the
    current estimate, updates a convolutional LSTM state, runs five
    2-conv residual units, and predicts a 3-channel image with a global
    residual connection to the input.

    Args:
        recurrent_iter: number of recurrent stages (default 6).
        use_GPU: kept for backward compatibility with existing callers;
            the hidden/cell states now follow the input tensor's device
            instead of being moved to CUDA unconditionally.
    """

    def __init__(self, recurrent_iter=6, use_GPU=True):
        super(PReNet, self).__init__()
        self.iteration = recurrent_iter
        self.use_GPU = use_GPU

        def res_block():
            # One 2-conv residual unit of the ResBlock stack.
            return nn.Sequential(
                nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
                nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
            )

        def gate(activation):
            # One convolutional LSTM gate over cat([x, h]) (64 -> 32 ch).
            return nn.Sequential(nn.Conv2d(32 + 32, 32, 3, 1, 1), activation)

        # Input fusion: cat([input, current estimate]) -> 32 features.
        self.conv0 = nn.Sequential(nn.Conv2d(6, 32, 3, 1, 1), nn.ReLU())
        # Attribute names are kept so existing state_dicts still load.
        self.res_conv1 = res_block()
        self.res_conv2 = res_block()
        self.res_conv3 = res_block()
        self.res_conv4 = res_block()
        self.res_conv5 = res_block()
        self.conv_i = gate(nn.Sigmoid())  # input gate
        self.conv_f = gate(nn.Sigmoid())  # forget gate
        self.conv_g = gate(nn.Tanh())     # candidate cell state
        self.conv_o = gate(nn.Sigmoid())  # output gate
        # Projection back to a 3-channel image.
        self.conv = nn.Sequential(nn.Conv2d(32, 3, 3, 1, 1))

    def forward(self, input):
        """Run the recurrent stages.

        Args:
            input: tensor of shape (batch, 3, H, W).

        Returns:
            (x, x_list): the final estimate and the list of per-stage
            estimates (the last entry is the final estimate).
        """
        batch_size, row, col = input.size(0), input.size(2), input.size(3)
        x = input
        # LSTM hidden/cell state, allocated on the input's device/dtype
        # (replaces the deprecated Variable wrapper and the unconditional
        # .cuda() call, which failed on CPU-only machines).
        h = torch.zeros(batch_size, 32, row, col,
                        device=input.device, dtype=input.dtype)
        c = torch.zeros(batch_size, 32, row, col,
                        device=input.device, dtype=input.dtype)

        x_list = []  # stores the output of the different stages
        for _ in range(self.iteration):
            # Fuse original input with the current estimate.
            x = torch.cat((input, x), 1)
            x = self.conv0(x)

            # Convolutional LSTM cell. Gate locals renamed (the original
            # `i` shadowed the loop variable).
            x = torch.cat((x, h), 1)
            gate_i = self.conv_i(x)
            gate_f = self.conv_f(x)
            gate_g = self.conv_g(x)
            gate_o = self.conv_o(x)
            c = gate_f * c + gate_i * gate_g
            h = gate_o * torch.tanh(c)

            # ResBlock: five 2-conv residual units.
            x = h
            for res in (self.res_conv1, self.res_conv2, self.res_conv3,
                        self.res_conv4, self.res_conv5):
                x = F.relu(res(x) + x)

            # Project to image space with a global residual connection.
            x = self.conv(x)
            x = x + input
            x_list.append(x)

        return x, x_list
class PReNet_LSTM(nn.Module):
    """PReNet variant: same convolutional-LSTM recurrence as ``PReNet``
    but WITHOUT the final global residual connection — each stage output
    is the raw projection ``conv(x)``, not ``conv(x) + input``.

    Args:
        recurrent_iter: number of recurrent stages (default 6).
        use_GPU: kept for backward compatibility; states now follow the
            input tensor's device.
    """

    def __init__(self, recurrent_iter=6, use_GPU=True):
        super(PReNet_LSTM, self).__init__()
        self.iteration = recurrent_iter
        self.use_GPU = use_GPU

        def res_block():
            # One 2-conv residual unit of the ResBlock stack.
            return nn.Sequential(
                nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
                nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
            )

        def gate(activation):
            # One convolutional LSTM gate over cat([x, h]) (64 -> 32 ch).
            return nn.Sequential(nn.Conv2d(32 + 32, 32, 3, 1, 1), activation)

        self.conv0 = nn.Sequential(nn.Conv2d(6, 32, 3, 1, 1), nn.ReLU())
        # Attribute names are kept so existing state_dicts still load.
        self.res_conv1 = res_block()
        self.res_conv2 = res_block()
        self.res_conv3 = res_block()
        self.res_conv4 = res_block()
        self.res_conv5 = res_block()
        self.conv_i = gate(nn.Sigmoid())  # input gate
        self.conv_f = gate(nn.Sigmoid())  # forget gate
        self.conv_g = gate(nn.Tanh())     # candidate cell state
        self.conv_o = gate(nn.Sigmoid())  # output gate
        self.conv = nn.Sequential(nn.Conv2d(32, 3, 3, 1, 1))

    def forward(self, input):
        """Run the recurrent stages.

        Args:
            input: tensor of shape (batch, 3, H, W).

        Returns:
            (x, x_list): final stage output and list of all stage outputs.
        """
        batch_size, row, col = input.size(0), input.size(2), input.size(3)
        x = input
        # Device/dtype-aware state init (replaces deprecated Variable and
        # unconditional .cuda()).
        h = torch.zeros(batch_size, 32, row, col,
                        device=input.device, dtype=input.dtype)
        c = torch.zeros(batch_size, 32, row, col,
                        device=input.device, dtype=input.dtype)

        x_list = []
        for _ in range(self.iteration):
            x = torch.cat((input, x), 1)
            x = self.conv0(x)

            # Convolutional LSTM cell (gate locals renamed to avoid
            # shadowing the loop variable `i` as the original did).
            x = torch.cat((x, h), 1)
            gate_i = self.conv_i(x)
            gate_f = self.conv_f(x)
            gate_g = self.conv_g(x)
            gate_o = self.conv_o(x)
            c = gate_f * c + gate_i * gate_g
            h = gate_o * torch.tanh(c)

            # ResBlock: five 2-conv residual units.
            x = h
            for res in (self.res_conv1, self.res_conv2, self.res_conv3,
                        self.res_conv4, self.res_conv5):
                x = F.relu(res(x) + x)

            # NOTE: unlike PReNet there is deliberately no `x + input`
            # residual here — preserved from the original.
            x = self.conv(x)
            x_list.append(x)

        return x, x_list
class PReNet_GRU(nn.Module):
    """PReNet variant that replaces the convolutional LSTM with a
    convolutional GRU cell (update gate ``conv_z``, reset gate
    ``conv_b``, candidate state ``conv_g``). No global residual is added
    to the stage output (preserved from the original).

    Args:
        recurrent_iter: number of recurrent stages (default 6).
        use_GPU: kept for backward compatibility; the hidden state now
            follows the input tensor's device.
    """

    def __init__(self, recurrent_iter=6, use_GPU=True):
        super(PReNet_GRU, self).__init__()
        self.iteration = recurrent_iter
        self.use_GPU = use_GPU

        def res_block():
            # One 2-conv residual unit of the ResBlock stack.
            return nn.Sequential(
                nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
                nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
            )

        def gate(activation):
            # One convolutional GRU gate over a 64-channel concatenation.
            return nn.Sequential(nn.Conv2d(32 + 32, 32, 3, 1, 1), activation)

        self.conv0 = nn.Sequential(nn.Conv2d(6, 32, 3, 1, 1), nn.ReLU())
        # Attribute names are kept so existing state_dicts still load.
        self.res_conv1 = res_block()
        self.res_conv2 = res_block()
        self.res_conv3 = res_block()
        self.res_conv4 = res_block()
        self.res_conv5 = res_block()
        self.conv_z = gate(nn.Sigmoid())  # update gate
        self.conv_b = gate(nn.Sigmoid())  # reset gate
        self.conv_g = gate(nn.Tanh())     # candidate hidden state
        self.conv = nn.Sequential(nn.Conv2d(32, 3, 3, 1, 1))

    def forward(self, input):
        """Run the recurrent stages.

        Args:
            input: tensor of shape (batch, 3, H, W).

        Returns:
            (x, x_list): final stage output and list of all stage outputs.
        """
        batch_size, row, col = input.size(0), input.size(2), input.size(3)
        x = input
        # Device/dtype-aware state init (replaces deprecated Variable and
        # unconditional .cuda()).
        h = torch.zeros(batch_size, 32, row, col,
                        device=input.device, dtype=input.dtype)

        x_list = []
        for _ in range(self.iteration):
            x = torch.cat((input, x), 1)
            x = self.conv0(x)

            # Convolutional GRU cell.
            xh = torch.cat((x, h), 1)
            z = self.conv_z(xh)                 # update gate
            b = self.conv_b(xh)                 # reset gate
            s = torch.cat((b * h, x), 1)        # reset-gated state + input
            g = self.conv_g(s)                  # candidate hidden state
            h = (1 - z) * h + z * g

            # ResBlock: five 2-conv residual units.
            x = h
            for res in (self.res_conv1, self.res_conv2, self.res_conv3,
                        self.res_conv4, self.res_conv5):
                x = F.relu(res(x) + x)

            x = self.conv(x)
            x_list.append(x)

        return x, x_list
class PReNet_x(nn.Module):
    """PReNet variant WITHOUT the ``cat(x, input)`` fusion: ``conv0``
    takes only the 3-channel current estimate, and no global residual is
    added to the stage output (both preserved from the original).

    Args:
        recurrent_iter: number of recurrent stages (default 6).
        use_GPU: kept for backward compatibility; states now follow the
            input tensor's device.
    """

    def __init__(self, recurrent_iter=6, use_GPU=True):
        super(PReNet_x, self).__init__()
        self.iteration = recurrent_iter
        self.use_GPU = use_GPU

        def res_block():
            # One 2-conv residual unit of the ResBlock stack.
            return nn.Sequential(
                nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
                nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
            )

        def gate(activation):
            # One convolutional LSTM gate over cat([x, h]) (64 -> 32 ch).
            return nn.Sequential(nn.Conv2d(32 + 32, 32, 3, 1, 1), activation)

        # Only 3 input channels: the current estimate alone, no fusion.
        self.conv0 = nn.Sequential(nn.Conv2d(3, 32, 3, 1, 1), nn.ReLU())
        # Attribute names are kept so existing state_dicts still load.
        self.res_conv1 = res_block()
        self.res_conv2 = res_block()
        self.res_conv3 = res_block()
        self.res_conv4 = res_block()
        self.res_conv5 = res_block()
        self.conv_i = gate(nn.Sigmoid())  # input gate
        self.conv_f = gate(nn.Sigmoid())  # forget gate
        self.conv_g = gate(nn.Tanh())     # candidate cell state
        self.conv_o = gate(nn.Sigmoid())  # output gate
        self.conv = nn.Sequential(nn.Conv2d(32, 3, 3, 1, 1))

    def forward(self, input):
        """Run the recurrent stages.

        Args:
            input: tensor of shape (batch, 3, H, W).

        Returns:
            (x, x_list): final stage output and list of all stage outputs.
        """
        batch_size, row, col = input.size(0), input.size(2), input.size(3)
        x = input
        # Device/dtype-aware state init (replaces deprecated Variable and
        # unconditional .cuda()).
        h = torch.zeros(batch_size, 32, row, col,
                        device=input.device, dtype=input.dtype)
        c = torch.zeros(batch_size, 32, row, col,
                        device=input.device, dtype=input.dtype)

        x_list = []
        for _ in range(self.iteration):
            # No concatenation with `input` in this variant.
            x = self.conv0(x)

            # Convolutional LSTM cell (gate locals renamed to avoid
            # shadowing the loop variable `i` as the original did).
            x = torch.cat((x, h), 1)
            gate_i = self.conv_i(x)
            gate_f = self.conv_f(x)
            gate_g = self.conv_g(x)
            gate_o = self.conv_o(x)
            c = gate_f * c + gate_i * gate_g
            h = gate_o * torch.tanh(c)

            # ResBlock: five 2-conv residual units.
            x = h
            for res in (self.res_conv1, self.res_conv2, self.res_conv3,
                        self.res_conv4, self.res_conv5):
                x = F.relu(res(x) + x)

            x = self.conv(x)
            x_list.append(x)

        return x, x_list
class PReNet_r(nn.Module):
    """PReNet variant with parameter sharing in the ResBlock: the single
    2-conv residual unit ``res_conv1`` is applied five times per stage
    (same weights each time). Keeps the convolutional LSTM recurrence
    and the global residual connection to the input.

    Args:
        recurrent_iter: number of recurrent stages (default 6).
        use_GPU: kept for backward compatibility; states now follow the
            input tensor's device.
    """

    def __init__(self, recurrent_iter=6, use_GPU=True):
        super(PReNet_r, self).__init__()
        self.iteration = recurrent_iter
        self.use_GPU = use_GPU

        def gate(activation):
            # One convolutional LSTM gate over cat([x, h]) (64 -> 32 ch).
            return nn.Sequential(nn.Conv2d(32 + 32, 32, 3, 1, 1), activation)

        self.conv0 = nn.Sequential(nn.Conv2d(6, 32, 3, 1, 1), nn.ReLU())
        # Single shared residual unit, reused 5x in forward().
        self.res_conv1 = nn.Sequential(
            nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
            nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
        )
        self.conv_i = gate(nn.Sigmoid())  # input gate
        self.conv_f = gate(nn.Sigmoid())  # forget gate
        self.conv_g = gate(nn.Tanh())     # candidate cell state
        self.conv_o = gate(nn.Sigmoid())  # output gate
        self.conv = nn.Sequential(nn.Conv2d(32, 3, 3, 1, 1))

    def forward(self, input):
        """Run the recurrent stages.

        Args:
            input: tensor of shape (batch, 3, H, W).

        Returns:
            (x, x_list): the final estimate and the list of per-stage
            estimates (the last entry is the final estimate).
        """
        batch_size, row, col = input.size(0), input.size(2), input.size(3)
        x = input
        # Device/dtype-aware state init (replaces deprecated Variable and
        # unconditional .cuda()).
        h = torch.zeros(batch_size, 32, row, col,
                        device=input.device, dtype=input.dtype)
        c = torch.zeros(batch_size, 32, row, col,
                        device=input.device, dtype=input.dtype)

        x_list = []
        for _ in range(self.iteration):
            x = torch.cat((input, x), 1)
            x = self.conv0(x)

            # Convolutional LSTM cell (gate locals renamed to avoid
            # shadowing the loop variable `i` as the original did).
            x = torch.cat((x, h), 1)
            gate_i = self.conv_i(x)
            gate_f = self.conv_f(x)
            gate_g = self.conv_g(x)
            gate_o = self.conv_o(x)
            c = gate_f * c + gate_i * gate_g
            h = gate_o * torch.tanh(c)

            # ResBlock: the same residual unit applied 5 times.
            x = h
            for _ in range(5):
                x = F.relu(self.res_conv1(x) + x)

            # Project to image space with a global residual connection.
            x = self.conv(x)
            x = input + x
            x_list.append(x)

        return x, x_list
# PRN
class PRN(nn.Module):
    """Progressive Residual Network: the non-recurrent PReNet baseline.
    Each stage fuses the input with the current estimate, runs five
    2-conv residual units (no LSTM/GRU state), and predicts the image
    with a global residual connection.

    Args:
        recurrent_iter: number of stages (default 6).
        use_GPU: unused by this class's computation; kept for interface
            consistency with the recurrent variants.
    """

    def __init__(self, recurrent_iter=6, use_GPU=True):
        super(PRN, self).__init__()
        self.iteration = recurrent_iter
        self.use_GPU = use_GPU

        def res_block():
            # One 2-conv residual unit of the ResBlock stack.
            return nn.Sequential(
                nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
                nn.Conv2d(32, 32, 3, 1, 1), nn.ReLU(),
            )

        self.conv0 = nn.Sequential(nn.Conv2d(6, 32, 3, 1, 1), nn.ReLU())
        # Attribute names are kept so existing state_dicts still load.
        self.res_conv1 = res_block()
        self.res_conv2 = res_block()
        self.res_conv3 = res_block()
        self.res_conv4 = res_block()
        self.res_conv5 = res_block()
        self.conv = nn.Sequential(nn.Conv2d(32, 3, 3, 1, 1))

    def forward(self, input):
        """Run the stages.

        Args:
            input: tensor of shape (batch, 3, H, W).

        Returns:
            (x, x_list): the final estimate and the list of per-stage
            estimates (the last entry is the final estimate).
        """
        x = input
        x_list = []
        for _ in range(self.iteration):
            x = torch.cat((input, x), 1)
            x = self.conv0(x)

            # ResBlock: five 2-conv residual units.
            for res in (self.res_conv1, self.res_conv2, self.res_conv3,
                        self.res_conv4, self.res_conv5):
                x = F.relu(res(x) + x)

            # Project to image space with a global residual connection.
            x = self.conv(x)
            x = x + input
            x_list.append(x)

        return x, x_list
class PRN_r(nn.Module):
    """Lightweight progressive residual network.

    Non-recurrent variant in which a single two-conv residual unit
    (``res_conv1``) is applied five times per stage with shared weights.
    Each stage output is ``input + conv(features)``.
    """

    def __init__(self, recurrent_iter=6, use_GPU=True):
        super(PRN_r, self).__init__()
        self.iteration = recurrent_iter
        self.use_GPU = use_GPU
        # Fusion of the original input with the current estimate.
        self.conv0 = nn.Sequential(
            nn.Conv2d(6, 32, 3, 1, 1),
            nn.ReLU()
        )
        # Shared residual unit, reused five times per stage.
        self.res_conv1 = nn.Sequential(
            nn.Conv2d(32, 32, 3, 1, 1),
            nn.ReLU(),
            nn.Conv2d(32, 32, 3, 1, 1),
            nn.ReLU()
        )
        # Projection back to a 3-channel image.
        self.conv = nn.Sequential(
            nn.Conv2d(32, 3, 3, 1, 1),
        )

    def forward(self, input):
        """Return the final estimate and the list of per-stage estimates."""
        estimate = input
        stage_outputs = []
        for _ in range(self.iteration):
            features = self.conv0(torch.cat((input, estimate), 1))
            for _ in range(5):
                features = F.relu(self.res_conv1(features) + features)
            estimate = input + self.conv(features)
            stage_outputs.append(estimate)
        return estimate, stage_outputs