Python torch.nn.UpsamplingNearest2d() Examples

The following are code examples of torch.nn.UpsamplingNearest2d(), collected from open-source projects. The original project and source file are noted above each example. You may also want to check out all available functions/classes of the module torch.nn, or try the search function.
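A minimal sketch of what the layer does, before the project examples (this snippet is not taken from any project below): nearest-neighbour upsampling repeats each pixel so the spatial dimensions grow by the scale factor, and nn.UpsamplingNearest2d gives the same result as nn.Upsample or F.interpolate with mode='nearest'.

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.arange(4.0).reshape(1, 1, 2, 2)       # N x C x H x W = 1 x 1 x 2 x 2

up = nn.UpsamplingNearest2d(scale_factor=2)
y = up(x)                                       # 1 x 1 x 4 x 4

# The module is a thin wrapper; both of these produce the same tensor:
y_module = nn.Upsample(scale_factor=2, mode='nearest')(x)
y_functional = F.interpolate(x, scale_factor=2, mode='nearest')

assert torch.equal(y, y_module) and torch.equal(y, y_functional)
print(y.shape)  # torch.Size([1, 1, 4, 4])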
Example #1
Source File: HourglassNetwork.py    From YouTube3D with BSD 3-Clause "New" or "Revised" License
def __init__(self):
		super(Channels1, self).__init__()
		self.list = nn.ModuleList()
		self.list.append(
			nn.Sequential(
				inception(256,[[64],[3,32,64],[5,32,64],[7,32,64]]),
				inception(256,[[64],[3,32,64],[5,32,64],[7,32,64]])
				)
			) #EE
		self.list.append(
			nn.Sequential(
				nn.AvgPool2d(2),
				inception(256,[[64],[3,32,64],[5,32,64],[7,32,64]]),
				inception(256,[[64],[3,32,64],[5,32,64],[7,32,64]]),
				inception(256,[[64],[3,32,64],[5,32,64],[7,32,64]]), 
				nn.UpsamplingNearest2d(scale_factor=2)
				)
			) #EEE 
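The snippet above only shows the constructor; the forward pass is not part of the example. A hedged sketch of how such a two-branch hourglass block is typically evaluated (the summation is an assumption, not code taken from YouTube3D):

def forward(self, x):
    # Branch 0 keeps the input resolution; branch 1 average-pools, runs more
    # inception blocks, and upsamples back with nn.UpsamplingNearest2d, so the
    # two outputs have matching shapes and can be added.
    return self.list[0](x) + self.list[1](x)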
Example #2
Source File: models.py    From self-driving-truck with MIT License
def forward(self, embedding):
        def act(x):
            return F.relu(x, inplace=True)
        def up(x):
            m = nn.UpsamplingNearest2d(scale_factor=2)
            return m(x)
        x_ae = embedding # Bx256
        x_ae = act(self.ae_fc1_bn(self.ae_fc1(x_ae))) # 128x3x5
        x_ae = x_ae.view(-1, 128, 3, 5)
        x_ae = up(x_ae) # 6x10
        x_ae = act(self.ae_c1_bn(self.ae_c1(x_ae))) # 6x10
        x_ae = up(x_ae) # 12x20
        x_ae = act(self.ae_c2_bn(self.ae_c2(x_ae))) # 12x20 -> 10x20
        x_ae = F.pad(x_ae, (0, 0, 1, 0)) # 11x20
        x_ae = up(x_ae) # 22x40
        x_ae = act(self.ae_c3_bn(self.ae_c3(x_ae))) # 22x40
        x_ae = up(x_ae) # 44x80
        x_ae = F.pad(x_ae, (0, 0, 1, 0)) # add 1px at top (from 44 to 45)
        x_ae = F.sigmoid(self.ae_c4(x_ae))
        return x_ae 
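Two incidental notes on this example, with a hedged sketch of the newer-API equivalents (assuming the surrounding model stays unchanged): nn.UpsamplingNearest2d has no parameters, so constructing it inside up() on every call works, but F.interpolate avoids the repeated module construction; and F.sigmoid is deprecated in recent PyTorch in favour of torch.sigmoid.

import torch
import torch.nn.functional as F

def up(x):
    # functional nearest-neighbour upsampling, same result as the module
    return F.interpolate(x, scale_factor=2, mode='nearest')

def final_activation(x):
    # replacement for the deprecated F.sigmoid
    return torch.sigmoid(x)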
Example #3
Source File: modelsNIPS.py    From PytorchWCT with MIT License
def __init__(self,d):
        super(decoder2,self).__init__()
        # decoder
        self.reflecPad5 = nn.ReflectionPad2d((1,1,1,1))
        self.conv5 = nn.Conv2d(128,64,3,1,0)
        self.conv5.weight = torch.nn.Parameter(d.get(1).weight.float())
        self.conv5.bias = torch.nn.Parameter(d.get(1).bias.float())
        self.relu5 = nn.ReLU(inplace=True)
        # 112 x 112

        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        # 224 x 224

        self.reflecPad6 = nn.ReflectionPad2d((1,1,1,1))
        self.conv6 = nn.Conv2d(64,64,3,1,0)
        self.conv6.weight = torch.nn.Parameter(d.get(5).weight.float())
        self.conv6.bias = torch.nn.Parameter(d.get(5).bias.float())
        self.relu6 = nn.ReLU(inplace=True)
        # 224 x 224

        self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
        self.conv7 = nn.Conv2d(64,3,3,1,0)
        self.conv7.weight = torch.nn.Parameter(d.get(8).weight.float())
        self.conv7.bias = torch.nn.Parameter(d.get(8).bias.float()) 
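The example stops at the constructor (the weights are copied from a loaded torch7 decoder d). A hedged sketch of how these layers are presumably chained in forward, inferred from the module names and the resolution comments:

def forward(self, x):
    out = self.relu5(self.conv5(self.reflecPad5(x)))    # 112 x 112
    out = self.unpool(out)                              # 224 x 224, nearest upsampling
    out = self.relu6(self.conv6(self.reflecPad6(out)))  # 224 x 224
    out = self.conv7(self.reflecPad7(out))              # 3-channel image output
    return out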
Example #4
Source File: HourglassNetwork.py    From YouTube3D with BSD 3-Clause "New" or "Revised" License
def __init__(self):
		super(Channels4, self).__init__()
		self.list = nn.ModuleList()
		self.list.append(
			nn.Sequential(
				nn.AvgPool2d(2),
				inception(128, [[32], [3,32,32], [5,32,32], [7,32,32]]),
				inception(128, [[32], [3,32,32], [5,32,32], [7,32,32]]),
				Channels3(),
				inception(128, [[32], [3,64,32], [5,64,32], [7,64,32]]),
				inception(128, [[16], [3,32,16], [7,32,16], [11,32,16]]),
				nn.UpsamplingNearest2d(scale_factor=2)
				)
			)#BB3BA
		self.list.append(
			nn.Sequential(
				inception(128, [[16], [3,64,16], [7,64,16], [11,64,16]])
				)
			)#A 
Example #5
Source File: HourglassNetwork.py    From YouTube3D with BSD 3-Clause "New" or "Revised" License
def __init__(self):
		super(Channels3, self).__init__()
		self.list = nn.ModuleList()
		self.list.append(
			nn.Sequential(
				nn.AvgPool2d(2),
				inception(128, [[32], [3,32,32], [5,32,32], [7,32,32]]),
				inception(128, [[64], [3,32,64], [5,32,64], [7,32,64]]),
				Channels2(),
				inception(256, [[64], [3,32,64], [5,32,64], [7,32,64]]), 
				inception(256, [[32], [3,32,32], [5,32,32], [7,32,32]]), 
				nn.UpsamplingNearest2d(scale_factor=2)
				)
			)#BD2EG
		self.list.append(
			nn.Sequential(
				inception(128, [[32], [3,32,32], [5,32,32], [7,32,32]]), 
				inception(128, [[32], [3,64,32], [7,64,32], [11,64,32]])
				)
			)#BC 
Example #6
Source File: HourglassNetwork.py    From YouTube3D with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __init__(self):
		super(Channels2, self).__init__()
		self.list = nn.ModuleList()
		self.list.append(
			nn.Sequential(
				inception(256, [[64], [3,32,64], [5,32,64], [7,32,64]]), 
				inception(256, [[64], [3,64,64], [7,64,64], [11,64,64]])
				)
			)#EF
		self.list.append( 
			nn.Sequential(
				nn.AvgPool2d(2),
				inception(256, [[64], [3,32,64], [5,32,64], [7,32,64]]), 
				inception(256, [[64], [3,32,64], [5,32,64], [7,32,64]]), 
				Channels1(),
				inception(256, [[64], [3,32,64], [5,32,64], [7,32,64]]), 
				inception(256, [[64], [3,64,64], [7,64,64], [11,64,64]]),
				nn.UpsamplingNearest2d(scale_factor=2)
				)
			)#EE1EF 
Example #7
Source File: cu_net_prev_version_wig.py    From CU-Net with Apache License 2.0
def __init__(self, in_num, neck_size, growth_rate, layer_num, max_link):
        super(_CU_Net, self).__init__()
        self.down_blocks = []
        self.up_blocks = []
        self.num_blocks = 4
        print('creating hg ...')
        for i in range(0, self.num_blocks):
            print('creating down block %d ...' % i)
            self.down_blocks.append(_DenseBlock(in_num=in_num, neck_size=neck_size,
                                      growth_rate=growth_rate, layer_num=layer_num,
                                      max_link=max_link, requires_skip=True))
            print('creating up block %d ...' % i)
            self.up_blocks.append(_DenseBlock(in_num=in_num*2, neck_size=neck_size,
                                      growth_rate=growth_rate, layer_num=layer_num,
                                      max_link=max_link, requires_skip=False, is_up=True))
        self.down_blocks = nn.ModuleList(self.down_blocks)
        self.up_blocks = nn.ModuleList(self.up_blocks)
        print('creating neck block ...')
        self.neck_block = _DenseBlock(in_num=in_num, neck_size=neck_size,
                                     growth_rate=growth_rate, layer_num=layer_num,
                                     max_link=max_link, requires_skip=False)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2) 
Example #8
Source File: cu_net_prev_version.py    From CU-Net with Apache License 2.0
def __init__(self, in_num, neck_size, growth_rate, layer_num, max_link):
        super(_CU_Net, self).__init__()
        self.down_blocks = []
        self.up_blocks = []
        self.num_blocks = 4
        print('creating hg ...')
        for i in range(0, self.num_blocks):
            print('creating down block %d ...' % i)
            self.down_blocks.append(_DenseBlock(in_num=in_num, neck_size=neck_size,
                                      growth_rate=growth_rate, layer_num=layer_num,
                                      max_link=max_link, requires_skip=True))
            print('creating up block %d ...' % i)
            self.up_blocks.append(_DenseBlock(in_num=in_num*2, neck_size=neck_size,
                                      growth_rate=growth_rate, layer_num=layer_num,
                                      max_link=max_link, requires_skip=False, is_up=True))
        self.down_blocks = nn.ModuleList(self.down_blocks)
        self.up_blocks = nn.ModuleList(self.up_blocks)
        print('creating neck block ...')
        self.neck_block = _DenseBlock(in_num=in_num, neck_size=neck_size,
                                     growth_rate=growth_rate, layer_num=layer_num,
                                     max_link=max_link, requires_skip=False)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2) 
Example #9
Source File: resnet_dec.py    From GCA-Matting with MIT License
def _make_layer(self, block, planes, blocks, stride=1):
        if blocks == 0:
            return nn.Sequential(nn.Identity())
        norm_layer = self._norm_layer
        upsample = None
        if stride != 1:
            upsample = nn.Sequential(
                nn.UpsamplingNearest2d(scale_factor=2),
                SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),
                norm_layer(planes * block.expansion),
            )
        elif self.inplanes != planes * block.expansion:
            upsample = nn.Sequential(
                SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),
                norm_layer(planes * block.expansion),
            )

        layers = [block(self.inplanes, planes, stride, upsample, norm_layer, self.large_kernel)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_layer=norm_layer, large_kernel=self.large_kernel))

        return nn.Sequential(*layers) 
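Here the nearest-upsampling layer sits in the shortcut path of a decoder residual block, mirroring the strided 1x1 shortcut of a ResNet encoder. A hedged, self-contained sketch of that shortcut in isolation; nn.utils.spectral_norm and nn.BatchNorm2d stand in for the project's SpectralNorm, conv1x1 and norm_layer helpers:

import torch
import torch.nn as nn

in_ch, out_ch = 64, 32
shortcut = nn.Sequential(
    nn.UpsamplingNearest2d(scale_factor=2),                                        # double H and W
    nn.utils.spectral_norm(nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=False)),   # match channels
    nn.BatchNorm2d(out_ch),
)

x = torch.randn(1, in_ch, 16, 16)
print(shortcut(x).shape)  # torch.Size([1, 32, 32, 32])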
Example #10
Source File: prediction.py    From reseg-pytorch with GNU General Public License v3.0
def upsample_prediction(self, prediction, image_height, image_width):

        #assert len(prediction.size()) == 4   # n, c, h, w  

        #return nn.UpsamplingNearest2d((image_height, image_width))(prediction)
        resizer = ImageUtilities.image_resizer(image_height, image_width, interpolation=Image.NEAREST)
        return resizer(prediction) 
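The commented-out line hints at the tensor-based alternative: nn.UpsamplingNearest2d also accepts a fixed output size instead of a scale factor, and F.interpolate is its functional counterpart for an n x c x h x w prediction tensor. A hedged sketch with illustrative shapes:

import torch
import torch.nn.functional as F

prediction = torch.randn(1, 21, 32, 32)                                # n, c, h, w
upsampled = F.interpolate(prediction, size=(240, 320), mode='nearest')
print(upsampled.shape)                                                 # torch.Size([1, 21, 240, 320])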
Example #11
Source File: layers.py    From pose-ae-train with BSD 3-Clause "New" or "Revised" License
def __init__(self, n, f, bn=None, increase=128):
        super(Hourglass, self).__init__()
        nf = f + increase
        self.up1 = Conv(f, f, 3, bn=bn)
        # Lower branch
        self.pool1 = Pool(2, 2)
        self.low1 = Conv(f, nf, 3, bn=bn)
        # Recursive hourglass
        if n > 1:
            self.low2 = Hourglass(n-1, nf, bn=bn)
        else:
            self.low2 = Conv(nf, nf, 3, bn=bn)
        self.low3 = Conv(nf, f, 3)
        self.up2  = nn.UpsamplingNearest2d(scale_factor=2) 
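Again only the constructor is shown. A hedged reconstruction of the usual hourglass forward implied by these modules (the upper branch is added to the pooled, processed, and re-upsampled lower branch); the exact pose-ae-train code may differ:

def forward(self, x):
    up1 = self.up1(x)
    low = self.low1(self.pool1(x))
    low = self.low3(self.low2(low))
    up2 = self.up2(low)            # nearest upsampling back to the input resolution
    return up1 + up2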
Example #12
Source File: modelsNIPS.py    From PytorchWCT with MIT License
def __init__(self,d):
        super(decoder3,self).__init__()
        # decoder
        self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
        self.conv7 = nn.Conv2d(256,128,3,1,0)
        self.conv7.weight = torch.nn.Parameter(d.get(1).weight.float())
        self.conv7.bias = torch.nn.Parameter(d.get(1).bias.float())
        self.relu7 = nn.ReLU(inplace=True)
        # 56 x 56

        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        # 112 x 112

        self.reflecPad8 = nn.ReflectionPad2d((1,1,1,1))
        self.conv8 = nn.Conv2d(128,128,3,1,0)
        self.conv8.weight = torch.nn.Parameter(d.get(5).weight.float())
        self.conv8.bias = torch.nn.Parameter(d.get(5).bias.float())
        self.relu8 = nn.ReLU(inplace=True)
        # 112 x 112

        self.reflecPad9 = nn.ReflectionPad2d((1,1,1,1))
        self.conv9 = nn.Conv2d(128,64,3,1,0)
        self.conv9.weight = torch.nn.Parameter(d.get(8).weight.float())
        self.conv9.bias = torch.nn.Parameter(d.get(8).bias.float())
        self.relu9 = nn.ReLU(inplace=True)

        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        # 224 x 224

        self.reflecPad10 = nn.ReflectionPad2d((1,1,1,1))
        self.conv10 = nn.Conv2d(64,64,3,1,0)
        self.conv10.weight = torch.nn.Parameter(d.get(12).weight.float())
        self.conv10.bias = torch.nn.Parameter(d.get(12).bias.float())
        self.relu10 = nn.ReLU(inplace=True)

        self.reflecPad11 = nn.ReflectionPad2d((1,1,1,1))
        self.conv11 = nn.Conv2d(64,3,3,1,0)
        self.conv11.weight = torch.nn.Parameter(d.get(15).weight.float())
        self.conv11.bias = torch.nn.Parameter(d.get(15).bias.float()) 
Example #13
Source File: hgPRM.py    From video-to-pose3D with MIT License
def __init__(self, n, nFeats, nModules, inputResH, inputResW, net_type, B, C):
        super(Hourglass, self).__init__()

        self.ResidualUp = ResidualPyramid if n >= 2 else Residual
        self.ResidualDown = ResidualPyramid if n >= 3 else Residual

        self.depth = n
        self.nModules = nModules
        self.nFeats = nFeats
        self.net_type = net_type
        self.B = B
        self.C = C
        self.inputResH = inputResH
        self.inputResW = inputResW

        up1 = self._make_residual(self.ResidualUp, False, inputResH, inputResW)
        low1 = nn.Sequential(
            nn.MaxPool2d(2),
            self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2)
        )
        if n > 1:
            low2 = Hourglass(n - 1, nFeats, nModules, inputResH / 2, inputResW / 2, net_type, B, C)
        else:
            low2 = self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2)

        low3 = self._make_residual(self.ResidualDown, True, inputResH / 2, inputResW / 2)
        up2 = nn.UpsamplingNearest2d(scale_factor=2)

        self.upperBranch = up1
        self.lowerBranch = nn.Sequential(
            low1,
            low2,
            low3,
            up2
        ) 
Example #14
Source File: hg-prm.py    From video-to-pose3D with MIT License
def __init__(self, n, nFeats, nModules, inputResH, inputResW, net_type, B, C):
        super(Hourglass, self).__init__()

        self.ResidualUp = ResidualPyramid if n >= 2 else Residual
        self.ResidualDown = ResidualPyramid if n >= 3 else Residual
        
        self.depth = n
        self.nModules = nModules
        self.nFeats = nFeats
        self.net_type = net_type
        self.B = B
        self.C = C
        self.inputResH = inputResH
        self.inputResW = inputResW

        self.up1 = self._make_residual(self.ResidualUp, False, inputResH, inputResW)
        self.low1 = nn.Sequential(
            nn.MaxPool2d(2),
            self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2)
        )
        if n > 1:
            self.low2 = Hourglass(n - 1, nFeats, nModules, inputResH / 2, inputResW / 2, net_type, B, C)
        else:
            self.low2 = self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2)
        
        self.low3 = self._make_residual(self.ResidualDown, True, inputResH / 2, inputResW / 2)
        self.up2 = nn.UpsamplingNearest2d(scale_factor=2)

        self.upperBranch = self.up1
        self.lowerBranch = nn.Sequential(
            self.low1,
            self.low2,
            self.low3,
            self.up2
        ) 
Example #15
Source File: loadOpenFace.py    From OpenFacePytorch with Apache License 2.0
def __init__(self, useCuda, gpuDevice=0):
        super(netOpenFace, self).__init__()

        self.gpuDevice = gpuDevice

        self.layer1 = Conv2d(3, 64, (7,7), (2,2), (3,3))
        self.layer2 = BatchNorm(64)
        self.layer3 = nn.ReLU()
        self.layer4 = nn.MaxPool2d((3,3), stride=(2,2), padding=(1,1))
        self.layer5 = CrossMapLRN(5, 0.0001, 0.75, gpuDevice=gpuDevice)
        self.layer6 = Conv2d(64, 64, (1,1), (1,1), (0,0))
        self.layer7 = BatchNorm(64)
        self.layer8 = nn.ReLU()
        self.layer9 = Conv2d(64, 192, (3,3), (1,1), (1,1))
        self.layer10 = BatchNorm(192)
        self.layer11 = nn.ReLU()
        self.layer12 = CrossMapLRN(5, 0.0001, 0.75, gpuDevice=gpuDevice)
        self.layer13 = nn.MaxPool2d((3,3), stride=(2,2), padding=(1,1))
        self.layer14 = Inception(192, (3,5), (1,1), (128,32), (96,16,32,64), nn.MaxPool2d((3,3), stride=(2,2), padding=(0,0)), True)
        self.layer15 = Inception(256, (3,5), (1,1), (128,64), (96,32,64,64), nn.LPPool2d(2, (3,3), stride=(3,3)), True)
        self.layer16 = Inception(320, (3,5), (2,2), (256,64), (128,32,None,None), nn.MaxPool2d((3,3), stride=(2,2), padding=(0,0)), True)
        self.layer17 = Inception(640, (3,5), (1,1), (192,64), (96,32,128,256), nn.LPPool2d(2, (3,3), stride=(3,3)), True)
        self.layer18 = Inception(640, (3,5), (2,2), (256,128), (160,64,None,None), nn.MaxPool2d((3,3), stride=(2,2), padding=(0,0)), True)
        self.layer19 = Inception(1024, (3,), (1,), (384,), (96,96,256), nn.LPPool2d(2, (3,3), stride=(3,3)), True)
        self.layer21 = Inception(736, (3,), (1,), (384,), (96,96,256), nn.MaxPool2d((3,3), stride=(2,2), padding=(0,0)), True)
        self.layer22 = nn.AvgPool2d((3,3), stride=(1,1), padding=(0,0))
        self.layer25 = Linear(736, 128)

        #
        self.resize1 = nn.UpsamplingNearest2d(scale_factor=3)
        self.resize2 = nn.AvgPool2d(4)

        #
        # self.eval()

        if useCuda:
            self.cuda(gpuDevice) 
Example #16
Source File: SharpMask.py    From deepmask-pytorch with MIT License
def refinement(self, neth, netv):
        return RefineModule(neth, netv,
                            nn.Sequential(nn.ReLU(inplace=True),
                             nn.UpsamplingNearest2d(scale_factor=2))) 
Example #17
Source File: resnet.py    From pytorch-mono-depth with MIT License
def __init__(self, num_input_features):
        super().__init__()
        num_output_features = int(num_input_features / 2)
        self.conv1 = nn.Conv2d(num_input_features, num_output_features,
                               kernel_size=5, padding=2)
        self.bn1 = nn.BatchNorm2d(num_output_features)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(num_output_features, num_output_features,
                               kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(num_output_features)
        self.conv_proj = nn.Conv2d(num_input_features, num_output_features,
                                   kernel_size=5, padding=2)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
        self._unpool_masks = dict() 
Example #18
Source File: models.py    From LinearStyleTransfer with BSD 2-Clause "Simplified" License
def __init__(self):
        super(decoder3,self).__init__()
        # decoder
        self.reflecPad7 = nn.ReflectionPad2d((1,1,1,1))
        self.conv7 = nn.Conv2d(256,128,3,1,0)
        self.relu7 = nn.ReLU(inplace=True)
        # 56 x 56

        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        # 112 x 112

        self.reflecPad8 = nn.ReflectionPad2d((1,1,1,1))
        self.conv8 = nn.Conv2d(128,128,3,1,0)
        self.relu8 = nn.ReLU(inplace=True)
        # 112 x 112

        self.reflecPad9 = nn.ReflectionPad2d((1,1,1,1))
        self.conv9 = nn.Conv2d(128,64,3,1,0)
        self.relu9 = nn.ReLU(inplace=True)

        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        # 224 x 224

        self.reflecPad10 = nn.ReflectionPad2d((1,1,1,1))
        self.conv10 = nn.Conv2d(64,64,3,1,0)
        self.relu10 = nn.ReLU(inplace=True)

        self.reflecPad11 = nn.ReflectionPad2d((1,1,1,1))
        self.conv11 = nn.Conv2d(64,3,3,1,0) 
Example #19
Source File: hourglass.py    From soccerontable with BSD 2-Clause "Simplified" License
def __init__(self, block, num_blocks, planes, depth):
        super(Hourglass, self).__init__()
        self.depth = depth
        self.block = block
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
        self.hg = self._make_hour_glass(block, num_blocks, planes, depth) 
Example #20
Source File: cu_net_prev_version_wig.py    From CU-Net with Apache License 2.0
def __init__(self, in_num, out_num):
        super(_Bn_Relu_Conv1x1, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(in_num))
        self.add_module('relu', nn.ReLU(inplace=True))
        # QuanInput2d: No.2 
        if bitsI <= 15:
            self.add_module('quaninput', QuanInput2d())
        self.add_module('conv', nn.Conv2d(in_num, out_num,
                                          kernel_size=1, stride=1, bias=False))

# class _TransitionDown(nn.Module):
#     def __init__(self, in_num_list, out_num, num_units):
#         super(_TransitionDown, self).__init__()
#         self.adapters = []
#         for i in range(0, num_units):
#             self.adapters.append(_Bn_Relu_Conv1x1(in_num=in_num_list[i], out_num=out_num))
#         self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
#
#     def forward(self, x, i):
#         x = self.adapters[i](x)
#         out = self.pool(x)
#         return out
#
# class _TransitionUp(nn.Module):
#     def __init__(self, in_num_list, out_num_list, num_units):
#         super(_TransitionUp, self).__init__()
#         self.adapters = []
#         for i in range(0, num_units):
#             self.adapters.append(_Bn_Relu_Conv1x1(in_num=in_num_list[i], out_num=out_num_list[i]))
#         self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
#
#     def forward(self, x, i):
#         x = self.adapters[i](x)
#         out = self.upsample(x)
#         return out 
Example #21
Source File: cu_net_prev_version.py    From CU-Net with Apache License 2.0
def __init__(self, in_num, out_num):
        super(_Bn_Relu_Conv1x1, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(in_num))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(in_num, out_num,
                                          kernel_size=1, stride=1, bias=False))

# class _TransitionDown(nn.Module):
#     def __init__(self, in_num_list, out_num, num_units):
#         super(_TransitionDown, self).__init__()
#         self.adapters = []
#         for i in range(0, num_units):
#             self.adapters.append(_Bn_Relu_Conv1x1(in_num=in_num_list[i], out_num=out_num))
#         self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
#
#     def forward(self, x, i):
#         x = self.adapters[i](x)
#         out = self.pool(x)
#         return out
#
# class _TransitionUp(nn.Module):
#     def __init__(self, in_num_list, out_num_list, num_units):
#         super(_TransitionUp, self).__init__()
#         self.adapters = []
#         for i in range(0, num_units):
#             self.adapters.append(_Bn_Relu_Conv1x1(in_num=in_num_list[i], out_num=out_num_list[i]))
#         self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
#
#     def forward(self, x, i):
#         x = self.adapters[i](x)
#         out = self.upsample(x)
#         return out 
Example #22
Source File: hg.py    From StarMap with GNU General Public License v3.0
def __init__(self, n, nModules, nFeats):
    super(Hourglass, self).__init__()
    self.n = n
    self.nModules = nModules
    self.nFeats = nFeats
    
    _up1_, _low1_, _low2_, _low3_ = [], [], [], []
    for j in range(self.nModules):
      _up1_.append(Residual(self.nFeats, self.nFeats))
    self.low1 = nn.MaxPool2d(kernel_size = 2, stride = 2)
    for j in range(self.nModules):
      _low1_.append(Residual(self.nFeats, self.nFeats))
    
    if self.n > 1:
      self.low2 = Hourglass(n - 1, self.nModules, self.nFeats)
    else:
      for j in range(self.nModules):
        _low2_.append(Residual(self.nFeats, self.nFeats))
      self.low2_ = nn.ModuleList(_low2_)
    
    for j in range(self.nModules):
      _low3_.append(Residual(self.nFeats, self.nFeats))
    
    self.up1_ = nn.ModuleList(_up1_)
    self.low1_ = nn.ModuleList(_low1_)
    self.low3_ = nn.ModuleList(_low3_)
    
    #self.up2 = nn.Upsample(scale_factor = 2)
    self.up2 = nn.UpsamplingNearest2d(scale_factor = 2) 
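The commented-out line above is a reminder that nn.UpsamplingNearest2d(scale_factor=2) and nn.Upsample(scale_factor=2) (whose default mode is 'nearest') are interchangeable; the dedicated class is kept mainly for backward compatibility. A quick hedged check:

import torch
import torch.nn as nn

x = torch.randn(2, 8, 16, 16)
a = nn.UpsamplingNearest2d(scale_factor=2)(x)
b = nn.Upsample(scale_factor=2, mode='nearest')(x)
assert torch.equal(a, b)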
Example #23
Source File: unet.py    From Torchelie with MIT License
def __init__(self, in_ch, out_ch, inner=None):
        super(UBlock, self).__init__()
        self.in_conv = nn.Sequential(
            OrderedDict([
                ('pad1', nn.ReflectionPad2d(1)),
                ('conv1', tu.kaiming(nn.Conv2d(in_ch, out_ch, 3))),
                ('relu1', nn.ReLU(inplace=True)),
                ('pad2', nn.ReflectionPad2d(1)),
                ('conv2', tu.kaiming(nn.Conv2d(out_ch, out_ch, 3))),
                ('relu2', nn.ReLU(inplace=True)),
            ]))

        self.inner = inner
        if inner is not None:
            self.inner = nn.Sequential(
                    nn.MaxPool2d(2, 2),
                    inner,
                    nn.UpsamplingNearest2d(scale_factor=2),
                    nn.ReflectionPad2d(1),
                    tu.kaiming(nn.Conv2d(out_ch, out_ch, 3)),
                )
            self.skip = nn.Sequential(
                    tu.kaiming(nn.Conv2d(out_ch, out_ch, 1)))

        inner_ch = out_ch * (1 if inner is None else 2)
        self.out_conv = nn.Sequential(
            OrderedDict([
                ('pad1', nn.ReflectionPad2d(1)),
                ('conv1', tu.kaiming(nn.Conv2d(inner_ch, out_ch, 3))),
                ('relu1', nn.ReLU(inplace=True)),
                ('pad2', nn.ReflectionPad2d(1)),
                ('conv2', tu.kaiming(nn.Conv2d(out_ch, in_ch, 3))),
                ('relu2', nn.ReLU(inplace=True)),
            ])) 
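The constructor implies a U-Net style forward: the inner path pools, recurses, and comes back up through nn.UpsamplingNearest2d, and its output is concatenated with a 1x1-projected skip before out_conv (which expects inner_ch = 2 * out_ch channels). A hedged sketch consistent with that wiring; the actual Torchelie forward may differ:

def forward(self, x):
    feats = self.in_conv(x)
    if self.inner is not None:
        # channel-wise concat of the skip projection and the upsampled inner path
        feats = torch.cat([self.skip(feats), self.inner(feats)], dim=1)
    return self.out_conv(feats)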
Example #24
Source File: vgg.py    From Torchelie with MIT License
def VggBNBone(arch, in_ch=3, leak=0, block=tnn.Conv2dBNReLU, debug=False):
    """
    Construct a VGG net

    How to specify a VGG architecture:

    It's a list of blocks specifications. Blocks are either:

    - 'M' for maxpool of kernel size 2 and stride 2
    - 'A' for average pool of kernel size 2 and stride 2
    - 'U' for nearest neighbors upsampling (scale factor 2)
    - an integer `ch` for a block with `ch` output channels

    Args:
        arch (list): architecture specification
        in_ch (int): number of input channels
        leak (float): leak in relus
        block (fn): block ctor

    Returns:
        A VGG instance
    """
    layers = []

    if debug:
        layers.append(tnn.Debug('Input'))

    for i, layer in enumerate(arch):
        if layer == 'M':
            layers.append(nn.MaxPool2d(2, 2))
        elif layer == 'A':
            layers.append(nn.AvgPool2d(2, 2))
        elif layer == 'U':
            layers.append(nn.UpsamplingNearest2d(scale_factor=2))
        else:
            layers.append(block(in_ch, layer, ks=3, leak=leak))
            in_ch = layer
        if debug:
            layer_name = 'layer_{}_{}'.format(layers[-1].__class__.__name__, i)
            layers.append(tnn.Debug(layer_name))
    return tnn.CondSeq(*layers) 
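A hedged usage example (the architecture list is made up for illustration): an hourglass-shaped backbone where 'M' halves the resolution and 'U' doubles it again through nn.UpsamplingNearest2d.

net = VggBNBone([32, 'M', 64, 'M', 128, 'U', 64, 'U', 32], in_ch=3)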
Example #25
Source File: upsampling_nearest.py    From pytorch2keras with MIT License
def __init__(self, scale_factor=2):
        super(LayerTest, self).__init__()
        self.up = nn.UpsamplingNearest2d(scale_factor=scale_factor) 
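A hedged sketch of exercising this converter-test module on its own, using only what the snippet defines (the pytorch2keras conversion call itself is omitted):

import torch

model = LayerTest(scale_factor=2)
out = model.up(torch.randn(1, 3, 8, 8))
print(out.shape)  # torch.Size([1, 3, 16, 16])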
Example #26
Source File: upsample_nearest.py    From pytorch2keras with MIT License
def __init__(self, inp=10, out=16, kernel_size=3, bias=True):
        super(TestUpsampleNearest2d, self).__init__()
        self.conv2d = nn.Conv2d(inp, out, kernel_size=kernel_size, bias=bias)
        self.up = nn.UpsamplingNearest2d(scale_factor=2) 
Example #27
Source File: rtpose_cu_net.py    From EverybodyDanceNow_reproduce_pytorch with MIT License
def __init__(self, in_num, out_num):
        super(_Bn_Relu_Conv1x1, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(in_num))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(in_num, out_num, kernel_size=1,
                                          stride=1, bias=False))

# class _TransitionDown(nn.Module):
#     def __init__(self, in_num_list, out_num, num_units):
#         super(_TransitionDown, self).__init__()
#         self.adapters = []
#         for i in range(0, num_units):
#             self.adapters.append(_Bn_Relu_Conv1x1(in_num=in_num_list[i], out_num=out_num))
#         self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
#
#     def forward(self, x, i):
#         x = self.adapters[i](x)
#         out = self.pool(x)
#         return out
#
# class _TransitionUp(nn.Module):
#     def __init__(self, in_num_list, out_num_list, num_units):
#         super(_TransitionUp, self).__init__()
#         self.adapters = []
#         for i in range(0, num_units):
#             self.adapters.append(_Bn_Relu_Conv1x1(in_num=in_num_list[i], out_num=out_num_list[i]))
#         self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
#
#     def forward(self, x, i):
#         x = self.adapters[i](x)
#         out = self.upsample(x)
#         return out 
Example #28
Source File: cu_net.py    From CU-Net with Apache License 2.0
def __init__(self, in_num, out_num):
        super(_Bn_Relu_Conv1x1, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(in_num))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(in_num, out_num, kernel_size=1,
                                          stride=1, bias=False))

# class _TransitionDown(nn.Module):
#     def __init__(self, in_num_list, out_num, num_units):
#         super(_TransitionDown, self).__init__()
#         self.adapters = []
#         for i in range(0, num_units):
#             self.adapters.append(_Bn_Relu_Conv1x1(in_num=in_num_list[i], out_num=out_num))
#         self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
#
#     def forward(self, x, i):
#         x = self.adapters[i](x)
#         out = self.pool(x)
#         return out
#
# class _TransitionUp(nn.Module):
#     def __init__(self, in_num_list, out_num_list, num_units):
#         super(_TransitionUp, self).__init__()
#         self.adapters = []
#         for i in range(0, num_units):
#             self.adapters.append(_Bn_Relu_Conv1x1(in_num=in_num_list[i], out_num=out_num_list[i]))
#         self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
#
#     def forward(self, x, i):
#         x = self.adapters[i](x)
#         out = self.upsample(x)
#         return out 
Example #29
Source File: models.py    From LinearStyleTransfer with BSD 2-Clause "Simplified" License
def __init__(self):
        super(decoder4,self).__init__()
        # decoder
        self.reflecPad11 = nn.ReflectionPad2d((1,1,1,1))
        self.conv11 = nn.Conv2d(512,256,3,1,0)
        self.relu11 = nn.ReLU(inplace=True)
        # 28 x 28

        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        # 56 x 56

        self.reflecPad12 = nn.ReflectionPad2d((1,1,1,1))
        self.conv12 = nn.Conv2d(256,256,3,1,0)
        self.relu12 = nn.ReLU(inplace=True)
        # 56 x 56

        self.reflecPad13 = nn.ReflectionPad2d((1,1,1,1))
        self.conv13 = nn.Conv2d(256,256,3,1,0)
        self.relu13 = nn.ReLU(inplace=True)
        # 56 x 56

        self.reflecPad14 = nn.ReflectionPad2d((1,1,1,1))
        self.conv14 = nn.Conv2d(256,256,3,1,0)
        self.relu14 = nn.ReLU(inplace=True)
        # 56 x 56

        self.reflecPad15 = nn.ReflectionPad2d((1,1,1,1))
        self.conv15 = nn.Conv2d(256,128,3,1,0)
        self.relu15 = nn.ReLU(inplace=True)
        # 56 x 56

        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        # 112 x 112

        self.reflecPad16 = nn.ReflectionPad2d((1,1,1,1))
        self.conv16 = nn.Conv2d(128,128,3,1,0)
        self.relu16 = nn.ReLU(inplace=True)
        # 112 x 112

        self.reflecPad17 = nn.ReflectionPad2d((1,1,1,1))
        self.conv17 = nn.Conv2d(128,64,3,1,0)
        self.relu17 = nn.ReLU(inplace=True)
        # 112 x 112

        self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
        # 224 x 224

        self.reflecPad18 = nn.ReflectionPad2d((1,1,1,1))
        self.conv18 = nn.Conv2d(64,64,3,1,0)
        self.relu18 = nn.ReLU(inplace=True)
        # 224 x 224

        self.reflecPad19 = nn.ReflectionPad2d((1,1,1,1))
        self.conv19 = nn.Conv2d(64,3,3,1,0) 