Python torch.nn.PReLU() Examples

The following are 24 code examples of torch.nn.PReLU(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other functions and classes of the torch.nn module.
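Before diving in, a quick refresher on the module itself: nn.PReLU applies PReLU(x) = max(0, x) + a * min(0, x), where the negative slope a is learned during training. The constructor takes num_parameters (default 1, a single slope shared across all channels; pass the channel count for one slope per channel, as many examples below do with nn.PReLU(nr)) and init, the starting slope value (default 0.25). The minimal sketch below is not taken from any of the listed projects; it only illustrates the two constructor forms:

import torch
import torch.nn as nn

shared = nn.PReLU()          # one learnable slope, shared across all channels
per_channel = nn.PReLU(64)   # one learnable slope per channel

x = torch.randn(8, 64, 32, 32)
print(shared(x).shape)       # torch.Size([8, 64, 32, 32])
print(per_channel(x).shape)  # torch.Size([8, 64, 32, 32])
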
Example #1
Source File: simple_kaggle.py    From argus-freesound with MIT License
def __init__(self, num_classes, base_size=64, dropout=0.2):
        super().__init__()

        self.conv = nn.Sequential(
            ConvBlock(in_channels=3, out_channels=base_size),
            ConvBlock(in_channels=base_size, out_channels=base_size*2),
            ConvBlock(in_channels=base_size*2, out_channels=base_size*4),
            ConvBlock(in_channels=base_size*4, out_channels=base_size*8),
        )

        self.fc = nn.Sequential(
            nn.Dropout(dropout),
            nn.Linear(base_size*8, base_size*2),
            nn.PReLU(),
            nn.BatchNorm1d(base_size*2),
            nn.Dropout(dropout/2),
            nn.Linear(base_size*2, num_classes),
        ) 
Example #2
Source File: ddbpn.py    From OISR-PyTorch with BSD 2-Clause "Simplified" License
def __init__(self, in_channels, nr, scale, up=True, bottleneck=True):
        super(DenseProjection, self).__init__()
        if bottleneck:
            self.bottleneck = nn.Sequential(*[
                nn.Conv2d(in_channels, nr, 1),
                nn.PReLU(nr)
            ])
            inter_channels = nr
        else:
            self.bottleneck = None
            inter_channels = in_channels

        self.conv_1 = nn.Sequential(*[
            projection_conv(inter_channels, nr, scale, up),
            nn.PReLU(nr)
        ])
        self.conv_2 = nn.Sequential(*[
            projection_conv(nr, inter_channels, scale, not up),
            nn.PReLU(inter_channels)
        ])
        self.conv_3 = nn.Sequential(*[
            projection_conv(inter_channels, nr, scale, up),
            nn.PReLU(nr)
        ]) 
Example #3
Source File: base_networks.py    From STARnet with MIT License
def __init__(self, scale, n_feat, bn=False, act='prelu', bias=True):
        super(Upsampler, self).__init__()
        modules = []
        for _ in range(int(math.log(scale, 2))):
            modules.append(ConvBlock(n_feat, 4 * n_feat, 3, 1, 1, bias, activation=None, norm=None))
            modules.append(nn.PixelShuffle(2))
            if bn: modules.append(nn.BatchNorm2d(n_feat))
            #modules.append(nn.PReLU())
        self.up = nn.Sequential(*modules)
        
        self.activation = act
        if self.activation == 'relu':
            self.act = nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = nn.LeakyReLU(0.1, True)
        elif self.activation == 'tanh':
            self.act = nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = nn.Sigmoid() 
Example #4
Source File: base_networks.py    From STARnet with MIT License
def __init__(self, input_size, output_size, bias=True, activation='relu', norm='batch'):
        super(DenseBlock, self).__init__()
        self.fc = nn.Linear(input_size, output_size, bias=bias)

        self.norm = norm
        if self.norm == 'batch':
            self.bn = nn.BatchNorm1d(output_size)
        elif self.norm == 'instance':
            self.bn = nn.InstanceNorm1d(output_size)

        self.activation = activation
        if self.activation == 'relu':
            self.act = nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = nn.LeakyReLU(0.1, True)
        elif self.activation == 'tanh':
            self.act = nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = nn.Sigmoid() 
Example #5
Source File: highresnet_3D.py    From pytorch-mri-segmentation-3D with MIT License
def __init__(self,NoLabels):
        super(HighResNet,self).__init__()
        self.conv1 = nn.Conv3d(1, 16, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm3d(16, affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.PReLU()

        self.block1_1 = HighResNetBlock(inplanes=16, outplanes=16, padding_=1, dilation_=1)

        self.block2_1 = HighResNetBlock(inplanes=16, outplanes=32, padding_=2, dilation_=2)
        self.block2_2 = HighResNetBlock(inplanes=32, outplanes=32, padding_=2, dilation_=2)

        self.block3_1 = HighResNetBlock(inplanes=32, outplanes=64, padding_=4, dilation_=4)
        self.block3_2 = HighResNetBlock(inplanes=64, outplanes=64, padding_=4, dilation_=4)

        self.conv2 = nn.Conv3d(64, 80, kernel_size=1, stride=1, padding=0, bias=False)
        self.upsample = nn.ConvTranspose3d(80, 80, kernel_size=2, stride=2, bias=False)
        self.conv3 = nn.Conv3d(80, NoLabels, kernel_size=1, stride=1, padding=0, bias=False) 
Example #6
Source File: smallhighresnet_3D.py    From pytorch-mri-segmentation-3D with MIT License
def __init__(self,NoLabels):
        super(SmallHighResNet,self).__init__()
        self.conv1 = nn.Conv3d(1, 8, kernel_size=3, stride=8, padding=1, bias=False)
        self.bn1 = nn.BatchNorm3d(8, affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.PReLU()

        self.block1_1 = HighResNetBlock(inplanes=8, outplanes=8, padding_=1, dilation_=1)

        self.block2_1 = HighResNetBlock(inplanes=8, outplanes=16, padding_=2, dilation_=2)
        self.block2_2 = HighResNetBlock(inplanes=16, outplanes=16, padding_=2, dilation_=2)

        self.block3_1 = HighResNetBlock(inplanes=16, outplanes=16, padding_=4, dilation_=4)
        self.block3_2 = HighResNetBlock(inplanes=16, outplanes=16, padding_=4, dilation_=4)

        self.conv2 = nn.Conv3d(16, NoLabels, kernel_size=1, stride=1, padding=0, bias=False) 
Example #7
Source File: base_networks.py    From STARnet with MIT License
def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None):
        super(ConvBlock3D, self).__init__()
        self.conv = nn.Conv3d(input_size, output_size, kernel_size, stride, padding, bias=bias)

        self.norm = norm
        if self.norm == 'batch':
            self.bn = nn.BatchNorm3d(output_size)  # 3D norm to match the 5D Conv3d output
        elif self.norm == 'instance':
            self.bn = nn.InstanceNorm3d(output_size)

        self.activation = activation
        if self.activation == 'relu':
            self.act = nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = nn.LeakyReLU(0.1, True)
        elif self.activation == 'tanh':
            self.act = nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = nn.Sigmoid() 
Example #8
Source File: common.py    From OISR-PyTorch with BSD 2-Clause "Simplified" License
def __init__(
        self, conv, n_feats, kernel_size,
        bias=True, bn=False, act=nn.PReLU(1, 0.25), res_scale=1):

        super(ResBlock, self).__init__()

        self.conv1 = conv(n_feats, n_feats, kernel_size, bias=bias)
        self.conv2 = conv(n_feats, n_feats, kernel_size, bias=bias)
        self.conv3 = conv(n_feats, n_feats, kernel_size, bias=bias)
        self.conv4 = conv(n_feats, n_feats, kernel_size, bias=bias)
        self.relu1 = nn.PReLU(n_feats, 0.25)
        self.relu2 = nn.PReLU(n_feats, 0.25)
        self.relu3 = nn.PReLU(n_feats, 0.25)
        self.relu4 = nn.PReLU(n_feats, 0.25)
        self.scale1 = nn.Parameter(torch.FloatTensor([2.0]), requires_grad=True)
        self.scale2 = nn.Parameter(torch.FloatTensor([2.0]), requires_grad=True)
        self.scale3 = nn.Parameter(torch.FloatTensor([2.0]), requires_grad=True)
        self.scale4 = nn.Parameter(torch.FloatTensor([2.0]), requires_grad=True) 
Example #9
Source File: smallhighresnet_3D.py    From pytorch-mri-segmentation-3D with MIT License
def __init__(self, inplanes, outplanes, padding_=1, stride=1, dilation_ = 1):
        super(HighResNetBlock, self).__init__()

        self.conv1 = nn.Conv3d(inplanes, outplanes, kernel_size=3, stride=1, 
                                padding=padding_, bias=False, dilation = dilation_)
        self.conv2 = nn.Conv3d(outplanes, outplanes, kernel_size=3, stride=1, 
                                padding=padding_, bias=False, dilation = dilation_)
        #2 convolutions of same dilation. residual block
        self.bn1 = nn.BatchNorm3d(outplanes, affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False

        self.bn2 = nn.BatchNorm3d(outplanes, affine = affine_par)
        for i in self.bn2.parameters():
            i.requires_grad = False

        self.relu = nn.PReLU()
        self.diff_dims = (inplanes != outplanes)

        self.downsample = nn.Sequential(
            nn.Conv3d(inplanes, outplanes, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm3d(outplanes, affine = affine_par)
        )
        for i in self.downsample._modules['1'].parameters():
            i.requires_grad = False
Example #10
Source File: exp_net_3D.py    From pytorch-mri-segmentation-3D with MIT License
def __init__(self,dilation_series, padding_series, inplanes, midplanes, outplanes):
        super(ASPP_Module, self).__init__()
        self.conv3d_list = nn.ModuleList()
        self.bn3d_list = nn.ModuleList()
        for dilation, padding in zip(dilation_series, padding_series):
            self.conv3d_list.append(nn.Conv3d(inplanes, midplanes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True))
            self.bn3d_list.append(nn.BatchNorm3d(midplanes, affine = affine_par))
        self.num_concats = len(self.conv3d_list) + 2
        #add global pooling, add batchnorm
        self.conv1x1_1 = nn.Conv3d(inplanes, midplanes, kernel_size=1, stride=1)
        self.conv1x1_2 = nn.Conv3d(inplanes, midplanes, kernel_size=1, stride=1)
        self.conv1x1_3 = nn.Conv3d(midplanes*self.num_concats, outplanes, kernel_size=1, stride=1)

        self.relu = nn.PReLU()

        self.bn1 = nn.BatchNorm3d(midplanes, affine = affine_par)
        self.bn2 = nn.BatchNorm3d(midplanes*self.num_concats, affine= affine_par)
        self.bn3 = nn.BatchNorm3d(midplanes, affine= affine_par) 
Example #11
Source File: exp_net_3D.py    From pytorch-mri-segmentation-3D with MIT License
def __init__(self, inplanes, outplanes, padding_=1, stride=1, dilation_ = 1):
        super(HighResNetBlock, self).__init__()

        self.conv1 = nn.Conv3d(inplanes, outplanes, kernel_size=3, stride=1, 
                                padding=padding_, bias=False, dilation = dilation_)
        self.conv2 = nn.Conv3d(outplanes, outplanes, kernel_size=3, stride=1, 
                                padding=padding_, bias=False, dilation = dilation_)
        #2 convolutions of same dilation. residual block
        self.bn1 = nn.BatchNorm3d(outplanes, affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False

        self.bn2 = nn.BatchNorm3d(outplanes, affine = affine_par)
        for i in self.bn2.parameters():
            i.requires_grad = False

        self.relu = nn.PReLU()
        self.diff_dims = (inplanes != outplanes)

        self.downsample = nn.Sequential(
            nn.Conv3d(inplanes, outplanes, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm3d(outplanes, affine = affine_par)
        )
        for i in self.downsample._modules['1'].parameters():
            i.requires_grad = False
Example #12
Source File: base_networks.py    From STARnet with MIT License
def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)

        self.norm = norm
        if self.norm == 'batch':
            self.bn = nn.BatchNorm2d(output_size)
        elif self.norm == 'instance':
            self.bn = nn.InstanceNorm2d(output_size)

        self.activation = activation
        if self.activation == 'relu':
            self.act = nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = nn.LeakyReLU(0.1, True)
        elif self.activation == 'tanh':
            self.act = nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = nn.Sigmoid() 
Example #13
Source File: base_networks.py    From STARnet with MIT License
def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None):
        super(DeconvBlock, self).__init__()
        self.deconv = nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias)

        self.norm = norm
        if self.norm == 'batch':
            self.bn = nn.BatchNorm2d(output_size)
        elif self.norm == 'instance':
            self.bn = nn.InstanceNorm2d(output_size)

        self.activation = activation
        if self.activation == 'relu':
            self.act = nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = nn.LeakyReLU(0.1, True)
        elif self.activation == 'tanh':
            self.act = nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = nn.Sigmoid() 
Example #14
Source File: simple_attention.py    From argus-freesound with MIT License
def __init__(self, num_classes, base_size=64, dropout=0.2,
                 ratio=16, kernel_size=7):
        super().__init__()

        self.conv = nn.Sequential(
            ConvBlock(in_channels=3, out_channels=base_size),
            ConvBlock(in_channels=base_size, out_channels=base_size*2),
            ConvBlock(in_channels=base_size*2, out_channels=base_size*4),
            ConvBlock(in_channels=base_size*4, out_channels=base_size*8),
        )
        self.attention = ConvolutionalBlockAttentionModule(base_size*8,
                                                           ratio=ratio,
                                                           kernel_size=kernel_size)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Dropout(dropout),
            nn.Linear(base_size*8, base_size*2),
            nn.PReLU(),
            nn.BatchNorm1d(base_size*2),
            nn.Dropout(dropout/2),
            nn.Linear(base_size*2, num_classes),
        ) 
Example #15
Source File: helper.py    From torchscope with Apache License 2.0
def compute_flops(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_flops(module, inp, out) // 2
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_flops(module, inp, out) // 2
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d)):
        return compute_Pool2d_flops(module, inp, out) // 2
    elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_flops(module, inp, out) // 2
    elif isinstance(module, nn.Upsample):
        return compute_Upsample_flops(module, inp, out) // 2
    elif isinstance(module, nn.Linear):
        return compute_Linear_flops(module, inp, out) // 2
    else:
        return 0 
Example #16
Source File: helper.py    From torchscope with Apache License 2.0
def compute_ReLU_flops(module, inp, out):
    assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU))
    batch_size = inp.size()[0]
    active_elements_count = batch_size

    for s in inp.size()[1:]:
        active_elements_count *= s

    return active_elements_count 
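
As a sanity check on the two helpers above, here is a hypothetical call (the module and tensors are invented for illustration, not taken from torchscope's own tests): for an input of shape (8, 16, 32, 32), the active element count is simply the product of the dimensions, and compute_flops halves it.

import torch
import torch.nn as nn

m = nn.PReLU(16)
x = torch.randn(8, 16, 32, 32)
y = m(x)
print(compute_ReLU_flops(m, x, y))  # 8 * 16 * 32 * 32 = 131072
print(compute_flops(m, x, y))       # 131072 // 2 = 65536
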
Example #17
Source File: common.py    From OISR-PyTorch with BSD 2-Clause "Simplified" License
def __init__(
        self, conv, n_feats, kernel_size,
        bias=True, bn=False, act=nn.PReLU(1, 0.25), res_scale=1):

        super(ResBlock, self).__init__()

        self.conv1 = conv(n_feats, n_feats, kernel_size, bias=bias)
        self.conv2 = conv(n_feats, n_feats, kernel_size, bias=bias)
        self.conv3 = conv(n_feats, n_feats, kernel_size, bias=bias)
        self.relu1 = nn.PReLU(n_feats, 0.25)
        self.relu2 = nn.PReLU(n_feats, 0.25)
        self.relu3 = nn.PReLU(n_feats, 0.25)
        self.scale1 = nn.Parameter(torch.FloatTensor([0.5]), requires_grad=True)
        self.scale2 = nn.Parameter(torch.FloatTensor([2.0]), requires_grad=True)
        self.scale3 = nn.Parameter(torch.FloatTensor([-1.0]), requires_grad=True)
        self.scale4 = nn.Parameter(torch.FloatTensor([4.0]), requires_grad=True)
        self.scale5 = nn.Parameter(torch.FloatTensor([1/6]), requires_grad=True) 
Example #18
Source File: extremeC3.py    From ext_portrait_segmentation with MIT License
def __init__(self, nIn, nOut, kSize, stride=1):
        '''

        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: stride rate for down-sampling. Default is 1
        '''
        super().__init__()
        padding = int((kSize - 1) / 2)
        # self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False)
        # self.conv1 = nn.Conv2d(nOut, nOut, (1, kSize), stride=1, padding=(0, padding), bias=False)
        self.bn = nn.BatchNorm2d(nOut, eps=1e-03)
        self.act = nn.PReLU(nOut)
        # self.act = nn.ReLU() 
Example #19
Source File: SINet.py    From ext_portrait_segmentation with MIT License
def __init__(self, nIn, nOut, kSize, stride=1):
        '''

        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: stride rate for down-sampling. Default is 1
        '''
        super().__init__()
        padding = int((kSize - 1) / 2)

        self.conv = nn.Sequential(
            nn.Conv2d(nIn, nIn, (kSize, kSize), stride=stride, padding=(padding, padding), groups=nIn, bias=False),
            nn.Conv2d(nIn, nOut,  kernel_size=1, stride=1, bias=False),
        )
        self.bn = nn.BatchNorm2d(nOut, eps=1e-03, momentum= BN_moment)
        self.act = nn.PReLU(nOut) 
Example #20
Source File: SINet.py    From ext_portrait_segmentation with MIT License
def __init__(self, nIn, nOut, kSize, stride=1, divide=2.0):
        '''

        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: stride rate for down-sampling. Default is 1
        '''
        super().__init__()
        padding = int((kSize - 1) / 2)

        self.conv = nn.Sequential(
            nn.Conv2d(nIn, nIn, (kSize, kSize), stride=stride, padding=(padding, padding), groups=nIn, bias=False),
            SqueezeBlock(nIn,divide=divide),
            nn.Conv2d(nIn, nOut,  kernel_size=1, stride=1, bias=False),
        )

        self.bn = nn.BatchNorm2d(nOut, eps=1e-03, momentum= BN_moment)
        self.act = nn.PReLU(nOut) 
Example #21
Source File: interest.py    From prediction-flow with MIT License
def __init__(self, input_size, hidden_layers, activation='sigmoid'):
        super(AuxiliaryNet, self).__init__()
        modules = OrderedDict()

        previous_size = input_size
        for index, hidden_layer in enumerate(hidden_layers):
            modules[f"dense{index}"] = nn.Linear(previous_size, hidden_layer)
            if activation:
                if activation.lower() == 'relu':
                    modules[f"activation{index}"] = nn.ReLU()
                elif activation.lower() == 'prelu':
                    modules[f"activation{index}"] = nn.PReLU()
                elif activation.lower() == 'sigmoid':
                    modules[f"activation{index}"] = nn.Sigmoid()
                else:
                    raise NotImplementedError(f"{activation} is not supported")
            previous_size = hidden_layer
        modules["final_layer"] = nn.Linear(previous_size, 1)
        self._sequential = nn.Sequential(modules) 
Example #22
Source File: mlp.py    From prediction-flow with MIT License
def __init__(self, input_size, hidden_layers,
                 dropout=0.0, batchnorm=True, activation='relu'):
        super(MLP, self).__init__()
        modules = OrderedDict()

        previous_size = input_size
        for index, hidden_layer in enumerate(hidden_layers):
            modules[f"dense{index}"] = nn.Linear(previous_size, hidden_layer)
            if batchnorm:
                modules[f"batchnorm{index}"] = nn.BatchNorm1d(hidden_layer)
            if activation:
                if activation.lower() == 'relu':
                    modules[f"activation{index}"] = nn.ReLU()
                elif activation.lower() == 'prelu':
                    modules[f"activation{index}"] = nn.PReLU()
                elif activation.lower() == 'sigmoid':
                    modules[f"activation{index}"] = nn.Sigmoid()
                else:
                    raise NotImplementedError(f"{activation} is not supported")
            if dropout:
                modules[f"dropout{index}"] = nn.Dropout(dropout)
            previous_size = hidden_layer
        self._sequential = nn.Sequential(modules) 
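
A small usage sketch for this MLP, assuming (it is not shown in this excerpt) that the class also defines a forward that applies self._sequential; passing activation='prelu' inserts an nn.PReLU() after each hidden layer's batch norm:

import torch

mlp = MLP(input_size=16, hidden_layers=[64, 32],
          dropout=0.1, batchnorm=True, activation='prelu')
out = mlp._sequential(torch.randn(8, 16))
print(out.shape)  # torch.Size([8, 32])
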
Example #23
Source File: cgnet.py    From SegmenTron with Apache License 2.0
def __init__(self, in_channels, out_channels, dilation=2, reduction=16, down=False,
                 residual=True, norm_layer=nn.BatchNorm2d):
        super(ContextGuidedBlock, self).__init__()
        inter_channels = out_channels // 2 if not down else out_channels
        if down:
            self.conv = _ConvBNPReLU(in_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer)
            self.reduce = nn.Conv2d(inter_channels * 2, out_channels, 1, bias=False)
        else:
            self.conv = _ConvBNPReLU(in_channels, inter_channels, 1, 1, 0, norm_layer=norm_layer)
        self.f_loc = _ChannelWiseConv(inter_channels, inter_channels)
        self.f_sur = _ChannelWiseConv(inter_channels, inter_channels, dilation)
        self.bn = norm_layer(inter_channels * 2)
        self.prelu = nn.PReLU(inter_channels * 2)
        self.f_glo = _FGlo(out_channels, reduction)
        self.down = down
        self.residual = residual 
Example #24
Source File: module.py    From SegmenTron with Apache License 2.0
def __init__(self, in_channels, out_channels, stride=1, k=4, r_lim=7, down_method='esp', norm_layer=nn.BatchNorm2d):
        super(EESP, self).__init__()
        self.stride = stride
        n = int(out_channels / k)
        n1 = out_channels - (k - 1) * n
        assert down_method in ['avg', 'esp'], 'One of these is supported (avg or esp)'
        assert n == n1, "n(={}) and n1(={}) should be equal for Depth-wise Convolution ".format(n, n1)
        self.proj_1x1 = _ConvBNPReLU(in_channels, n, 1, stride=1, groups=k, norm_layer=norm_layer)

        map_receptive_ksize = {3: 1, 5: 2, 7: 3, 9: 4, 11: 5, 13: 6, 15: 7, 17: 8}
        self.k_sizes = list()
        for i in range(k):
            ksize = int(3 + 2 * i)
            ksize = ksize if ksize <= r_lim else 3
            self.k_sizes.append(ksize)
        self.k_sizes.sort()
        self.spp_dw = nn.ModuleList()
        for i in range(k):
            dilation = map_receptive_ksize[self.k_sizes[i]]
            self.spp_dw.append(nn.Conv2d(n, n, 3, stride, dilation, dilation=dilation, groups=n, bias=False))
        self.conv_1x1_exp = _ConvBN(out_channels, out_channels, 1, 1, groups=k, norm_layer=norm_layer)
        self.br_after_cat = _BNPReLU(out_channels, norm_layer)
        self.module_act = nn.PReLU(out_channels)
        self.downAvg = True if down_method == 'avg' else False