Python torch.nn.GroupNorm() Examples

The following are 30 code examples of torch.nn.GroupNorm(), collected from open-source projects. The source file, project, and license for each example are listed above it. You may also want to check out all available functions and classes of the module torch.nn.
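For reference, nn.GroupNorm(num_groups, num_channels) splits the channels of its input into num_groups groups and normalizes within each group per sample, so its behavior does not depend on batch size; num_channels must be divisible by num_groups. A minimal sketch of the basic call:

import torch
import torch.nn as nn

# 32 channels split into 8 groups of 4 channels each
gn = nn.GroupNorm(num_groups=8, num_channels=32)
x = torch.randn(2, 32, 16, 16)  # (N, C, H, W)
y = gn(x)                       # output has the same shape as x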
Example #1
Source File: resnet.py    From cgnl-network.pytorch with MIT License
def __init__(self, inplanes, planes, use_scale=False, groups=None):
        self.use_scale = use_scale
        self.groups = groups

        super(SpatialCGNL, self).__init__()
        # conv theta
        self.t = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv phi
        self.p = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv g
        self.g = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv z
        self.z = nn.Conv2d(planes, inplanes, kernel_size=1, stride=1,
                           groups=self.groups, bias=False)
        self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)

        if self.use_scale:
            cprint("=> WARN: SpatialCGNL block uses 'SCALE'", 'yellow')
        if self.groups:
            cprint("=> WARN: SpatialCGNL block uses '{}' groups".format(self.groups),
                   'yellow')
Example #2
Source File: init.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def __call__(self, module):
        if isinstance(module, (nn.Conv2d, nn.Conv3d)):
            self.initializer(
                module.weight.data,
                self.slope,
                self.mode,
                self.nonlinearity)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
            if module.weight is not None:
                module.weight.data.fill_(1)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Linear):
            self.initializer(
                module.weight.data,
                self.slope,
                self.mode,
                self.nonlinearity)
            if module.bias is not None:
                module.bias.data.zero_() 
Example #3
Source File: common_net.py    From iSketchNFill with GNU General Public License v3.0
def get_norm(planes,norm_type='batch',num_groups=4):
    if norm_type == 'batch':
        norm_layer = nn.BatchNorm2d(planes, affine=True)
    elif norm_type == 'instance':
        norm_layer = nn.InstanceNorm2d(planes, affine=False)
    elif norm_type == 'group':
        norm_layer = nn.GroupNorm(num_groups,planes)
    elif norm_type == 'adain':
        norm_layer = AdaptiveInstanceNorm2d(planes)
    elif norm_type == 'none':
        norm_layer = None
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer

##############################################################
## Simple Gated Operations (Affine) and (Multiplicative)
############################################################## 
Example #4
Source File: hooks.py    From mmdetection with Apache License 2.0
def patch_norm_fp32(module):
    """Recursively convert normalization layers from FP16 to FP32.

    Args:
        module (nn.Module): The modules to be converted in FP16.

    Returns:
        nn.Module: The converted module, the normalization layers have been
            converted to FP32.
    """
    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
        module.float()
        if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
            module.forward = patch_forward_method(module.forward, torch.half,
                                                  torch.float)
    for child in module.children():
        patch_norm_fp32(child)
    return module 
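A typical use of this helper, sketched under the assumption that some build_model() function (a placeholder here) returns an nn.Module that has been cast to half precision:

# hypothetical usage: keep normalization layers in FP32 after casting to FP16
model = build_model().half()
model = patch_norm_fp32(model)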
Example #5
Source File: resnet.py    From AerialDetection with Apache License 2.0
def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m, 'conv2_offset'):
                        constant_init(m.conv2_offset, 0)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None') 
Example #6
Source File: hrnet.py    From mmdetection with Apache License 2.0
def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None') 
Example #7
Source File: hrnet.py    From AerialDetection with Apache License 2.0
def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None') 
Example #8
Source File: plard.py    From PLARD with MIT License
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, final_relu=True, use_gn=False):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if use_gn:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    nn.GroupNorm(4, planes * block.expansion), 
                )
            else:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(planes * block.expansion),
                )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation, use_gn=use_gn))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks-1):
            layers.append(block(self.inplanes, planes, dilation=dilation, use_gn=use_gn))
        layers.append(block(self.inplanes, planes, dilation=dilation, use_gn=use_gn, final_relu=final_relu))

        return nn.Sequential(*layers) 
Example #9
Source File: net.py    From Parsing-R-CNN with MIT License
def make_norm(c, norm='bn', eps=1e-5, an_k=10):
    if norm == 'bn':
        return nn.BatchNorm2d(c, eps=eps)
    elif norm == 'affine':
        return ops.AffineChannel2d(c)
    elif norm == 'gn':
        group = 32 if c >= 32 else c
        assert c % group == 0
        return nn.GroupNorm(group, c, eps=eps)
    elif norm == 'an_bn':
        return ops.MixtureBatchNorm2d(c, an_k)
    elif norm == 'an_gn':
        group = 32 if c >= 32 else c
        assert c % group == 0
        return ops.MixtureGroupNorm(c, group, an_k)
    elif norm == 'none':
        return None
    else:
        return nn.BatchNorm2d(c, eps=eps) 
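Assuming the snippet above is importable as make_norm, the 'gn' branch caps the group count at the channel count, e.g.:

norm = make_norm(64, norm='gn')  # -> nn.GroupNorm(32, 64, eps=1e-5)
norm = make_norm(16, norm='gn')  # fewer than 32 channels -> nn.GroupNorm(16, 16, eps=1e-5)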
Example #10
Source File: blocks.py    From nussl with MIT License
def __init__(self, num_layers, chunk_size, hop_size, in_features,
                 bottleneck_size, skip_connection=False, **kwargs):
        super().__init__()
        
        self.chunk_size = chunk_size
        self.hop_size = hop_size
        
        blocks = []
        for i in range(num_layers):
            _block = DualPathBlock(n_features=bottleneck_size, **kwargs)
            blocks.append(_block)
            self.add_module(f'layer{i}', _block)
        self.layers = blocks
        self.skip_connection = skip_connection
        self.prelu = nn.PReLU()
        self.bottleneck = nn.Linear(in_features, bottleneck_size)
        self.bottleneck_norm = nn.GroupNorm(1, in_features)
        self.inv_bottleneck = nn.Linear(
            bottleneck_size, in_features)
        self.output_norm = nn.GroupNorm(1, in_features) 
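Here nn.GroupNorm(1, in_features) puts all channels in a single group, so each example is normalized over every feature at once. A small sketch of the resulting equivalence to LayerNorm, assuming affine parameters are disabled on both sides:

import torch
import torch.nn as nn

x = torch.randn(4, 16, 100)  # (N, C, L)
gn = nn.GroupNorm(1, 16, affine=False)
ln = nn.LayerNorm([16, 100], elementwise_affine=False)
assert torch.allclose(gn(x), ln(x), atol=1e-5)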
Example #11
Source File: vovnet.py    From Parsing-R-CNN with MIT License
def _init_weights(self):
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.0001)
                nn.init.constant_(m.bias, 0)
        # zero init deform conv offset
        for m in self.modules():
            if isinstance(m, ops.DeformConvPack):
                nn.init.constant_(m.conv_offset.weight, 0)
                nn.init.constant_(m.conv_offset.bias, 0)
            if isinstance(m, ops.ModulatedDeformConvPack):
                nn.init.constant_(m.conv_offset_mask.weight, 0)
                nn.init.constant_(m.conv_offset_mask.bias, 0) 
Example #12
Source File: nonlocal2d.py    From Parsing-R-CNN with MIT License
def __init__(self, dim_in, dim_inner, dim_out, use_gn=False, use_scale=True):
        super().__init__()
        self.dim_inner = dim_inner
        self.use_gn = use_gn
        self.use_scale = use_scale

        self.theta_scale1 = Conv2d(dim_in, dim_inner, 1, stride=1, padding=0)
        self.theta_scale2 = Conv2d(dim_in, dim_inner * 4, 1, stride=2, padding=0)
        self.theta_scale3 = Conv2d(dim_in, dim_inner * 16, 1, stride=4, padding=0)

        self.phi = Conv2d(dim_in, dim_inner, 1, stride=1, padding=0)
        self.g = Conv2d(dim_in, dim_inner, 1, stride=1, padding=0)

        self.out = Conv2d(dim_inner, dim_out, 1, stride=1, padding=0)
        if self.use_gn:
            self.gn = nn.GroupNorm(32, dim_out, eps=1e-5)

        self.apply(self._init_modules) 
Example #13
Source File: hrnet.py    From GCNet with Apache License 2.0
def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None') 
Example #14
Source File: resnet.py    From GCNet with Apache License 2.0
def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m, 'conv2_offset'):
                        constant_init(m.conv2_offset, 0)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None') 
Example #15
Source File: resnet.py    From cgnl-network.pytorch with MIT License
def __init__(self, inplanes, planes, use_scale=False, groups=None):
        self.use_scale = use_scale
        self.groups = groups

        super(SpatialCGNL, self).__init__()
        # conv theta
        self.t = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv phi
        self.p = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv g
        self.g = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv z
        self.z = nn.Conv2d(planes, inplanes, kernel_size=1, stride=1,
                           groups=self.groups, bias=False)
        self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)

        if self.use_scale:
            cprint("=> WARN: SpatialCGNL block uses 'SCALE'", 'yellow')
        if self.groups:
            cprint("=> WARN: SpatialCGNL block uses '{}' groups".format(self.groups),
                   'yellow')
Example #16
Source File: models.py    From adversarial-attacks-pytorch with MIT License
def __init__(self):
        super(Target, self).__init__()
        
        self.conv_layer = nn.Sequential(
            nn.Conv2d(3,96,3), # 96*30*30
            nn.GroupNorm(32, 96),
            nn.ELU(),
            
            nn.Dropout2d(0.2),
            
            nn.Conv2d(96, 96, 3), # 96*28*28
            nn.GroupNorm(32, 96),
            nn.ELU(),
            
            nn.Conv2d(96, 96, 3), # 96*26*26
            nn.GroupNorm(32, 96),
            nn.ELU(),
            
            nn.Dropout2d(0.5),
            
            nn.Conv2d(96, 192, 3), # 192*24*24
            nn.GroupNorm(32, 192),
            nn.ELU(),
            
            nn.Conv2d(192, 192, 3), # 192*22*22
            nn.GroupNorm(32, 192),
            nn.ELU(),
           
            nn.Dropout2d(0.5),
            
            nn.Conv2d(192, 256, 3), # 256*20*20
            nn.GroupNorm(32, 256),
            nn.ELU(),
            
            nn.Conv2d(256, 256, 1), # 256*20*20
            nn.GroupNorm(32, 256),
            nn.ELU(),
            
            nn.Conv2d(256, 10, 1), # 10*20*20
            nn.AvgPool2d(20) # 10*1*1
        ) 
Example #17
Source File: networks_sparse.py    From iSketchNFill with GNU General Public License v3.0
def get_norm(planes,norm_type='batch',num_groups=4):
    if norm_type == 'batch':
        norm_layer = nn.BatchNorm2d(planes, affine=True)
    elif norm_type == 'instance':
        norm_layer = nn.InstanceNorm2d(planes, affine=False)
    elif norm_type == 'group':
        norm_layer = nn.GroupNorm(num_groups,planes)
    elif norm_type == 'adain':
        norm_layer = AdaptiveInstanceNorm2d(planes)
    elif norm_type == 'none':
        norm_layer = None
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer 
Example #18
Source File: conditional_mnist_ebm.py    From torchsupport with MIT License
def __init__(self, depth=4):
    super(ConvEnergy, self).__init__()
    self.preprocess = nn.Conv2d(1, 32, 1)
    self.blocks = nn.ModuleList([
      spectral_norm(nn.Conv2d(32, 32, 3, padding=1))
      for idx in range(depth)
    ])
    self.bn = nn.ModuleList([
      nn.GroupNorm(8, 32)
      for idx in range(depth)
    ])
    self.postprocess = nn.Conv2d(32, 128, 1)
    self.condition = MLP(10, 128, depth=3, batch_norm=False, normalization=spectral_norm)
    self.combine = MLP(128, 1, hidden_size=64, depth=3, batch_norm=False, normalization=spectral_norm) 
Example #19
Source File: resnet.py    From mmdetection with Apache License 2.0
def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m.conv2, 'conv_offset'):
                        constant_init(m.conv2.conv_offset, 0)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None') 
Example #20
Source File: backbone.py    From yolact with MIT License
def __init__(self, layers, num_groups=32):
        super().__init__(layers, norm_layer=lambda x: nn.GroupNorm(num_groups, x)) 
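The same norm_layer pattern works with torchvision's ResNet constructors, assuming a torchvision version whose ResNet accepts a norm_layer callable taking the channel count:

import torch.nn as nn
import torchvision

# every channel count in resnet50 is a multiple of 32, so 32 groups is valid
model = torchvision.models.resnet50(norm_layer=lambda c: nn.GroupNorm(32, c))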
Example #21
Source File: scorenet.py    From ncsn with GNU General Public License v3.0
def __init__(self, config):
        super().__init__()
        self.config = config
        nef = config.model.nef * 4
        self.u_net = nn.Sequential(
            # input is (nc) x 10 x 10
            nn.Conv2d(config.data.channels, nef, 4, stride=2, padding=1),
            # nn.Softplus(),
            nn.GroupNorm(4, nef),
            nn.ELU(),
            # state size. (nef) x 5 x 5
            nn.Conv2d(nef, nef * 2, 3, stride=1, padding=1),
            nn.GroupNorm(4, nef * 2),
            # nn.Softplus(),
            nn.ELU(),
            # state size. (nef*2) x 5 x 5
            nn.ConvTranspose2d(nef * 2, nef, 3, stride=1, padding=1),
            nn.GroupNorm(4, nef),
            # nn.Softplus(),
            nn.ELU(),
            # state size. (nef) x 5 x 5
            nn.ConvTranspose2d(nef, config.data.channels, 4, stride=2, padding=1),
            # nn.Softplus(),
            nn.ELU(),
        )
        self.fc = nn.Sequential(
            nn.Linear(config.data.channels * 10 ** 2, 256),
            nn.LayerNorm(256),
            nn.ELU(),
            nn.Linear(256, config.data.channels * 10 ** 2)
        ) 
Example #22
Source File: scorenet.py    From ncsn with GNU General Public License v3.0
def __init__(self, in_channel, out_channel, resize=False, act='relu'):
        super().__init__()
        self.resize = resize

        def get_act():
            if act == 'relu':
                return nn.ReLU(inplace=True)
            elif act == 'softplus':
                return nn.Softplus()
            elif act == 'elu':
                return nn.ELU()
            elif act == 'leakyrelu':
                return nn.LeakyReLU(0.2, True)

        if not resize:
            self.main = nn.Sequential(
                nn.ConvTranspose2d(in_channel, out_channel, 3, stride=1, padding=1),
                nn.GroupNorm(8, out_channel),
                get_act(),
                nn.ConvTranspose2d(out_channel, out_channel, 3, stride=1, padding=1),
                nn.GroupNorm(8, out_channel)
            )
        else:
            self.main = nn.Sequential(
                nn.ConvTranspose2d(in_channel, out_channel, 3, stride=1, padding=1),
                nn.GroupNorm(8, out_channel),
                get_act(),
                nn.ConvTranspose2d(out_channel, out_channel, 3, stride=2, padding=1, output_padding=1),
                nn.GroupNorm(8, out_channel)
            )
            self.residual = nn.ConvTranspose2d(in_channel, out_channel, 3, stride=2, padding=1, output_padding=1)

        self.final_act = get_act() 
Example #23
Source File: scorenet.py    From ncsn with GNU General Public License v3.0
def __init__(self, in_channel, out_channel, resize=False, act='relu'):
        super().__init__()
        self.resize = resize

        def get_act():
            if act == 'relu':
                return nn.ReLU(inplace=True)
            elif act == 'softplus':
                return nn.Softplus()
            elif act == 'elu':
                return nn.ELU()
            elif act == 'leakyrelu':
                return nn.LeakyReLU(0.2, inplace=True)

        if not resize:
            self.main = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, 3, stride=1, padding=1),
                nn.GroupNorm(8, out_channel),
                get_act(),
                nn.Conv2d(out_channel, out_channel, 3, stride=1, padding=1),
                nn.GroupNorm(8, out_channel)
            )
        else:
            self.main = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, 3, stride=2, padding=1),
                nn.GroupNorm(8, out_channel),
                get_act(),
                nn.Conv2d(out_channel, out_channel, 3, stride=1, padding=1),
                nn.GroupNorm(8, out_channel)
            )
            self.residual = nn.Conv2d(in_channel, out_channel, 3, stride=2, padding=1)

        self.final_act = get_act() 
Example #24
Source File: weight_init.py    From vedaseg with Apache License 2.0
def init_weights(modules):
    for m in modules:
        if isinstance(m, nn.Conv2d):
            kaiming_init(m)
        elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
            constant_init(m, 1) 
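kaiming_init and constant_init here are mmcv-style helpers; a rough plain-PyTorch equivalent of the same scheme, using torch.nn.init directly (the function name is illustrative):

import torch.nn as nn

def init_weights_plain(modules):
    for m in modules:
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)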
Example #25
Source File: resnet.py    From cgnl-network.pytorch with MIT License
def __init__(self, inplanes, planes, use_scale=False, groups=None, order=2):
        self.use_scale = use_scale
        self.groups = groups
        self.order = order

        super(SpatialCGNLx, self).__init__()
        # conv theta
        self.t = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv phi
        self.p = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv g
        self.g = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv z
        self.z = nn.Conv2d(planes, inplanes, kernel_size=1, stride=1,
                           groups=self.groups, bias=False)
        self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)

        if self.use_scale:
            cprint("=> WARN: SpatialCGNLx block uses 'SCALE'", 'yellow')
        if self.groups:
            cprint("=> WARN: SpatialCGNLx block uses '{}' groups".format(self.groups),
                   'yellow')

        cprint('=> WARN: The Taylor expansion order in SpatialCGNLx block is {}'.format(self.order),
               'yellow')
Example #26
Source File: resnet.py    From cgnl-network.pytorch with MIT License
def __init__(self, inplanes, planes, use_scale=False, groups=None, order=2):
        self.use_scale = use_scale
        self.groups = groups
        self.order = order

        super(SpatialCGNLx, self).__init__()
        # conv theta
        self.t = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv phi
        self.p = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv g
        self.g = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv z
        self.z = nn.Conv2d(planes, inplanes, kernel_size=1, stride=1,
                           groups=self.groups, bias=False)
        self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)

        if self.use_scale:
            cprint("=> WARN: SpatialCGNLx block uses 'SCALE'", 'yellow')
        if self.groups:
            cprint("=> WARN: SpatialCGNLx block uses '{}' groups".format(self.groups),
                   'yellow')

        cprint('=> WARN: The Taylor expansion order in SpatialCGNLx block is {}'.format(self.order),
               'yellow')
Example #27
Source File: operations.py    From lightDSFD with MIT License
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True, bn=False):
        super(SepConv, self).__init__()
        if not bn:
            op = nn.Sequential(
                # nn.ReLU(),
                nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=True,),
                nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=True),
            )
        else:
            if cfg['GN']:
                bn_layer = nn.GroupNorm(32, C_out)
            elif cfg["syncBN"]:
                bn_layer = nn.SyncBatchNorm(C_out)
            else:
                bn_layer = nn.BatchNorm2d(C_out)
                
            op = nn.Sequential(
                # nn.ReLU(),
                nn.Conv2d(
                    C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False,
                ),
                nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
                bn_layer,
            )

        if RELU_FIRST:
            self.op = nn.Sequential(nn.ReLU())
            # self.op.add_module('0', nn.ReLU())
            for i in range(1, len(op)+1):
                self.op.add_module(str(i), op[i-1])
        else:
            self.op = op
            self.op.add_module(str(len(op)), nn.ReLU())
        # self.op = op 
Example #28
Source File: models.py    From domainadaptation with Apache License 2.0
def __init__(self, in_feat, out_feat, drop_rate=0.4, bn_momentum=0.1):
        super(DownConv, self).__init__()
        self.conv1 = nn.Conv2d(in_feat, out_feat, kernel_size=3, padding=1)
        self.conv1_bn = nn.GroupNorm(32, out_feat)
        self.conv1_drop = nn.Dropout2d(drop_rate)

        self.conv2 = nn.Conv2d(out_feat, out_feat, kernel_size=3, padding=1)
        self.conv2_bn = nn.GroupNorm(32, out_feat)
        self.conv2_drop = nn.Dropout2d(drop_rate) 
Example #29
Source File: ACNN.py    From Text_Segmentation_Image_Inpainting with GNU General Public License v3.0
def make_body(self, channels, atrous_rates, num_groups):
        m = []
        for i in atrous_rates:
            m.append(nn.Sequential(
                GroupNorm2D(num_groups=num_groups, num_channels=channels),
                # nn.GroupNorm(num_groups, num_channels=channels),
                self.act_fn,
                nn.Conv2d(channels, channels, kernel_size=3, padding=(2 * i + 1) // 2, dilation=i, bias=False),
            ))
        m.append(SpatialChannelSqueezeExcitation(channels, reduction=16))
        return nn.Sequential(*m) 
Example #30
Source File: ACNN.py    From Text_Segmentation_Image_Inpainting with GNU General Public License v3.0
def make_body(self, channels, atrous_rates, num_groups):
        m = []
        for i in atrous_rates:
            m.append(nn.Sequential(
                GroupNorm2D(num_groups=num_groups, num_channels=channels),
                # nn.GroupNorm(num_groups, num_channels=channels),
                self.act_fn,
                nn.Conv2d(channels, channels, kernel_size=3, padding=(2 * i + 1) // 2, dilation=i, bias=False),
            ))
        return nn.Sequential(*m)