Python torch.nn.AvgPool2d() Examples

The following are 26 code examples of torch.nn.AvgPool2d(), collected from open-source projects. Each example notes the project and source file it was taken from. You may also want to check out all available functions and classes of the torch.nn module.
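
As a quick orientation before the project snippets, here is a minimal sketch, not taken from any project below, of the signature these examples rely on: nn.AvgPool2d(kernel_size, stride, padding) averages each kernel_size x kernel_size window, stride defaults to kernel_size, padding defaults to 0, and each output spatial dimension is floor((in + 2*padding - kernel_size) / stride) + 1.

import torch
import torch.nn as nn

# kernel 3, stride 2, padding 1: a common downsampling configuration
pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)

x = torch.randn(1, 64, 56, 56)
print(pool(x).shape)  # torch.Size([1, 64, 28, 28]); floor((56 + 2*1 - 3) / 2) + 1 = 28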
Example #1
Source File: model.py    From cat-bbs with MIT License
def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(MyResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # note the increasing dilation
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilation=1)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)

        # these layers will not be used
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
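
The nn.AvgPool2d(7) defined above (unused in this particular model, but applied in several ResNet-style examples below) assumes the final feature map is exactly 7x7, which corresponds to a 224x224 input. A short illustrative sketch, not from the project, of that shape relation and of the adaptive alternative for other resolutions:

import torch
import torch.nn as nn

x = torch.randn(1, 512, 7, 7)                # last ResNet feature map for a 224x224 input
print(nn.AvgPool2d(7)(x).shape)              # torch.Size([1, 512, 1, 1])

# For other input sizes the feature map is no longer 7x7; AdaptiveAvgPool2d(1)
# reduces any spatial size to 1x1.
y = torch.randn(1, 512, 10, 10)
print(nn.AdaptiveAvgPool2d(1)(y).shape)      # torch.Size([1, 512, 1, 1])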
Example #2
Source File: SemBranch.py    From Semantic-Aware-Scene-Recognition with MIT License
def __init__(self, scene_classes, semantic_classes=151):
        super(SemBranch, self).__init__()

        # Semantic Branch
        self.in_block_sem = nn.Sequential(
            nn.Conv2d(semantic_classes + 1, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        self.in_block_sem_1 = BasicBlockSem(64, 128, kernel_size=3, stride=2, padding=1)
        self.in_block_sem_2 = BasicBlockSem(128, 256, kernel_size=3, stride=2, padding=1)
        self.in_block_sem_3 = BasicBlockSem(256, 512, kernel_size=3, stride=2, padding=1)

        # Semantic Scene Classification Layers
        self.dropout = nn.Dropout(0.3)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc_SEM = nn.Linear(512, scene_classes)

        # Loss
        self.criterion = nn.CrossEntropyLoss() 
Example #3
Source File: cnns.py    From cvpr2018-hnd with MIT License
def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #4
Source File: vgg.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.ReLU(inplace=True)]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size()) 
Example #5
Source File: vgg.py    From bgd with MIT License
def _make_layers(cfg, use_bn=True):
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                if use_bn:
                    layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1, bias=False),
                               nn.BatchNorm2d(x),
                               nn.ReLU(inplace=True)]
                else:
                    layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                               nn.ReLU(inplace=True)]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers) 
Example #6
Source File: resnet.py    From cascade-rcnn_Pytorch with MIT License
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                 bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) # change
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)   # different
    self.avgpool = nn.AvgPool2d(7)
    self.fc = nn.Linear(512 * block.expansion, num_classes)

    for m in self.modules():
      if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
      elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_() 
Example #7
Source File: models.py    From VTuber_Unity with MIT License
def __init__(self, block=Bottleneck, layers=[3, 8, 36, 3], num_classes=68):
        self.inplanes = 64
        super(ResNetDepth, self).__init__()
        self.conv1 = nn.Conv2d(3 + 68, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #8
Source File: convert_Basset_to_pytorch.py    From models with MIT License
def simplify_source(s):
    s = map(lambda x: x.replace(',(1, 1),(0, 0),1,1,bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',(0, 0),1,1,bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',1,1,bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace('),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',1e-05,0.1,True),#BatchNorm2d',')'),s)
    s = map(lambda x: x.replace('),#BatchNorm2d',')'),s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#MaxPool2d',')'),s)
    s = map(lambda x: x.replace(',ceil_mode=False),#MaxPool2d',')'),s)
    s = map(lambda x: x.replace('),#MaxPool2d',')'),s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#AvgPool2d',')'),s)
    s = map(lambda x: x.replace(',ceil_mode=False),#AvgPool2d',')'),s)
    s = map(lambda x: x.replace(',bias=True)),#Linear',')), # Linear'),s)
    s = map(lambda x: x.replace(')),#Linear',')), # Linear'),s)

    s = map(lambda x: '{},\n'.format(x),s)
    s = map(lambda x: x[1:],s)
    s = reduce(lambda x,y: x+y, s)
    return s 
Example #9
Source File: googlenet.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def __init__(self):
        super(GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )

        self.a3 = Inception(192,  64,  96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        self.a4 = Inception(480, 192,  96, 208, 16,  48,  64)
        self.b4 = Inception(512, 160, 112, 224, 24,  64,  64)
        self.c4 = Inception(512, 128, 128, 256, 24,  64,  64)
        self.d4 = Inception(512, 112, 144, 288, 32,  64,  64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 10) 
Example #10
Source File: sadecoder.py    From ACAN with MIT License
def __init__(self, in_channels=2048, key_channels=512, value_channels=2048, height=224, width=304):
        super(SADecoder, self).__init__()
        out_channels = 512
        self.saconv = SelfAttentionBlock_(in_channels, key_channels, value_channels)
        self.image_context = nn.Sequential(OrderedDict([
            ('avgpool', nn.AvgPool2d((height // 8, width // 8), padding=0)),
            ('dropout', nn.Dropout2d(0.5, inplace=True)),
            ('reshape1', Reshape(2048)),
            ('linear1', nn.Linear(2048, 512)),
            ('relu1', nn.ReLU(inplace=True)),
            ('linear2', nn.Linear(512, 512)),
            ('relu2', nn.ReLU(inplace=True)),
            ('reshape2', Reshape(512, 1, 1)),
            ('upsample', nn.Upsample(size=(height // 8, width // 8), mode='bilinear', align_corners=True))]))
        self.merge = nn.Sequential(OrderedDict([
            ('dropout1', nn.Dropout2d(0.5, inplace=True)),
            ('conv1',    nn.Conv2d(value_channels+out_channels, value_channels, kernel_size=1, stride=1)),
            ('relu',     nn.ReLU(inplace=True)),
            ('dropout2', nn.Dropout2d(0.5, inplace=False))])) 
Example #11
Source File: ResNet.py    From transferlearning with MIT License
def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.baselayer = [self.conv1, self.bn1, self.layer1, self.layer2, self.layer3, self.layer4]
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #12
Source File: ResNet.py    From transferlearning with MIT License
def __init__(self, in_channels, pool_features, num_classes):
        super(InceptionA, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)

        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)

        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)

        self.avg_pool = nn.AvgPool2d(7, stride=1)

        self.source_fc = nn.Linear(288, num_classes) 
Example #13
Source File: ResNet.py    From transferlearning with MIT License
def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.baselayer = [self.conv1, self.bn1, self.layer1, self.layer2, self.layer3, self.layer4]
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #14
Source File: Inception_all.py    From Pytorch-Networks with MIT License
def __init__(self,in_dim,conv1,conv3_r,conv3_1,conv3_2,conv5_12,conv5_34,conv5,pool):
        super(_InceptionB,self).__init__()
        self.conv1_branch = BasicConv2d(in_dim,conv1,1,1,0)              
        self.conv3_branch = nn.Sequential(
                            BasicConv2d(in_dim,conv3_r,1,1,0),
                            BasicConv2d(conv3_r,conv3_1,(1,7),1,(0,3)),
                            BasicConv2d(conv3_1,conv3_2,(7,1),1,(3,0)),)
        self.conv5_branch = nn.Sequential(
                            BasicConv2d(in_dim,conv5_12,1,1,0),
                            BasicConv2d(conv5_12,conv5_12,(7,1),1,(3,0)),
                            BasicConv2d(conv5_12,conv5_34,(1,7),1,(0,3)),
                            BasicConv2d(conv5_34,conv5_34,(7,1),1,(3,0)),
                            BasicConv2d(conv5_34,conv5,(1,7),1,(0,3)),)
        self.pool_branch = nn.Sequential(
                           nn.AvgPool2d(3,1,1),
                           BasicConv2d(in_dim,pool,1,1,0),) 
Example #15
Source File: pytorch_ops.py    From deep_architect with MIT License
def avg_pool2d(h_kernel_size, h_stride=1):

    def compile_fn(di, dh):
        (_, _, height, width) = di['in'].size()
        padding = nn.ZeroPad2d(
            calculate_same_padding(height, width, dh['stride'],
                                   dh['kernel_size']))
        avg_pool = nn.AvgPool2d(dh['kernel_size'], stride=dh['stride'])

        def fn(di):
            x = padding(di['in'])
            return {'out': avg_pool(x)}

        return fn, [padding, avg_pool]

    return siso_pytorch_module('AvgPool2D', compile_fn, {
        'kernel_size': h_kernel_size,
        'stride': h_stride
    }) 
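
The wrapper above zero-pads the input before pooling so the output keeps the input's spatial size ("same" padding); calculate_same_padding and siso_pytorch_module are project helpers not shown here. A standalone sketch of the same idea, assuming stride 1 and an odd kernel, where padding of kernel_size // 2 on each side preserves height and width:

import torch
import torch.nn as nn

k = 3
same_avg_pool = nn.Sequential(nn.ZeroPad2d(k // 2), nn.AvgPool2d(kernel_size=k, stride=1))

x = torch.randn(1, 16, 28, 28)
print(same_avg_pool(x).shape)  # torch.Size([1, 16, 28, 28]); spatial size preserved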
Example #16
Source File: ShuffleNet.py    From Pytorch-Networks with MIT License
def __init__(self,in_dim,out_dim,stride,groups):
        super(BottleNeck,self).__init__()
        self.subconv_1 = nn.Sequential(
            nn.Conv2d(in_dim,int(out_dim/self.expansion),1,stride,0,bias=False),
            nn.BatchNorm2d(int(out_dim/self.expansion)),
            nn.ReLU(inplace=True),)
        self.groups = groups
        self.subconv_2 = nn.Sequential(
            nn.Conv2d(int(out_dim/self.expansion),
                      int(out_dim/self.expansion),3,1,1,bias=False,groups=groups),
            nn.BatchNorm2d(int(out_dim/self.expansion)),
            nn.ReLU(inplace=True),)
        self.subconv_3 = nn.Sequential(
            nn.Conv2d(int(out_dim/self.expansion),out_dim,1,1,0,bias=False),
            nn.BatchNorm2d(out_dim),)
        if in_dim == out_dim and stride == 1:
            self.downsample = None
        else :
            self.downsample = nn.Sequential(
                 nn.AvgPool2d(3,stride,1),
                 nn.Conv2d(in_dim,out_dim,1,1,0,bias=False),
                 nn.BatchNorm2d(out_dim),
            ) 
Example #17
Source File: hrnet.py    From Parsing-R-CNN with MIT License
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, conv='normal'):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if self.avg_down:
                downsample = nn.Sequential(
                    nn.AvgPool2d(kernel_size=stride, stride=stride),
                    nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=1, bias=False),
                    make_norm(planes * block.expansion, norm=self.norm),
                )
            else:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                    make_norm(planes * block.expansion, norm=self.norm),
                )

        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation, self.norm, conv, self.use_se, True,
                            downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, 1, dilation, self.norm, conv, self.use_se, True))

        return nn.Sequential(*layers) 
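
With avg_down enabled, the shortcut above downsamples by average pooling followed by a stride-1 1x1 convolution, so every input position contributes, instead of a strided 1x1 convolution that skips positions. A simplified sketch of the two shortcut styles, assuming plain nn.BatchNorm2d in place of the project's make_norm helper:

import torch.nn as nn

def make_shortcut(in_ch, out_ch, stride, avg_down=False):
    # Illustrative only; the project code also routes normalization through make_norm.
    if avg_down and stride != 1:
        # Average-pool first, then a stride-1 1x1 conv: no input positions are skipped.
        return nn.Sequential(
            nn.AvgPool2d(kernel_size=stride, stride=stride),
            nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )
    # Strided 1x1 conv: at stride 2, three quarters of the positions are ignored.
    return nn.Sequential(
        nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=stride, bias=False),
        nn.BatchNorm2d(out_ch),
    )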
Example #18
Source File: helper.py    From torchscope with Apache License 2.0
def compute_flops(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_flops(module, inp, out) // 2
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_flops(module, inp, out) // 2
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d)):
        return compute_Pool2d_flops(module, inp, out) // 2
    elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_flops(module, inp, out) // 2
    elif isinstance(module, nn.Upsample):
        return compute_Upsample_flops(module, inp, out) // 2
    elif isinstance(module, nn.Linear):
        return compute_Linear_flops(module, inp, out) // 2
    else:
        return 0 
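
compute_Pool2d_flops itself is not shown here. As a rough illustrative stand-in (an assumption, not the torchscope implementation), pooling cost is commonly approximated as one operation per kernel element per output element:

import torch
import torch.nn as nn

def approx_pool2d_flops(module, out):
    # Hypothetical estimate: kernel_area operations per output element.
    k = module.kernel_size
    kh, kw = (k, k) if isinstance(k, int) else k
    return out.numel() * kh * kw

pool = nn.AvgPool2d(7, stride=1)
x = torch.randn(1, 512, 7, 7)
print(approx_pool2d_flops(pool, pool(x)))  # 512 output elements * 49 kernel elements = 25088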
Example #19
Source File: helper.py    From torchscope with Apache License 2.0
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        return 0 
Example #20
Source File: ResNet.py    From overhaul-distillation with MIT License
def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #21
Source File: resnet.py    From Visualizing-CNNs-for-monocular-depth-estimation with MIT License
def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #22
Source File: resnet_ilsvrc.py    From L2T-ww with MIT License
def __init__(self, block, layers, num_classes=1000, meta=None):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.lwf = False

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #23
Source File: inception4.py    From yolo2-pytorch with GNU Lesser General Public License v3.0
def __init__(self, config_channels, prefix, bn=True, ratio=1):
        nn.Module.__init__(self)
        channels = config_channels.channels
        self.branch0 = Conv2d(config_channels.channels, config_channels(int(96 * ratio), '%s.branch0.conv.weight' % prefix), kernel_size=1, stride=1, bn=bn)
        # branch1
        config_channels.channels = channels
        branch = []
        branch.append(Conv2d(config_channels.channels, config_channels(int(64 * ratio), '%s.branch1.%d.conv.weight' % (prefix, len(branch))), kernel_size=1, stride=1, bn=bn))
        branch.append(Conv2d(config_channels.channels, config_channels(int(96 * ratio), '%s.branch1.%d.conv.weight' % (prefix, len(branch))), kernel_size=3, stride=1, padding=1, bn=bn))
        self.branch1 = nn.Sequential(*branch)
        # branch2
        config_channels.channels = channels
        branch = []
        branch.append(Conv2d(config_channels.channels, config_channels(int(64 * ratio), '%s.branch2.%d.conv.weight' % (prefix, len(branch))), kernel_size=1, stride=1, bn=bn))
        branch.append(Conv2d(config_channels.channels, config_channels(int(96 * ratio), '%s.branch2.%d.conv.weight' % (prefix, len(branch))), kernel_size=3, stride=1, padding=1, bn=bn))
        branch.append(Conv2d(config_channels.channels, config_channels(int(96 * ratio), '%s.branch2.%d.conv.weight' % (prefix, len(branch))), kernel_size=3, stride=1, padding=1, bn=bn))
        self.branch2 = nn.Sequential(*branch)
        #branch3
        config_channels.channels = channels
        branch = []
        branch.append(nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False))
        branch.append(Conv2d(config_channels.channels, config_channels(int(96 * ratio), '%s.branch3.%d.conv.weight' % (prefix, len(branch))), kernel_size=1, stride=1, bn=bn))
        self.branch3 = nn.Sequential(*branch)
        # output
        config_channels.channels = self.branch0.conv.weight.size(0) + self.branch1[-1].conv.weight.size(0) + self.branch2[-1].conv.weight.size(0) + self.branch3[-1].conv.weight.size(0) 
Example #24
Source File: pspnet.py    From pytorch-semantic-segmentation with MIT License
def __init__(self, in_features, out_features, downsize, upsize=60):
        super().__init__()

        self.features = nn.Sequential(
            nn.AvgPool2d(downsize, stride=downsize),
            nn.Conv2d(in_features, out_features, 1, bias=False),
            nn.BatchNorm2d(out_features, momentum=.95),
            nn.ReLU(inplace=True),
            nn.UpsamplingBilinear2d(upsize)
        ) 
Example #25
Source File: linknet.py    From PLARD with MIT License
def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True):
        super(linknet, self).__init__()
        self.is_deconv = is_deconv
        self.in_channels = in_channels
        self.is_batchnorm = is_batchnorm
        self.feature_scale = feature_scale
        self.layers = [2, 2, 2, 2] # Currently hardcoded for ResNet-18

        filters = [64, 128, 256, 512]
        filters = [x // self.feature_scale for x in filters]  # integer division so channel counts stay ints

        self.inplanes = filters[0]


        # Encoder
        self.convbnrelu1 = conv2DBatchNormRelu(in_channels=3, k_size=7, n_filters=64,
                                               padding=3, stride=2, bias=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        block = residualBlock
        self.encoder1 = self._make_layer(block, filters[0], self.layers[0])
        self.encoder2 = self._make_layer(block, filters[1], self.layers[1], stride=2)
        self.encoder3 = self._make_layer(block, filters[2], self.layers[2], stride=2)
        self.encoder4 = self._make_layer(block, filters[3], self.layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)


        # Decoder
        self.decoder4 = linknetUp(filters[3], filters[2])
        self.decoder3 = linknetUp(filters[2], filters[1])
        self.decoder2 = linknetUp(filters[1], filters[0])
        self.decoder1 = linknetUp(filters[0], filters[0])

        # Final Classifier
        self.finaldeconvbnrelu1 = nn.Sequential(nn.ConvTranspose2d(filters[0], 32 // feature_scale, 3, 2, 1),
                                      nn.BatchNorm2d(32 // feature_scale),
                                      nn.ReLU(inplace=True),)
        self.finalconvbnrelu2 = conv2DBatchNormRelu(in_channels=32 // feature_scale, k_size=3, n_filters=32 // feature_scale, padding=1, stride=1)
        self.finalconv3 = nn.Conv2d(32 // feature_scale, n_classes, 2, 2, 0) 
Example #26
Source File: residual_attention_network.py    From Attentive-Filtering-Network with MIT License
def __init__(self):

        super(ResidualAttentionModel, self).__init__()
        
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )
        self.rb1 = ResidualBlock(32, 1)
        self.mpool1 = nn.MaxPool2d(kernel_size=2)
        self.features = nn.Sequential(
            AttentionModule_stg0(32, 32)
        )
        self.classifier = nn.Sequential( # dimension reduction
            CRResidualBlock(32, 8, (4,16)), 
            CRResidualBlock(8, 4, (8,32)), 
            CRResidualBlock(4, 2, (16,64)),
            CRResidualBlock(2, 1, (32,128))
        )
        self.mpool2 = nn.Sequential( # dimension reduction
            nn.BatchNorm2d(1),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=(3,20), stride=2)
        )
        self.fc = nn.Linear(189,1)
        
        ## Weights initialization
        def _weights_init(m):
            classname = m.__class__.__name__
            if classname.find('Conv') != -1:
                xavier_normal_(m.weight)
            elif classname.find('Linear') != -1:
                xavier_normal_(m.weight)
                m.bias.data.zero_()
            elif classname.find('BatchNorm') != -1:
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        
        self.apply(_weights_init)