Python torchvision.models.vgg16_bn() Examples

The following are 17 code examples of torchvision.models.vgg16_bn(). Each example is taken from an open-source project; the source file and project are noted above each snippet. You may also want to check out all available functions/classes of the module torchvision.models, or try the search function.
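For orientation, here is a minimal, self-contained sketch of loading the model and running a forward pass. It assumes a torchvision version that still accepts the pretrained=True flag used throughout the examples on this page (newer releases prefer the weights= argument).

import torch
from torchvision import models

# VGG16 with batch normalization, initialized with ImageNet weights.
model = models.vgg16_bn(pretrained=True)
model.eval()

# Forward pass on a dummy batch of one 3x224x224 image.
with torch.no_grad():
    x = torch.randn(1, 3, 224, 224)
    logits = model(x)            # (1, 1000) ImageNet class scores
    feats = model.features(x)    # (1, 512, 7, 7) convolutional feature maps
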
Example #1
Source File: NetWork.py    From XueLangTianchi with MIT License
def __init__(self):
        super(VGG16_bo, self).__init__()
        model = vgg16_bn(pretrained=True)

        # Set the network name
        self.moduel_name = str("VGG16_bo")

        # Reuse the pretrained feature-extraction layers
        self.features = model.features
        # Freeze their weights
        for param in self.features.parameters():
            param.requires_grad = False

        # Classification layers
        self.classifier = nn.Sequential(
            t.nn.Linear(86528, 4096),
            t.nn.ReLU(),
            t.nn.Dropout(p=0.5),
            t.nn.Linear(4096, 4096),
            t.nn.ReLU(),
            t.nn.Dropout(p=0.5),
            t.nn.Linear(4096, 2)
        )
        # Initialize only the classification layers
        self._initialize_weights() 
Example #2
Source File: HED.py    From pytorch-HED with MIT License
def extract_layer(self, model, backbone_mode, ind):
        #pdb.set_trace()
        if backbone_mode=='vgg16':
            index_dict = {
                1: (0,4), 
                2: (4,9), 
                3: (9,16), 
                4: (16,23),
                5: (23,30) }
        elif backbone_mode=='vgg16_bn':
            index_dict = {
                1: (0,6), 
                2: (6,13), 
                3: (13,23), 
                4: (23,33),
                5: (33,43) }

        start, end = index_dict[ind]
        modified_model = nn.Sequential(*list(model.features.children())[start:end])
        return modified_model 
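The hard-coded ranges differ between the two backbones because vgg16_bn interleaves a BatchNorm2d after every convolution, which shifts every index. A hedged sketch of deriving the same ranges programmatically is shown below; stage_ranges is an illustrative helper name, not part of the pytorch-HED project.

import torch.nn as nn
from torchvision import models

def stage_ranges(features):
    """Split a VGG `features` Sequential at its MaxPool2d layers.

    Returns {stage: (start, end)} matching index_dict above: stage 1 runs up to
    (but not including) the first MaxPool2d, and each later stage starts at the
    previous stage's MaxPool2d.
    """
    pool_idx = [i for i, m in enumerate(features) if isinstance(m, nn.MaxPool2d)]
    bounds = [0] + pool_idx
    return {k: (bounds[k - 1], bounds[k]) for k in range(1, len(bounds))}

print(stage_ranges(models.vgg16_bn(pretrained=False).features))
# -> {1: (0, 6), 2: (6, 13), 3: (13, 23), 4: (23, 33), 5: (33, 43)}
print(stage_ranges(models.vgg16(pretrained=False).features))
# -> {1: (0, 4), 2: (4, 9), 3: (9, 16), 4: (16, 23), 5: (23, 30)}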
Example #3
Source File: featureModels.py    From multi-modal-regression with MIT License
def __init__(self, model_type='vgg13', layer_type='fc6'):
		super().__init__()
		# get model
		if model_type == 'vgg13':
			self.original_model = models.vgg13_bn(pretrained=True)
		elif model_type == 'vgg16':
			self.original_model = models.vgg16_bn(pretrained=True)
		else:
			raise NameError('Unknown model_type passed')
		self.features = self.original_model.features
		if layer_type == 'fc6':
			self.classifier = nn.Sequential(*list(self.original_model.classifier.children())[:2])
		elif layer_type == 'fc7':
			self.classifier = nn.Sequential(*list(self.original_model.classifier.children())[:-2])
		else:
			raise NameError('Unknown layer_type passed') 
Example #4
Source File: test-ww.py    From AutoDL-Projects with MIT License
def main():
  # model = models.vgg19_bn(pretrained=True)
  # _, summary = weight_watcher.analyze(model, alphas=False)
  # for key, value in summary.items():
  #   print('{:10s} : {:}'.format(key, value))

  _, summary = weight_watcher.analyze(models.vgg13(pretrained=True), alphas=False)
  print('vgg-13 : {:}'.format(summary['lognorm']))
  _, summary = weight_watcher.analyze(models.vgg13_bn(pretrained=True), alphas=False)
  print('vgg-13-BN : {:}'.format(summary['lognorm']))
  _, summary = weight_watcher.analyze(models.vgg16(pretrained=True), alphas=False)
  print('vgg-16 : {:}'.format(summary['lognorm']))
  _, summary = weight_watcher.analyze(models.vgg16_bn(pretrained=True), alphas=False)
  print('vgg-16-BN : {:}'.format(summary['lognorm']))
  _, summary = weight_watcher.analyze(models.vgg19(pretrained=True), alphas=False)
  print('vgg-19 : {:}'.format(summary['lognorm']))
  _, summary = weight_watcher.analyze(models.vgg19_bn(pretrained=True), alphas=False)
  print('vgg-19-BN : {:}'.format(summary['lognorm'])) 
Example #5
Source File: model.py    From skorch with BSD 3-Clause "New" or "Revised" License
def __init__(self, pretrained=False):
        super().__init__()
        encoder = models.vgg16_bn(pretrained=pretrained).features

        self.conv1 = encoder[:6]
        self.conv2 = encoder[6:13]
        self.conv3 = encoder[13:23]
        self.conv4 = encoder[23:33]
        self.conv5 = encoder[33:43]

        self.center = nn.Sequential(
            encoder[43],  # MaxPool
            make_decoder_block(512, 512, 256))

        self.dec5 = make_decoder_block(256 + 512, 512, 256)
        self.dec4 = make_decoder_block(256 + 512, 512, 256)
        self.dec3 = make_decoder_block(256 + 256, 256, 64)
        self.dec2 = make_decoder_block(64 + 128, 128, 32)
        self.dec1 = nn.Sequential(
            nn.Conv2d(32 + 64, 32, 3, padding=1), nn.ReLU(inplace=True))
        self.final = nn.Conv2d(32, 1, kernel_size=1) 
Example #6
Source File: utils.py    From PSSR with BSD 3-Clause "New" or "Revised" License
def get_feat_loss():
    vgg_m = vgg16_bn(True).features.cuda().eval()
    requires_grad(vgg_m, False)
    blocks = [i-1 for i,o in enumerate(children(vgg_m)) if isinstance(o,nn.MaxPool2d)]
    feat_loss = FeatureLoss(vgg_m, blocks[2:5], [5,15,2])
    return feat_loss 
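The helpers used here (children, requires_grad, FeatureLoss) come from fastai and the PSSR codebase. A rough plain-PyTorch sketch of the same setup, assuming only torchvision, is given below; the FeatureLoss class itself is project-specific and not reproduced.

import torch.nn as nn
from torchvision import models

# Frozen vgg16_bn feature extractor in eval mode (the original also moves it to CUDA).
vgg_m = models.vgg16_bn(pretrained=True).features.eval()
for p in vgg_m.parameters():
    p.requires_grad = False

# Indices of the layers just before each MaxPool2d -- the ReLU outputs
# commonly tapped as perceptual-loss features.
blocks = [i - 1 for i, layer in enumerate(vgg_m) if isinstance(layer, nn.MaxPool2d)]
# blocks == [5, 12, 22, 32, 42]; the original passes blocks[2:5] to FeatureLoss
# with layer weights [5, 15, 2].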
Example #7
Source File: torchvision_models.py    From models-comparison.pytorch with BSD 3-Clause "New" or "Revised" License
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D") with batch normalization
    """
    model = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg16_bn'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model 
Example #8
Source File: model.py    From University1652-Baseline with MIT License
def __init__(self, class_num, droprate=0.5, stride=2, init_model=None, pool='avg'):
        super(ft_net_VGG16, self).__init__()
        model_ft = models.vgg16_bn(pretrained=True)
        # avg pooling to global pooling
        #if stride == 1:
        #    model_ft.layer4[0].downsample[0].stride = (1,1)
        #    model_ft.layer4[0].conv2.stride = (1,1)

        self.pool = pool
        if pool =='avg+max':
            model_ft.avgpool2 = nn.AdaptiveAvgPool2d((1,1))
            model_ft.maxpool2 = nn.AdaptiveMaxPool2d((1,1))
            self.model = model_ft
            #self.classifier = ClassBlock(4096, class_num, droprate)
        elif pool=='avg':
            model_ft.avgpool2 = nn.AdaptiveAvgPool2d((1,1))
            self.model = model_ft
            #self.classifier = ClassBlock(2048, class_num, droprate)
        elif pool=='max':
            model_ft.maxpool2 = nn.AdaptiveMaxPool2d((1,1))
            self.model = model_ft

        if init_model!=None:
            self.model = init_model.model
            self.pool = init_model.pool
            #self.classifier.add_block = init_model.classifier.add_block 
Example #9
Source File: vgg16_bn.py    From CRAFT-pytorch with MIT License
def __init__(self, pretrained=True, freeze=True):
        super(vgg16_bn, self).__init__()
        model_urls['vgg16_bn'] = model_urls['vgg16_bn'].replace('https://', 'http://')
        vgg_pretrained_features = models.vgg16_bn(pretrained=pretrained).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        for x in range(12):         # conv2_2
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 19):         # conv3_3
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(19, 29):         # conv4_3
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(29, 39):         # conv5_3
            self.slice4.add_module(str(x), vgg_pretrained_features[x])

        # fc6, fc7 without atrous conv
        self.slice5 = torch.nn.Sequential(
                nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
                nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
                nn.Conv2d(1024, 1024, kernel_size=1)
        )

        if not pretrained:
            init_weights(self.slice1.modules())
            init_weights(self.slice2.modules())
            init_weights(self.slice3.modules())
            init_weights(self.slice4.modules())

        init_weights(self.slice5.modules())        # no pretrained model for fc6 and fc7

        if freeze:
            for param in self.slice1.parameters():      # freeze only the first conv block
                param.requires_grad = False 
Example #10
Source File: torchvision_models.py    From pretorched-x with MIT License
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D") with batch normalization
    """
    model = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg16_bn'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model 
Example #11
Source File: torchvision_models.py    From pretrained-models.pytorch with BSD 3-Clause "New" or "Revised" License
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D") with batch normalization
    """
    model = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg16_bn'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model 
Example #12
Source File: torch_imports.py    From pytorch-serverless with MIT License
def vgg16(pre): return children(vgg16_bn(pre))[0] 
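Here children appears to be the fastai helper that lists a module's immediate submodules, so the call returns the convolutional features block of vgg16_bn. A plain-PyTorch equivalent, for reference only:

from torchvision.models import vgg16_bn

def vgg16_features(pretrained=True):
    # The first child of torchvision's VGG is its `features` Sequential.
    return list(vgg16_bn(pretrained=pretrained).children())[0]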
Example #13
Source File: tools.py    From perceptron-benchmark with Apache License 2.0
def load_pytorch_model(model_name):
    import torchvision.models as models
    switcher = {
        'alexnet': lambda: models.alexnet(pretrained=True).eval(),
        "vgg11": lambda: models.vgg11(pretrained=True).eval(),
        "vgg11_bn": lambda: models.vgg11_bn(pretrained=True).eval(),
        "vgg13": lambda: models.vgg13(pretrained=True).eval(),
        "vgg13_bn": lambda: models.vgg13_bn(pretrained=True).eval(),
        "vgg16": lambda: models.vgg16(pretrained=True).eval(),
        "vgg16_bn": lambda: models.vgg16_bn(pretrained=True).eval(),
        "vgg19": lambda: models.vgg19(pretrained=True).eval(),
        "vgg19_bn": lambda: models.vgg19_bn(pretrained=True).eval(),
        "resnet18": lambda: models.resnet18(pretrained=True).eval(),
        "resnet34": lambda: models.resnet34(pretrained=True).eval(),
        "resnet50": lambda: models.resnet50(pretrained=True).eval(),
        "resnet101": lambda: models.resnet101(pretrained=True).eval(),
        "resnet152": lambda: models.resnet152(pretrained=True).eval(),
        "squeezenet1_0": lambda: models.squeezenet1_0(pretrained=True).eval(),
        "squeezenet1_1": lambda: models.squeezenet1_1(pretrained=True).eval(),
        "densenet121": lambda: models.densenet121(pretrained=True).eval(),
        "densenet161": lambda: models.densenet161(pretrained=True).eval(),
        "densenet201": lambda: models.densenet201(pretrained=True).eval(),
        "inception_v3": lambda: models.inception_v3(pretrained=True).eval(),
    }

    _load_model = switcher.get(model_name, None)
    _model = _load_model()
    return _model 
Example #14
Source File: tools.py    From perceptron-benchmark with Apache License 2.0
def _load_pytorch_model(model_name, summary):
    import torchvision.models as models
    switcher = {
        'alexnet': lambda: models.alexnet(pretrained=True).eval(),
        "vgg11": lambda: models.vgg11(pretrained=True).eval(),
        "vgg11_bn": lambda: models.vgg11_bn(pretrained=True).eval(),
        "vgg13": lambda: models.vgg13(pretrained=True).eval(),
        "vgg13_bn": lambda: models.vgg13_bn(pretrained=True).eval(),
        "vgg16": lambda: models.vgg16(pretrained=True).eval(),
        "vgg16_bn": lambda: models.vgg16_bn(pretrained=True).eval(),
        "vgg19": lambda: models.vgg19(pretrained=True).eval(),
        "vgg19_bn": lambda: models.vgg19_bn(pretrained=True).eval(),
        "resnet18": lambda: models.resnet18(pretrained=True).eval(),
        "resnet34": lambda: models.resnet34(pretrained=True).eval(),
        "resnet50": lambda: models.resnet50(pretrained=True).eval(),
        "resnet101": lambda: models.resnet101(pretrained=True).eval(),
        "resnet152": lambda: models.resnet152(pretrained=True).eval(),
        "squeezenet1_0": lambda: models.squeezenet1_0(pretrained=True).eval(),
        "squeezenet1_1": lambda: models.squeezenet1_1(pretrained=True).eval(),
        "densenet121": lambda: models.densenet121(pretrained=True).eval(),
        "densenet161": lambda: models.densenet161(pretrained=True).eval(),
        "densenet201": lambda: models.densenet201(pretrained=True).eval(),
        "inception_v3": lambda: models.inception_v3(pretrained=True).eval(),
    }

    _load_model = switcher.get(model_name, None)
    _model = _load_model()
    import torch
    if torch.cuda.is_available():
        _model = _model.cuda()
    from perceptron.models.classification.pytorch import PyTorchModel as ClsPyTorchModel
    import numpy as np
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    pmodel = ClsPyTorchModel(
        _model, bounds=(
            0, 1), num_classes=1000, preprocessing=(
            mean, std))
    return pmodel 
Example #15
Source File: segnet.py    From pytorch_segmentation with MIT License
def __init__(self, num_classes, in_channels=3, pretrained=True, freeze_bn=False, **_):
        super(SegNet, self).__init__()
        vgg_bn = models.vgg16_bn(pretrained= pretrained)
        encoder = list(vgg_bn.features.children())

        # Adjust the number of input channels
        if in_channels != 3:
            encoder[0] = nn.Conv2d(in_channels, 64, kernel_size=3, stride=1, padding=1)

        # Encoder, VGG without any maxpooling
        self.stage1_encoder = nn.Sequential(*encoder[:6])
        self.stage2_encoder = nn.Sequential(*encoder[7:13])
        self.stage3_encoder = nn.Sequential(*encoder[14:23])
        self.stage4_encoder = nn.Sequential(*encoder[24:33])
        self.stage5_encoder = nn.Sequential(*encoder[34:-1])
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)

        # Decoder, same as the encoder but reversed, maxpool will not be used
        decoder = encoder
        decoder = [i for i in list(reversed(decoder)) if not isinstance(i, nn.MaxPool2d)]
        # Replace the last conv layer
        decoder[-1] = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        # Reversing the list also reverses each conv->BN->ReLU triple; restore their order
        decoder = [item for i in range(0, len(decoder), 3) for item in decoder[i:i+3][::-1]]
        # Replace some conv layers & batchN after them
        for i, module in enumerate(decoder):
            if isinstance(module, nn.Conv2d):
                if module.in_channels != module.out_channels:
                    decoder[i+1] = nn.BatchNorm2d(module.in_channels)
                    decoder[i] = nn.Conv2d(module.out_channels, module.in_channels, kernel_size=3, stride=1, padding=1)

        self.stage1_decoder = nn.Sequential(*decoder[0:9])
        self.stage2_decoder = nn.Sequential(*decoder[9:18])
        self.stage3_decoder = nn.Sequential(*decoder[18:27])
        self.stage4_decoder = nn.Sequential(*decoder[27:33])
        self.stage5_decoder = nn.Sequential(*decoder[33:],
                nn.Conv2d(64, num_classes, kernel_size=3, stride=1, padding=1)
        )
        self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)

        self._initialize_weights(self.stage1_decoder, self.stage2_decoder, self.stage3_decoder,
                                    self.stage4_decoder, self.stage5_decoder)
        if freeze_bn: self.freeze_bn() 
Example #16
Source File: model.py    From SCNN_Pytorch with MIT License
def net_init(self, input_size, ms_ks):
        input_w, input_h = input_size
        self.fc_input_feature = 5 * int(input_w/16) * int(input_h/16)
        self.backbone = models.vgg16_bn(pretrained=self.pretrained).features

        # ----------------- process backbone -----------------
        for i in [34, 37, 40]:
            conv = self.backbone._modules[str(i)]
            dilated_conv = nn.Conv2d(
                conv.in_channels, conv.out_channels, conv.kernel_size, stride=conv.stride,
                padding=tuple(p * 2 for p in conv.padding), dilation=2, bias=(conv.bias is not None)
            )
            dilated_conv.load_state_dict(conv.state_dict())
            self.backbone._modules[str(i)] = dilated_conv
        self.backbone._modules.pop('33')
        self.backbone._modules.pop('43')

        # ----------------- SCNN part -----------------
        self.layer1 = nn.Sequential(
            nn.Conv2d(512, 1024, 3, padding=4, dilation=4, bias=False),
            nn.BatchNorm2d(1024),
            nn.ReLU(),
            nn.Conv2d(1024, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU()  # (nB, 128, 36, 100)
        )

        # ----------------- add message passing -----------------
        self.message_passing = nn.ModuleList()
        self.message_passing.add_module('up_down', nn.Conv2d(128, 128, (1, ms_ks), padding=(0, ms_ks // 2), bias=False))
        self.message_passing.add_module('down_up', nn.Conv2d(128, 128, (1, ms_ks), padding=(0, ms_ks // 2), bias=False))
        self.message_passing.add_module('left_right',
                                        nn.Conv2d(128, 128, (ms_ks, 1), padding=(ms_ks // 2, 0), bias=False))
        self.message_passing.add_module('right_left',
                                        nn.Conv2d(128, 128, (ms_ks, 1), padding=(ms_ks // 2, 0), bias=False))
        # (nB, 128, 36, 100)

        # ----------------- SCNN part -----------------
        self.layer2 = nn.Sequential(
            nn.Dropout2d(0.1),
            nn.Conv2d(128, 5, 1)  # get (nB, 5, 36, 100)
        )

        self.layer3 = nn.Sequential(
            nn.Softmax(dim=1),  # (nB, 5, 36, 100)
            nn.AvgPool2d(2, 2),  # (nB, 5, 18, 50)
        )
        self.fc = nn.Sequential(
            nn.Linear(self.fc_input_feature, 128),
            nn.ReLU(),
            nn.Linear(128, 4),
            nn.Sigmoid()
        ) 
Example #17
Source File: model.py    From mvfnet with MIT License
def __init__(self):
        super(VggEncoder, self).__init__()

        self.featChannel = 512
        self.layer1 = tvmodel.vgg16_bn(pretrained=True).features
        self.layer1 = nn.Sequential(OrderedDict([
            ('conv1',  nn.Conv2d(3, 64, (3, 3), (1, 1), (1, 1))),
            ('bn1',  nn.BatchNorm2d(64)),
            ('relu1', nn.ReLU(True)),
            ('pool1',  nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)),
       
            ('conv2',  nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1))),
            ('bn2',  nn.BatchNorm2d(128)),
            ('relu2', nn.ReLU(True)),
            ('pool2',  nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)),
        
            ('conv3',  nn.Conv2d(128, 256, (3, 3), (1, 1), (1, 1))),
            ('bn3',  nn.BatchNorm2d(256)),
            ('relu3', nn.ReLU(True)),
        
            ('conv4', nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1))),
            ('bn4',  nn.BatchNorm2d(256)),
            ('relu4', nn.ReLU(True)),
            ('pool3',  nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)),
        
            ('conv5',  nn.Conv2d(256, 512, (3, 3), (1, 1), 1)),
            ('bn5',  nn.BatchNorm2d(512)),
            ('relu5', nn.ReLU(True)),
            ('pool4',  nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)),
        
            ('conv6',  nn.Conv2d(512, 512, (3, 3), stride=1, padding=1)),
            ('bn6',  nn.BatchNorm2d(512)),
            ('relu6', nn.ReLU(True)),
        
            ('conv7',  nn.Conv2d(512, 512, (3, 3), (1, 1), 1)),
            ('bn7',  nn.BatchNorm2d(512)),
            ('relu7', nn.ReLU(True)),
            ('pool5',  nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True)),
            ]))

        
            
        self.fc_3dmm = nn.Sequential(OrderedDict([
            ('fc1', nn.Linear(self.featChannel*3, 256*3)),
            ('relu1', nn.ReLU(True)),
            ('fc2', nn.Linear(256*3, 228))]))
        
        self.fc_pose = nn.Sequential(OrderedDict([
           ('fc3', nn.Linear(512, 256)),
           ('relu2', nn.ReLU(True)),
           ('fc4', nn.Linear(256, 7))]))
        reset_params(self.fc_3dmm)
        reset_params(self.fc_pose)