Python torch.nn.BatchNorm1d() Examples

The following are 30 code examples of torch.nn.BatchNorm1d(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn, or try the search function.
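As a quick reference before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what nn.BatchNorm1d itself expects: the constructor takes the number of features/channels, and the module accepts either a 2D (batch, features) tensor or a 3D (batch, channels, length) tensor, normalizing each channel over the batch (and length) dimension.

import torch
import torch.nn as nn

# BatchNorm1d(C) normalizes each of the C features/channels using batch statistics.
bn = nn.BatchNorm1d(20)

x2d = torch.randn(8, 20)        # (batch, features), e.g. after nn.Linear
x3d = torch.randn(8, 20, 50)    # (batch, channels, length), e.g. after nn.Conv1d

print(bn(x2d).shape)   # torch.Size([8, 20])
print(bn(x3d).shape)   # torch.Size([8, 20, 50])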
Example #1
Source File: _wavernn.py    From audio with BSD 2-Clause "Simplified" License 11 votes
def __init__(self,
                 n_res_block: int = 10,
                 n_freq: int = 128,
                 n_hidden: int = 128,
                 n_output: int = 128,
                 kernel_size: int = 5) -> None:
        super().__init__()

        ResBlocks = [_ResBlock(n_hidden) for _ in range(n_res_block)]

        self.melresnet_model = nn.Sequential(
            nn.Conv1d(in_channels=n_freq, out_channels=n_hidden, kernel_size=kernel_size, bias=False),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            *ResBlocks,
            nn.Conv1d(in_channels=n_hidden, out_channels=n_output, kernel_size=1)
        ) 
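The Sequential above pairs nn.Conv1d with nn.BatchNorm1d(n_hidden), so the batch norm sees 3D (batch, channels, time) activations. A minimal shape sketch of the same pattern with assumed sizes (not torchaudio's defaults):

import torch
import torch.nn as nn

# Assumed sizes for illustration: n_freq=128, n_hidden=64, kernel_size=5.
stack = nn.Sequential(
    nn.Conv1d(in_channels=128, out_channels=64, kernel_size=5, bias=False),
    nn.BatchNorm1d(64),             # normalizes each of the 64 channels
    nn.ReLU(inplace=True),
)
spec = torch.randn(8, 128, 100)     # (batch, n_freq, time)
print(stack(spec).shape)            # torch.Size([8, 64, 96]); no padding, so time shrinks by 4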
Example #2
Source File: dan.py    From qb with MIT License 7 votes
def __init__(self, embedding_dim, n_hidden_layers, n_hidden_units, dropout_prob):
        super(DanEncoder, self).__init__()
        encoder_layers = []
        for i in range(n_hidden_layers):
            if i == 0:
                input_dim = embedding_dim
            else:
                input_dim = n_hidden_units

            encoder_layers.extend([
                nn.Linear(input_dim, n_hidden_units),
                nn.BatchNorm1d(n_hidden_units),
                nn.ELU(),
                nn.Dropout(dropout_prob),
            ])
        self.encoder = nn.Sequential(*encoder_layers) 
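In a fully connected encoder like this, BatchNorm1d follows nn.Linear and therefore operates on 2D (batch, features) activations. A small standalone sketch of one such block (assumed sizes, not the qb project's API):

import torch
import torch.nn as nn

block = nn.Sequential(
    nn.Linear(300, 512),
    nn.BatchNorm1d(512),   # per-feature normalization over the batch dimension
    nn.ELU(),
    nn.Dropout(0.3),
)
print(block(torch.randn(32, 300)).shape)   # torch.Size([32, 512])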
Example #3
Source File: model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License 6 votes
def __init__(self):
        super(CW2_Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.bnm1 = nn.BatchNorm2d(32, momentum=0.1)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.bnm2 = nn.BatchNorm2d(64, momentum=0.1)
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.bnm3 = nn.BatchNorm2d(128, momentum=0.1)
        self.conv4 = nn.Conv2d(128, 128, 3)
        self.bnm4 = nn.BatchNorm2d(128, momentum=0.1)
        self.fc1 = nn.Linear(3200, 256)
        #self.dropout1 = nn.Dropout(p=0.35, inplace=False)
        self.bnm5 = nn.BatchNorm1d(256, momentum=0.1)
        self.fc2 = nn.Linear(256, 256)
        self.bnm6 = nn.BatchNorm1d(256, momentum=0.1)
        self.fc3 = nn.Linear(256, 10)
        #self.dropout2 = nn.Dropout(p=0.35, inplace=False)
        #self.dropout3 = nn.Dropout(p=0.35, inplace=False) 
Example #4
Source File: model.py    From FormulaNet with BSD 3-Clause "New" or "Revised" License 6 votes
def __init__(self, nFeats_in, nFeats1, nFeats2, nFeats_out, bias=False):
        super(PairForward, self).__init__()
        self.l1 = nn.Linear(nFeats_in, nFeats1, bias=bias)
        init.kaiming_normal(self.l1.weight)
        self.bn1 = nn.BatchNorm1d(nFeats1)
        self.relu1 = nn.ReLU(inplace=True)

        self.l2 = nn.Linear(nFeats1, nFeats2, bias=bias)
        init.kaiming_normal(self.l2.weight)
        self.bn2 = nn.BatchNorm1d(nFeats2)
        self.relu2 = nn.ReLU(inplace=True)

        self.l3 = nn.Linear(nFeats2, nFeats_out, bias=bias)
        init.kaiming_normal(self.l3.weight)
        self.bn3 = nn.BatchNorm1d(nFeats_out)
        self.relu3 = nn.ReLU(inplace=True) 
Example #5
Source File: models_sherlock.py    From sato with Apache License 2.0 6 votes
def __init__(self,
                 name: str,
                 input_dim: int,
                 embedding_dim: int = 300,
                 dropout_ratio: float = 0.5,
                 skip_conversion: bool = False):
        super(FeatureEncoder, self).__init__()
        self.name = name
        self.input_dim = input_dim
        self.embedding_dim = embedding_dim
        self.skip_conversion = skip_conversion
        if self.skip_conversion:
            self.embedding_dim = self.input_dim

        # TODO(Yoshi): Check if applying Batch normalization to the input is good
        self.bn1 = nn.BatchNorm1d(num_features=input_dim)
        self.linear1 = nn.Linear(input_dim,
                                 embedding_dim)
        self.relu1 = nn.ReLU()
        self.dp1 = nn.Dropout(dropout_ratio)
        self.linear2 = nn.Linear(embedding_dim,
                                 embedding_dim)
        self.relu2 = nn.ReLU() 
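The TODO above concerns applying batch normalization directly to the raw input features. As a standalone illustration (not part of sato), BatchNorm1d used this way standardizes each input dimension with batch statistics plus a learned scale and shift:

import torch
import torch.nn as nn

bn = nn.BatchNorm1d(num_features=4)
x = torch.randn(16, 4) * 10 + 3        # features with non-zero mean and large variance
y = bn(x)
print(y.mean(dim=0), y.std(dim=0))     # roughly 0 and 1 per feature (gamma=1, beta=0 at init)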
Example #6
Source File: model.py    From FormulaNet with BSD 3-Clause "New" or "Revised" License 6 votes
def __init__(self, *layers):
        '''
        layers : list of int
            These are the dimensions of the layers, in sequence
        '''
        super(FullyConnectedNet, self).__init__()
        self.linear = nn.ModuleList()
        self.bn = nn.ModuleList()
        self.relu = nn.ModuleList()
        pre_dim = layers[0]
        self.nLayers = 0
        for dim in layers[1:]:
            self.linear.append(nn.Linear(pre_dim, dim, bias=False))
            self.bn.append(nn.BatchNorm1d(dim))
            self.relu.append(nn.ReLU(inplace=True))
            init.kaiming_normal(self.linear[-1].weight)
            self.nLayers += 1
            pre_dim = dim 
Example #7
Source File: models_sherlock.py    From sato with Apache License 2.0 6 votes
def __init__(self,
                 encoders: Dict[str, nn.Module],
                 embedding_dim: int = 500,
                 num_classes: int = 78,
                 dropout_ratio: float = 0.5): # params: Dict[str, Any]):
        super(SherlockClassifier, self).__init__()
        self.encoders = encoders
        # Register encoders as parameters
        for n, e in self.encoders.items():
            self.add_module("FeatureEncoder_{}".format(n), e)

        self.feature_names = sorted(encoders.keys()) # Fix the order of encoders
        # Sum of input_dim of all encoders
        total_input_dim = sum([x.embedding_dim for x in encoders.values()])
        self.bn1 = nn.BatchNorm1d(num_features=total_input_dim)
        self.linear1 = nn.Linear(total_input_dim, embedding_dim)
        self.relu1 = nn.ReLU()
        self.dp1 = nn.Dropout(dropout_ratio)
        self.linear2 = nn.Linear(embedding_dim, embedding_dim)
        self.relu2 = nn.ReLU()
        self.linear3 = nn.Linear(embedding_dim, num_classes)
        self.relu3 = nn.ReLU()
Example #8
Source File: openchem_mlp.py    From OpenChem with MIT License 6 votes
def __init__(self, params):
        super(OpenChemMLP, self).__init__()
        check_params(params, self.get_required_params(),
                     self.get_optional_params())
        self.params = params
        self.hidden_size = self.params['hidden_size']
        self.input_size = [self.params['input_size']] + self.hidden_size[:-1]
        self.n_layers = self.params['n_layers']
        self.activation = self.params['activation']
        if type(self.activation) is list:
            assert len(self.activation) == self.n_layers
        else:
            self.activation = [self.activation]*self.n_layers
        if 'dropout' in self.params.keys():
            self.dropout = self.params['dropout']
        else:
            self.dropout = 0
        self.layers = nn.ModuleList([])
        self.bn = nn.ModuleList([])
        self.dropouts = nn.ModuleList([])
        for i in range(self.n_layers):
            self.dropouts.append(nn.Dropout(self.dropout))
            self.bn.append(nn.BatchNorm1d(self.hidden_size[i]))
            self.layers.append(nn.Linear(in_features=self.input_size[i],
                                      out_features=self.hidden_size[i])) 
Example #9
Source File: basic_layers.py    From Attentive-Filtering-Network with MIT License 6 votes
def __init__(self, in_channels, dilation=1):
        
        super(LocalAttenBlock, self).__init__()
        
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=(3,3), padding=(1,1), dilation=dilation)
        self.bn1   = nn.BatchNorm2d(in_channels)
        self.relu  = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(in_channels, in_channels, kernel_size=(3,3), padding=(1,1), dilation=1)
        self.bn2   = nn.BatchNorm2d(in_channels)
        self.relu  = nn.ReLU(inplace=True)
        
        def _weights_init(m):
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                xavier_normal_(m.weight)
                m.bias.data.zero_()
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        
        self.apply(_weights_init) 
Example #10
Source File: basic_layers.py    From Attentive-Filtering-Network with MIT License 6 votes
def __init__(self, in_channels, dilation=1):
        
        super(PlainConvBlock, self).__init__()
        
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=(3,3), padding=(1,1), dilation=dilation)
        self.bn1   = nn.BatchNorm2d(in_channels)
        self.relu  = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(in_channels, in_channels, kernel_size=(3,3), padding=(1,1), dilation=1)
        self.bn2   = nn.BatchNorm2d(in_channels)
        self.relu  = nn.ReLU(inplace=True)
        
        def _weights_init(m):
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                xavier_normal_(m.weight)
                m.bias.data.zero_()
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        
        self.apply(_weights_init) 
Example #11
Source File: network.py    From reconstructing_faces_from_voices with GNU General Public License v3.0 6 votes
def __init__(self, input_channel, channels, output_channel):
        super(VoiceEmbedNet, self).__init__()
        self.model = nn.Sequential(
            nn.Conv1d(input_channel, channels[0], 3, 2, 1, bias=False),
            nn.BatchNorm1d(channels[0], affine=True),
            nn.ReLU(inplace=True),
            nn.Conv1d(channels[0], channels[1], 3, 2, 1, bias=False),
            nn.BatchNorm1d(channels[1], affine=True),
            nn.ReLU(inplace=True),
            nn.Conv1d(channels[1], channels[2], 3, 2, 1, bias=False),
            nn.BatchNorm1d(channels[2], affine=True),
            nn.ReLU(inplace=True),
            nn.Conv1d(channels[2], channels[3], 3, 2, 1, bias=False),
            nn.BatchNorm1d(channels[3], affine=True),
            nn.ReLU(inplace=True),
            nn.Conv1d(channels[3], output_channel, 3, 2, 1, bias=True),
        ) 
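Each block here is a strided Conv1d followed by BatchNorm1d(affine=True) and ReLU, so every stage halves the temporal length while normalizing per channel. A shape sketch of a single such block with assumed channel sizes (not the project's configuration):

import torch
import torch.nn as nn

block = nn.Sequential(
    nn.Conv1d(64, 256, 3, 2, 1, bias=False),   # kernel 3, stride 2, padding 1
    nn.BatchNorm1d(256, affine=True),
    nn.ReLU(inplace=True),
)
print(block(torch.randn(2, 64, 100)).shape)    # torch.Size([2, 256, 50])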
Example #12
Source File: models.py    From transferlearning with MIT License 6 votes
def __init__(self, num_class, base_net='resnet50', transfer_loss='mmd', use_bottleneck=True, bottleneck_width=256, width=1024):
        super(Transfer_Net, self).__init__()
        self.base_network = backbone.network_dict[base_net]()
        self.use_bottleneck = use_bottleneck
        self.transfer_loss = transfer_loss
        bottleneck_list = [nn.Linear(self.base_network.output_num(
        ), bottleneck_width), nn.BatchNorm1d(bottleneck_width), nn.ReLU(), nn.Dropout(0.5)]
        self.bottleneck_layer = nn.Sequential(*bottleneck_list)
        classifier_layer_list = [nn.Linear(self.base_network.output_num(), width), nn.ReLU(), nn.Dropout(0.5),
                                 nn.Linear(width, num_class)]
        self.classifier_layer = nn.Sequential(*classifier_layer_list)

        self.bottleneck_layer[0].weight.data.normal_(0, 0.005)
        self.bottleneck_layer[0].bias.data.fill_(0.1)
        for i in range(2):
            self.classifier_layer[i * 3].weight.data.normal_(0, 0.01)
            self.classifier_layer[i * 3].bias.data.fill_(0.0) 
Example #13
Source File: digit_network.py    From transferlearning with MIT License 6 votes
def __init__(self):
        super(Network, self).__init__()
        self.feature = nn.Sequential()
        self.feature.add_module('f_conv1', nn.Conv2d(3, 64, kernel_size=5))
        self.feature.add_module('f_bn1', nn.BatchNorm2d(64))
        self.feature.add_module('f_pool1', nn.MaxPool2d(2))
        self.feature.add_module('f_relu1', nn.ReLU(True))
        self.feature.add_module('f_conv2', nn.Conv2d(64, 50, kernel_size=5))
        self.feature.add_module('f_bn2', nn.BatchNorm2d(50))
        self.feature.add_module('f_drop1', nn.Dropout2d())
        self.feature.add_module('f_pool2', nn.MaxPool2d(2))
        self.feature.add_module('f_relu2', nn.ReLU(True))

        self.class_classifier = nn.Sequential()
        self.class_classifier.add_module('c_fc1', nn.Linear(50 * 5 * 5, 100))
        self.class_classifier.add_module('c_bn1', nn.BatchNorm1d(100))
        self.class_classifier.add_module('c_relu1', nn.ReLU(True))
        self.class_classifier.add_module('c_drop1', nn.Dropout2d())
        self.class_classifier.add_module('c_fc2', nn.Linear(100, 500))
        self.class_classifier.add_module('c_bn2', nn.BatchNorm1d(500))
        self.class_classifier.add_module('c_relu2', nn.ReLU(True))
        self.class_classifier.add_module('c_fc3', nn.Linear(500, 10)) 
Example #14
Source File: simple_kaggle.py    From argus-freesound with MIT License 6 votes
def __init__(self, num_classes, base_size=64, dropout=0.2):
        super().__init__()

        self.conv = nn.Sequential(
            ConvBlock(in_channels=3, out_channels=base_size),
            ConvBlock(in_channels=base_size, out_channels=base_size*2),
            ConvBlock(in_channels=base_size*2, out_channels=base_size*4),
            ConvBlock(in_channels=base_size*4, out_channels=base_size*8),
        )

        self.fc = nn.Sequential(
            nn.Dropout(dropout),
            nn.Linear(base_size*8, base_size*2),
            nn.PReLU(),
            nn.BatchNorm1d(base_size*2),
            nn.Dropout(dropout/2),
            nn.Linear(base_size*2, num_classes),
        ) 
Example #15
Source File: utils.py    From prediction-flow with MIT License 6 votes
def init_weights(model):
    if isinstance(model, nn.Linear):
        if model.weight is not None:
            init.kaiming_uniform_(model.weight.data)
        if model.bias is not None:
            init.normal_(model.bias.data)
    elif isinstance(model, nn.BatchNorm1d):
        if model.weight is not None:
            init.normal_(model.weight.data, mean=1, std=0.02)
        if model.bias is not None:
            init.constant_(model.bias.data, 0)
    elif isinstance(model, nn.BatchNorm2d):
        if model.weight is not None:
            init.normal_(model.weight.data, mean=1, std=0.02)
        if model.bias is not None:
            init.constant_(model.bias.data, 0)
    elif isinstance(model, nn.BatchNorm3d):
        if model.weight is not None:
            init.normal_(model.weight.data, mean=1, std=0.02)
        if model.bias is not None:
            init.constant_(model.bias.data, 0)
    else:
        pass 
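Because nn.Module.apply walks every submodule recursively, a function like init_weights is typically applied to a whole network in one call. A short usage sketch with a hypothetical network (assuming the init_weights function above, and its torch.nn.init import, are in scope):

import torch.nn as nn

net = nn.Sequential(nn.Linear(10, 20), nn.BatchNorm1d(20), nn.ReLU(), nn.Linear(20, 1))
net.apply(init_weights)   # visits every submodule, including the BatchNorm1d layer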
Example #16
Source File: mlp.py    From prediction-flow with MIT License 6 votes
def __init__(self, input_size, hidden_layers,
                 dropout=0.0, batchnorm=True, activation='relu'):
        super(MLP, self).__init__()
        modules = OrderedDict()

        previous_size = input_size
        for index, hidden_layer in enumerate(hidden_layers):
            modules[f"dense{index}"] = nn.Linear(previous_size, hidden_layer)
            if batchnorm:
                modules[f"batchnorm{index}"] = nn.BatchNorm1d(hidden_layer)
            if activation:
                if activation.lower() == 'relu':
                    modules[f"activation{index}"] = nn.ReLU()
                elif activation.lower() == 'prelu':
                    modules[f"activation{index}"] = nn.PReLU()
                elif activation.lower() == 'sigmoid':
                    modules[f"activation{index}"] = nn.Sigmoid()
                else:
                    raise NotImplementedError(f"{activation} is not supported")
            if dropout:
                modules[f"dropout{index}"] = nn.Dropout(dropout)
            previous_size = hidden_layer
        self._sequential = nn.Sequential(modules) 
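Note that nn.Sequential also accepts an OrderedDict, which is what gives each layer a readable name here ("dense0", "batchnorm0", ...). A stripped-down standalone version of the same pattern:

from collections import OrderedDict
import torch
import torch.nn as nn

modules = OrderedDict()
modules["dense0"] = nn.Linear(16, 64)
modules["batchnorm0"] = nn.BatchNorm1d(64)
modules["activation0"] = nn.ReLU()
net = nn.Sequential(modules)
print(net(torch.randn(8, 16)).shape)   # torch.Size([8, 64])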
Example #17
Source File: model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License 6 votes
def __init__(self):
        super(CW2_Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.bnm1 = nn.BatchNorm2d(32, momentum=0.1)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.bnm2 = nn.BatchNorm2d(64, momentum=0.1)
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.bnm3 = nn.BatchNorm2d(128, momentum=0.1)
        self.conv4 = nn.Conv2d(128, 128, 3)
        self.bnm4 = nn.BatchNorm2d(128, momentum=0.1)
        self.fc1 = nn.Linear(3200, 256)
        #self.dropout1 = nn.Dropout(p=0.35, inplace=False)
        self.bnm5 = nn.BatchNorm1d(256, momentum=0.1)
        self.fc2 = nn.Linear(256, 256)
        self.bnm6 = nn.BatchNorm1d(256, momentum=0.1)
        self.fc3 = nn.Linear(256, 10)
        #self.dropout2 = nn.Dropout(p=0.35, inplace=False)
        #self.dropout3 = nn.Dropout(p=0.35, inplace=False) 
Example #18
Source File: pointnet.py    From TreeGAN with MIT License 5 votes
def __init__(self, global_feat = True):
        super(PointNetfeat, self).__init__()
        self.stn = STN3d()
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.global_feat = global_feat 
Example #19
Source File: pointnet.py    From TreeGAN with MIT License 5 votes
def __init__(self, k = 2):
        super(PointNetCls, self).__init__()
        self.feat = PointNetfeat(global_feat=True)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, k)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.relu = nn.ReLU() 
Example #20
Source File: pointnet.py    From TreeGAN with MIT License 5 votes
def __init__(self):
        super(STN3d, self).__init__()
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 9)
        self.relu = nn.ReLU()

        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256) 
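These PointNet modules use the same nn.BatchNorm1d class in two regimes: bn1-bn3 follow Conv1d layers and see (batch, channels, points) tensors, while bn4-bn5 follow Linear layers and see (batch, features) tensors (the forward passes are not shown here, so the pairing is inferred). A simplified shape sketch of both cases:

import torch
import torch.nn as nn

pts = torch.randn(4, 3, 1024)                        # (batch, xyz, num_points)
h = nn.BatchNorm1d(64)(nn.Conv1d(3, 64, 1)(pts))     # -> (4, 64, 1024), per-channel norm
g = h.max(dim=2).values                              # global feature, (4, 64)
f = nn.BatchNorm1d(512)(nn.Linear(64, 512)(g))       # -> (4, 512), per-feature norm
print(h.shape, f.shape)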
Example #21
Source File: modules.py    From ScenarioMeta with MIT License 5 votes
def __init__(self, input_size, hidden_layers, final_size=0, final_activation="none", normalization="batch_norm",
                 activation='relu'):
        """
        :param input_size:
        :param hidden_layers: list of (unit_num, to_norm, dropout_rate) tuples
        :param final_size:
        :param final_activation:
        """
        nn.Module.__init__(self)
        self.input_size = input_size
        fcs = []
        last_size = self.input_size
        for size, to_norm, dropout_rate in hidden_layers:
            linear = nn.Linear(last_size, size)
            linear.bias.data.fill_(0.0)
            fcs.append(linear)
            last_size = size
            if to_norm:
                if normalization == 'batch_norm':
                    fcs.append(nn.BatchNorm1d(last_size))
                elif normalization == 'layer_norm':
                    fcs.append(nn.LayerNorm(last_size))
            fcs.append(activation_method(activation))
            if dropout_rate > 0.0:
                fcs.append(nn.Dropout(dropout_rate))
        self.fc = nn.Sequential(*fcs)
        if final_size > 0:
            linear = nn.Linear(last_size, final_size)
            linear.bias.data.fill_(0.0)
            finals = [linear, activation_method(final_activation)]
        else:
            finals = []
        self.final_layer = nn.Sequential(*finals) 
Example #22
Source File: classifier.py    From pytorch-atda with MIT License 5 votes
def __init__(self, dropout_keep=0.5, use_BN=True):
        """Init classifier."""
        super(ClassifierA, self).__init__()

        self.dropout_keep = dropout_keep
        self.use_BN = use_BN
        self.restored = False

        if self.use_BN:
            self.classifier = nn.Sequential(
                nn.Dropout(self.dropout_keep),
                nn.Linear(3072, 2048),
                nn.BatchNorm1d(2048),
                nn.ReLU(),
                nn.Dropout(self.dropout_keep),
                nn.Linear(2048, 10),
                nn.BatchNorm1d(10),
                nn.Softmax()
            )
        else:
            self.classifier = nn.Sequential(
                nn.Dropout(self.dropout_keep),
                nn.Linear(3072, 2048),
                nn.ReLU(),
                nn.Dropout(self.dropout_keep),
                nn.Linear(2048, 10),
                nn.Softmax()
            ) 
Example #23
Source File: models_sherlock.py    From sato with Apache License 2.0 5 votes
def __init__(self,
                 input_dim: int,
                 embedding_dim: int = 300):
        super(RestEncoder, self).__init__()
        self.input_dim = input_dim
        self.bn1 = nn.BatchNorm1d(num_features=input_dim) 
Example #24
Source File: mlp.py    From Deep-SAD-PyTorch with MIT License 5 votes
def __init__(self, in_features, out_features, bias=False, eps=1e-04):
        super(Linear_BN_leakyReLU, self).__init__()

        self.linear = nn.Linear(in_features, out_features, bias=bias)
        self.bn = nn.BatchNorm1d(out_features, eps=eps, affine=bias) 
Example #25
Source File: fmnist_LeNet.py    From Deep-SAD-PyTorch with MIT License 5 votes
def __init__(self, rep_dim=64):
        super().__init__()

        self.rep_dim = rep_dim

        self.fc3 = nn.Linear(self.rep_dim, 128, bias=False)
        self.bn1d2 = nn.BatchNorm1d(128, eps=1e-04, affine=False)
        self.deconv1 = nn.ConvTranspose2d(8, 32, 5, bias=False, padding=2)
        self.bn2d3 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(32, 16, 5, bias=False, padding=3)
        self.bn2d4 = nn.BatchNorm2d(16, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(16, 1, 5, bias=False, padding=2) 
Example #26
Source File: fmnist_LeNet.py    From Deep-SAD-PyTorch with MIT License 5 votes
def __init__(self, rep_dim=64):
        super().__init__()

        self.rep_dim = rep_dim
        self.pool = nn.MaxPool2d(2, 2)

        self.conv1 = nn.Conv2d(1, 16, 5, bias=False, padding=2)
        self.bn2d1 = nn.BatchNorm2d(16, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(16, 32, 5, bias=False, padding=2)
        self.bn2d2 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(32 * 7 * 7, 128, bias=False)
        self.bn1d1 = nn.BatchNorm1d(128, eps=1e-04, affine=False)
        self.fc2 = nn.Linear(128, self.rep_dim, bias=False) 
Example #27
Source File: utils.py    From whynot with MIT License 5 votes
def __init__(self, env, n_layers=1, hidden_dim=8, activation=nn.ReLU):
        super(NNPolicy, self).__init__(env)
        layers = []
        in_dim = self.obs_dim
        for _ in range(n_layers):
            layers.append(nn.BatchNorm1d(in_dim, affine=False))
            layers.append(nn.Linear(in_dim, hidden_dim))
            layers.append(activation())
            in_dim = hidden_dim
        layers.append(nn.Linear(in_dim, self.ac_dim))
        self.seq = nn.Sequential(*layers)
        # Start with completely random action.
        self.epsilon = 1.0 
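One practical caveat for this pattern (a general property of BatchNorm1d, not something documented in the snippet): in training mode it computes statistics over the batch and rejects a batch of size 1 for 2D input, so a policy that normalizes observations this way is usually switched to eval() before acting on a single observation.

import torch
import torch.nn as nn

bn = nn.BatchNorm1d(8, affine=False)
obs = torch.randn(1, 8)            # a single observation
# calling bn(obs) in training mode raises an error about needing more than 1 value per channel
bn.eval()                          # use running statistics instead of batch statistics
print(bn(obs).shape)               # torch.Size([1, 8])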
Example #28
Source File: modelcolor.py    From DIB-R with MIT License 5 votes
def linearblock(self, indim, outdim):
        block2 = [
            nn.Linear(indim, outdim),
            nn.BatchNorm1d(outdim),
            nn.ReLU()
        ]
        return block2 
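linearblock returns a plain Python list rather than a module, so the caller is expected to concatenate several such lists and unpack them into a single nn.Sequential. A hypothetical assembly of the same pattern (written as a free function, not DIB-R's actual code):

import torch.nn as nn

def linearblock(indim, outdim):
    return [nn.Linear(indim, outdim), nn.BatchNorm1d(outdim), nn.ReLU()]

layers = linearblock(1024, 512) + linearblock(512, 256)   # two Linear+BN+ReLU blocks
decoder = nn.Sequential(*layers)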
Example #29
Source File: model_3d_lc.py    From DPC with MIT License 5 votes
def __init__(self, sample_size, num_seq, seq_len, 
                 network='resnet18', dropout=0.5, num_class=101):
        super(LC, self).__init__()
        torch.cuda.manual_seed(666)
        self.sample_size = sample_size
        self.num_seq = num_seq
        self.seq_len = seq_len
        self.num_class = num_class 
        print('=> Using RNN + FC model ')

        print('=> Use 2D-3D %s!' % network)
        self.last_duration = int(math.ceil(seq_len / 4))
        self.last_size = int(math.ceil(sample_size / 32))
        track_running_stats = True 

        self.backbone, self.param = select_resnet(network, track_running_stats=track_running_stats)
        self.param['num_layers'] = 1
        self.param['hidden_size'] = self.param['feature_size']

        print('=> using ConvRNN, kernel_size = 1')
        self.agg = ConvGRU(input_size=self.param['feature_size'],
                               hidden_size=self.param['hidden_size'],
                               kernel_size=1,
                               num_layers=self.param['num_layers'])
        self._initialize_weights(self.agg)

        self.final_bn = nn.BatchNorm1d(self.param['feature_size'])
        self.final_bn.weight.data.fill_(1)
        self.final_bn.bias.data.zero_()

        self.final_fc = nn.Sequential(nn.Dropout(dropout),
                                      nn.Linear(self.param['feature_size'], self.num_class))
        self._initialize_weights(self.final_fc) 
Example #30
Source File: test_sync_batchnorm.py    From EMANet with GNU General Public License v3.0 5 votes
def _find_bn(module):
    for m in module.modules():
        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, SynchronizedBatchNorm1d, SynchronizedBatchNorm2d)):
            return m