Python torch.nn.InstanceNorm1d() Examples

The following are 30 code examples of torch.nn.InstanceNorm1d(), collected from open-source projects. Each example notes the project and source file it comes from, so you can follow it back to the original code. You may also want to check out the other available functions and classes of the torch.nn module.
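Before the project examples, a minimal self-contained illustration of the layer itself may help: nn.InstanceNorm1d normalizes each channel of each sample independently over the length dimension and expects input of shape (N, C, L) (recent PyTorch versions also accept an unbatched (C, L) tensor). The sizes below are arbitrary.

import torch
import torch.nn as nn

m = nn.InstanceNorm1d(100)                      # affine=False: no learnable parameters
m_affine = nn.InstanceNorm1d(100, affine=True)  # adds a per-channel weight and bias
x = torch.randn(20, 100, 40)                    # (batch, channels, length)
out = m_affine(x)
print(out.shape)                                # torch.Size([20, 100, 40])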
Example #1
Source File: layers.py    From USIP with GNU General Public License v3.0
def __init__(self, in_features, out_features, activation=None, normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyLinear, self).__init__()
        self.activation = activation
        self.normalization = normalization

        self.linear = nn.Linear(in_features, out_features, bias=True)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True, momentum_decay_step=bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif 'elu' == activation:
            self.act = nn.ELU(alpha=1.0)
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.01)
        elif 'selu' == self.activation:
            self.act = nn.SELU()

        self.weight_init() 
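The forward pass of MyLinear is not shown in this snippet. As a rough sketch of how a linear → instance-norm → activation block can be wired so that nn.InstanceNorm1d receives the (N, C, L) layout it expects, one might write the following; the class name and the point-wise application over a length dimension are illustrative assumptions, not the USIP code.

import torch
import torch.nn as nn

class LinearNormAct(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features)
        self.norm = nn.InstanceNorm1d(out_features, affine=True)
        self.act = nn.ReLU()

    def forward(self, x):
        # x: (N, C_in, L) per-point features; apply the linear map point-wise
        x = self.linear(x.transpose(1, 2)).transpose(1, 2)   # -> (N, C_out, L)
        return self.act(self.norm(x))

y = LinearNormAct(3, 64)(torch.randn(2, 3, 128))
print(y.shape)   # torch.Size([2, 64, 128])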
Example #2
Source File: layers.py    From RL-GAN-Net with MIT License
def __init__(self, in_features, out_features, activation=None, normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyLinear, self).__init__()
        self.activation = activation
        self.normalization = normalization

        self.linear = nn.Linear(in_features, out_features, bias=True)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True, momentum_decay_step=bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif 'elu' == activation:
            self.act = nn.ELU(alpha=1.0)
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.1)

        self.weight_init() 
Example #3
Source File: base_networks.py    From STARnet with MIT License
def __init__(self, input_size, output_size, bias=True, activation='relu', norm='batch'):
        super(DenseBlock, self).__init__()
        self.fc = nn.Linear(input_size, output_size, bias=bias)

        self.norm = norm
        if self.norm =='batch':
            self.bn = nn.BatchNorm1d(output_size)
        elif self.norm == 'instance':
            self.bn = nn.InstanceNorm1d(output_size)

        self.activation = activation
        if self.activation == 'relu':
            self.act = nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = nn.LeakyReLU(0.1, True)
        elif self.activation == 'tanh':
            self.act = nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = nn.Sigmoid() 
Example #4
Source File: layers.py    From occupancy_networks with MIT License
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules
        self.fc_gamma = nn.Linear(c_dim, f_dim)
        self.fc_beta = nn.Linear(c_dim, f_dim)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # note: torch.nn has no GroupNorm1d, so this branch raises AttributeError;
            # nn.GroupNorm(num_groups, f_dim, affine=False) would be the usual substitute
            self.bn = nn.GroupNorm1d(f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters() 
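Only the constructor of this conditional normalization layer is shown. Its forward pass typically maps the conditioning vector c to a per-channel scale (gamma) and shift (beta) applied to the normalized features; a minimal standalone sketch of that idea, with a made-up class name and input sizes:

import torch
import torch.nn as nn

class ConditionalNorm1d(nn.Module):
    def __init__(self, c_dim, f_dim):
        super().__init__()
        self.fc_gamma = nn.Linear(c_dim, f_dim)
        self.fc_beta = nn.Linear(c_dim, f_dim)
        self.bn = nn.InstanceNorm1d(f_dim, affine=False)

    def forward(self, x, c):                      # x: (N, f_dim, T), c: (N, c_dim)
        gamma = self.fc_gamma(c).unsqueeze(-1)    # (N, f_dim, 1)
        beta = self.fc_beta(c).unsqueeze(-1)      # (N, f_dim, 1)
        return gamma * self.bn(x) + beta

out = ConditionalNorm1d(128, 64)(torch.randn(4, 64, 100), torch.randn(4, 128))
print(out.shape)   # torch.Size([4, 64, 100])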
Example #5
Source File: net_builder.py    From Visual-Template-Free-Form-Parsing with GNU General Public License v3.0
def fcReLU(in_ch,out_ch,norm,dropout=None,relu=True):
    fc = nn.Linear(in_ch,out_ch)
    if 'weight' in norm:
        layers = [weight_norm(fc)]
    else:
        layers = [fc]
    if 'batch' in norm:
        layers.append(nn.BatchNorm1d(out_ch))
    elif 'instance' in norm:
        layers.append(nn.InstanceNorm1d(out_ch))
    elif 'group' in norm:
        layers.append(nn.GroupNorm(getGroupSize(out_ch),out_ch))
    if dropout is not None:
        if dropout != False:
            # note: the dropout argument only acts as an on/off flag; the probability is fixed at 0.1
            layers.append(nn.Dropout(p=0.1, inplace=True))
    if relu:
        layers += [nn.ReLU(inplace=True)]
    return layers 
Example #6
Source File: normalized_eight_point_net.py    From DFE with MIT License
def __init__(self, input_size, inplace=True, has_bias=True, learn_affine=True):
        """Init.

        Args:
            input_size (int): size of input
            inplace (bool, optional): Defaults to True. LeakyReLU inplace?
            has_bias (bool, optional): Defaults to True. Conv1d bias?
            learn_affine (bool, optional): Defaults to True. InstanceNorm1d affine?
        """

        super(LDFWeightEstimatorNet, self).__init__()

        track = False
        self.conv_in = nn.Conv1d(input_size, 128, kernel_size=1, bias=has_bias)

        blocks = []
        for i in range(12):
            blocks.append(ResNetBlock())

        self.backbone = nn.Sequential(*blocks)

        self.conv_out = nn.Conv1d(128, 1, kernel_size=1, bias=has_bias) 
Example #7
Source File: layers.py    From occupancy_networks with MIT License
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # note: torch.nn has no GroupNorm1d, so this branch raises AttributeError;
            # nn.GroupNorm(num_groups, f_dim, affine=False) would be the usual substitute
            self.bn = nn.GroupNorm1d(f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters() 
Example #8
Source File: privacy_engine_test.py    From pytorch-dp with Apache License 2.0
def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, 8, 3)
        self.gnorm1 = nn.GroupNorm(4, 16)
        self.conv2 = nn.Conv1d(16, 32, 3, 1)
        self.lnorm1 = nn.LayerNorm((32, 23))
        self.conv3 = nn.Conv1d(32, 32, 3, 1)
        self.instnorm1 = nn.InstanceNorm1d(32, affine=True)
        self.convf = nn.Conv1d(32, 32, 1, 1)
        for p in self.convf.parameters():
            p.requires_grad = False
        self.fc1 = nn.Linear(21, 17)
        self.lnorm2 = nn.LayerNorm(17)
        self.fc2 = nn.Linear(32 * 17, 10)

        for layer in (self.gnorm1, self.lnorm1, self.lnorm2, self.instnorm1):
            nn.init.uniform_(layer.weight)
            nn.init.uniform_(layer.bias) 
Example #9
Source File: model.py    From ZeroSpeech-TTS-without-T with MIT License
def __init__(self, c_in=512, c_out=513, c_h=512, c_a=8, ns=0.2, seg_len=64):
		super(Spectrogram_Patcher, self).__init__()
		self.ns = ns
		self.seg_len = seg_len
		self.input_layer = nn.Linear(c_in, c_h)
		self.dense1 = nn.Linear(c_h, c_h)
		self.dense2 = nn.Linear(c_h, c_h)
		self.dense3 = nn.Linear(c_h, c_h)
		self.dense4 = nn.Linear(c_h, c_h)
		self.RNN = nn.GRU(input_size=c_h, hidden_size=c_h//2, num_layers=1, bidirectional=True)
		self.dense5 = nn.Linear(2*c_h + c_h, c_h)
		self.linear = nn.Linear(c_h, c_out)
		# normalization layer
		self.ins_norm1 = nn.InstanceNorm1d(c_h)
		self.ins_norm2 = nn.InstanceNorm1d(c_h)
		# embedding layer
		self.emb1 = nn.Embedding(c_a, c_h)
		self.emb2 = nn.Embedding(c_a, c_h) 
Example #10
Source File: layers.py    From SO-Net with MIT License
def __init__(self, in_features, out_features, activation=None, normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyLinear, self).__init__()
        self.activation = activation
        self.normalization = normalization

        self.linear = nn.Linear(in_features, out_features, bias=True)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True, momentum_decay_step=bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif 'elu' == activation:
            self.act = nn.ELU(alpha=1.0)
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.1)

        self.weight_init() 
Example #11
Source File: layers.py    From occupancy_flow with MIT License
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # note: torch.nn has no GroupNorm1d, so this branch raises AttributeError;
            # nn.GroupNorm(num_groups, f_dim, affine=False) would be the usual substitute
            self.bn = nn.GroupNorm1d(f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters() 
Example #12
Source File: layers.py    From occupancy_flow with MIT License
def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # Submodules
        self.fc_gamma = nn.Linear(c_dim, f_dim)
        self.fc_beta = nn.Linear(c_dim, f_dim)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # note: torch.nn has no GroupNorm1d, so this branch raises AttributeError;
            # nn.GroupNorm(num_groups, f_dim, affine=False) would be the usual substitute
            self.bn = nn.GroupNorm1d(f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters() 
Example #13
Source File: layers.py    From RL-GAN-Net with MIT License
def __init__(self, in_features, out_features, activation=None, normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyLinear, self).__init__()
        self.activation = activation
        self.normalization = normalization

        self.linear = nn.Linear(in_features, out_features, bias=True)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True, momentum_decay_step=bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif 'sigmoid' == activation:
            self.act = nn.Sigmoid()
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.1)

        self.weight_init() 
Example #14
Source File: convnet_model.py    From deepsaber with GNU General Public License v3.0
def __init__(self, opt):
        super().__init__()

        self.initial = nn.Sequential(
            nn.Conv1d(opt.input_channels, opt.num_filters, opt.kernel_size),
            nn.InstanceNorm1d(opt.num_filters),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Conv1d(opt.num_filters, opt.num_filters, opt.kernel_size),
            nn.InstanceNorm1d(opt.num_filters),
            nn.Dropout(p=0.2),
            nn.ReLU(),
        )

        layers = []
        for i in range(1, opt.layers + 1):
            layers.extend([
                nn.Conv1d(opt.num_filters * i, opt.num_filters * (i + 1), opt.kernel_size),
                nn.InstanceNorm1d(opt.num_filters),
                nn.MaxPool1d(kernel_size=2),
                nn.Dropout(p=0.2),
                nn.ReLU()
            ])
        self.features = nn.Sequential(*layers)

        self.penultimate = nn.Sequential(
            nn.Conv1d(opt.num_filters * (i + 1), opt.num_filters * (i + 1), opt.kernel_size),
            nn.InstanceNorm1d(opt.num_filters),
            nn.ReLU()
        )

        self.final = nn.Sequential(
            nn.Linear(500, 300),
            nn.LayerNorm(300),
            nn.ReLU(),
            nn.Linear(300, opt.output_channels)
        ) 
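One detail worth noting above: the convolutions inside the loop output opt.num_filters * (i + 1) channels, while the following nn.InstanceNorm1d is built with opt.num_filters. Because the layer is used without affine parameters or running statistics, nothing in it is sized by num_features, so the mismatch does not break the forward pass (recent PyTorch versions emit a warning about it). A small demonstration of that behavior, with arbitrary sizes:

import torch
import torch.nn as nn

norm = nn.InstanceNorm1d(16)      # affine=False, track_running_stats=False by default
x = torch.randn(2, 32, 50)        # 32 channels, not the 16 the layer was built with
print(norm(x).shape)              # torch.Size([2, 32, 50]) (newer versions warn)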
Example #15
Source File: graph_transformer_layers_new_dropout_3_adj_mtx.py    From multigraph_transformer with MIT License
def __init__(self, embed_dim, normalization='batch'):
        super(Normalization, self).__init__()

        normalizer_class = {
            'batch': nn.BatchNorm1d,
            'instance': nn.InstanceNorm1d
        }.get(normalization, None)

        self.normalizer = normalizer_class(embed_dim, affine=True)

        # Normalization by default initializes affine parameters 
        # with bias 0 and weight unif(0,1) which is too large!
        self.init_parameters() 
Example #16
Source File: basic_feat_extrator.py    From sanet_relocal_demo with GNU General Public License v3.0
def __init__(self, n_channels=[256, 128]):
        super(ContextNormFCN, self).__init__()
        self.conv0 = nn.Conv2d(n_channels[0], n_channels[1], kernel_size=1, stride=1, padding=0, bias=False)
        self.context_norm = nn.InstanceNorm1d(n_channels[1], affine=False, track_running_stats=False)
        self.bn_relu = nn.Sequential(
            nn.BatchNorm2d(n_channels[1]),
            nn.ReLU(inplace=True)
        ) 
Example #17
Source File: graph_conv_net.py    From multigraph_transformer with MIT License
def forward(self, input, mask=None):
        if isinstance(self.normalizer, nn.BatchNorm1d):
            return self.normalizer(input.view(-1, input.size(-1))).view(*input.size())
        elif isinstance(self.normalizer, nn.InstanceNorm1d):
            return self.normalizer(input.permute(0, 2, 1)).permute(0, 2, 1)
        else:
            assert self.normalizer is None, "Unknown normalizer type"
            return input 
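The permute in the InstanceNorm1d branch exists because these graph models keep embeddings as (batch, n_nodes, embed_dim), whereas nn.InstanceNorm1d normalizes a (batch, channels, length) tensor over its last dimension. A standalone illustration of that reshuffle, with made-up sizes:

import torch
import torch.nn as nn

x = torch.randn(4, 10, 128)                    # (batch, n_nodes, embed_dim)
norm = nn.InstanceNorm1d(128, affine=True)
out = norm(x.permute(0, 2, 1)).permute(0, 2, 1)
print(out.shape)                               # torch.Size([4, 10, 128])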
Example #18
Source File: dp_model_inspector_test.py    From pytorch-dp with Apache License 2.0
def test_running_stats(self):
        inspector = dp_inspector.DPModelInspector()
        inspector.should_throw = False

        self.assertTrue(inspector.validate(nn.InstanceNorm1d(16)))
        self.assertTrue(inspector.validate(nn.InstanceNorm1d(16, affine=True)))
        self.assertTrue(
            inspector.validate(nn.InstanceNorm1d(16, track_running_stats=True))
        )
        self.assertFalse(
            inspector.validate(
                nn.InstanceNorm1d(16, affine=True, track_running_stats=True)
            )
        ) 
Example #19
Source File: torch_nn.py    From sgas with MIT License
def norm_layer(norm_type, nc):
    # helper selecting normalization layer
    norm = norm_type.lower()
    if norm == 'batch':
        layer = nn.BatchNorm1d(nc, affine=True)
    elif norm == 'instance':
        layer = nn.InstanceNorm1d(nc, affine=False)
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm)
    return layer 
Example #20
Source File: graph_transformer_layers_new_dropout_2_adj_mtx.py    From multigraph_transformer with MIT License
def __init__(self, embed_dim, normalization='batch'):
        super(Normalization, self).__init__()

        normalizer_class = {
            'batch': nn.BatchNorm1d,
            'instance': nn.InstanceNorm1d
        }.get(normalization, None)

        self.normalizer = normalizer_class(embed_dim, affine=True)

        # Normalization by default initializes affine parameters 
        # with bias 0 and weight unif(0,1) which is too large!
        self.init_parameters() 
Example #21
Source File: utils.py    From pytorch-dp with Apache License 2.0
def _batchnorm_to_instancenorm(module: nn.modules.batchnorm._BatchNorm) -> nn.Module:
    """
    Converts a BatchNorm `module` to the corresponding InstanceNorm module
    """

    def matchDim():
        if isinstance(module, nn.BatchNorm1d):
            return nn.InstanceNorm1d
        elif isinstance(module, nn.BatchNorm2d):
            return nn.InstanceNorm2d
        elif isinstance(module, nn.BatchNorm3d):
            return nn.InstanceNorm3d

    return matchDim()(module.num_features) 
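Elsewhere in pytorch-dp this helper is presumably applied across a whole model by the library's module-replacement utilities (not shown here). A rough standalone sketch of the same idea, reimplementing the traversal directly rather than using the library's internal API:

import torch.nn as nn

def convert_batchnorm(model: nn.Module) -> nn.Module:
    # Hypothetical helper (not the pytorch-dp API): recursively swap every
    # BatchNorm layer for the matching InstanceNorm.
    for name, child in model.named_children():
        if isinstance(child, nn.BatchNorm1d):
            setattr(model, name, nn.InstanceNorm1d(child.num_features))
        elif isinstance(child, nn.BatchNorm2d):
            setattr(model, name, nn.InstanceNorm2d(child.num_features))
        elif isinstance(child, nn.BatchNorm3d):
            setattr(model, name, nn.InstanceNorm3d(child.num_features))
        else:
            convert_batchnorm(child)
    return model

model = convert_batchnorm(nn.Sequential(nn.Conv1d(3, 16, 3), nn.BatchNorm1d(16)))
print(model)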
Example #22
Source File: generic_UNet.py    From nnUNet with Apache License 2.0
def print_module_training_status(module):
    if isinstance(module, (nn.Conv2d, nn.Conv3d, nn.Dropout3d, nn.Dropout2d, nn.Dropout,
                           nn.InstanceNorm3d, nn.InstanceNorm2d, nn.InstanceNorm1d,
                           nn.BatchNorm2d, nn.BatchNorm3d, nn.BatchNorm1d)):
        print(str(module), module.training) 
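The helper takes a single module, so it is presumably meant to be passed to nn.Module.apply, which calls it on every submodule. Assuming the function above is in scope, a quick check of a model's train/eval state might look like:

import torch.nn as nn

model = nn.Sequential(nn.Conv1d(3, 16, 3), nn.InstanceNorm1d(16), nn.ReLU())
model.eval()
model.apply(print_module_training_status)   # prints the Conv1d and InstanceNorm1d with False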
Example #23
Source File: torch_nn.py    From deep_gcns_torch with MIT License
def norm_layer(norm_type, nc):
    # helper selecting normalization layer
    norm = norm_type.lower()
    if norm == 'batch':
        layer = nn.BatchNorm1d(nc, affine=True)
    elif norm == 'layer':
        layer = nn.LayerNorm(nc, elementwise_affine=True)
    elif norm == 'instance':
        layer = nn.InstanceNorm1d(nc, affine=False)
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm)
    return layer 
Example #24
Source File: models.py    From OAG with MIT License
def __init__(self, n_type_nodes, n_units=[1433, 8, 7], n_head=8, dropout=0.1,
                 attn_dropout=0.0, instance_normalization=False):
        super(MatchBatchHGAT, self).__init__()
        self.n_layer = len(n_units) - 1
        self.dropout = dropout
        self.inst_norm = instance_normalization

        if self.inst_norm:
            self.norm = nn.InstanceNorm1d(n_units[0], momentum=0.0, affine=True)

        d_hidden = n_units[-1]

        self.fc1 = torch.nn.Linear(d_hidden, d_hidden * 3)
        self.fc2 = torch.nn.Linear(d_hidden * 3, d_hidden)
        self.fc3 = torch.nn.Linear(d_hidden, 2)

        self.attentions = BatchMultiHeadGraphAttention(n_head=n_head,
                                                       f_in=n_units[0],
                                                       f_out=n_units[1],
                                                       attn_dropout=attn_dropout,
                                                       n_type_nodes=n_type_nodes)

        self.out_att = BatchMultiHeadGraphAttention(n_head=1,
                                                    f_in=n_head*n_units[1],
                                                    f_out=n_units[2],
                                                    attn_dropout=attn_dropout,
                                                    n_type_nodes=n_type_nodes) 
Example #25
Source File: graph_transformer_layers_new_dropout_2_adj_mtx.py    From multigraph_transformer with MIT License
def forward(self, input, mask=None):
        if isinstance(self.normalizer, nn.BatchNorm1d):
            return self.normalizer(input.view(-1, input.size(-1))).view(*input.size())
        elif isinstance(self.normalizer, nn.InstanceNorm1d):
            return self.normalizer(input.permute(0, 2, 1)).permute(0, 2, 1)
        else:
            assert self.normalizer is None, "Unknown normalizer type"
            return input 
Example #26
Source File: nn.py    From AttGAN-PyTorch with MIT License
def add_normalization_1d(layers, fn, n_out):
    if fn == 'none':
        pass
    elif fn == 'batchnorm':
        layers.append(nn.BatchNorm1d(n_out))
    elif fn == 'instancenorm':
        layers.append(Unsqueeze(-1))
        layers.append(nn.InstanceNorm1d(n_out, affine=True))
        layers.append(Squeeze(-1))
    elif fn == 'switchnorm':
        layers.append(SwitchNorm1d(n_out))
    else:
        raise Exception('Unsupported normalization: ' + str(fn))
    return layers 
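Unsqueeze and Squeeze are small helper modules defined elsewhere in the AttGAN repository (not shown here); minimal equivalents would look roughly like the sketch below. Note that the added length dimension has size 1, so the per-channel instance statistics are degenerate (each value normalizes to zero before the affine transform), and recent PyTorch versions may reject a single spatial element when computing instance statistics in training mode.

import torch.nn as nn

class Unsqueeze(nn.Module):
    """Insert a singleton dimension so 2D (N, C) features fit InstanceNorm1d's (N, C, L) layout."""
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        return x.unsqueeze(self.dim)

class Squeeze(nn.Module):
    """Remove the singleton dimension added above."""
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        return x.squeeze(self.dim)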
Example #27
Source File: pytorch_utils.py    From Pointnet2.PyTorch with MIT License
def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            kernel_size: int = 1,
            stride: int = 1,
            padding: int = 0,
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=nn.init.kaiming_normal_,
            bias: bool = True,
            preact: bool = False,
            name: str = "",
            instance_norm=False
    ):
        super().__init__(
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            activation,
            bn,
            init,
            conv=nn.Conv1d,
            batch_norm=BatchNorm1d,
            bias=bias,
            preact=preact,
            name=name,
            instance_norm=instance_norm,
            instance_norm_func=nn.InstanceNorm1d
        ) 
Example #28
Source File: layers.py    From RL-GAN-Net with MIT License
def weight_init(self):
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                n = m.kernel_size[0] * m.in_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.fill_(0)
            elif isinstance(m, MyBatchNorm1d) or isinstance(m, nn.InstanceNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
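The instance-norm branch above only works because these layers are constructed with affine=True elsewhere in the same file (see Examples #2 and #13); without affine parameters, InstanceNorm1d has no weight or bias to initialize:

import torch.nn as nn

print(nn.InstanceNorm1d(64).weight)                      # None (affine defaults to False)
print(nn.InstanceNorm1d(64, affine=True).weight.shape)   # torch.Size([64])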
Example #29
Source File: gcn.py    From DeepInf with MIT License
def __init__(self, n_units, dropout, pretrained_emb, vertex_feature,
            use_vertex_feature, fine_tune=False, instance_normalization=False):
        super(BatchGCN, self).__init__()
        self.num_layer = len(n_units) - 1
        self.dropout = dropout
        self.inst_norm = instance_normalization
        if self.inst_norm:
            self.norm = nn.InstanceNorm1d(pretrained_emb.size(1), momentum=0.0, affine=True)

        # https://discuss.pytorch.org/t/can-we-use-pre-trained-word-embeddings-for-weight-initialization-in-nn-embedding/1222/2
        self.embedding = nn.Embedding(pretrained_emb.size(0), pretrained_emb.size(1))
        self.embedding.weight = nn.Parameter(pretrained_emb)
        self.embedding.weight.requires_grad = fine_tune
        n_units[0] += pretrained_emb.size(1)

        self.use_vertex_feature = use_vertex_feature
        if self.use_vertex_feature:
            self.vertex_feature = nn.Embedding(vertex_feature.size(0), vertex_feature.size(1))
            self.vertex_feature.weight = nn.Parameter(vertex_feature)
            self.vertex_feature.weight.requires_grad = False
            n_units[0] += vertex_feature.size(1)

        self.layer_stack = nn.ModuleList()

        for i in range(self.num_layer):
            self.layer_stack.append(
                    BatchGraphConvolution(n_units[i], n_units[i + 1])
                    ) 
Example #30
Source File: per_sample_gradient_test.py    From pytorch-dp with Apache License 2.0
def test_instancenorm(self):
        self._check_one_layer(
            nn.InstanceNorm1d(16, affine=True), torch.randn(16, 16, 10)
        )
        self._check_one_layer(
            nn.InstanceNorm2d(16, affine=True), torch.randn(16, 16, 10, 9)
        )
        self._check_one_layer(
            nn.InstanceNorm3d(16, affine=True), torch.randn(16, 16, 10, 9, 8)
        )