Python torch.nn.parameter.Parameter() Examples

The following are 30 code examples of torch.nn.parameter.Parameter(). Each example names the project and source file it was taken from, so you can look it up in its original context. You may also want to check out all available functions/classes of the module torch.nn.parameter, or try the search function.
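Before the project snippets, here is a minimal, self-contained sketch of what Parameter itself does (the module and attribute names here are illustrative, not from any project below): wrapping a tensor in Parameter and assigning it to a module attribute registers it with the module, so it appears in .parameters() and has requires_grad=True by default.

import torch
import torch.nn as nn
from torch.nn.parameter import Parameter

class Scale(nn.Module):
    """Toy module with a single learnable per-feature scale."""
    def __init__(self, num_features):
        super(Scale, self).__init__()
        # Assigning a Parameter to an attribute registers it with the module,
        # so optimizers pick it up via .parameters().
        self.scale = Parameter(torch.ones(num_features))

    def forward(self, x):
        return x * self.scale

m = Scale(4)
print([name for name, p in m.named_parameters()])  # ['scale']
print(m.scale.requires_grad)                       # True
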
Example #1
Source File: topology_attack.py    From DeepRobust with MIT License
def __init__(self, model=None, nnodes=None, loss_type='CE', feature_shape=None, attack_structure=True, attack_features=False, device='cpu'):

        super(PGDAttack, self).__init__(model, nnodes, attack_structure, attack_features, device)

        assert attack_features or attack_structure, 'attack_features and attack_structure cannot both be False'

        self.loss_type = loss_type
        self.modified_adj = None
        self.modified_features = None

        if attack_structure:
            assert nnodes is not None, 'Please give nnodes='
            self.adj_changes = Parameter(torch.FloatTensor(int(nnodes*(nnodes-1)/2)))
            self.adj_changes.data.fill_(0)

        if attack_features:
            # topology attack only perturbs the graph structure
            raise NotImplementedError('Topology Attack does not support attacking features')

        self.complementary = None 
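
Example #1 stores its structure perturbation as a flat vector of length nnodes*(nnodes-1)/2, i.e. one entry per edge of an undirected graph without self-loops. A hedged sketch of how such a vector can be scattered back into a symmetric adjacency-change matrix (the helper name is hypothetical; DeepRobust's own reconstruction may differ in detail):

import torch

def vector_to_symmetric(adj_changes, nnodes):
    # Scatter the nnodes*(nnodes-1)/2 entries into the strict upper
    # triangle, then mirror to get a symmetric perturbation matrix.
    m = torch.zeros(nnodes, nnodes)
    rows, cols = torch.triu_indices(nnodes, nnodes, offset=1)
    m[rows, cols] = adj_changes
    return m + m.t()

print(vector_to_symmetric(torch.ones(6), 4))  # 4 nodes -> 6 edge slots
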
Example #2
Source File: reparameterized_layers.py    From dynamic-reparameterization with Apache License 2.0
def __init__(self, in_features, out_features, initial_sparsity, bias=True, sparse=True):
        super(DynamicLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.initial_sparsity = initial_sparsity
        self.sparse = sparse
        
        if sparse:
            self.d_tensor = SparseTensor([out_features, in_features], initial_sparsity=initial_sparsity)
        else:
            self.d_tensor = TiedTensor([out_features, in_features], initial_sparsity=initial_sparsity)
            
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.bias = None

        self.init_parameters()
#        self.weight = self.d_tensor.s_tensor 
Example #3
Source File: train_eval_rtml.py    From verb-attributes with MIT License
def __init__(self, L=3, lamb=5):
        super(RTML, self).__init__()
        self.L = L
        self.N = len(att_names)
        self.lamb = lamb
        self.theta = Parameter(torch.Tensor(self.L, 300, 300))
        self.alpha = Parameter(torch.Tensor(self.N, self.L + 1))  # the extra (L+1)-th component lets
                                                                  # the norm be smaller than lamb
        self.reset_parameters()

        self.att_emb = nn.Embedding(self.N, 300)
        if PREINIT:
            self.att_emb.weight.data = _load_vectors(att_names).cuda()
        else:
            _np_emb = np.random.randn(self.N, 300)
            _np_emb = _np_emb / np.square(_np_emb).sum(1)[:, None]
            self.att_emb.weight.data = torch.FloatTensor(_np_emb).cuda() 
Example #4
Source File: model_classes.py    From e2e-model-learning with Apache License 2.0
def __init__(self, X, Y, hidden_layer_sizes):
        super(Net, self).__init__()

        # Initialize linear layer with least squares solution
        X_ = np.hstack([X, np.ones((X.shape[0],1))])
        Theta = np.linalg.solve(X_.T.dot(X_), X_.T.dot(Y))
        
        self.lin = nn.Linear(X.shape[1], Y.shape[1])
        W, b = self.lin.parameters()
        W.data = torch.Tensor(Theta[:-1,:].T)
        b.data = torch.Tensor(Theta[-1,:])
        
        # Set up non-linear network of 
        # Linear -> BatchNorm -> ReLU -> Dropout layers
        layer_sizes = [X.shape[1]] + hidden_layer_sizes
        layers = reduce(operator.add,
            [[nn.Linear(a, b), nn.BatchNorm1d(b), nn.ReLU(), nn.Dropout(p=0.2)]
                for a, b in zip(layer_sizes[:-1], layer_sizes[1:])])
        layers += [nn.Linear(layer_sizes[-1], Y.shape[1])]
        self.net = nn.Sequential(*layers)
        self.sig = Parameter(torch.ones(1, Y.shape[1]).cuda()) 
Example #5
Source File: CrossReplicaBN.py    From BigGAN-pytorch with Apache License 2.0
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super(_BatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters() 
Example #6
Source File: main.py    From dgl with Apache License 2.0
def __init__(self, num_nodes, num_sampled, embedding_size):
        super(NSLoss, self).__init__()
        self.num_nodes = num_nodes  
        self.num_sampled = num_sampled  
        self.embedding_size = embedding_size  
        self.weights = Parameter(torch.FloatTensor(num_nodes, embedding_size))  
        # [ (log(i+2) - log(i+1)) / log(num_nodes + 1)]
        self.sample_weights = F.normalize(
            torch.Tensor(
                [
                    (math.log(k + 2) - math.log(k + 1)) / math.log(num_nodes + 1)
                    for k in range(num_nodes)
                ]
            ),
            dim=0,
        )

        self.reset_parameters() 
Example #7
Source File: main.py    From dgl with Apache License 2.0
def __init__(self, num_nodes, embedding_size, embedding_u_size, edge_types, edge_type_count, dim_a):
        super(DGLGATNE, self).__init__()
        self.num_nodes = num_nodes
        self.embedding_size = embedding_size
        self.embedding_u_size = embedding_u_size
        self.edge_types = edge_types
        self.edge_type_count = edge_type_count
        self.dim_a = dim_a

        self.node_embeddings = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        self.node_type_embeddings = Parameter(torch.FloatTensor(num_nodes, edge_type_count, embedding_u_size))
        self.trans_weights = Parameter(torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size))
        self.trans_weights_s1 = Parameter(torch.FloatTensor(edge_type_count, embedding_u_size, dim_a))
        self.trans_weights_s2 = Parameter(torch.FloatTensor(edge_type_count, dim_a, 1))

        self.reset_parameters() 
Example #8
Source File: ib_layers.py    From VIBNet with MIT License
def __init__(self, dim, mask_thresh=0, init_mag=9, init_var=0.01,
                kl_mult=1, divide_w=False, sample_in_training=True, sample_in_testing=False, masking=False):
        super(InformationBottleneck, self).__init__()
        self.prior_z_logD = Parameter(torch.Tensor(dim))
        self.post_z_mu = Parameter(torch.Tensor(dim))
        self.post_z_logD = Parameter(torch.Tensor(dim))

        self.epsilon = 1e-8
        self.dim = dim
        self.sample_in_training = sample_in_training
        self.sample_in_testing = sample_in_testing
        # if masking=True, apply mask directly
        self.masking = masking

        # initialization
        stdv = 1. / math.sqrt(dim)
        self.post_z_mu.data.normal_(1, init_var)
        self.prior_z_logD.data.normal_(-init_mag, init_var)
        self.post_z_logD.data.normal_(-init_mag, init_var)

        self.need_update_z = True # flag for updating z during testing
        self.mask_thresh = mask_thresh
        self.kl_mult = kl_mult
        self.divide_w = divide_w
Example #9
Source File: inceptionv3.py    From 2D-kinectics with MIT License
def load_my_state_dict(self, state_dict, seq_len):
        own_state = self.state_dict()
        #print(own_state.keys())
        #pdb.set_trace()
        for name, param in state_dict.items():
            if name in own_state.keys():
                if isinstance(param, Parameter):
                    # backwards compatibility for serialized parameters
                    param = param.data

                if 'Conv2d_1a_3x3' in name and 'bn' not in name:
                    param = param.repeat(1, seq_len, 1, 1)
                    param = param / float(seq_len)

                try:
                    own_state[name].copy_(param)
                except Exception:
                    raise RuntimeError('While copying the parameter named {}, '
                                       'whose dimensions in the model are {} and '
                                       'whose dimensions in the checkpoint are {}.'
                                       .format(name, own_state[name].size(), param.size())) 
Example #10
Source File: resnet.py    From 2D-kinectics with MIT License
def load_my_state_dict(self, state_dict, seq_len):
        own_state = self.state_dict()
        # print(own_state.keys())
        for name, param in state_dict.items():
            # pdb.set_trace()
            if name in own_state.keys():
                # print(name)
                if isinstance(param, Parameter):
                    # backwards compatibility for serialized parameters
                    param = param.data
                if name == 'conv1.weight':
                    print(name, 'is being filled with {:d} seq_len\n'.format(seq_len))
                    param = param.repeat(1, seq_len, 1, 1)
                    param = param / float(seq_len)
                try:
                    own_state[name].copy_(param)
                except Exception:
                    raise RuntimeError('While copying the parameter named {}, '
                                       'whose dimensions in the model are {} and '
                                       'whose dimensions in the checkpoint are {}.'
                                       .format(name, own_state[name].size(), param.size()))
            else:
                print('NAME IS NOT IN OWN STATE::>' + name) 
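
Examples #9 and #10 both inflate a pretrained RGB convolution kernel to accept seq_len stacked frames by repeating it along the input-channel axis and dividing by seq_len, which keeps the initial activations on roughly the same scale as the source network. A minimal illustration of that trick in isolation (the shapes here are made up for the demo):

import torch

seq_len = 4
w_rgb = torch.randn(64, 3, 7, 7)                          # pretrained 3-channel kernel
w_seq = w_rgb.repeat(1, seq_len, 1, 1) / float(seq_len)   # -> (64, 12, 7, 7)
print(w_seq.shape)
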
Example #11
Source File: quan_all_main.py    From alibabacloud-quantization-networks with Apache License 2.0
def load_params(new_model, pretrained_model):
    #new_model_dict = new_model.module.state_dict()
    new_model_dict = new_model.state_dict()
    pretrained_checkpoint = load_checkpoint(pretrained_model)
    #for name, param in pretrained_checkpoint.items():
    for name, param in pretrained_checkpoint['state_dict'].items():
        print('pretrained_model params name and size: ', name, param.size())
        if name in new_model_dict:
            if isinstance(param, Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            try:
                new_model_dict[name].copy_(param)
                print('############# new_model load params name: ', name)
            except Exception:
                raise RuntimeError('While copying the parameter named {}, '
                                   'whose dimensions in the model are {} and '
                                   'whose dimensions in the checkpoint are {}.'
                                   .format(name, new_model_dict[name].size(), param.size()))
        else:
            continue 
Example #12
Source File: quantization.py    From alibabacloud-quantization-networks with Apache License 2.0
def __init__(self, quant_values=[-1, 0, 1], quan_bias=[0], init_beta=0.0):
        """register_parameter: params w/ grad, which need to be learned;
        register_buffer: params w/o grad, which do not need to be learned.
        An example is shown in:
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
        """
        super(Quantization, self).__init__()
        self.values = quant_values
        # number of sigmoids
        self.n = len(self.values) - 1 
        self.alpha = Parameter(torch.Tensor([1]))
        self.beta = Parameter(torch.Tensor([1]))
        self.register_buffer('biases', torch.zeros(self.n))
        self.register_buffer('scales', torch.zeros(self.n))
          
        boundary = np.array(quan_bias)
        self.init_scale_and_offset()
        self.bias_inited = False
        self.alpha_beta_inited = False
        self.init_biases(boundary)
        self.init_alpha_and_beta(init_beta) 
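
The docstring in Example #12 summarizes a split this page keeps running into: Parameter (or register_parameter) for learnable tensors, register_buffer for persistent but non-learned state. A minimal sketch of the difference (the class name is illustrative):

import torch
import torch.nn as nn
from torch.nn.parameter import Parameter

class Demo(nn.Module):
    def __init__(self):
        super(Demo, self).__init__()
        self.alpha = Parameter(torch.ones(1))           # learnable: in .parameters()
        self.register_buffer('scales', torch.zeros(3))  # persistent: in state_dict, no grad

m = Demo()
print([n for n, _ in m.named_parameters()])  # ['alpha']
print(sorted(m.state_dict().keys()))         # ['alpha', 'scales']
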
Example #13
Source File: main_pytorch.py    From GATNE with MIT License
def __init__(
        self, num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a
    ):
        super(GATNEModel, self).__init__()
        self.num_nodes = num_nodes
        self.embedding_size = embedding_size
        self.embedding_u_size = embedding_u_size
        self.edge_type_count = edge_type_count
        self.dim_a = dim_a

        self.node_embeddings = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        self.node_type_embeddings = Parameter(
            torch.FloatTensor(num_nodes, edge_type_count, embedding_u_size)
        )
        self.trans_weights = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size)
        )
        self.trans_weights_s1 = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, dim_a)
        )
        self.trans_weights_s2 = Parameter(torch.FloatTensor(edge_type_count, dim_a, 1))

        self.reset_parameters() 
Example #14
Source File: main_pytorch.py    From GATNE with MIT License
def __init__(self, num_nodes, num_sampled, embedding_size):
        super(NSLoss, self).__init__()
        self.num_nodes = num_nodes
        self.num_sampled = num_sampled
        self.embedding_size = embedding_size
        self.weights = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        self.sample_weights = F.normalize(
            torch.Tensor(
                [
                    (math.log(k + 2) - math.log(k + 1)) / math.log(num_nodes + 1)
                    for k in range(num_nodes)
                ]
            ),
            dim=0,
        )

        self.reset_parameters() 
Example #15
Source File: syncbn.py    From pytorch-syncbn with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super(_BatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        self.freezed = False
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
        self.reset_parameters() 
Example #16
Source File: layers.py    From graph-cnn.pytorch with MIT License
def __init__(self, in_features, out_features, bias=True, init='xavier'):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        if init == 'uniform':
            print("| Uniform Initialization")
            self.reset_parameters_uniform()
        elif init == 'xavier':
            print("| Xavier Initialization")
            self.reset_parameters_xavier()
        elif init == 'kaiming':
            print("| Kaiming Initialization")
            self.reset_parameters_kaiming()
        else:
            raise NotImplementedError 
Example #17
Source File: gat_layers.py    From DeepInf with MIT License
def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True):
        super(BatchMultiHeadGraphAttention, self).__init__()
        self.n_head = n_head
        self.w = Parameter(torch.Tensor(n_head, f_in, f_out))
        self.a_src = Parameter(torch.Tensor(n_head, f_out, 1))
        self.a_dst = Parameter(torch.Tensor(n_head, f_out, 1))

        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(attn_dropout)
        if bias:
            self.bias = Parameter(torch.Tensor(f_out))
            init.constant_(self.bias, 0)
        else:
            self.register_parameter('bias', None)

        init.xavier_uniform_(self.w)
        init.xavier_uniform_(self.a_src)
        init.xavier_uniform_(self.a_dst) 
Example #18
Source File: gat_layers.py    From DeepInf with MIT License
def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True):
        super(MultiHeadGraphAttention, self).__init__()
        self.n_head = n_head
        self.w = Parameter(torch.Tensor(n_head, f_in, f_out))
        self.a_src = Parameter(torch.Tensor(n_head, f_out, 1))
        self.a_dst = Parameter(torch.Tensor(n_head, f_out, 1))

        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(attn_dropout)

        if bias:
            self.bias = Parameter(torch.Tensor(f_out))
            init.constant_(self.bias, 0)
        else:
            self.register_parameter('bias', None)

        init.xavier_uniform_(self.w)
        init.xavier_uniform_(self.a_src)
        init.xavier_uniform_(self.a_dst) 
Example #19
Source File: mettack.py    From DeepRobust with MIT License
def __init__(self, model=None, nnodes=None, feature_shape=None, lambda_=0.5, attack_structure=True, attack_features=False, device='cpu'):

        super(BaseMeta, self).__init__(model, nnodes, attack_structure, attack_features, device)
        self.lambda_ = lambda_

        assert attack_features or attack_structure, 'attack_features and attack_structure cannot both be False'

        self.modified_adj = None
        self.modified_features = None

        if attack_structure:
            assert nnodes is not None, 'Please give nnodes='
            self.adj_changes = Parameter(torch.FloatTensor(nnodes, nnodes))
            self.adj_changes.data.fill_(0)

        if attack_features:
            assert feature_shape is not None, 'Please give feature_shape='
            self.feature_changes = Parameter(torch.FloatTensor(feature_shape))
            self.feature_changes.data.fill_(0)

        self.with_relu = model.with_relu 
Example #20
Source File: main.py    From alibabacloud-quantization-networks with Apache License 2.0
def get_params(pretrained_model):
    pretrained_checkpoint = load_checkpoint(pretrained_model)
    for name, param in pretrained_checkpoint.items():
    #for name, param in pretrained_checkpoint['state_dict'].items():
        print('pretrained_model params name and size: ', name, param.size())
        if isinstance(param, Parameter):
            # backwards compatibility for serialized parameters
            param = param.data
        try:
            np.save(name + '.npy', param.cpu().numpy())
            print('############# saved params name: ', name)
        except Exception:
            raise RuntimeError('While saving the parameter named {}, '
                               'whose size is {}.'.format(name, param.size()))
Example #21
Source File: model.py    From LearnTrajDep with MIT License
def __init__(self, in_features, out_features, bias=True, node_n=48):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        self.att = Parameter(torch.FloatTensor(node_n, node_n))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters() 
Example #22
Source File: CosNormClassifier.py    From OpenLongTailRecognition-OLTR with BSD 3-Clause "New" or "Revised" License
def __init__(self, in_dims, out_dims, scale=16, margin=0.5, init_std=0.001):
        super(CosNorm_Classifier, self).__init__()
        self.in_dims = in_dims
        self.out_dims = out_dims
        self.scale = scale
        self.margin = margin
        self.weight = Parameter(torch.Tensor(out_dims, in_dims).cuda())
        self.reset_parameters() 
Example #23
Source File: gcn.py    From DeepRobust with MIT License
def __init__(self, in_features, out_features, with_bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if with_bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters() 
Example #24
Source File: r_gcn.py    From DeepRobust with MIT License
def __init__(self, in_features, out_features, dropout):
        super(GGCL_D, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.dropout = dropout
        self.weight_miu = Parameter(torch.FloatTensor(in_features, out_features))
        self.weight_sigma = Parameter(torch.FloatTensor(in_features, out_features))
        # self.register_parameter('bias', None)
        self.reset_parameters() 
Example #25
Source File: modules.py    From GPT2sQA with Apache License 2.0
def __init__(self, nf, nx):
        super(Conv1D, self).__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = Parameter(w)
        self.bias = Parameter(torch.zeros(nf)) 
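
Example #25 is the Conv1D layer used in GPT-2-style models: despite the name, it is a linear layer whose weight is stored transposed, with shape (nx, nf) instead of nn.Linear's (nf, nx). A hedged sketch of how its forward is typically written (GPT2sQA's exact code may differ):

import torch

def conv1d_forward(self, x):
    # Flatten all leading dims, apply x @ weight + bias, restore the shape.
    size_out = x.size()[:-1] + (self.nf,)
    x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
    return x.view(*size_out)
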
Example #26
Source File: parameterized_tensors.py    From dynamic-reparameterization with Apache License 2.0
def __init__(self, full_tensor_size, initial_sparsity, sub_kernel_granularity=False):

        super(TiedTensor, self).__init__()

        ndim = len(full_tensor_size)
        assert ndim == 2 or ndim == 4, 'only 2D or 4D tensors supported'

        self.full_tensor_size = torch.Size(full_tensor_size)
        self.sub_kernel_granularity = sub_kernel_granularity

        n_alloc_elements = np.prod(self.full_tensor_size).item() if sub_kernel_granularity else np.prod(self.full_tensor_size[:2]).item()

        self.num_weights = round((1 - initial_sparsity) * n_alloc_elements)

        self.register_buffer('weight_alloc', torch.zeros(n_alloc_elements).long())
        indices = np.arange(n_alloc_elements)
        np.random.shuffle(indices)
        self.weight_alloc[indices] = uniform_coverage(self.num_weights, n_alloc_elements)

        self.conv_tensor = (ndim != 2)

        trailing_dimensions = [] if sub_kernel_granularity else self.full_tensor_size[2:]
        self.bank = Parameter(torch.Tensor(self.num_weights, *trailing_dimensions))

        self.init_parameters()
Example #27
Source File: r_gcn.py    From DeepRobust with MIT License
def __init__(self, in_features, out_features, dropout=0.6):
        super(GGCL_F, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.dropout = dropout
        self.weight_miu = Parameter(torch.FloatTensor(in_features, out_features))
        self.weight_sigma = Parameter(torch.FloatTensor(in_features, out_features))
        self.reset_parameters() 
Example #28
Source File: fga.py    From DeepRobust with MIT License
def __init__(self, model, nnodes, feature_shape=None, attack_structure=True, attack_features=False, device='cpu'):

        super(FGA, self).__init__(model, nnodes, attack_structure=attack_structure, attack_features=attack_features, device=device)

        if self.attack_structure:
            self.adj_changes = Parameter(torch.FloatTensor(nnodes))
            self.adj_changes.data.fill_(0)

        assert not self.attack_features, "not support attacking features"

        if self.attack_features:
            # unreachable while the assert above holds
            self.feature_changes = Parameter(torch.FloatTensor(feature_shape))
            self.feature_changes.data.fill_(0)
Example #29
Source File: conv2d_mtl.py    From meta-transfer-learning with MIT License
def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding, groups, bias):
        super(_ConvNdMtl, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        if transposed:
            self.weight = Parameter(torch.Tensor(
                in_channels, out_channels // groups, *kernel_size))
            self.mtl_weight = Parameter(torch.ones(in_channels, out_channels // groups, 1, 1))
        else:
            self.weight = Parameter(torch.Tensor(
                out_channels, in_channels // groups, *kernel_size))
            self.mtl_weight = Parameter(torch.ones(out_channels, in_channels // groups, 1, 1))
        self.weight.requires_grad = False
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
            self.bias.requires_grad = False
            self.mtl_bias = Parameter(torch.zeros(out_channels))
        else:
            self.register_parameter('bias', None)
            self.register_parameter('mtl_bias', None)
        self.reset_parameters() 
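
Example #29 freezes the base convolution weights (requires_grad=False) and learns only the per-channel mtl_weight/mtl_bias scalings. A hedged sketch of how such a layer might apply them in forward (the repository's actual Conv2dMtl forward may differ):

import torch.nn.functional as F

def conv2d_mtl_forward(self, x):
    # Modulate the frozen kernel by the learned scaling, then convolve.
    new_weight = self.weight.mul(self.mtl_weight.expand(self.weight.shape))
    new_bias = self.bias + self.mtl_bias if self.bias is not None else None
    return F.conv2d(x, new_weight, new_bias, self.stride,
                    self.padding, self.dilation, self.groups)
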
Example #30
Source File: base.py    From macarico with MIT License
def make_out_of_bounds(self):
        oob = Parameter(torch.Tensor(1, self.features.dim))
        oob.data.zero_()
        return oob