Python torch.nn.Linear() Examples

The following are 27 code examples showing how to use torch.nn.Linear(). They are extracted from open source projects; where available, the project, author, source file, and license are listed above each example.

You may also want to look through the other functions and classes available in the torch.nn module.
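Before the project examples, here is a minimal self-contained sketch of the nn.Linear API itself (the variable names are illustrative, not taken from any project below):

import torch
import torch.nn as nn

# nn.Linear(in_features, out_features, bias=True) applies y = x @ W.T + b.
# It accepts inputs of shape (*, in_features) and returns (*, out_features).
layer = nn.Linear(20, 5)      # 20 input features, 5 output features

x = torch.randn(128, 20)      # a batch of 128 samples
y = layer(x)
print(y.shape)                # torch.Size([128, 5])

# bias=False drops the additive term, as several of the examples below do.
unbiased = nn.Linear(20, 5, bias=False)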

Example 1
Project: hgraph2graph   Author: wengong-jin   File: encoder.py   License: MIT License
def __init__(self, rnn_type, input_size, node_fdim, hidden_size, depth, dropout):
        super(MPNEncoder, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.depth = depth
        self.W_o = nn.Sequential( 
                nn.Linear(node_fdim + hidden_size, hidden_size), 
                nn.ReLU(),
                nn.Dropout(dropout)
        )

        if rnn_type == 'GRU':
            self.rnn = GRU(input_size, hidden_size, depth) 
        elif rnn_type == 'LSTM':
            self.rnn = LSTM(input_size, hidden_size, depth) 
        else:
            raise ValueError('unsupported rnn cell type ' + rnn_type) 
Example 2
Project: cat-bbs   Author: aleju   File: model.py   License: MIT License
def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(MyResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # note the increasing dilation
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilation=1)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)

        # these layers will not be used
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
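The weight-initialization loop at the end is He (Kaiming) initialization written by hand: n is the fan-out of each convolution, and a standard deviation of sqrt(2/n) is the ReLU-specific variance-preserving choice. A minimal sketch of an equivalent using torch.nn.init (the function name is mine, not the project's):

import torch.nn as nn

def init_resnet_weights(model):
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            # fan_out = out_channels * kernel_h * kernel_w, matching n above
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)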
Example 3
def _init_modules(self): 
    self._init_head_tail()

    # rpn
    self.rpn_net = nn.Conv2d(self._net_conv_channels, cfg.RPN_CHANNELS, [3, 3], padding=1)

    self.rpn_cls_score_net = nn.Conv2d(cfg.RPN_CHANNELS, self._num_anchors * 2, [1, 1])
    
    self.rpn_bbox_pred_net = nn.Conv2d(cfg.RPN_CHANNELS, self._num_anchors * 4, [1, 1])

    self.cls_score_net_fast = nn.Linear(self._fc7_channels, self._num_classes+1)
    self.bbox_pred_net_fast = nn.Linear(self._fc7_channels, (self._num_classes+1) * 4)

    self.cls_score_net = nn.Linear(self._fc7_channels, self._num_classes)  # between class
    self.bbox_pred_net = nn.Linear(self._fc7_channels, self._num_classes)  # between boxes

    self.init_weights() 
Example 4
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: model2.py   License: Apache License 2.0
def __init__(self, input_size, hidden_size, correlation_func=1, do_similarity=False):
        super(AttentionScore, self).__init__()
        self.correlation_func = correlation_func
        self.hidden_size = hidden_size

        if correlation_func == 2 or correlation_func == 3:
            self.linear = nn.Linear(input_size, hidden_size, bias=False)
            if do_similarity:
                self.diagonal = Parameter(torch.ones(1, 1, 1) / (hidden_size ** 0.5), requires_grad=False)
            else:
                self.diagonal = Parameter(torch.ones(1, 1, hidden_size), requires_grad=True)

        if correlation_func == 4:
            self.linear = nn.Linear(input_size, input_size, bias=False)

        if correlation_func == 5:
            self.linear = nn.Linear(input_size, hidden_size, bias=False) 
Example 5
Project: subword-qac   Author: clovaai   File: model.py   License: MIT License
def __init__(self, config):
        super(LanguageModel, self).__init__()
        self.config = config
        self.ntoken = ntoken = config.ntoken
        self.ninp = ninp = config.ninp
        self.nhid = nhid = config.nhid
        self.nlayers = nlayers = config.nlayers

        self.encoder = nn.Embedding(ntoken, ninp)
        self.dropouti = nn.Dropout(config.dropouti) if config.dropouti > 0 else None
        self.lstm = LSTM([ninp] + [nhid] * nlayers, bias=False, layernorm=True,
                         dropoutr=config.dropoutr, dropouth=config.dropouth, dropouto=config.dropouto)
        self.projection = nn.Linear(nhid, ninp)
        self.decoder = nn.Linear(ninp, ntoken)
        self.decoder.weight = self.encoder.weight

        self.init_weights() 
Example 6
Project: neural-fingerprinting   Author: StephanZheng   File: model.py   License: BSD 3-Clause "New" or "Revised" License
def __init__(self):
        super(CW2_Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.bnm1 = nn.BatchNorm2d(32, momentum=0.1)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.bnm2 = nn.BatchNorm2d(64, momentum=0.1)
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.bnm3 = nn.BatchNorm2d(128, momentum=0.1)
        self.conv4 = nn.Conv2d(128, 128, 3)
        self.bnm4 = nn.BatchNorm2d(128, momentum=0.1)
        self.fc1 = nn.Linear(3200, 256)
        #self.dropout1 = nn.Dropout(p=0.35, inplace=False)
        self.bnm5 = nn.BatchNorm1d(256, momentum=0.1)
        self.fc2 = nn.Linear(256, 256)
        self.bnm6 = nn.BatchNorm1d(256, momentum=0.1)
        self.fc3 = nn.Linear(256, 10)
        #self.dropout2 = nn.Dropout(p=0.35, inplace=False)
        #self.dropout3 = nn.Dropout(p=0.35, inplace=False) 
Example 7
Project: MomentumContrast.pytorch   Author: peisuke   File: network.py   License: MIT License
def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.fc1 = nn.Linear(9216, 128) 
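The 9216 in fc1 is worth decoding. Assuming a 28x28 single-channel input (e.g. MNIST) and a 2x2 max pool in the forward pass, which this excerpt does not show, the arithmetic works out as follows:

# Two 3x3 convs with stride 1 and no padding, then a 2x2 max pool:
h = 28 - 2 - 2           # 24 after the two convolutions
h = h // 2               # 12 after pooling
flattened = 64 * h * h   # 64 channels * 12 * 12 = 9216, matching fc1's input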
Example 8
def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob) 
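For concreteness, with the standard BERT-base configuration (values not taken from this excerpt) the head arithmetic works out so that the query, key, and value projections are square:

hidden_size = 768
num_attention_heads = 12
attention_head_size = hidden_size // num_attention_heads   # 64 dims per head
all_head_size = num_attention_heads * attention_head_size  # 768, so query/key/value
                                                           # are 768 -> 768 nn.Linear layers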
Example 9
def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob) 
Example 10
def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.intermediate_act_fn = ACT2FN[config.hidden_act] \
            if isinstance(config.hidden_act, str) else config.hidden_act 
Example 11
def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob) 
Example 12
def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh() 
Example 13
def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) 
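The assignment to decoder.weight works because nn.Linear stores its weight with shape (out_features, in_features): an embedding matrix of shape (vocab, dim) therefore fits an nn.Linear(dim, vocab) directly. A standalone sketch of the tying pattern (sizes are illustrative):

import torch.nn as nn

embedding = nn.Embedding(1000, 64)         # weight shape: (1000, 64)
decoder = nn.Linear(64, 1000, bias=False)  # weight shape: (1000, 64) as well

decoder.weight = embedding.weight          # both modules now share one tensor
assert decoder.weight is embedding.weight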
Example 14
def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2) 
Example 15
def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2) 
Example 16
def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_() 
Example 17
def __init__(self, config, num_labels=2):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights) 
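self.apply(self.init_bert_weights) works because nn.Module.apply(fn) calls fn recursively on every submodule, so the single function from Example 16 initializes the entire model. A minimal sketch with an illustrative function name:

import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))

def zero_linear_bias(module):
    # Invoked once per submodule (and once for the root) by Module.apply.
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_()

model.apply(zero_linear_bias)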
Example 18
def __init__(self, config, num_labels=2):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights) 
Example 19
def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights) 
Example 20
def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights) 
Example 21
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: model2.py   License: Apache License 2.0
def __init__(self, h, d_model, dropout=0.1):
        """Take in model size and number of heads."""
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k
        self.d_k = d_model // h
        self.h = h
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout) 
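The clones helper is not shown in this excerpt. Its definition is assumed from The Annotated Transformer, whose structure this code mirrors: it deep-copies a module N times into an nn.ModuleList so the copies do not share parameters. A sketch of the presumed helper:

import copy
import torch.nn as nn

def clones(module, N):
    # Produce N independent deep copies of a module.
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])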
Example 22
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: model2.py   License: Apache License 2.0
def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout) 
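The forward pass is omitted from this excerpt. Given the layer shapes, it presumably follows The Annotated Transformer's position-wise feed-forward network, with dropout applied to the hidden activations (an assumption about the omitted code, not shown above):

# Presumed forward method:
#     def forward(self, x):
#         return self.w_2(self.dropout(F.relu(self.w_1(x))))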
Example 23
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: model2.py   License: Apache License 2.0
def __init__(self, d_model, vocab):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab) 
Example 24
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: model2.py   License: Apache License 2.0
def __init__(self, encoder, decoder, gru, src_embed, tgt_embed, generator, input_size):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.gru = gru
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator
        self.linear = nn.Linear(input_size * 2, input_size)
        self.attention = AttentionScore(input_size, input_size)
        self.gru_decoder = nn.GRU(input_size * 2, input_size, 1) 
Example 25
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: model.py   License: Apache License 2.0
def __init__(self, h, d_model, dropout=0.1):
        """Take in model size and number of heads."""
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k
        self.d_k = d_model // h
        self.h = h
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout) 
Example 26
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: model.py   License: Apache License 2.0
def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout) 
Example 27
Project: controllable-text-attribute-transfer   Author: Nrgeup   File: model.py   License: Apache License 2.0
def __init__(self, d_model, vocab):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab)