Python torch.nn.functional.log_softmax() Examples

The following are 30 code examples of torch.nn.functional.log_softmax(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn.functional, or try the search function.
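
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what F.log_softmax computes: it is the numerically stable composition of softmax and log, and it pairs with F.nll_loss to reproduce cross-entropy on raw logits. The tensor shapes are illustrative only.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)                # (batch, num_classes)
log_probs = F.log_softmax(logits, dim=1)   # log-probabilities along the class dimension

# equivalent to log(softmax(...)), but computed in a numerically stable way
assert torch.allclose(log_probs, torch.log(F.softmax(logits, dim=1)), atol=1e-6)

# each row of exp(log_probs) sums to 1
assert torch.allclose(log_probs.exp().sum(dim=1), torch.ones(4), atol=1e-6)

# log_softmax + nll_loss gives the same result as cross_entropy on raw logits
target = torch.randint(0, 10, (4,))
assert torch.allclose(F.nll_loss(log_probs, target), F.cross_entropy(logits, target), atol=1e-6)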
Example #1
Source File: model.py    From ICDAR-2019-SROIE with MIT License
def forward(self, input, target):
        if input.dim()>2:
            input = input.view(input.size(0),input.size(1),-1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1,2)    # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1,input.size(2))   # N,H*W,C => N*H*W,C
        target = target.view(-1,1)

        logpt = F.log_softmax(input, dim=1)  # log-probabilities over classes; explicit dim avoids the deprecated implicit-dim behavior
        logpt = logpt.gather(1,target)
        logpt = logpt.view(-1)
        pt = Variable(logpt.data.exp())

        if self.alpha is not None:
            if self.alpha.type()!=input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0,target.data.view(-1))
            logpt = logpt * Variable(at)

        loss = -1 * (1-pt)**self.gamma * logpt
        if self.size_average: return loss.mean()
        else: return loss.sum() 
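
The forward above implements a focal loss; the attributes it reads (self.alpha, self.gamma, self.size_average) are set in a constructor that is not shown. A minimal, hypothetical constructor that makes the snippet usable might look like the sketch below (the class name FocalLoss and the default values are assumptions, not the original project's code).

import torch
import torch.nn as nn

class FocalLoss(nn.Module):
    # hypothetical constructor; the attribute names match the forward() shown above
    def __init__(self, gamma=2.0, alpha=None, size_average=True):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha            # optional per-class weights, e.g. torch.tensor([0.25, 0.75])
        self.size_average = size_average

With gamma=0 and alpha=None the loss reduces to ordinary cross-entropy, which is a convenient sanity check for implementations of this kind.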
Example #2
Source File: models.py    From IGMC with MIT License
def forward(self, data):
        # edge_type is assumed to be stored on the data object; the original snippet
        # passed it to dropout_adj below without defining it first
        x, edge_index, edge_type, batch = data.x, data.edge_index, data.edge_type, data.batch
        if self.adj_dropout > 0:
            edge_index, edge_type = dropout_adj(
                edge_index, edge_type, p=self.adj_dropout, 
                force_undirected=self.force_undirected, num_nodes=len(x), 
                training=self.training
            )
        concat_states = []
        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index))
            concat_states.append(x)
        concat_states = torch.cat(concat_states, 1)
        x = global_add_pool(concat_states, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        if self.regression:
            return x[:, 0]
        else:
            return F.log_softmax(x, dim=-1) 
Example #3
Source File: models.py    From cvpr2018-hnd with MIT License
def forward(self, input, target): # input = Variable(logits), target = labels
        loss = Variable(torch.zeros(1).cuda()) if self.gpu else Variable(torch.zeros(1))
        
        # novel loss
        if self.loo > 0.:
            target_novel = self.labels_relevant[target]
            for i, rel in enumerate(self.relevant):
                if target_novel[:,i].any():
                    relevant_loc = target_novel[:,i].nonzero().view(-1)
                    loss += -F.log_softmax(input[relevant_loc][:, rel], dim=1)[:,0].mean() * self.class_weight[i]
            loss *= self.loo
        
        # known loss
        log_probs = F.log_softmax(input, dim=1)
        loss += F.nll_loss(log_probs, Variable(target))
        
        # regularization
        if self.label_smooth > 0.:
            loss -= (log_probs.mean() + self.kld_u_const) * self.label_smooth
        
        return loss 
Example #4
Source File: nn_lib.py    From ConvLab with MIT License
def forward(self, inputs):
        """
        :param inputs: batch_size x input_size
        :return:
        """
        if self.is_lstm:
            h, c = inputs
            if h.dim() == 3:
                h = h.squeeze(0)
                c = c.squeeze(0)
            logits = self.p_h(h) + self.p_c(c)
        else:
            logits = self.p_h(inputs)
        logits = logits.view(-1, self.k_size)
        log_qy = F.log_softmax(logits, dim=1)
        return logits, log_qy 
Example #5
Source File: text_cnn.py    From TaskBot with GNU General Public License v3.0
def forward(self, x):
        # x: (batch, sentence_length)
        x = self.embed(x)
        # x: (batch, sentence_length, embed_dim)
        # TODO init embed matrix with pre-trained
        x = x.unsqueeze(1)
        # x: (batch, 1, sentence_length, embed_dim)
        x1 = self.conv_and_pool(x, self.conv11)  # (batch, kernel_num)
        x2 = self.conv_and_pool(x, self.conv12)  # (batch, kernel_num)
        x3 = self.conv_and_pool(x, self.conv13)  # (batch, kernel_num)
        x = torch.cat((x1, x2, x3), 1)  # (batch, 3 * kernel_num)
        x = self.dropout(x)
        logit = F.log_softmax(self.fc1(x), dim=1)
        # logit = F.softmax(self.fc1(x), dim=1)
        # logit = self.fc1(x)
        return logit 
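
The conv_and_pool helper called above is not included in the snippet. In TextCNN-style models it is usually a convolution followed by ReLU and max-pooling over time; the method below is an assumed reconstruction, not the original project's code.

    def conv_and_pool(self, x, conv):
        # x: (batch, 1, sentence_length, embed_dim)
        x = F.relu(conv(x)).squeeze(3)              # (batch, kernel_num, conv_out_len)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)   # (batch, kernel_num)
        return x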
Example #6
Source File: nnutils.py    From hgraph2graph with MIT License
def hier_topk(cls_scores, icls_scores, vocab, topk):
    batch_size = len(cls_scores)
    cls_scores = F.log_softmax(cls_scores, dim=-1)
    cls_scores_topk, cls_topk = cls_scores.topk(topk, dim=-1)
    final_topk = []
    for i in range(topk):
        clab = cls_topk[:, i]
        mask = vocab.get_mask(clab)
        masked_icls_scores = F.log_softmax(icls_scores + mask, dim=-1)
        icls_scores_topk, icls_topk = masked_icls_scores.topk(topk, dim=-1)
        topk_scores = cls_scores_topk[:, i].unsqueeze(-1) + icls_scores_topk
        final_topk.append( (topk_scores, clab.unsqueeze(-1).expand(-1, topk), icls_topk) )

    topk_scores, cls_topk, icls_topk = zip(*final_topk)
    topk_scores = torch.cat(topk_scores, dim=-1)
    cls_topk = torch.cat(cls_topk, dim=-1)
    icls_topk = torch.cat(icls_topk, dim=-1)

    topk_scores, topk_index = topk_scores.topk(topk, dim=-1)
    batch_index = cls_topk.new_tensor([[i] * topk for i in range(batch_size)])
    cls_topk = cls_topk[batch_index, topk_index]
    icls_topk = icls_topk[batch_index, topk_index]
    return topk_scores, cls_topk.tolist(), icls_topk.tolist() 
Example #7
Source File: nnutils.py    From hgraph2graph with MIT License
def hier_topk(cls_scores, icls_scores, vocab, topk):
    batch_size = len(cls_scores)
    cls_scores = F.log_softmax(cls_scores, dim=-1)
    cls_scores_topk, cls_topk = cls_scores.topk(topk, dim=-1)
    final_topk = []
    for i in range(topk):
        clab = cls_topk[:, i]
        mask = vocab.get_mask(clab)
        masked_icls_scores = F.log_softmax(icls_scores + mask, dim=-1)
        icls_scores_topk, icls_topk = masked_icls_scores.topk(topk, dim=-1)
        topk_scores = cls_scores_topk[:, i].unsqueeze(-1) + icls_scores_topk
        final_topk.append( (topk_scores, clab.unsqueeze(-1).expand(-1, topk), icls_topk) )

    topk_scores, cls_topk, icls_topk = zip(*final_topk)
    topk_scores = torch.cat(topk_scores, dim=-1)
    cls_topk = torch.cat(cls_topk, dim=-1)
    icls_topk = torch.cat(icls_topk, dim=-1)

    topk_scores, topk_index = topk_scores.topk(topk, dim=-1)
    batch_index = cls_topk.new_tensor([[i] * topk for i in range(batch_size)])
    cls_topk = cls_topk[batch_index, topk_index]
    icls_topk = icls_topk[batch_index, topk_index]
    return topk_scores, cls_topk.tolist(), icls_topk.tolist() 
Example #8
Source File: tutorial.py    From TaskBot with GNU General Public License v3.0
def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights 
Example #9
Source File: masked_cross_entropy.py    From ConvLab with MIT License
def masked_cross_entropy(logits, target, length):
    """
    Args:
        logits: A Variable containing a FloatTensor of size
            (batch, max_len, num_classes) which contains the
            unnormalized probability for each class.
        target: A Variable containing a LongTensor of size
            (batch, max_len) which contains the index of the true
            class for each corresponding step.
        length: A Variable containing a LongTensor of size (batch,)
            which contains the length of each data in a batch.
    Returns:
        loss: An average loss value masked by the length.
    """
    length = Variable(torch.LongTensor(length)).cuda()

    # logits_flat: (batch * max_len, num_classes)
    logits_flat = logits.view(-1, logits.size(-1))
    # log_probs_flat: (batch * max_len, num_classes)
    log_probs_flat = functional.log_softmax(logits_flat, dim=1)
    # target_flat: (batch * max_len, 1)
    target_flat = target.view(-1, 1)
    # losses_flat: (batch * max_len, 1)
    losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
    # losses: (batch, max_len)
    losses = losses_flat.view(*target.size())
    # mask: (batch, max_len)
    mask = sequence_mask(sequence_length=length, max_len=target.size(1))
    losses = losses * mask.float()
    loss = losses.sum() / length.float().sum()  # per-word loss
    return loss 
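
Both masked_cross_entropy variants on this page rely on a sequence_mask helper that is not shown. A common implementation, given here as an assumption rather than the original project's code, builds a (batch, max_len) boolean mask from the per-example lengths:

import torch

def sequence_mask(sequence_length, max_len=None):
    # sequence_length: LongTensor of shape (batch,); returns a (batch, max_len)
    # mask where entry [b, t] is True for t < sequence_length[b]
    if max_len is None:
        max_len = sequence_length.max().item()
    batch_size = sequence_length.size(0)
    seq_range = torch.arange(max_len, device=sequence_length.device)       # (max_len,)
    seq_range = seq_range.unsqueeze(0).expand(batch_size, max_len)         # (batch, max_len)
    seq_length = sequence_length.unsqueeze(1).expand(batch_size, max_len)  # (batch, max_len)
    return seq_range < seq_length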
Example #10
Source File: models.py    From IGMC with MIT License
def forward(self, data):
        # edge_type is assumed to be stored on the data object; the original snippet
        # passed it to dropout_adj below without defining it first
        x, edge_index, edge_type, batch = data.x, data.edge_index, data.edge_type, data.batch
        if self.adj_dropout > 0:
            edge_index, edge_type = dropout_adj(
                edge_index, edge_type, p=self.adj_dropout, 
                force_undirected=self.force_undirected, num_nodes=len(x), 
                training=self.training
            )
        concat_states = []
        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index))
            concat_states.append(x)
        concat_states = torch.cat(concat_states, 1)
        x = global_sort_pool(concat_states, batch, self.k)  # batch * (k*hidden)
        x = x.unsqueeze(1)  # batch * 1 * (k*hidden)
        x = F.relu(self.conv1d_params1(x))
        x = self.maxpool1d(x)
        x = F.relu(self.conv1d_params2(x))
        x = x.view(len(x), -1)  # flatten
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        if self.regression:
            return x[:, 0]
        else:
            return F.log_softmax(x, dim=-1) 
Example #11
Source File: model.py    From ConvLab with MIT License
def forward(self, input, hidden, not_used):
        embedded = self.embedding(input).transpose(0, 1)  # [B,1] -> [ 1,B, D]
        embedded = F.dropout(embedded, self.dropout_rate, training=self.training)  # pass training so dropout is disabled in eval mode (F.dropout defaults to training=True)

        output = embedded
        #output = F.relu(embedded)

        output, hidden = self.rnn(output, hidden)

        out = self.out(output.squeeze(0))
        output = F.log_softmax(out, dim=1)

        return output, hidden 
Example #12
Source File: ner_model.py    From Doc2EDAG with MIT License
def produce_ner_batch_metrics(seq_logits, gold_labels, masks):
    # seq_logits: [batch_size, seq_len, num_entity_labels]
    # gold_labels: [batch_size, seq_len]
    # masks: [batch_size, seq_len]
    batch_size, seq_len, num_entities = seq_logits.size()

    # [batch_size, seq_len, num_entity_labels]
    seq_logp = F.log_softmax(seq_logits, dim=-1)
    # [batch_size, seq_len]
    pred_labels = seq_logp.argmax(dim=-1)
    # [batch_size*seq_len, num_entity_labels]
    token_logp = seq_logp.view(-1, num_entities)
    # [batch_size*seq_len]
    token_labels = gold_labels.view(-1)
    # [batch_size, seq_len]
    seq_token_loss = F.nll_loss(token_logp, token_labels, reduction='none').view(batch_size, seq_len)

    batch_metrics = []
    for bid in range(batch_size):
        ex_loss = seq_token_loss[bid, masks[bid]].mean().item()
        ex_acc = (pred_labels[bid, masks[bid]] == gold_labels[bid, masks[bid]]).float().mean().item()
        ex_pred_lids = pred_labels[bid, masks[bid]].tolist()
        ex_gold_lids = gold_labels[bid, masks[bid]].tolist()
        ner_tp_set, ner_fp_set, ner_fn_set = judge_ner_prediction(ex_pred_lids, ex_gold_lids)
        batch_metrics.append([ex_loss, ex_acc, len(ner_tp_set), len(ner_fp_set), len(ner_fn_set)])

    return torch.tensor(batch_metrics, dtype=torch.float, device=seq_logits.device) 
Example #13
Source File: fast_text.py    From TaskBot with GNU General Public License v3.0
def forward(self, x):
        x = self.embed(x)
        x = torch.mean(x, dim=1, keepdim=False)
        x = self.dropout(x)
        output = self.fc(x)
        output = F.log_softmax(output, dim=1)
        return output 
Example #14
Source File: masked_cross_entropy.py    From ConvLab with MIT License
def masked_cross_entropy(logits, target, length):
    """
    Args:
        logits: A Variable containing a FloatTensor of size
            (batch, max_len, num_classes) which contains the
            unnormalized probability for each class.
        target: A Variable containing a LongTensor of size
            (batch, max_len) which contains the index of the true
            class for each corresponding step.
        length: A Variable containing a LongTensor of size (batch,)
            which contains the length of each data in a batch.

    Returns:
        loss: An average loss value masked by the length.
    """
    if USE_CUDA:
        length = Variable(torch.LongTensor(length)).cuda()
    else:
        length = Variable(torch.LongTensor(length))    

    # logits_flat: (batch * max_len, num_classes)
    logits_flat = logits.view(-1, logits.size(-1))  # -1 means the size is inferred from the other dimensions
    # log_probs_flat: (batch * max_len, num_classes)
    log_probs_flat = functional.log_softmax(logits_flat, dim=1)
    # target_flat: (batch * max_len, 1)
    target_flat = target.view(-1, 1)
    # losses_flat: (batch * max_len, 1)
    losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
    # losses: (batch, max_len)
    losses = losses_flat.view(*target.size())
    # mask: (batch, max_len)
    mask = sequence_mask(sequence_length=length, max_len=target.size(1))  
    losses = losses * mask.float()
    loss = losses.sum() / length.float().sum()
    return loss 
Example #15
Source File: model.py    From ConvLab with MIT License
def forward(self, input, hidden, encoder_outputs):
        if isinstance(hidden, tuple):
            h_t = hidden[0]
        else:
            h_t = hidden
        encoder_outputs = encoder_outputs.transpose(0, 1)
        embedded = self.embedding(input)  # .view(1, 1, -1)
        # embedded = F.dropout(embedded, self.dropout_p)

        # SCORE 3
        max_len = encoder_outputs.size(1)
        h_t = h_t.transpose(0, 1)  # [1,B,D] -> [B,1,D]
        h_t = h_t.repeat(1, max_len, 1)  # [B,1,D]  -> [B,T,D]
        energy = self.attn(torch.cat((h_t, encoder_outputs), 2))  # [B,T,2D] -> [B,T,D]
        energy = torch.tanh(energy)
        energy = energy.transpose(2, 1)  # [B,H,T]
        v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1)  # [B,1,H]
        energy = torch.bmm(v, energy)  # [B,1,T]
        attn_weights = F.softmax(energy, dim=2)  # [B,1,T]

        # getting context
        context = torch.bmm(attn_weights, encoder_outputs)  # [B,1,H]

        # context = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0)) #[B,1,H]
        # Combine embedded input word and attended context, run through RNN
        rnn_input = torch.cat((embedded, context), 2)
        rnn_input = rnn_input.transpose(0, 1)
        output, hidden = self.rnn(rnn_input, hidden)
        output = output.squeeze(0)  # (1,B,V)->(B,V)

        output = F.log_softmax(self.out(output), dim=1)
        return output, hidden  # , attn_weights 
Example #16
Source File: decoders.py    From ConvLab with MIT License
def _step(self, input_var, hidden_state, encoder_outputs, goal_hid):
        # input_var: (1, 1)
        # hidden_state: tuple: (h, c)
        # encoder_outputs: (1, max_dlg_len, dlg_cell_size)
        # goal_hid: (1, goal_nhid)
        batch_size, output_seq_len = input_var.size()
        embedded = self.embedding(input_var) # (1, 1, embedding_dim)

        if goal_hid is not None:
            goal_hid = goal_hid.view(goal_hid.size(0), 1, goal_hid.size(1)) # (1, 1, goal_nhid)
            goal_rep = goal_hid.repeat(1, output_seq_len, 1) # (1, 1, goal_nhid)
            embedded = th.cat([embedded, goal_rep], dim=2) # (1, 1, embedding_dim+goal_nhid)

        embedded = self.input_dropout(embedded)

        # ############
        # embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)

        # output: (1, 1, dec_cell_size)
        # hidden: tuple: (h, c)
        output, hidden_s = self.rnn(embedded, hidden_state)

        attn = None
        if self.use_attn:
            # output: (1, 1, dec_cell_size)
            # encoder_outputs: (1, max_dlg_len, dlg_cell_size)
            # attn: (1, 1, max_dlg_len)
            output, attn = self.attention(output, encoder_outputs)

        logits = self.project(output.view(-1, self.dec_cell_size)) # (1*1, vocab_size)
        prediction = logits.view(batch_size, output_seq_len, -1) # (1, 1, vocab_size)
        # prediction = self.log_softmax(logits, dim=logits.dim()-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)
        return prediction, hidden_s

    # special for rl 
Example #17
Source File: v1_neuro.py    From Attentive-Filtering-Network with MIT License
def forward(self, inputs, hidden):
        batch_size = inputs.size(0) # batch size is 1
        # input size is (batch_size x H x W x C)
        # reshape if to (batch_size x C x H x W) for CNN
        inputs = inputs.transpose(2,3).transpose(1,2)

        # Run through Conv2d, BatchNorm2d, ReLU layers
        h = self.conv1(inputs)
        h = self.batch1(h)
        h = self.relu1(h)
        h = self.conv2(h)
        h = self.batch2(h)
        h = self.relu2(h)
        h = self.conv3(h)
        h = self.batch3(h)
        h = self.relu3(h)

        h = h.squeeze() # get rid of the batch_size dim
        h = h.view(h.size(0), -1, 4).transpose(1,2) # reshape (C x H x W)

        r, hidden = self.gru(h, hidden) # BGRU unit is applied to each channel of CNN's output
        r = r.view(1, -1)

        f = self.fc1(r)
        f = self.drop1(f)
        f = self.relu4(f)
        f = self.fc2(f)
        f = self.drop2(f)
        f = self.relu5(f)
        f = self.fc3(f)
        return F.log_softmax(f, dim=1), hidden  # return the fully connected output f; the original snippet returned r, leaving f unused
Example #18
Source File: ner_model.py    From Doc2EDAG with MIT License
def forward(self, input_ids, input_masks,
                label_ids=None, train_flag=True, decode_flag=True):
        """Assume input size [batch_size, seq_len]"""
        if input_masks.dtype != torch.uint8:
            input_masks = input_masks == 1
        if train_flag:
            assert label_ids is not None

        # get contextual info
        input_emb = self.token_embedding(input_ids)
        input_masks = input_masks.unsqueeze(-2)  # to fit for the transformer code
        batch_seq_enc = self.token_encoder(input_emb, input_masks)

        if self.config.use_crf_layer:
            ner_loss, batch_seq_preds = self.crf_layer(
                batch_seq_enc, seq_token_label=label_ids, batch_first=True,
                train_flag=train_flag, decode_flag=decode_flag
            )
        else:
            # [batch_size, seq_len, num_entity_labels]
            batch_seq_logits = self.classifier(batch_seq_enc)
            batch_seq_logp = F.log_softmax(batch_seq_logits, dim=-1)

            if train_flag:
                batch_logp = batch_seq_logp.view(-1, batch_seq_logp.size(-1))
                batch_label = label_ids.view(-1)
                # ner_loss = F.nll_loss(batch_logp, batch_label, reduction='sum')
                ner_loss = F.nll_loss(batch_logp, batch_label, reduction='none')
                ner_loss = ner_loss.view(label_ids.size()).sum(dim=-1)  # [batch_size]
            else:
                ner_loss = None

            if decode_flag:
                batch_seq_preds = batch_seq_logp.argmax(dim=-1)
            else:
                batch_seq_preds = None

        return batch_seq_enc, ner_loss, batch_seq_preds 
Example #19
Source File: decoders.py    From ConvLab with MIT License
def forward_step(self, input_var, hidden_state, encoder_outputs, goal_hid):
        # input_var: (batch_size, response_size-1 i.e. output_seq_len)
        # hidden_state: tuple: (h, c)
        # encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)
        # goal_hid: (batch_size, goal_nhid)
        batch_size, output_seq_len = input_var.size()
        embedded = self.embedding(input_var) # (batch_size, output_seq_len, embedding_dim)

        # add goals
        if goal_hid is not None:
            goal_hid = goal_hid.view(goal_hid.size(0), 1, goal_hid.size(1)) # (batch_size, 1, goal_nhid)
            goal_rep = goal_hid.repeat(1, output_seq_len, 1) # (batch_size, output_seq_len, goal_nhid)
            embedded = th.cat([embedded, goal_rep], dim=2) # (batch_size, output_seq_len, embedding_dim+goal_nhid)

        embedded = self.input_dropout(embedded)

        # ############
        # embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)

        # output: (batch_size, output_seq_len, dec_cell_size)
        # hidden: tuple: (h, c)
        output, hidden_s = self.rnn(embedded, hidden_state)

        attn = None
        if self.use_attn:
            # output: (batch_size, output_seq_len, dec_cell_size)
            # encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)
            # attn: (batch_size, output_seq_len, max_ctx_len)
            output, attn = self.attention(output, encoder_outputs)

        logits = self.project(output.contiguous().view(-1, self.dec_cell_size)) # (batch_size*output_seq_len, vocab_size)
        prediction = self.log_softmax(logits, dim=logits.dim()-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)
        return prediction, hidden_s, attn

    # special for rl 
Example #20
Source File: att_rcnn.py    From TaskBot with GNU General Public License v3.0
def forward(self, x):
        embed = self.lookup(x)
        att = self._attention(embed)
        o, h = self.rnn(embed)
        att = att.unsqueeze(2).expand_as(o)
        att_o = att * o
        att_o = torch.sum(att_o, dim=1, keepdim=False)
        logit = F.log_softmax(self.fc(att_o), 1)
        return logit 
Example #21
Source File: mnist.py    From iAI with MIT License
def forward(self, x):
        x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)
        x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)
        x = x.view(-1, 800)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1) 
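
Models that end in F.log_softmax, such as the MNIST networks in this and the following examples, are typically trained with F.nll_loss, which expects log-probabilities. A minimal, hypothetical training step (model, optimizer, data and target are assumed to exist) could look like this:

optimizer.zero_grad()
output = model(data)               # (batch, num_classes) log-probabilities from log_softmax
loss = F.nll_loss(output, target)  # together with log_softmax this is cross-entropy
loss.backward()
optimizer.step()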
Example #22
Source File: att_fasttext.py    From TaskBot with GNU General Public License v3.0
def forward(self, x):
        x = self.embed(x)
        score = self._score(x)
        # print(score[0])
        # x = torch.mean(x, dim=1, keepdim=False)
        x_att = torch.sum(score.unsqueeze(2).expand_as(x) * x, dim=1, keepdim=False)
        output = self.fc(x_att)
        output = F.log_softmax(output, dim=1)
        return output 
Example #23
Source File: mnist.py    From dockerfiles with Apache License 2.0
def forward(self, x):
        x = F.relu(self.fc(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1) 
Example #24
Source File: mnist.py    From dockerfiles with Apache License 2.0
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)  # pass dim explicitly; the implicit-dim form is deprecated
Example #25
Source File: mnist.py    From dockerfiles with Apache License 2.0
def forward(self, x):
        x = F.relu(self.fc(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1) 
Example #26
Source File: mnist.py    From dockerfiles with Apache License 2.0
def forward(self, x):
        x = F.relu(self.fc(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)  # pass dim explicitly; the implicit-dim form is deprecated
Example #27
Source File: mnist.py    From Pytorch-Project-Template with MIT License
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1) 
Example #28
Source File: mnist.py    From Pytorch-Project-Template with MIT License
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1) 
Example #29
Source File: models.py    From cvpr2018-hnd with MIT License
def postprocess(self, input):
        if self.relu:
            input = F.relu(input)
        if self.softmax == 'l':
            input = F.log_softmax(input, dim=1)
        elif self.softmax == 's':
            input = F.softmax(input, dim=1)
        return input 
Example #30
Source File: model.py    From treelstm.pytorch with MIT License
def forward(self, lvec, rvec):
        mult_dist = torch.mul(lvec, rvec)
        abs_dist = torch.abs(torch.add(lvec, -rvec))
        vec_dist = torch.cat((mult_dist, abs_dist), 1)

        out = torch.sigmoid(self.wh(vec_dist))  # torch.sigmoid; F.sigmoid is deprecated
        out = F.log_softmax(self.wp(out), dim=1)
        return out


# putting the whole model together