Python torch.sigmoid() Examples

The following are 30 code examples of torch.sigmoid(), each taken from an open-source project; the source file, project, and license are noted above every snippet. You may also want to check out the other available functions and classes of the torch module.
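Before the project examples, a minimal standalone sketch of what torch.sigmoid() computes (an illustration for orientation, not taken from any of the projects below):

import torch

# torch.sigmoid applies 1 / (1 + exp(-x)) elementwise,
# mapping any real value into the open interval (0, 1).
x = torch.tensor([-2.0, 0.0, 2.0])
print(torch.sigmoid(x))  # tensor([0.1192, 0.5000, 0.8808])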
Example #1
Source File: utils.py    From pruning_yolov3 with GNU General Public License v3.0
import numpy as np
import torch
import matplotlib.pyplot as plt


def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200) 
Example #2
Source File: unified_information.py    From interpret-text with MIT License
def forward(self):
        """ Calculate loss:
            $L(sigma) = (||Phi(embed + epsilon) - Phi(embed)||_2^2)
            // (regularization^2) - rate * log(sigma)$
        :return: a scalar, the target loss.
        :rtype: torch.FloatTensor
        """
        ratios = torch.sigmoid(self.ratio)  # S * 1
        x = self.input_embeddings + 0.0
        x_tilde = (
            x
            + ratios
            * torch.randn(self.input_size, self.input_dimension).to(x.device)
            * self.scale
        )  # S * D
        s = self.Phi(x)  # D or S * D
        s_tilde = self.Phi(x_tilde)
        loss = (s_tilde - s) ** 2
        if self.regular is not None:
            loss = torch.mean(loss / self.regular ** 2)
        else:
            loss = torch.mean(loss) / torch.mean(s ** 2)

        return loss - torch.mean(torch.log(ratios)) * self.rate 
Example #3
Source File: models.py    From models with MIT License
def forward(self,iput):

		bin_a=None
		level1_rep=None
		[batch_size,_,_]=iput.size()

		for hm,hm_encdr in enumerate(self.rnn_hms):
			hmod=iput[:,:,hm].contiguous()
			hmod=torch.t(hmod).unsqueeze(2)

			op,a= hm_encdr(hmod)
			if level1_rep is None:
				level1_rep=op
				bin_a=a
			else:
				level1_rep=torch.cat((level1_rep,op),1)
				bin_a=torch.cat((bin_a,a),1)
		level1_rep=level1_rep.permute(1,0,2)
		final_rep_1,hm_level_attention_1=self.hm_level_rnn_1(level1_rep)
		final_rep_1=final_rep_1.squeeze(1)
		prediction_m=((self.fdiff1_1(final_rep_1)))
		
		return torch.sigmoid(prediction_m) 
Example #4
Source File: plugin.py    From End-to-end-ASR-Pytorch with MIT License
def fuse_prob(self, x_emb, dec_logit):
        ''' Takes context and decoder logit to perform word embedding fusion '''
        # Compute distribution for dec/emb
        if self.fuse_normalize:
            emb_logit = nn.functional.linear(nn.functional.normalize(x_emb, dim=-1),
                                             nn.functional.normalize(self.emb_table.weight, dim=-1))
        else:
            emb_logit = nn.functional.linear(x_emb, self.emb_table.weight)
        emb_prob = (nn.functional.relu(self.temp)*emb_logit).softmax(dim=-1)
        dec_prob = dec_logit.softmax(dim=-1)
        # Mix distribution
        if self.fuse_learnable:
            fused_prob = (1-torch.sigmoid(self.fuse_lambda))*dec_prob +\
                torch.sigmoid(self.fuse_lambda)*emb_prob
        else:
            fused_prob = (1-self.fuse_lambda)*dec_prob + \
                self.fuse_lambda*emb_prob
        # Log-prob
        log_fused_prob = (fused_prob+self.eps).log()

        return log_fused_prob 
Example #5
Source File: blow.py    From blow with Apache License 2.0
def forward(self,h,emb):
        sbatch,nsq,lchunk=h.size()
        h=h.contiguous()
        """
        # Slower version
        ws=list(self.adapt_w(emb).view(sbatch,self.ncha,1,self.kw))
        bs=list(self.adapt_b(emb))
        hs=list(torch.chunk(h,sbatch,dim=0))
        out=[]
        for hi,wi,bi in zip(hs,ws,bs):
            out.append(torch.nn.functional.conv1d(hi,wi,bias=bi,padding=self.kw//2,groups=nsq))
        h=torch.cat(out,dim=0)
        """
        # Faster version fully using group convolution
        w=self.adapt_w(emb).view(-1,1,self.kw)
        b=self.adapt_b(emb).view(-1)
        h=torch.nn.functional.conv1d(h.view(1,-1,lchunk),w,bias=b,padding=self.kw//2,groups=sbatch*nsq).view(sbatch,self.ncha,lchunk)
        #"""
        h=self.net.forward(h)
        s,m=torch.chunk(h,2,dim=1)
        s=torch.sigmoid(s+2)+1e-7
        return s,m

########################################################################################################################
######################################################################################################################## 
Example #6
Source File: rfp.py    From mmdetection with Apache License 2.0
def forward(self, inputs):
        inputs = list(inputs)
        assert len(inputs) == len(self.in_channels) + 1  # +1 for input image
        img = inputs.pop(0)
        # FPN forward
        x = super().forward(tuple(inputs))
        for rfp_idx in range(self.rfp_steps - 1):
            rfp_feats = [x[0]] + list(
                self.rfp_aspp(x[i]) for i in range(1, len(x)))
            x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
            # FPN forward
            x_idx = super().forward(x_idx)
            x_new = []
            for ft_idx in range(len(x_idx)):
                add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
                x_new.append(add_weight * x_idx[ft_idx] +
                             (1 - add_weight) * x[ft_idx])
            x = x_new
        return x 
Example #7
Source File: afi.py    From pytorch-fm with MIT License
def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        embed_x = self.embedding(x)
        atten_x = self.atten_embedding(embed_x)
        cross_term = atten_x.transpose(0, 1)
        for self_attn in self.self_attns:
            cross_term, _ = self_attn(cross_term, cross_term, cross_term)
        cross_term = cross_term.transpose(0, 1)
        if self.has_residual:
            V_res = self.V_res_embedding(embed_x)
            cross_term += V_res
        cross_term = F.relu(cross_term).contiguous().view(-1, self.atten_output_dim)
        x = self.linear(x) + self.attn_fc(cross_term) + self.mlp(embed_x.view(-1, self.embed_output_dim))
        return torch.sigmoid(x.squeeze(1)) 
Example #8
Source File: meta.py    From ScenarioMeta with MIT License
def forward(self, grad_norm, grad_sign, param_norm, param_sign, loss_norm, hx):
            batch_size = grad_norm.size(0)
            inputs = torch.stack((grad_norm, grad_sign, param_norm, param_sign, loss_norm.expand(grad_norm.size(0))),
                                 dim=1)
            if hx is None:
                self.lrs = []
                if self.forget_gate:
                    self.fgs = []
                hx = (self.h_0.expand((batch_size, -1)), self.c_0.expand((batch_size, -1)))
            h, c = self.lstm(inputs, hx)
            if self.layer_norm is not None:
                h = self.layer_norm(h)
            if self.input_gate:
                lr = torch.sigmoid(self.lr_layer(h))
            else:
                lr = self.output_layer(h)
            self.lrs.append(lr.mean().item())
            if self.forget_gate:
                fg = torch.sigmoid(self.fg_layer(h))
                self.fgs.append(fg.mean().item())
                return lr, fg, (h, c)
            else:
                return lr, (h, c) 
Example #9
Source File: functional.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def logits_nll_loss(input, target, weight=None, reduction='mean'):
    """logits_nll_loss
    Different from nll loss, this is for sigmoid based loss.
    The difference is this will add along C(class) dim.
    """

    assert input.dim() == 2, 'Input shape should be (B, C).'
    if input.size(0) != target.size(0):
        raise ValueError(
            'Expected input batch_size ({}) to match target batch_size ({}).' .format(
                input.size(0), target.size(0)))

    ret = input.sum(dim=-1)
    if weight is not None:
        ret = _batch_weight(weight, target) * ret
    return reducing(ret, reduction) 
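Example #9 relies on two helpers, _batch_weight and reducing, that live elsewhere in torch-toolbox and are not shown on this page. A minimal sketch of what they plausibly do (an assumption for readability, not the library's actual implementation):

def _batch_weight(weight, target):
    # Assumed behaviour: pick the per-class weight for each sample's target class,
    # giving one scalar weight per row of the batch.
    return weight.gather(dim=-1, index=target)

def reducing(ret, reduction='mean'):
    # Assumed behaviour: apply the usual PyTorch-style reduction.
    if reduction == 'mean':
        return ret.mean()
    if reduction == 'sum':
        return ret.sum()
    if reduction == 'none':
        return ret
    raise ValueError('{} is not a valid reduction'.format(reduction))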
Example #10
Source File: model2.py    From controllable-text-attribute-transfer with Apache License 2.0
def forward(self, src, tgt, src_mask, tgt_mask):
        """
        Take in and process masked src and target sequences.
        """
        memory = self.encode(src, src_mask)  # (batch_size, max_src_seq, d_model)
        # attented_mem=self.attention(memory,memory,memory,src_mask)
        # memory=attented_mem
        score = self.attention(memory, memory, src_mask)
        attent_memory = score.bmm(memory)
        # memory=self.linear(torch.cat([memory,attent_memory],dim=-1))

        memory, _ = self.gru(attent_memory)
        '''
        score=torch.sigmoid(self.linear(memory))
        memory=memory*score
        '''
        latent = torch.sum(memory, dim=1)  # (batch_size, d_model)
        logit = self.decode(latent.unsqueeze(1), tgt, tgt_mask)  # (batch_size, max_tgt_seq, d_model)
        # logit,_=self.gru_decoder(logit)
        prob = self.generator(logit)  # (batch_size, max_seq, vocab_size)
        return latent, prob 
Example #11
Source File: predictor.py    From ConvLab with MIT License
def predict(self, state):
        
        example, kb = self.gen_example(state)
        feature = self.gen_feature(example)
        
        input_ids = torch.tensor([feature.input_ids], dtype=torch.long).to(self.device)
        input_masks = torch.tensor([feature.input_mask], dtype=torch.long).to(self.device)
        segment_ids = torch.tensor([feature.segment_ids], dtype=torch.long).to(self.device)

        with torch.no_grad():
            logits = self.model(input_ids, segment_ids, input_masks, labels=None)
            logits = torch.sigmoid(logits)
        preds = (logits > 0.4).float()
        preds_numpy = preds.cpu().nonzero().squeeze().numpy()
        
#        for i in preds_numpy:
#            if i < 10:
#                print(Constants.domains[i], end=' ')
#            elif i < 17:
#                print(Constants.functions[i-10], end=' ')
#            else:
#                print(Constants.arguments[i-17], end=' ')
#        print()
        
        return preds, kb 
Example #12
Source File: model2.py    From controllable-text-attribute-transfer with Apache License 2.0
def forward(self, src, tgt, src_mask, tgt_mask):
        """
        Take in and process masked src and target sequences.
        """
        memory = self.encode(src, src_mask)  # (batch_size, max_src_seq, d_model)
        # attented_mem=self.attention(memory,memory,memory,src_mask)
        # memory=attented_mem
        score = self.attention(memory, memory, src_mask)
        attent_memory = score.bmm(memory)
        # memory=self.linear(torch.cat([memory,attent_memory],dim=-1))

        memory, _ = self.gru(attent_memory)
        '''
        score=torch.sigmoid(self.linear(memory))
        memory=memory*score
        '''
        latent = torch.sum(memory, dim=1)  # (batch_size, d_model)
        logit = self.decode(latent.unsqueeze(1), tgt, tgt_mask)  # (batch_size, max_tgt_seq, d_model)
        # logit,_=self.gru_decoder(logit)
        prob = self.generator(logit)  # (batch_size, max_seq, vocab_size)
        return latent, prob 
Example #13
Source File: functional.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def evo_norm(x, prefix, running_var, v, weight, bias,
             training, momentum, eps=0.1, groups=32):
    if prefix == 'b0':
        if training:
            var = torch.var(x, dim=(0, 2, 3), keepdim=True)
            running_var.mul_(momentum)
            running_var.add_((1 - momentum) * var)
        else:
            var = running_var
        if v is not None:
            den = torch.max((var + eps).sqrt(), v * x + instance_std(x, eps))
            x = x / den * weight + bias
        else:
            x = x * weight + bias
    else:
        if v is not None:
            x = x * torch.sigmoid(v * x) / group_std(x,
                                                     groups, eps) * weight + bias
        else:
            x = x * weight + bias

    return x 
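evo_norm also depends on instance_std and group_std from the same torch-toolbox module. A rough sketch of those helpers, following the definitions in the EvoNorm paper (an assumption, not the library's exact code):

import torch

def instance_std(x, eps=1e-5):
    # Per-sample, per-channel standard deviation over the spatial dims of an (N, C, H, W) tensor.
    var = torch.var(x, dim=(2, 3), keepdim=True)
    return (var + eps).sqrt()

def group_std(x, groups=32, eps=1e-5):
    # Standard deviation over channel groups, as in group normalization.
    n, c, h, w = x.size()
    x = x.view(n, groups, c // groups, h, w)
    var = torch.var(x, dim=(2, 3, 4), keepdim=True)
    return (var + eps).sqrt().expand_as(x).reshape(n, c, h, w)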
Example #14
Source File: Transformer.py    From ConvLab with MIT License
def forward(self, src_seq, src_pos, act_vocab_id):
        # -- Prepare masks
        slf_attn_mask = get_attn_key_pad_mask(seq_k=src_seq, seq_q=src_seq)
        non_pad_mask = get_non_pad_mask(src_seq)

        # -- Forward Word Embedding
        enc_output = self.src_word_emb(src_seq) + self.position_enc(src_pos)
        # -- Forward Ontology Embedding
        ontology_embedding = self.src_word_emb(act_vocab_id)

        for enc_layer in self.layer_stack:
            enc_output, enc_slf_attn = enc_layer(
                enc_output,
                non_pad_mask=non_pad_mask,
                slf_attn_mask=slf_attn_mask)

        dot_prod = torch.sum(enc_output[:, :, None, :] * ontology_embedding[None, None, :, :], -1)
        #index = length[:, None, None].repeat(1, 1, dot_prod.size(-1))
        #pooled_dot_prod = dot_prod.gather(1, index).squeeze()
        pooled_dot_prod = dot_prod[:, 0, :]
        pooling_likelihood = torch.sigmoid(pooled_dot_prod)
        return pooling_likelihood, enc_output 
Example #15
Source File: dfm.py    From pytorch-fm with MIT License
def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        embed_x = self.embedding(x)
        x = self.linear(x) + self.fm(embed_x) + self.mlp(embed_x.view(-1, self.embed_output_dim))
        return torch.sigmoid(x.squeeze(1)) 
Example #16
Source File: exp_synph.py    From connecting_the_dots with MIT License
def loss_forward(self, out, train):
    out, edge = out
    if not(isinstance(out, tuple) or isinstance(out, list)):
      out = [out]
    if not(isinstance(edge, tuple) or isinstance(edge, list)):
      edge = [edge]

    vals = []

    # apply photometric loss
    for s,l,o in zip(itertools.count(), self.losses, out):
      val, pattern_proj = l(o, self.data[f'im{s}'][:,0:1,...], self.data[f'std{s}'])
      if s == 0: 
        self.pattern_proj = pattern_proj.detach()
      vals.append(val)

    # apply disparity loss
    # 1-edge as ground truth edge if inversed
    edge0 = 1-torch.sigmoid(edge[0])
    val = self.disparity_loss(out[0], edge0)
    if self.dp_weight>0:
      vals.append(val * self.dp_weight)

    # apply edge loss on a subset of training samples
    for s,e in zip(itertools.count(), edge):
      # inversed ground truth edge where 0 means edge
      grad = self.data[f'grad{s}']<0.2
      grad = grad.to(torch.float32)
      ids = self.data['id']
      mask = ids>self.train_edge
      if mask.sum()>0:
        val = self.edge_loss(e[mask], grad[mask])
      else:
        val = torch.zeros_like(vals[0]) 
      if s == 0:
        self.edge = e.detach()
        self.edge = torch.sigmoid(self.edge)
        self.edge_gt = grad.detach() 
      vals.append(val)

    return vals 
Example #17
Source File: quasi_symbolic.py    From NSCL-PyTorch-Release with MIT License
def count_equal(self, selected1, selected2):
        if self.training or _test_quantize.value < InferenceQuantizationMethod.STANDARD.value:
            a = torch.sigmoid(selected1).sum(dim=-1)
            b = torch.sigmoid(selected2).sum(dim=-1)
            return ((2 * self._count_margin - (a - b).abs()) / (2 * self._count_margin) / self._count_tau)
        else:
            return -10 + 20 * (self.count(selected1) == self.count(selected2)).float() 
Example #18
Source File: quasi_symbolic.py    From NSCL-PyTorch-Release with MIT License
def count_greater(self, selected1, selected2):
        if self.training or _test_quantize.value < InferenceQuantizationMethod.STANDARD.value:
            a = torch.sigmoid(selected1).sum(dim=-1)
            b = torch.sigmoid(selected2).sum(dim=-1)

            return ((a - b - 1 + 2 * self._count_margin) / self._count_tau)
        else:
            return -10 + 20 * (self.count(selected1) > self.count(selected2)).float() 
Example #19
Source File: ncf.py    From pytorch-fm with MIT License
def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_user_fields)``
        """
        x = self.embedding(x)
        user_x = x[:, self.user_field_idx].squeeze(1)
        item_x = x[:, self.item_field_idx].squeeze(1)
        x = self.mlp(x.view(-1, self.embed_output_dim))
        gmf = user_x * item_x
        x = torch.cat([gmf, x], dim=1)
        x = self.fc(x).squeeze(1)
        return torch.sigmoid(x) 
Example #20
Source File: 12_activation_functions.py    From pytorchTutorial with MIT License
def __init__(self, input_size, hidden_size):
        super(NeuralNet, self).__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(hidden_size, 1)
        self.sigmoid = nn.Sigmoid() 
Example #21
Source File: quasi_symbolic.py    From NSCL-PyTorch-Release with MIT License
def count(self, selected):
        if self.training:
            return torch.sigmoid(selected).sum(dim=-1)
        else:
            if _test_quantize.value >= InferenceQuantizationMethod.STANDARD.value:
                return (selected > 0).float().sum()
            return torch.sigmoid(selected).sum(dim=-1).round() 
Example #22
Source File: sigmoid_focal_loss.py    From Res2Net-maskrcnn with MIT License
import torch


def sigmoid_focal_loss_cpu(logits, targets, gamma, alpha):
    num_classes = logits.shape[1]
    gamma = gamma[0]
    alpha = alpha[0]
    dtype = targets.dtype
    device = targets.device
    class_range = torch.arange(1, num_classes+1, dtype=dtype, device=device).unsqueeze(0)

    t = targets.unsqueeze(1)
    p = torch.sigmoid(logits)
    term1 = (1 - p) ** gamma * torch.log(p)
    term2 = p ** gamma * torch.log(1 - p)
    return -(t == class_range).float() * term1 * alpha - ((t != class_range) * (t >= 0)).float() * term2 * (1 - alpha) 
Example #23
Source File: learn.py    From neuralcoref with MIT License
def get_top_pair_loss(n):
    def top_pair_loss(scores, targets, debug=False):
        """ Top pairs (best true and best mistaken) and single mention probabilistic loss
        """
        true_ants = targets[2]
        false_ants = targets[3] if len(targets) == 5 else None
        s_scores = clipped_sigmoid(scores)
        true_pairs = torch.gather(s_scores, 1, true_ants)
        top_true, top_true_arg = torch.log(true_pairs).max(
            dim=1
        )  # max(log(p)), p=sigmoid(s)
        if debug:
            print("true_pairs", true_pairs.data)
            print("top_true", top_true.data)
            print("top_true_arg", top_true_arg.data)
        out_score = torch.sum(top_true).neg()
        if (
            false_ants is not None
        ):  # We have no false antecedents when there are no pairs
            false_pairs = torch.gather(s_scores, 1, false_ants)
            top_false, _ = torch.log(1 - false_pairs).min(
                dim=1
            )  # min(log(1-p)), p=sigmoid(s)
            out_score = out_score + torch.sum(top_false).neg()
        return out_score / n

    return top_pair_loss 
Example #24
Source File: learn.py    From neuralcoref with MIT License
def clipped_sigmoid(inputs):
    epsilon = 1.0e-7
    return torch.sigmoid(inputs).clamp(epsilon, 1.0 - epsilon) 
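The clamp matters because top_pair_loss in Example #23 takes log(p) and log(1 - p): for large-magnitude scores, torch.sigmoid saturates to exactly 0.0 or 1.0 in float32 and the log becomes infinite. A quick standalone illustration (not from the neuralcoref source):

import torch

scores = torch.tensor([-200.0, 200.0])
print(torch.log(torch.sigmoid(scores)))    # tensor([-inf, 0.])
print(torch.log(clipped_sigmoid(scores)))  # finite, roughly log(1e-7) and -1e-7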
Example #25
Source File: utils.py    From blow with Apache License 2.0
def disclogistic_log_p(x,mu=0,sigma=1,eps=1e-12):
    xx=(x-mu)/sigma
    return torch.log(torch.sigmoid(xx+0.5)-torch.sigmoid(xx-0.5)+eps)

######################################################################################################################## 
Example #26
Source File: convrnn.py    From DPC with MIT License
def forward(self, input_tensor, hidden_state):
        if hidden_state is None:
            B, C, *spatial_dim = input_tensor.size()
            hidden_state = torch.zeros([B,self.hidden_size,*spatial_dim]).cuda()
        # [B, C, H, W]
        combined = torch.cat([input_tensor, hidden_state], dim=1) #concat in C
        update = torch.sigmoid(self.update_gate(combined))
        reset = torch.sigmoid(self.reset_gate(combined))
        out = torch.tanh(self.out_gate(torch.cat([input_tensor, hidden_state * reset], dim=1)))
        new_state = hidden_state * (1 - update) + out * update
        return new_state 
Example #27
Source File: meta.py    From ScenarioMeta with MIT License
def forward(self, inputs, hx):
            if hx is None:
                hx = (self.h_0.unsqueeze(0), self.c_0.unsqueeze(0))
            h, c = self.lstm(inputs, hx)
            return torch.sigmoid(self.output_layer(h).squeeze()), (h, c) 
Example #28
Source File: 12_activation_functions.py    From pytorchTutorial with MIT License
def forward(self, x):
        out = torch.relu(self.linear1(x))
        out = torch.sigmoid(self.linear2(out))
        return out 
Example #29
Source File: 12_activation_functions.py    From pytorchTutorial with MIT License
def forward(self, x):
        out = self.linear1(x)
        out = self.relu(out)
        out = self.linear2(out)
        out = self.sigmoid(out)
        return out

# option 2 (use activation functions directly in forward pass) 
Example #30
Source File: bert_basic_layer.py    From mrc-for-flat-nested-ner with Apache License 2.0
def swish(x):
    return x * torch.sigmoid(x)
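swish(x) = x * sigmoid(x) is the same function that newer PyTorch releases expose as SiLU. A quick sanity check, assuming a PyTorch version that provides torch.nn.functional.silu:

import torch
import torch.nn.functional as F

x = torch.randn(4)
print(torch.allclose(swish(x), F.silu(x)))  # True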