Python torch.reciprocal() Examples

The following are 23 code examples of torch.reciprocal(), which computes the element-wise reciprocal (1/x) of a tensor. Each example is taken from the project and source file named above it. You may also want to check out all available functions/classes of the module torch, or try the search function.
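For reference, torch.reciprocal(input) returns a new tensor with the element-wise reciprocal of the input; a minimal sketch:

import torch

x = torch.tensor([1.0, 2.0, 4.0])
print(torch.reciprocal(x))  # tensor([1.0000, 0.5000, 0.2500])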
Example #1
Source File: metrics.py    From aqm-plus with BSD 3-Clause "New" or "Revised" License    6 votes
def evaluateMetric(ranks, metric):
    ranks = ranks.data.numpy()
    if metric == 'r1':
        ranks = ranks.reshape(-1)
        return 100 * (ranks == 1).sum() / float(ranks.shape[0])
    if metric == 'r5':
        ranks = ranks.reshape(-1)
        return 100 * (ranks <= 5).sum() / float(ranks.shape[0])
    if metric == 'r10':
        # ranks = ranks.view(-1)
        ranks = ranks.reshape(-1)
        # return 100*torch.sum(ranks <= 10).data[0]/float(ranks.size(0))
        return 100 * (ranks <= 10).sum() / float(ranks.shape[0])
    if metric == 'mean':
        # ranks = ranks.view(-1).float()
        ranks = ranks.reshape(-1).astype(float)
        return ranks.mean()
    if metric == 'mrr':
        # ranks = ranks.view(-1).float()
        ranks = ranks.reshape(-1).astype(float)
        # return torch.reciprocal(ranks).mean().data[0]
        return (1 / ranks).mean() 
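Since ranks has been converted to a NumPy array, (1 / ranks) plays the role of torch.reciprocal here; the commented-out lines show the original torch version. For example, ranks of [1, 2, 4] give an MRR of (1 + 1/2 + 1/4) / 3 ≈ 0.583.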
Example #2
Source File: metric.py    From Neural-Attentive-Session-Based-Recommendation-PyTorch with GNU General Public License v3.0    6 votes
def get_mrr(indices, targets):
    """
    Calculates the MRR score for the given predictions and targets
    Args:
        indices (Bxk): torch.LongTensor. top-k indices predicted by the model.
        targets (B): torch.LongTensor. actual target indices.

    Returns:
        mrr (float): the mrr score
    """

    tmp = targets.view(-1, 1)
    targets = tmp.expand_as(indices)
    hits = (targets == indices).nonzero()
    ranks = hits[:, -1] + 1
    ranks = ranks.float()
    rranks = torch.reciprocal(ranks)
    mrr = torch.sum(rranks).data / targets.size(0)
    return mrr.item() 
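A quick sanity check with hypothetical toy values, where each row of indices holds the model's top-k predictions for one batch item:

import torch

indices = torch.LongTensor([[3, 1, 2], [0, 4, 5]])  # B=2, k=3 top-k predictions
targets = torch.LongTensor([1, 5])                  # true items, found at ranks 2 and 3
print(get_mrr(indices, targets))                    # (1/2 + 1/3) / 2 ≈ 0.4167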
Example #3
Source File: slate_estimators.py    From ReAgent with BSD 3-Clause "New" or "Revised" License    6 votes
def _get_discount(self, slate_size: int) -> Tensor:
        weights = DCGSlateMetric._weights
        if (
            weights is None
            or weights.shape[0] < slate_size
            or weights.device != self._device
        ):
            DCGSlateMetric._weights = torch.reciprocal(
                torch.log2(
                    torch.arange(
                        2, slate_size + 2, dtype=torch.double, device=self._device
                    )
                )
            )
        weights = DCGSlateMetric._weights
        assert weights is not None
        return weights[:slate_size] 
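The cached weights are the standard DCG position discounts 1/log2(rank + 1) for ranks 1..slate_size; a standalone sketch of the computation, without the class-level caching:

import torch

slate_size = 4
weights = torch.reciprocal(torch.log2(torch.arange(2, slate_size + 2, dtype=torch.double)))
print(weights)  # tensor([1.0000, 0.6309, 0.5000, 0.4307], dtype=torch.float64)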
Example #4
Source File: distributions.py    From Maximally_Interfered_Retrieval with MIT License    5 votes
def log_normal_diag(x, mean, log_var, average=False, reduce=True, dim=None):
    log_norm = -0.5 * (log_var + (x - mean) * (x - mean) * log_var.exp().reciprocal())
    if reduce:
        if average:
            return torch.mean(log_norm, dim)
        else:
            return torch.sum(log_norm, dim)
    else:
        return log_norm 
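Here log_var.exp().reciprocal() is 1/sigma^2, so log_norm is the Gaussian log-density without the constant -0.5*log(2*pi) term. A quick check of that identity against torch.distributions, using the function defined above:

import math
import torch

x, mean, log_var = torch.randn(3), torch.zeros(3), torch.randn(3)
ref = torch.distributions.Normal(mean, log_var.mul(0.5).exp()).log_prob(x)
ours = log_normal_diag(x, mean, log_var, reduce=False)
assert torch.allclose(ours, ref + 0.5 * math.log(2 * math.pi))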
Example #5
Source File: mdn.py    From DL-Seq2Seq with MIT License    5 votes
def gaussian_distribution(y, mu, sigma):
    # make |mu|=K copies of y, subtract mu, divide by sigma
    result = (y.expand_as(mu) - mu) * torch.reciprocal(sigma)
    result = -0.5 * (result * result)
    return (torch.exp(result) * torch.reciprocal(sigma)) * oneDivSqrtTwoPI 
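oneDivSqrtTwoPI is a module-level constant not shown in the snippet, presumably 1/sqrt(2*pi); with it defined, the function evaluates the Gaussian density of y under each of the K mixture components. A small sketch under that assumption:

import math
import torch

oneDivSqrtTwoPI = 1.0 / math.sqrt(2.0 * math.pi)

y = torch.zeros(4, 1)                             # one target per sample
mu, sigma = torch.zeros(4, 3), torch.ones(4, 3)   # K=3 mixture components
print(gaussian_distribution(y, mu, sigma))        # every entry ≈ 0.3989, the N(0, 1) density at 0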
Example #6
Source File: ops.py    From tntorch with GNU Lesser General Public License v3.0    5 votes
def rsqrt(t):
    """
    Element-wise square-root reciprocal computed using cross-approximation; see PyTorch's `rsqrt()`.

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    return tn.cross(lambda x: torch.rsqrt(x), tensors=t, verbose=False) 
Example #7
Source File: ops.py    From tntorch with GNU Lesser General Public License v3.0    5 votes
def reciprocal(t):
    """
    Element-wise reciprocal computed using cross-approximation; see PyTorch's `reciprocal()`.

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    return tn.cross(lambda x: torch.reciprocal(x), tensors=t, verbose=False) 
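In both tntorch snippets, tn.cross approximates an element-wise function of a compressed (tensor-train) tensor by cross-approximation, so the result is an approximation of 1/x rather than an exact tensor. Hypothetical usage, assuming a tensor whose entries are bounded away from zero so the reciprocal is well-behaved:

import tntorch as tn

t = tn.rand([8, 8, 8], ranks_tt=3) + tn.ones([8, 8, 8]) * 2  # shift a random TT tensor away from zero
r = tn.reciprocal(t)                                         # TT approximation of 1/t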
Example #8
Source File: DBHead.py    From DBNet.pytorch with Apache License 2.0    5 votes
def step_function(self, x, y):
        return torch.reciprocal(1 + torch.exp(-self.k * (x - y))) 
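Since 1 / (1 + exp(-k(x - y))) is exactly the sigmoid of k(x - y), the same step function can be written with torch.sigmoid; a quick equivalence check (k = 50 chosen for illustration):

import torch

k = 50.0
x, y = torch.rand(2, 3), torch.rand(2, 3)
a = torch.reciprocal(1 + torch.exp(-k * (x - y)))
b = torch.sigmoid(k * (x - y))
assert torch.allclose(a, b)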
Example #9
Source File: training.py    From occupancy_networks with MIT License    5 votes
def give_laplacian_coordinates(self, pred, block_id):
        r''' Returns the Laplacian coordinates for the predictions and the given block.

            The helper matrices are used to detect neighbouring vertices and
            the number of neighbours, which are relevant for the weight matrix.
            The maximal number of neighbours is 8; if a vertex has fewer,
            the index -1 is used, which points to the added zero vertex.

        Arguments:
            pred (tensor): vertex predictions
            block_id (int): deformation block id (1, 2 or 3)
        '''
        batch_size = pred.shape[0]
        num_vert = pred.shape[1]
        # Add "zero vertex" for vertices with less than 8 neighbours
        vertex = torch.cat(
            [pred, torch.zeros(batch_size, 1, 3).to(self.device)], 1)
        assert(vertex.shape == (batch_size, num_vert+1, 3))
        # Get 8 neighbours for each vertex; if a vertex has less, the
        # remaining indices are -1
        indices = torch.from_numpy(
            self.lape_idx[block_id-1][:, :8]).to(self.device)
        assert(indices.shape == (num_vert, 8))
        weights = torch.from_numpy(
            self.lape_idx[block_id-1][:, -1]).float().to(self.device)
        weights = torch.reciprocal(weights)
        weights = weights.view(-1, 1).expand(-1, 3)
        vertex_select = vertex[:, indices.long(), :]
        assert(vertex_select.shape == (batch_size, num_vert, 8, 3))
        laplace = vertex_select.sum(dim=2)  # Add neighbours
        laplace = torch.mul(laplace, weights)  # Multiply by weights
        laplace = torch.sub(pred, laplace)  # Subtract from prediction
        assert(laplace.shape == (batch_size, num_vert, 3))
        return laplace 
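The weights tensor holds the reciprocal of each vertex's neighbour count (the last column of lape_idx), so the returned value is the uniform graph-Laplacian coordinate L(v_i) = v_i - (1/|N(i)|) * sum of v_j over the neighbours j; padded -1 indices select the appended zero vertex and contribute nothing to the sum.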
Example #10
Source File: distributions.py    From ffjord with MIT License    5 votes
def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None):
    log_norm = -(x - mean) * (x - mean)
    log_norm *= torch.reciprocal(2. * log_var.exp())
    log_norm += -0.5 * log_var
    log_norm += -0.5 * torch.log(2. * PI)

    if reduce:
        if average:
            return torch.mean(log_norm, dim)
        else:
            return torch.sum(log_norm, dim)
    else:
        return log_norm 
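Unlike the unnormalized log_normal_diag variants elsewhere on this page, this version keeps the full normalizing constant, so it agrees exactly with torch.distributions. A quick check, using the function defined above and assuming the module-level constant PI equals math.pi:

import math
import torch

PI = torch.tensor(math.pi)

x, mean, log_var = torch.randn(3), torch.zeros(3), torch.randn(3)
ref = torch.distributions.Normal(mean, log_var.mul(0.5).exp()).log_prob(x)
assert torch.allclose(log_normal_normalized(x, mean, log_var, reduce=False), ref)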
Example #11
Source File: distributions.py    From ffjord with MIT License    5 votes
def log_normal_diag(x, mean, log_var, average=False, reduce=True, dim=None):
    log_norm = -0.5 * (log_var + (x - mean) * (x - mean) * log_var.exp().reciprocal())
    if reduce:
        if average:
            return torch.mean(log_norm, dim)
        else:
            return torch.sum(log_norm, dim)
    else:
        return log_norm 
Example #12
Source File: unary.py    From torch2trt with MIT License    5 votes
def aten_reciprocal(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.RECIP)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        raise NotImplementedError

    return [torch.reciprocal(inp)] 
Example #13
Source File: distributions.py    From Maximally_Interfered_Retrieval with MIT License    5 votes
def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None):
    log_norm = -(x - mean) * (x - mean)
    log_norm *= torch.reciprocal(2.*log_var.exp())
    log_norm += -0.5 * log_var
    log_norm += -0.5 * torch.log(2. * PI)

    if reduce:
        if average:
            return torch.mean(log_norm, dim)
        else:
            return torch.sum(log_norm, dim)
    else:
        return log_norm 
Example #14
Source File: layers.py    From nice_pytorch with BSD 3-Clause "New" or "Revised" License    5 votes
def anticoupling_law(self, a, b):
        return torch.mul(a, torch.reciprocal(b)) 
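Multiplying by a reciprocal is element-wise division, so this anticoupling law is equivalent to torch.div(a, b) up to floating-point rounding:

import torch

a, b = torch.randn(3), torch.rand(3) + 0.1   # keep b away from zero
assert torch.allclose(torch.mul(a, torch.reciprocal(b)), torch.div(a, b))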
Example #15
Source File: distributions.py    From UMNN with BSD 3-Clause "New" or "Revised" License    5 votes
def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None):
    log_norm = -(x - mean) * (x - mean)
    log_norm *= torch.reciprocal(2. * log_var.exp())
    log_norm += -0.5 * log_var
    log_norm += -0.5 * torch.log(2. * PI)

    if reduce:
        if average:
            return torch.mean(log_norm, dim)
        else:
            return torch.sum(log_norm, dim)
    else:
        return log_norm 
Example #16
Source File: distributions.py    From UMNN with BSD 3-Clause "New" or "Revised" License    5 votes
def log_normal_diag(x, mean, log_var, average=False, reduce=True, dim=None):
    log_norm = -0.5 * (log_var + (x - mean) * (x - mean) * log_var.exp().reciprocal())
    if reduce:
        if average:
            return torch.mean(log_norm, dim)
        else:
            return torch.sum(log_norm, dim)
    else:
        return log_norm 
Example #17
Source File: distributions.py    From sylvester-flows with MIT License    5 votes
def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None):
    log_norm = -(x - mean) * (x - mean)
    log_norm *= torch.reciprocal(2.*log_var.exp())
    log_norm += -0.5 * log_var
    log_norm += -0.5 * torch.log(2. * PI)

    if reduce:
        if average:
            return torch.mean(log_norm, dim)
        else:
            return torch.sum(log_norm, dim)
    else:
        return log_norm 
Example #18
Source File: distributions.py    From sylvester-flows with MIT License    5 votes
def log_normal_diag(x, mean, log_var, average=False, reduce=True, dim=None):
    log_norm = -0.5 * (log_var + (x - mean) * (x - mean) * log_var.exp().reciprocal())
    if reduce:
        if average:
            return torch.mean(log_norm, dim)
        else:
            return torch.sum(log_norm, dim)
    else:
        return log_norm 
Example #19
Source File: anybit.py    From alibabacloud-quantization-networks with Apache License 2.0    5 votes
def updateQuaGradWeight(self, T, alpha, beta, init):
        """
        Calculate the gradients of all the parameters.
        The gradients of the model parameters are saved in [Variable].grad.data.
        Args:
            T: the temperature, a single number.
            alpha: the scale factor of the output, a list.
            beta: the scale factor of the input, a list.
            init: a flag representing the first loading of the quantization function.
        Returns:
            alpha_grad: the gradient of alpha.
            beta_grad: the gradient of beta.
        """
        beta_grad = [0.0] * len(beta)
        alpha_grad = [0.0] * len(alpha)
        T = (T > 2000) * 2000 + (T <= 2000) * T  # clamp the temperature at 2000
        for index in range(self.num_of_params):
            if init:
                beta[index].data = torch.Tensor([self.threshold / self.target_modules[index].data.abs().max()]).cuda()
                alpha[index].data = torch.reciprocal(beta[index].data)
            x = self.target_modules[index].data.mul(beta[index].data)

            # set T = 1 when training a binary model
            y_grad = self.backward(x, 1, self.QW_biases[index]).mul(T)
            # set T = T when training the other quantization models
            # y_grad = self.backward(x, T, self.QW_biases[index]).mul(T)

            beta_grad[index] = y_grad.mul(self.target_modules[index].data).mul(alpha[index].data).\
                               mul(self.target_modules[index].grad.data).sum()
            alpha_grad[index] = self.forward(x, T, self.QW_biases[index]).\
                                mul(self.target_modules[index].grad.data).sum()

            self.target_modules[index].grad.data = y_grad.mul(beta[index].data).mul(alpha[index].data).\
                                                   mul(self.target_modules[index].grad.data)
        return alpha_grad, beta_grad 
Example #20
Source File: anybit.py    From alibabacloud-quantization-networks with Apache License 2.0    5 votes
def quantizeConvParams(self, T, alpha, beta, init, train_phase):
        """
        Quantize the parameters in the forward pass.
        """
        T = (T > 2000) * 2000 + (T <= 2000) * T  # clamp the temperature at 2000
        for index in range(self.num_of_params):
            if init:
                beta[index].data = torch.Tensor([self.threshold / self.target_modules[index].data.abs().max()]).cuda()
                alpha[index].data = torch.reciprocal(beta[index].data)
            # scale w by beta
            x = self.target_modules[index].data.mul(beta[index].data)

            y = self.forward(x, T, self.QW_biases[index], train=train_phase)
            # scale the quantized w^hat back by alpha
            self.target_modules[index].data = y.mul(alpha[index].data)
Example #21
Source File: quantization.py    From alibabacloud-quantization-networks with Apache License 2.0    5 votes
def init_alpha_and_beta(self, init_beta):
        """
        Initialize the alpha and beta of quantization function.
        init_data in numpy format.
        """
        # activations initialization (obtained offline)
        self.beta.data = torch.Tensor([init_beta]).cuda()
        self.alpha.data = torch.reciprocal(self.beta.data)
        self.alpha_beta_inited = True 
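In all three quantization snippets, alpha is initialized as the exact reciprocal of beta, so scaling the weights by beta before the quantization function and by alpha afterwards preserves their dynamic range; a minimal illustration (on CPU, without the .cuda() calls):

import torch

beta = torch.Tensor([0.05])
alpha = torch.reciprocal(beta)   # tensor([20.])
assert torch.allclose(alpha * beta, torch.ones(1))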
Example #22
Source File: BASS.py    From BASS with MIT License    4 votes
def M_Step(X, loc, argmax, Sigma, SigmaInv, Nk, X1, X2_00, X2_01, X2_11, init, Nk_s, X1_s, X2_00_s, X2_01_s, X2_11_s, SigmaXY_s, SigmaInv_s, it, max_it): #Nk_r,X1_r, X2_00_r, X2_01_r,X2_11_r,SigmaXY_r,SigmaXY_l,SigmaInv_r,SigmaInv_l):

    Nk.zero_()
    Nk_s.zero_()
    X1.zero_()
    X2_00.zero_()
    X2_01.zero_()
    X2_11.zero_()
    argmax = argmax[:, 0]
    Nk.index_add_(0, argmax, Global.ones)
    Nk = Nk + 1e-10  # avoid division by zero for empty clusters
    X1.index_add_(0, argmax, X)

    C = torch.div(X1, Nk.unsqueeze(1))  # per-cluster means

    # accumulate per-cluster second moments of the (x, y) locations
    mul = torch.pow(loc[:, 0], 2)
    X2_00.index_add_(0, argmax, mul)

    mul = torch.mul(loc[:, 0], loc[:, 1])
    X2_01.index_add_(0, argmax, mul)

    mul = torch.pow(loc[:, 1], 2)
    X2_11.index_add_(0, argmax, mul)

    # scatter matrices: sum(x*x) - sum(x)*sum(x)/Nk per cluster
    Sigma00 = torch.add(X2_00, -torch.div(torch.pow(X1[:, 0], 2), Nk))
    Sigma01 = torch.add(X2_01, -torch.div(torch.mul(X1[:, 0], X1[:, 1]), Nk))
    Sigma11 = torch.add(X2_11, -torch.div(torch.pow(X1[:, 1], 2), Nk))

    a_prior = Global.split_lvl[0:Nk.shape[0]]

    Global.psi_prior = torch.mul(torch.pow(a_prior, 2).unsqueeze(1),
                                 torch.eye(2).reshape(-1, 4).to(Global.device))
    Global.ni_prior = (Global.C_prior * a_prior) - 3

    # prior-smoothed covariance estimate per cluster
    Sigma[:, 0] = torch.div(torch.add(Sigma00, Global.psi_prior[:, 0]), torch.add(Nk, Global.ni_prior))
    Sigma[:, 1] = torch.div(Sigma01, torch.add(Nk, Global.ni_prior))
    Sigma[:, 2] = Sigma[:, 1]
    Sigma[:, 3] = torch.div(torch.add(Sigma11, Global.psi_prior[:, 3]), torch.add(Nk, Global.ni_prior))

    # analytic 2x2 inverse: det holds the reciprocal of each determinant
    det = torch.reciprocal(torch.add(torch.mul(Sigma[:, 0], Sigma[:, 3]),
                                     -torch.mul(Sigma[:, 1], Sigma[:, 2])))
    det[(det <= 0).nonzero()] = 0.00001

    SigmaInv[:, 0] = torch.mul(Sigma[:, 3], det)
    SigmaInv[:, 1] = torch.mul(-Sigma[:, 1], det)
    SigmaInv[:, 2] = torch.mul(-Sigma[:, 2], det)
    SigmaInv[:, 3] = torch.mul(Sigma[:, 0], det)

    SIGMAxylab[:, 0:2, 0:2] = Sigma[:, 0:4].view(-1, 2, 2)  # SIGMAxylab: a module-level global in the source
    logdet = torch.log(torch.mul(torch.reciprocal(det), Global.detInt))
    return C, logdet
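The covariance update above inverts each 2x2 matrix analytically: det holds the reciprocal of the determinant, and the adjugate entries are scaled by it. A standalone sketch of the same identity, checked against torch.inverse:

import torch

S = torch.tensor([[2.0, 0.5], [0.5, 1.0]])
rdet = torch.reciprocal(S[0, 0] * S[1, 1] - S[0, 1] * S[1, 0])
inv = rdet * torch.tensor([[S[1, 1], -S[0, 1]], [-S[1, 0], S[0, 0]]])
assert torch.allclose(inv, torch.inverse(S))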
Example #23
Source File: dice.py    From MONAI with Apache License 2.0    4 votes
def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        w_type: Union[Weight, str] = Weight.SQUARE,
        reduction: Union[LossReduction, str] = LossReduction.MEAN,
    ):
        """
        Args:
            include_background: If False, channel index 0 (the background category) is excluded from the calculation.
            to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
            sigmoid: If True, apply a sigmoid function to the prediction.
            softmax: If True, apply a softmax function to the prediction.
            w_type: {``"square"``, ``"simple"``, ``"uniform"``}
                Type of function to transform ground truth volume to a weight factor. Defaults to ``"square"``.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.

        Raises:
            ValueError: reduction={reduction} is invalid. Valid options are: none, mean or sum.
            ValueError: sigmoid=True and softmax=True are not compatible.

        """
        super().__init__(reduction=LossReduction(reduction))

        self.include_background = include_background
        self.to_onehot_y = to_onehot_y
        if sigmoid and softmax:
            raise ValueError("sigmoid=True and softmax=True are not compatible.")
        self.sigmoid = sigmoid
        self.softmax = softmax

        w_type = Weight(w_type)
        self.w_func: Callable = torch.ones_like
        if w_type == Weight.SIMPLE:
            self.w_func = torch.reciprocal
        elif w_type == Weight.SQUARE:
            self.w_func = lambda x: torch.reciprocal(x * x)
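The three weighting schemes map a per-class ground-truth volume v to a weight: "uniform" gives 1, "simple" gives 1/v, and "square" gives 1/v^2 (the generalized Dice weighting, which boosts small structures); a minimal illustration of the reciprocal-based branches:

import torch

v = torch.tensor([10.0, 100.0, 1000.0])  # per-class foreground volumes
print(torch.reciprocal(v))      # "simple": tensor([1.0000e-01, 1.0000e-02, 1.0000e-03])
print(torch.reciprocal(v * v))  # "square": tensor([1.0000e-02, 1.0000e-04, 1.0000e-06])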