Python torch.sinh() Examples

The following are 12 code examples of torch.sinh(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
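Before the project examples, here is a minimal standalone sketch of torch.sinh itself, checked against the definition sinh(x) = (exp(x) - exp(-x)) / 2:

import torch

x = torch.linspace(-2.0, 2.0, steps=5)
print(torch.sinh(x))
print((torch.exp(x) - torch.exp(-x)) / 2)  # the definition gives the same values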
Example #1
Source File: trigonometrics.py    From heat with MIT License 6 votes
def sinh(x, out=None):
    """
    Return the hyperbolic sine, element-wise.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the hyperbolic sine.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
        or set to None, a fresh tensor is allocated.

    Returns
    -------
    hyperbolic sine : ht.DNDarray
        A tensor of the same shape as x, containing the hyperbolic sine of each element in this tensor.
        If out was provided, the returned tensor is a reference to it.

    Examples
    --------
    >>> ht.sinh(ht.arange(-6, 7, 2))
    tensor([-201.7132,  -27.2899,   -3.6269,    0.0000,    3.6269,   27.2899,  201.7132])
    """
    return local_op(torch.sinh, x, out) 
Example #2
Source File: lorentz.py    From lorentz-embeddings with MIT License 5 votes
def exp_map(x, v):
    # Lorentz exponential map; shapes: (B, D), (B, D) -> (B, D)
    tn = tangent_norm(v).unsqueeze(dim=1)
    tn_expand = tn.repeat(1, x.size()[-1])
    result = torch.cosh(tn) * x + torch.sinh(tn) * (v / tn)
    result = torch.where(tn_expand > 0, result, x)  # only update if tangent norm is > 0
    return result 
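The Lorentz exponential map should land back on the hyperboloid <y, y>_L = -1, which can be verified with a self-contained sketch; minkowski_dot and tangent_norm below are hypothetical stand-ins for the project's helpers, and exp_map is the function above:

import torch

def minkowski_dot(u, v):
    # Lorentzian inner product: -u_0*v_0 + sum_i u_i*v_i, batched over rows
    return -u[:, 0] * v[:, 0] + (u[:, 1:] * v[:, 1:]).sum(dim=1)

def tangent_norm(v):  # hypothetical stand-in for the project's helper
    return torch.sqrt(torch.clamp(minkowski_dot(v, v), min=1e-12))

# A point x with <x, x>_L = -1 and a tangent vector v with <x, v>_L = 0:
p = torch.tensor([[0.3, -0.2]], dtype=torch.float64)
x = torch.cat([torch.sqrt(1 + (p * p).sum(dim=1, keepdim=True)), p], dim=1)
w = torch.tensor([[0.1, 0.4, -0.7]], dtype=torch.float64)
v = w + minkowski_dot(x, w).unsqueeze(1) * x  # project w onto the tangent space at x

y = exp_map(x, v)
print(minkowski_dot(y, y))  # ~ -1.0: the result stays on the hyperboloid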
Example #3
Source File: ising.py    From tensorgrad with Apache License 2.0 5 votes
def build_tensor(K):
    # eigenvalues of the 2x2 bond Boltzmann matrix [[e^K, e^-K], [e^-K, e^K]]
    lam = [torch.cosh(K)*2, torch.sinh(K)*2]
    T = []
    for i in range(2):
        for j in range(2):
            for k in range(2):
                for l in range(2):
                    if ((i+j+k+l)%2==0):
                        T.append(torch.sqrt(lam[i]*lam[j]*lam[k]*lam[l])/2.)
                    else:
                        T.append(torch.tensor(0.0, dtype=K.dtype, device=K.device))
    T = torch.stack(T).view(2, 2, 2, 2)
    return T 
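Using build_tensor above, a quick consistency check: on a 1x1 periodic lattice (one spin with two bonds back to itself) the partition function is Z = sum over s of exp(2K) = 2*exp(2K), and tracing the two leg pairs of T reproduces it:

import torch

K = torch.tensor(0.5, dtype=torch.float64)
T = build_tensor(K)
Z = torch.einsum('ijij->', T)  # trace the horizontal and the vertical leg pair
print(torch.allclose(Z, 2 * torch.exp(2 * K)))  # True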
Example #4
Source File: transform.py    From flowseq with Apache License 2.0 5 votes
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
        a, b, c, d, g = NLSQ.get_pseudo_params(params)

        # double needed for stability. No effect on overall speed
        a = a.double()
        b = b.double()
        c = c.double()
        d = d.double()
        g = g.double()
        z = z.double()

        aa = -b * d.pow(2)
        bb = (z - a) * d.pow(2) - 2 * b * d * g
        cc = (z - a) * 2 * d * g - b * (1 + g.pow(2))
        dd = (z - a) * (1 + g.pow(2)) - c

        p = (3 * aa * cc - bb.pow(2)) / (3 * aa.pow(2))
        q = (2 * bb.pow(3) - 9 * aa * bb * cc + 27 * aa.pow(2) * dd) / (27 * aa.pow(3))

        t = -2 * torch.abs(q) / q * torch.sqrt(torch.abs(p) / 3)
        inter_term1 = -3 * torch.abs(q) / (2 * p) * torch.sqrt(3 / torch.abs(p))
        inter_term2 = 1 / 3 * arccosh(torch.abs(inter_term1 - 1) + 1)
        t = t * torch.cosh(inter_term2)

        tpos = -2 * torch.sqrt(torch.abs(p) / 3)
        inter_term1 = 3 * q / (2 * p) * torch.sqrt(3 / torch.abs(p))
        inter_term2 = 1 / 3 * arcsinh(inter_term1)
        tpos = tpos * torch.sinh(inter_term2)

        t[p > 0] = tpos[p > 0]
        z = t - bb / (3 * aa)
        arg = d * z + g
        denom = arg.pow(2) + 1
        logdet = torch.log(b - 2 * c * d * arg / denom.pow(2))

        z = z.float().mul(mask.unsqueeze(2))
        logdet = logdet.float().mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1) * -1.0
        return z, logdet 
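The t/tpos branches above implement Cardano's hyperbolic method for the single real root of a depressed cubic t^3 + p*t + q = 0: the cosh formula covers p < 0 with one real root, the sinh formula covers p > 0. A minimal sketch of the p > 0 branch, with arcsinh written out in log form (an assumption about the helper this file imports):

import torch

def arcsinh(x):  # assumed log-form helper: asinh(x) = log(x + sqrt(x^2 + 1))
    return torch.log(x + torch.sqrt(x ** 2 + 1))

p, q = torch.tensor(2.0), torch.tensor(-3.0)
t = -2 * torch.sqrt(p / 3) * torch.sinh(arcsinh(3 * q / (2 * p) * torch.sqrt(3 / p)) / 3)
print(t ** 3 + p * t + q)  # ~0: t is the real root (here t = 1)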
Example #5
Source File: hyperbolic.py    From flambe with MIT License 5 votes
def log_map(x, y):
    """Perform the log step."""
    d = dist(x, y)
    return (d / torch.sinh(d)) * (y - torch.cosh(d) * x) 
Example #6
Source File: hyperbolic.py    From flambe with MIT License 5 votes
def exp_map(x, y):
    """Perform the exp step."""
    n = torch.clamp(norm(y), min=EPSILON)
    return torch.cosh(n) * x + (torch.sinh(n) / n) * y 
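log_map and exp_map above are mutual inverses on the hyperboloid. A round-trip sketch, where ldot, dist, norm, and EPSILON are assumed stand-ins for flambe's module-level helpers:

import torch

EPSILON = 1e-7

def ldot(u, v):  # assumed Lorentzian inner product
    return -u[0] * v[0] + (u[1:] * v[1:]).sum()

def dist(x, y):  # assumed hyperboloid distance arccosh(-<x, y>_L), in log form
    z = torch.clamp(-ldot(x, y), min=1.0 + EPSILON)
    return torch.log(z + torch.sqrt(z * z - 1))

def norm(v):
    return torch.sqrt(torch.clamp(ldot(v, v), min=0.0))

def lift(p):  # embed a Euclidean point onto the hyperboloid <x, x>_L = -1
    return torch.cat([torch.sqrt(1 + (p * p).sum()).reshape(1), p])

x = lift(torch.tensor([0.3, -0.1], dtype=torch.float64))
y = lift(torch.tensor([-0.5, 0.8], dtype=torch.float64))
print(torch.allclose(exp_map(x, log_map(x, y)), y))  # True: exp undoes log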
Example #7
Source File: hyperbolic_radius.py    From pvae with MIT License 5 votes
def grad_log_prob(self, value):
        res = - value / self.scale.pow(2) + (self.dim - 1) * self.c.sqrt() * torch.cosh(self.c.sqrt() * value) / torch.sinh(self.c.sqrt() * value) 
        return res 
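The ratio cosh/sinh is coth, i.e. the derivative of log sinh. Assuming pvae's unnormalized log-density log p(r) = -r^2/(2*scale^2) + (dim - 1)*log sinh(sqrt(c)*r), autograd confirms the hand-written gradient:

import torch

scale, c, dim = 1.0, torch.tensor(1.0), 3
r = torch.tensor(0.7, requires_grad=True)
logp = -r ** 2 / (2 * scale ** 2) + (dim - 1) * torch.log(torch.sinh(c.sqrt() * r))
logp.backward()
manual = -r / scale ** 2 + (dim - 1) * c.sqrt() * torch.cosh(c.sqrt() * r) / torch.sinh(c.sqrt() * r)
print(torch.allclose(r.grad, manual.detach()))  # True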
Example #8
Source File: poincareball.py    From pvae with MIT License 5 votes
def logdetexp(self, x, y, is_vector=False, keepdim=False):
        d = self.norm(x, y, keepdim=keepdim) if is_vector else self.dist(x, y, keepdim=keepdim)
        return (self.dim - 1) * (torch.sinh(self.c.sqrt()*d) / self.c.sqrt() / d).log() 
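Since sinh(z)/z -> 1 as z -> 0, the factor inside the log tends to 1 and the log-determinant vanishes for nearby points, as expected of a Jacobian at the identity. A quick numeric check:

import torch

c, dim = torch.tensor(1.0), 3
d = torch.tensor([1e-3, 0.5, 2.0])
print((dim - 1) * (torch.sinh(c.sqrt() * d) / c.sqrt() / d).log())
# first entry is ~0; the values grow with d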
Example #9
Source File: unary.py    From torch2trt with MIT License 5 votes
def aten_sinh(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.SINH)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        raise NotImplementedError

    return [torch.sinh(inp)] 
Example #10
Source File: ops.py    From tntorch with GNU Lesser General Public License v3.0 5 votes
def sinh(t):
    """
    Element-wise hyperbolic sine computed using cross-approximation; see PyTorch's `sinh()`.

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    return tn.cross(lambda x: torch.sinh(x), tensors=t, verbose=False) 
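A hypothetical usage sketch; tn.rand and the .torch() densification are assumptions about the tntorch API rather than anything taken from this file:

import torch
import tntorch as tn

t = tn.rand([16] * 3, ranks_tt=4)  # assumed constructor: random 3-D tensor in TT format
s = tn.sinh(t)                     # element-wise sinh via cross-approximation
print(torch.allclose(s.torch(), torch.sinh(t.torch()), atol=1e-4))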
Example #11
Source File: flows.py    From TextFlow with MIT License 4 votes
def standard(x, nn_outp):
        a, b, c, d, f = NLSq.get_pseudo_params(nn_outp)
        
        # double needed for stability. No effect on overall speed
        a = a.double()
        b = b.double()
        c = c.double()
        d = d.double()
        f = f.double()
        x = x.double()

        aa = -b*d.pow(2)
        bb = (x-a)*d.pow(2) - 2*b*d*f
        cc = (x-a)*2*d*f - b*(1+f.pow(2))
        dd = (x-a)*(1+f.pow(2)) - c

        p = (3*aa*cc - bb.pow(2))/(3*aa.pow(2))
        q = (2*bb.pow(3) - 9*aa*bb*cc + 27*aa.pow(2)*dd)/(27*aa.pow(3))
        
        t = -2*torch.abs(q)/q*torch.sqrt(torch.abs(p)/3)
        inter_term1 = -3*torch.abs(q)/(2*p)*torch.sqrt(3/torch.abs(p))
        inter_term2 = 1/3*arccosh(torch.abs(inter_term1-1)+1)
        t = t*torch.cosh(inter_term2)

        tpos = -2*torch.sqrt(torch.abs(p)/3)
        inter_term1 = 3*q/(2*p)*torch.sqrt(3/torch.abs(p))
        inter_term2 = 1/3*arcsinh(inter_term1)
        tpos = tpos*torch.sinh(inter_term2)

        t[p > 0] = tpos[p > 0]
        y = t - bb/(3*aa)

        arg = d*y + f
        denom = 1 + arg.pow(2)

        x_new = a + b*y + c/denom  # forward map applied to the root; reconstructs x (unused sanity check)

        logdet = -torch.log(b - 2*c*d*arg/denom.pow(2)).sum(-1)

        y = y.float()
        logdet = logdet.float()

        return y, logdet 
Example #12
Source File: _abstract_nalu.py    From stable-nalu with MIT License 4 votes
def forward(self, x):
        self.stored_input = x

        g_add = self._compute_gate(x, self.G_add, self.bias_add)
        self.stored_gate_add = g_add

        if self.nalu_two_gate:
            g_mul = self._compute_gate(x, self.G_mul, self.bias_mul)
            self.stored_gate_mul = g_mul
            self.writer.add_histogram('gate/add', g_add)
            self.writer.add_histogram('gate/mul', g_mul)
        else:
            g_mul = 1 - g_add
            self.writer.add_histogram('gate', g_add)
            self.writer.add_scalar('gate/mean', torch.mean(g_add), verbose_only=False)

        # a = W x = nac(x)
        a = self.nac_add(x)

        # m = exp(W log(|x| + eps)) = exp(nac(log(|x| + eps)))
        if self.nalu_mul == 'normal':
            m = torch.exp(self.nac_mul(
                torch.log(torch.abs(x) + self.eps)
            ))
        elif self.nalu_mul == 'safe':
            m = torch.exp(self.nac_mul(
                torch.log(torch.abs(x - 1) + 1)
            ))
        elif self.nalu_mul == 'max-safe':
            m = torch.exp(self.nac_mul(
                torch.log(torch.relu(x - 1) + 1)
            ))
        elif self.nalu_mul == 'trig':
            m = torch.sinh(self.nac_mul(
                torch.log(x+(x**2+1)**0.5 + self.eps)  # manual asinh; torch.asinh was only added in PyTorch 1.7
            ))
        elif self.nalu_mul == 'mnac':
            m = self.nac_mul(x)
        else:
            raise ValueError(f'Unsupported nalu_mul option ({self.nalu_mul})')

        self.writer.add_histogram('add', a)
        self.writer.add_histogram('mul', m)
        # y = g (*) a + (1 - g) (*) m
        y = g_add * a + g_mul * m

        return y
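In the 'trig' branch above, the additive NAC operates in asinh-space: log(x + sqrt(x^2 + 1)) is exactly asinh(x), well-defined for negative x too since x + sqrt(x^2 + 1) > 0, and the outer torch.sinh inverts it. A quick check of the identity, with the eps term omitted:

import torch

x = torch.tensor([-3.0, -0.5, 0.0, 2.0])
asinh_manual = torch.log(x + (x ** 2 + 1) ** 0.5)
print(torch.allclose(torch.sinh(asinh_manual), x))  # True: sinh inverts the manual asinh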