Python torch.cosh() Examples

The following are 14 code examples of torch.cosh(), drawn from open-source projects. Each example notes its source file, originating project, and license. You may also want to check out all other available functions and classes of the torch module.
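torch.cosh() computes the element-wise hyperbolic cosine of a tensor. As a quick orientation before the project examples, here is a minimal standalone sketch (the printed values follow from cosh(0) = 1 and cosh being an even function):

    import torch

    x = torch.tensor([-1.0, 0.0, 1.0])
    print(torch.cosh(x))  # tensor([1.5431, 1.0000, 1.5431]); cosh(-x) == cosh(x)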
Example #1
Source File: loss_functions.py From signaltrain with GNU General Public License v3.0
def calc_loss(y_hat, y_cuda, mag_hat, batch_size=20, scale_by_freq=None, l1_lambda=2e-5, reg_logcosh=False):
    # Reconstruction term plus regularization -> slightly less wiggly waveform.
    # In every branch, the second term is an L1-style regularization that helps
    # 'damp' high-frequency noise.
    if not reg_logcosh:
        if scale_by_freq is None:
            loss = logcosh(y_hat, y_cuda) + l1_lambda * torch.abs(mag_hat).mean()
        else:
            loss = logcosh(y_hat, y_cuda) + l1_lambda / 10 * torch.abs(mag_hat * scale_by_freq).mean()
    else:
        if scale_by_freq is None:
            loss = logcosh(y_hat, y_cuda) + l1_lambda * torch.mean(torch.log(torch.cosh(mag_hat)))
        else:
            loss = logcosh(y_hat, y_cuda) + l1_lambda / 10 * torch.mean(scale_by_freq * torch.log(torch.cosh(mag_hat)))
    return loss
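The log-cosh reconstruction term behaves like squared error for small residuals and like an absolute-value (L1) penalty for large ones, which is why it yields the "slightly less wiggly" waveform mentioned in the comment; Example #3 below shows the logcosh helper itself.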
Example #2
Source File: trigonometrics.py From heat with MIT License
def cosh(x, out=None):
    """
    Return the hyperbolic cosine, element-wise.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the hyperbolic cosine.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a
        broadcastable shape. If not provided or set to None, a fresh tensor is
        allocated.

    Returns
    -------
    hyperbolic cosine : ht.DNDarray
        A tensor of the same shape as x, containing the hyperbolic cosine of
        each element in this tensor. If out was provided, the result is a
        reference to it.

    Examples
    --------
    >>> ht.cosh(ht.arange(-6, 7, 2))
    tensor([201.7156,  27.3082,   3.7622,   1.0000,   3.7622,  27.3082, 201.7156])
    """
    return local_op(torch.cosh, x, out)
Example #3
Source File: loss_functions.py From signaltrain with GNU General Public License v3.0
def logcosh(y_hat, y):
    return torch.mean(torch.log(torch.cosh(y - y_hat)))
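A quick standalone check of the behavior described above (not from the signaltrain repo): for small residuals, log(cosh(x)) ≈ x²/2, and for large ones it approaches |x| - log(2).

    import torch

    x = torch.tensor([0.01, 10.0])
    print(torch.log(torch.cosh(x)))  # tensor([5.0000e-05, 9.3069e+00])
    # 0.01 -> ~0.01**2 / 2 (quadratic, like MSE); 10.0 -> ~10 - log(2) (linear, like L1)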
Example #4
Source File: lorentz.py From lorentz-embeddings with MIT License
def exp_map(x, v):  # BD, BD -> BD
    tn = tangent_norm(v).unsqueeze(dim=1)
    tn_expand = tn.repeat(1, x.size()[-1])
    result = torch.cosh(tn) * x + torch.sinh(tn) * (v / tn)
    result = torch.where(tn_expand > 0, result, x)  # only update if tangent norm is > 0
    return result
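Here cosh and sinh implement the exponential map on the hyperboloid (Lorentz) model, exp_x(v) = cosh(||v||) * x + sinh(||v||) * v/||v||, where ||v|| is the Minkowski norm of the tangent vector computed by the project's tangent_norm; the torch.where guard avoids the 0/0 that would otherwise arise at ||v|| = 0.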
Example #5
Source File: ising.py From tensorgrad with Apache License 2.0
def build_tensor(K):
    lam = [torch.cosh(K) * 2, torch.sinh(K) * 2]
    T = []
    for i in range(2):
        for j in range(2):
            for k in range(2):
                for l in range(2):
                    if (i + j + k + l) % 2 == 0:
                        T.append(torch.sqrt(lam[i] * lam[j] * lam[k] * lam[l]) / 2.)
                    else:
                        T.append(torch.tensor(0.0, dtype=K.dtype, device=K.device))
    T = torch.stack(T).view(2, 2, 2, 2)
    return T
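A minimal call sketch (assuming build_tensor from this example is in scope; the coupling value below is roughly the critical point K_c = ln(1 + √2)/2 ≈ 0.4407 of the 2-D Ising model):

    import torch

    K = torch.tensor(0.4407, dtype=torch.float64, requires_grad=True)
    T = build_tensor(K)  # 2x2x2x2 vertex tensor, differentiable w.r.t. K
    print(T.shape)       # torch.Size([2, 2, 2, 2])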
Example #6
Source File: transform.py From flowseq with Apache License 2.0
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
    a, b, c, d, g = NLSQ.get_pseudo_params(params)

    # double needed for stability. No effect on overall speed
    a = a.double()
    b = b.double()
    c = c.double()
    d = d.double()
    g = g.double()
    z = z.double()

    # coefficients of the cubic whose root inverts the transform
    aa = -b * d.pow(2)
    bb = (z - a) * d.pow(2) - 2 * b * d * g
    cc = (z - a) * 2 * d * g - b * (1 + g.pow(2))
    dd = (z - a) * (1 + g.pow(2)) - c

    # depressed-cubic parameters
    p = (3 * aa * cc - bb.pow(2)) / (3 * aa.pow(2))
    q = (2 * bb.pow(3) - 9 * aa * bb * cc + 27 * aa.pow(2) * dd) / (27 * aa.pow(3))

    # cosh branch (p < 0)
    t = -2 * torch.abs(q) / q * torch.sqrt(torch.abs(p) / 3)
    inter_term1 = -3 * torch.abs(q) / (2 * p) * torch.sqrt(3 / torch.abs(p))
    inter_term2 = 1 / 3 * arccosh(torch.abs(inter_term1 - 1) + 1)
    t = t * torch.cosh(inter_term2)

    # sinh branch (p > 0)
    tpos = -2 * torch.sqrt(torch.abs(p) / 3)
    inter_term1 = 3 * q / (2 * p) * torch.sqrt(3 / torch.abs(p))
    inter_term2 = 1 / 3 * arcsinh(inter_term1)
    tpos = tpos * torch.sinh(inter_term2)

    t[p > 0] = tpos[p > 0]
    z = t - bb / (3 * aa)

    arg = d * z + g
    denom = arg.pow(2) + 1
    logdet = torch.log(b - 2 * c * d * arg / denom.pow(2))

    z = z.float().mul(mask.unsqueeze(2))
    logdet = logdet.float().mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1) * -1.0
    return z, logdet
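The coefficients aa..dd come from inverting the NLSQ transform z = a + b*y + c/(1 + (d*y + g)²), which reduces to a cubic in y; the code solves it with the hyperbolic-function form of Cardano's method, using arccosh/cosh when p < 0 and arcsinh/sinh when p > 0 (selected by the t[p > 0] = tpos[p > 0] line), which is where torch.cosh enters.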
Example #7
Source File: hyperbolic.py From flambe with MIT License
def log_map(x, y):
    """Perform the log step."""
    d = dist(x, y)
    return (d / torch.sinh(d)) * (y - torch.cosh(d) * x)
Example #8
Source File: hyperbolic.py From flambe with MIT License
def exp_map(x, y):
    """Perform the exp step."""
    n = torch.clamp(norm(y), min=EPSILON)
    return torch.cosh(n) * x + (torch.sinh(n) / n) * y
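As a consistency check, exp_map should invert log_map. The sketch below uses stand-in definitions for flambe's internal dist, norm, and EPSILON, assuming the hyperboloid model with Minkowski inner product <u, v> = -u0*v0 + Σ ui*vi; log_map and exp_map are repeated exactly as defined in Examples #7 and #8.

    import math
    import torch

    EPSILON = 1e-7

    def minkowski_dot(u, v):
        # Minkowski inner product: negate the time-like (first) coordinate
        return -u[0] * v[0] + (u[1:] * v[1:]).sum()

    def dist(x, y):
        # geodesic distance on the hyperboloid
        return torch.acosh(torch.clamp(-minkowski_dot(x, y), min=1.0 + EPSILON))

    def norm(v):
        # Minkowski norm of a tangent vector
        return torch.sqrt(torch.clamp(minkowski_dot(v, v), min=EPSILON))

    def log_map(x, y):
        d = dist(x, y)
        return (d / torch.sinh(d)) * (y - torch.cosh(d) * x)

    def exp_map(x, y):
        n = torch.clamp(norm(y), min=EPSILON)
        return torch.cosh(n) * x + (torch.sinh(n) / n) * y

    x = torch.tensor([1.0, 0.0, 0.0])             # base point: -x0**2 + x1**2 + x2**2 = -1
    y = torch.tensor([math.sqrt(2.0), 1.0, 0.0])  # another point on the hyperboloid
    print(exp_map(x, log_map(x, y)))              # ≈ tensor([1.4142, 1.0000, 0.0000])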
Example #9
Source File: hyperbolic_radius.py From pvae with MIT License
def grad_log_prob(self, value):
    res = -value / self.scale.pow(2) \
          + (self.dim - 1) * self.c.sqrt() * torch.cosh(self.c.sqrt() * value) / torch.sinh(self.c.sqrt() * value)
    return res
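Since cosh/sinh = coth, this evaluates a closed-form score of the hyperbolic radius density, d/dr log p(r) = -r/σ² + (d - 1)·√c·coth(√c·r): the first term comes from the Gaussian factor exp(-r²/2σ²) and the coth term from the sinh^(d-1)(√c·r) volume element of hyperbolic space.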
Example #10
Source File: unary.py From torch2trt with MIT License
def aten_cosh(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.COSH)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        raise NotImplementedError
    return [torch.cosh(inp)]
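When a TensorRT network is being built, this converter lowers torch.cosh to TensorRT's native unary COSH layer; outside that context it falls back to calling torch.cosh eagerly, and the TVM path is left unimplemented.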
Example #11
Source File: ops.py From tntorch with GNU Lesser General Public License v3.0
def cosh(t):
    """
    Element-wise hyperbolic cosine computed using cross-approximation; see PyTorch's `cosh()`.

    :param t: input :class:`Tensor`
    :return: a :class:`Tensor`
    """
    return tn.cross(lambda x: torch.cosh(x), tensors=t, verbose=False)
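A hypothetical usage sketch (assuming tntorch is imported as tn and that tn.rand creates a random tensor-train tensor; cross-approximation samples the function rather than evaluating every entry, so the result is an approximation):

    import torch
    import tntorch as tn

    t = tn.rand([8, 8, 8, 8], ranks_tt=3)  # random 4-way tensor in TT format
    c = tn.cosh(t)                         # element-wise cosh via cross-approximation
    print(c)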
Example #12
Source File: flows.py From TextFlow with MIT License
def standard(x, nn_outp):
    a, b, c, d, f = NLSq.get_pseudo_params(nn_outp)

    # double needed for stability. No effect on overall speed
    a = a.double()
    b = b.double()
    c = c.double()
    d = d.double()
    f = f.double()
    x = x.double()

    aa = -b * d.pow(2)
    bb = (x - a) * d.pow(2) - 2 * b * d * f
    cc = (x - a) * 2 * d * f - b * (1 + f.pow(2))
    dd = (x - a) * (1 + f.pow(2)) - c

    p = (3 * aa * cc - bb.pow(2)) / (3 * aa.pow(2))
    q = (2 * bb.pow(3) - 9 * aa * bb * cc + 27 * aa.pow(2) * dd) / (27 * aa.pow(3))

    t = -2 * torch.abs(q) / q * torch.sqrt(torch.abs(p) / 3)
    inter_term1 = -3 * torch.abs(q) / (2 * p) * torch.sqrt(3 / torch.abs(p))
    inter_term2 = 1 / 3 * arccosh(torch.abs(inter_term1 - 1) + 1)
    t = t * torch.cosh(inter_term2)

    tpos = -2 * torch.sqrt(torch.abs(p) / 3)
    inter_term1 = 3 * q / (2 * p) * torch.sqrt(3 / torch.abs(p))
    inter_term2 = 1 / 3 * arcsinh(inter_term1)
    tpos = tpos * torch.sinh(inter_term2)

    t[p > 0] = tpos[p > 0]
    y = t - bb / (3 * aa)

    arg = d * y + f
    denom = 1 + arg.pow(2)
    x_new = a + b * y + c / denom  # forward reconstruction of x (computed but not returned)

    logdet = -torch.log(b - 2 * c * d * arg / denom.pow(2)).sum(-1)

    y = y.float()
    logdet = logdet.float()
    return y, logdet
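This is the same hyperbolic-Cardano cubic inversion as Example #6 (see the note there), here applied in TextFlow's NLSq flow.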
Example #13
Source File: hat.py From hat with MIT License
def train_epoch(self, t, x, y, thres_cosh=50, thres_emb=6):
    self.model.train()

    r = np.arange(x.size(0))
    np.random.shuffle(r)
    r = torch.LongTensor(r).cuda()

    # Loop batches
    for i in range(0, len(r), self.sbatch):
        if i + self.sbatch <= len(r):
            b = r[i:i + self.sbatch]
        else:
            b = r[i:]
        images = torch.autograd.Variable(x[b], volatile=False)
        targets = torch.autograd.Variable(y[b], volatile=False)
        task = torch.autograd.Variable(torch.LongTensor([t]).cuda(), volatile=False)
        s = (self.smax - 1 / self.smax) * i / len(r) + 1 / self.smax  # anneal the gate scale

        # Forward
        outputs, masks = self.model.forward(task, images, s=s)
        output = outputs[t]
        loss, _ = self.criterion(output, targets, masks)

        # Backward
        self.optimizer.zero_grad()
        loss.backward()

        # Restrict layer gradients in backprop
        if t > 0:
            for n, p in self.model.named_parameters():
                if n in self.mask_back:
                    p.grad.data *= self.mask_back[n]

        # Compensate embedding gradients
        for n, p in self.model.named_parameters():
            if n.startswith('e'):
                num = torch.cosh(torch.clamp(s * p.data, -thres_cosh, thres_cosh)) + 1
                den = torch.cosh(p.data) + 1
                p.grad.data *= self.smax / s * num / den

        # Apply step (clip_grad_norm is the pre-1.0 name; newer PyTorch uses clip_grad_norm_)
        torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clipgrad)
        self.optimizer.step()

        # Constrain embeddings
        for n, p in self.model.named_parameters():
            if n.startswith('e'):
                p.data = torch.clamp(p.data, -thres_emb, thres_emb)

    return
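The cosh terms implement HAT's embedding-gradient compensation: the attention gate is σ(s·e), and since σ'(x) = 1/(2·cosh(x) + 2), multiplying the gradient by (smax/s)·(cosh(s·e) + 1)/(cosh(e) + 1) removes the vanishing-gradient effect of the annealed scale s; clamping s·e at thres_cosh avoids overflow inside cosh. Example #14 below is the same function from the project's hat_test.py.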
Example #14
Source File: hat_test.py From hat with MIT License
def train_epoch(self, t, x, y, thres_cosh=50, thres_emb=6):
    self.model.train()

    r = np.arange(x.size(0))
    np.random.shuffle(r)
    r = torch.LongTensor(r).cuda()

    # Loop batches
    for i in range(0, len(r), self.sbatch):
        if i + self.sbatch <= len(r):
            b = r[i:i + self.sbatch]
        else:
            b = r[i:]
        images = torch.autograd.Variable(x[b], volatile=False)
        targets = torch.autograd.Variable(y[b], volatile=False)
        task = torch.autograd.Variable(torch.LongTensor([t]).cuda(), volatile=False)
        s = (self.smax - 1 / self.smax) * i / len(r) + 1 / self.smax  # anneal the gate scale

        # Forward
        outputs, masks = self.model.forward(task, images, s=s)
        output = outputs[t]
        loss, _ = self.criterion(output, targets, masks)

        # Backward
        self.optimizer.zero_grad()
        loss.backward()

        # Restrict layer gradients in backprop
        if t > 0:
            for n, p in self.model.named_parameters():
                if n in self.mask_back:
                    p.grad.data *= self.mask_back[n]

        # Compensate embedding gradients (see the note under Example #13)
        for n, p in self.model.named_parameters():
            if n.startswith('e'):
                num = torch.cosh(torch.clamp(s * p.data, -thres_cosh, thres_cosh)) + 1
                den = torch.cosh(p.data) + 1
                p.grad.data *= self.smax / s * num / den

        # Apply step (clip_grad_norm is the pre-1.0 name; newer PyTorch uses clip_grad_norm_)
        torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clipgrad)
        self.optimizer.step()

        # Constrain embeddings
        for n, p in self.model.named_parameters():
            if n.startswith('e'):
                p.data = torch.clamp(p.data, -thres_emb, thres_emb)

    return