Python torch.log10() Examples
The following are 30 code examples of torch.log10().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module torch, or try the search function.
Example #1
Source File: transforms.py From audio with BSD 2-Clause "Simplified" License | 7 votes |
def forward(self, waveform: Tensor) -> Tensor:
    r"""Apply the configured gain to an audio tensor.

    Args:
        waveform (Tensor): Tensor of audio of dimension (..., time).

    Returns:
        Tensor: Gain-adjusted audio of dimension (..., time), clamped to [-1, 1].
    """
    kind = self.gain_type
    if kind == "amplitude":
        # Plain multiplicative scaling.
        waveform = waveform * self.gain
    if kind == "db":
        # Gain already expressed in decibels.
        waveform = F.gain(waveform, self.gain)
    if kind == "power":
        # Convert the power ratio to decibels before applying.
        waveform = F.gain(waveform, 10 * math.log10(self.gain))
    return torch.clamp(waveform, -1, 1)
Example #2
Source File: metric_utils.py From asteroid with MIT License | 6 votes |
def snr(pred_signal: torch.Tensor, true_signal: torch.Tensor) -> torch.FloatTensor:
    """Signal-to-Noise Ratio (in dB) between a prediction and a reference.

    Args:
        pred_signal (torch.Tensor): predicted signal spectrogram.
        true_signal (torch.Tensor): original signal spectrogram.
    """
    residual = true_signal - pred_signal
    # Ratio of total reference energy to total residual energy, in dB.
    ratio = (true_signal ** 2).sum() / (residual ** 2).sum()
    return 10 * torch.log10(ratio)
Example #3
Source File: log_spectrogram.py From espnet with Apache License 2.0 | 6 votes |
def forward(
    self, input: torch.Tensor, input_lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """STFT the input waveform and return its log10-amplitude spectrogram."""
    # 1. STFT: time -> time-freq
    input_stft, feats_lens = self.stft(input, input_lengths)
    assert input_stft.dim() >= 4, input_stft.shape
    # The trailing axis of size 2 holds the real/imag parts of the complex STFT.
    assert input_stft.shape[-1] == 2, input_stft.shape
    # NOTE(kamo): different log-spec definitions are used:
    #   TTS: log_10(abs(stft)),  ASR: log_e(power(stft))
    # Power spectrum: (..., F, 2) -> (..., F)
    power = input_stft[..., 0] ** 2 + input_stft[..., 1] ** 2
    # log10(sqrt(power)) == 0.5 * log10(power); clamping avoids log(0).
    log_amp = 0.5 * torch.log10(torch.clamp(power, min=1.0e-10))
    return log_amp, feats_lens
Example #4
Source File: exponential.py From heat with MIT License | 6 votes |
def log10(x, out=None):
    """
    Element-wise base-10 logarithm.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the logarithm.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a
        broadcastable shape. If not provided or set to None, a fresh tensor
        is allocated.

    Returns
    -------
    logarithms : ht.DNDarray
        A tensor of the same shape as x, containing the positive logarithms of
        each element in this tensor. Negative input elements are returned as
        nan. If out was provided, logarithms is a reference to it.

    Examples
    --------
    >>> ht.log10(ht.arange(5))
    tensor([ -inf, 0.0000, 0.3010, 0.4771, 0.6021])
    """
    # Delegate to the generic local elementwise-operation dispatcher.
    return operations.__local_op(torch.log10, x, out)
Example #5
Source File: epsilon_greedy.py From rlpyt with MIT License | 6 votes |
def make_vec_eps(self, global_B, env_ranks):
    """Construct log-spaced epsilon values and select local assignments
    from the global number of sampler environment instances (for SyncRl
    and AsyncRl)."""
    needs_vector = (self.eps_final_min is not None
                    and self.eps_final_min != self._eps_final_scalar)
    if needs_vector:  # Vector epsilon.
        if self.alternating:
            # In the feed-forward case the sampler sets agent.alternating;
            # environment pairs share a single epsilon.
            assert global_B % 2 == 0
            global_B = global_B // 2
            env_ranks = list(set(i // 2 for i in env_ranks))
        self.eps_init = self._eps_init_scalar * torch.ones(len(env_ranks))
        # Log-spaced schedule between eps_final_min and the scalar final value.
        lo = torch.log10(torch.tensor(self.eps_final_min))
        hi = torch.log10(torch.tensor(self._eps_final_scalar))
        self.eps_final = torch.logspace(lo, hi, global_B)[env_ranks]
    self.eps_sample = self.eps_init
Example #6
Source File: vocoder.py From TTS-Cube with Apache License 2.0 | 6 votes |
def synthesize(self, mgc, batch_size, temperature=1.0):
    """Run WaveGlow inference on a mel spectrogram and return int16 audio.

    NOTE(review): ``batch_size`` is accepted but never read in this body.
    """
    mel = torch.autograd.Variable(torch.tensor(mgc).cuda().float()).transpose(0, 1)
    mel = torch.unsqueeze(mel, 0)
    # The vocoder expects dB-scaled (20 * log10) input.
    mel = torch.log10(mel) * 20
    with torch.no_grad():
        audio = self.waveglow.infer(mel, sigma=temperature)
    # Scale to 16-bit range and move to host memory.
    audio = (audio * 32768).squeeze().cpu().numpy()
    from scipy import signal
    # De-emphasis filter (inverse of a 0.97 pre-emphasis).
    audio = signal.lfilter([1.0], [1.0, -0.97], audio)
    return audio.astype('int16')
Example #7
Source File: modules.py From melgan-neurips with MIT License | 6 votes |
def forward(self, audio):
    """Compute a log10 mel spectrogram from raw audio of shape (B, 1, T)."""
    # Reflect-pad so that frames stay centered without torch.stft's padding.
    pad = (self.n_fft - self.hop_length) // 2
    audio = F.pad(audio, (pad, pad), "reflect").squeeze(1)
    fft = torch.stft(
        audio,
        n_fft=self.n_fft,
        hop_length=self.hop_length,
        win_length=self.win_length,
        window=self.window,
        center=False,
    )
    # Magnitude from the stacked real/imag output.
    real_part, imag_part = fft.unbind(-1)
    magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
    # Project linear-frequency magnitudes onto the mel basis.
    mel_output = torch.matmul(self.mel_basis, magnitude)
    # Clamp to avoid log(0).
    return torch.log10(torch.clamp(mel_output, min=1e-5))
Example #8
Source File: dice_pytorch.py From DiCE with MIT License | 6 votes |
def compute_yloss(self):
    """Computes the first part (y-loss) of the loss function."""
    # Accumulate the per-counterfactual loss and return the mean over all CFs.
    yloss = 0.0
    for i in range(self.total_CFs):
        if self.yloss_type == "l2_loss":
            # Squared difference between the model output and the target class.
            temp_loss = torch.pow((self.get_model_output(self.cfs[i]) - self.target_cf_class), 2)[0]
        elif self.yloss_type == "log_loss":
            # Map the model output into logit space via log10(p / (1 - p));
            # the 1e-6 offset keeps the ratio away from 0 and 1.
            temp_logits = torch.log10((abs(self.get_model_output(self.cfs[i]) - 0.000001))/(1 - abs(self.get_model_output(self.cfs[i]) - 0.000001)))
            criterion = torch.nn.BCEWithLogitsLoss()
            temp_loss = criterion(temp_logits, torch.tensor([self.target_cf_class]))
        elif self.yloss_type == "hinge_loss":
            # Same logit transform as above, fed into a hinge-style margin
            # (ReLU of 0.5 minus the signed logit).
            temp_logits = torch.log10((abs(self.get_model_output(self.cfs[i]) - 0.000001))/(1 - abs(self.get_model_output(self.cfs[i]) - 0.000001)))
            criterion = torch.nn.ReLU()
            temp_loss = criterion(0.5 - (temp_logits*self.target_cf_class))[0]
        yloss += temp_loss
    return yloss/self.total_CFs
Example #9
Source File: trainer.py From conv-tasnet with MIT License | 6 votes |
def sisnr(self, x, s, eps=1e-8):
    """
    Scale-invariant SNR in dB.

    Arguments:
        x: separated signal, N x S tensor
        s: reference signal, N x S tensor
    Return:
        sisnr: N tensor
    """
    def l2norm(mat, keepdim=False):
        return th.norm(mat, dim=-1, keepdim=keepdim)

    if x.shape != s.shape:
        raise RuntimeError(
            "Dimention mismatch when calculate si-snr, {} vs {}".format(
                x.shape, s.shape))
    # Remove the per-signal DC offset.
    x_zm = x - th.mean(x, dim=-1, keepdim=True)
    s_zm = s - th.mean(s, dim=-1, keepdim=True)
    # Projection of the estimate onto the reference direction.
    proj = th.sum(x_zm * s_zm, dim=-1, keepdim=True) * s_zm / (
        l2norm(s_zm, keepdim=True) ** 2 + eps)
    return 20 * th.log10(eps + l2norm(proj) / (l2norm(x_zm - proj) + eps))
Example #10
Source File: scatter_vs_gather.py From sbmc with Apache License 2.0 | 6 votes |
def forward(self, data, coords):
    """Predict per-sample kernels, apply them, and return the filtered image
    together with a [0, 1]-normalized kernel visualization.

    Args:
        data: radiance samples, batch size must be 1.
        coords: screen-coordinate features concatenated to the network input.

    Returns:
        (output, kviz): filtered image and kernels reshaped to
        (bs, ksize, ksize, h, w) for visualization.
    """
    # Log-compress radiance and append coordinates as network input.
    in_ = th.cat([th.log10(1.0 + data / 255.0), coords], 2).contiguous()
    assert in_.shape[0] == 1, "current implementation assumes batch_size = 1"
    kernels = self.net(in_.squeeze(0))
    cdata = crop_like(data.squeeze(0), kernels).contiguous()
    output, _ = self.kernel_update(cdata, kernels)
    # Average over samples
    output = th.unsqueeze(output, 0).mean(1)
    # Crop the border that is invalid due to the kernel support.
    k = (self.ksize - 1) // 2
    output = output[..., k:-k, k:-k]
    # Normalize the kernels to [0, 1] for visualization.
    kviz = kernels.detach().clone()
    min_ = kviz.min()
    max_ = kviz.max()
    # BUGFIX: the epsilon must be *added* to the denominator; the original
    # subtracted it, which yields a zero/negative denominator when
    # max_ is close to min_.
    kviz = (kviz - min_) / (max_ - min_ + 1e-8)
    bs, k2, h, w = kviz.shape
    return output, kviz.view(bs, self.ksize, self.ksize, h, w)
Example #11
Source File: image_quality.py From Waifu2x with GNU General Public License v3.0 | 6 votes |
def calc_psnr(sr, hr, scale=0, benchmark=False):
    """Compute PSNR between a super-resolved and a ground-truth image.

    Adapted from EDSR: https://github.com/thstkdgus35/EDSR-PyTorch

    Args:
        sr: super-resolved image tensor (N, C, H, W).
        hr: ground-truth image tensor of the same shape.
        scale: upscaling factor; determines the border shave width.
        benchmark: if True, evaluate on the Y channel (BT.601 weights) and
            shave exactly ``scale`` pixels from each border.

    Returns:
        float: PSNR in dB; ``inf`` when the images are identical.
    """
    diff = (sr - hr).data
    if benchmark:
        shave = scale
        if diff.size(1) > 1:
            # RGB -> Y conversion weights (scaled by 256).
            convert = diff.new(1, 3, 1, 1)
            convert[0, 0, 0, 0] = 65.738
            convert[0, 1, 0, 0] = 129.057
            convert[0, 2, 0, 0] = 25.064
            diff.mul_(convert).div_(256)
            diff = diff.sum(dim=1, keepdim=True)
    else:
        shave = scale + 6
    # BUGFIX: with shave == 0 the original slice [0:-0] produced an empty
    # tensor (NaN mean); only shave when there is a border to remove.
    if shave > 0:
        valid = diff[:, :, shave:-shave, shave:-shave]
    else:
        valid = diff
    mse = valid.pow(2).mean()
    # BUGFIX: identical images raised a math domain error in log10(0);
    # report infinite PSNR instead.
    if mse == 0:
        return float('inf')
    return -10 * math.log10(mse)
# +++++++++++++++++++++++++++++++++++++
# PSNR
# -------------------------------------
Example #12
Source File: loss_e2e.py From onssen with GNU General Public License v3.0 | 6 votes |
def SI_SNR(_s, s, zero_mean=True): ''' Calculate the SNR indicator between the two audios. The larger the value, the better the separation. input: _s: Generated audio s: Ground Truth audio output: SNR value ''' if zero_mean: _s = _s - torch.mean(_s) s = s - torch.mean(s) s_target = sum(torch.mul(_s, s))*s/torch.pow(torch.norm(s, p=2), 2) e_noise = _s - s_target return 20*torch.log10(torch.norm(s_target, p=2)/torch.norm(e_noise, p=2))
Example #13
Source File: loss_e2e.py From onssen with GNU General Public License v3.0 | 6 votes |
def sisnr(x, s, eps=1e-8):
    """
    Scale-invariant SNR training loss.

    input:
          x: separated signal, N x S tensor
          s: reference signal, N x S tensor
    Return:
          sisnr: N tensor
    """
    def l2norm(mat, keepdim=False):
        return torch.norm(mat, dim=-1, keepdim=keepdim)

    if x.shape != s.shape:
        raise RuntimeError(
            "Dimention mismatch when calculate si-snr, {} vs {}".format(
                x.shape, s.shape))
    # Remove the per-signal DC offset.
    x_zm = x - torch.mean(x, dim=-1, keepdim=True)
    s_zm = s - torch.mean(s, dim=-1, keepdim=True)
    # Projection of the estimate onto the reference direction.
    proj = torch.sum(x_zm * s_zm, dim=-1, keepdim=True) * s_zm / (
        l2norm(s_zm, keepdim=True) ** 2 + eps)
    return 20 * torch.log10(eps + l2norm(proj) / (l2norm(x_zm - proj) + eps))
Example #14
Source File: models.py From SteganoGAN with MIT License | 6 votes |
def _validate(self, validate, metrics):
    """Validation process: run encoder/decoder/critic over the validation
    set and append per-batch scores into ``metrics``."""
    for cover, _ in tqdm(validate, disable=not self.verbose):
        gc.collect()
        cover = cover.to(self.device)
        # quantize=True simulates the round-trip of saving the stego image.
        generated, payload, decoded = self._encode_decode(cover, quantize=True)
        encoder_mse, decoder_loss, decoder_acc = self._coding_scores(
            cover, generated, payload, decoded)
        generated_score = self._critic(generated)
        cover_score = self._critic(cover)
        metrics['val.encoder_mse'].append(encoder_mse.item())
        metrics['val.decoder_loss'].append(decoder_loss.item())
        metrics['val.decoder_acc'].append(decoder_acc.item())
        metrics['val.cover_score'].append(cover_score.item())
        metrics['val.generated_score'].append(generated_score.item())
        metrics['val.ssim'].append(ssim(cover, generated).item())
        # PSNR with peak power 4 — presumably images in [-1, 1]; verify
        # against the data pipeline.
        metrics['val.psnr'].append(10 * torch.log10(4 / encoder_mse).item())
        # Bits per pixel: decoder accuracy of 0.5 (chance) maps to 0 bpp.
        metrics['val.bpp'].append(self.data_depth * (2 * decoder_acc.item() - 1))
Example #15
Source File: transforms.py From audio with BSD 2-Clause "Simplified" License | 6 votes |
def _fade_in(self, waveform_length: int) -> Tensor: fade = torch.linspace(0, 1, self.fade_in_len) ones = torch.ones(waveform_length - self.fade_in_len) if self.fade_shape == "linear": fade = fade if self.fade_shape == "exponential": fade = torch.pow(2, (fade - 1)) * fade if self.fade_shape == "logarithmic": fade = torch.log10(.1 + fade) + 1 if self.fade_shape == "quarter_sine": fade = torch.sin(fade * math.pi / 2) if self.fade_shape == "half_sine": fade = torch.sin(fade * math.pi - math.pi / 2) / 2 + 0.5 return torch.cat((fade, ones)).clamp_(0, 1)
Example #16
Source File: transforms.py From audio with BSD 2-Clause "Simplified" License | 6 votes |
def _fade_out(self, waveform_length: int) -> Tensor: fade = torch.linspace(0, 1, self.fade_out_len) ones = torch.ones(waveform_length - self.fade_out_len) if self.fade_shape == "linear": fade = - fade + 1 if self.fade_shape == "exponential": fade = torch.pow(2, - fade) * (1 - fade) if self.fade_shape == "logarithmic": fade = torch.log10(1.1 - fade) + 1 if self.fade_shape == "quarter_sine": fade = torch.sin(fade * math.pi / 2 + math.pi / 2) if self.fade_shape == "half_sine": fade = torch.sin(fade * math.pi + math.pi / 2) / 2 + 0.5 return torch.cat((ones, fade)).clamp_(0, 1)
Example #17
Source File: multiscaleloss.py From STFAN with MIT License | 5 votes |
def PSNR(output, target, max_val = 1.0):
    """Peak signal-to-noise ratio in dB between ``output`` and ``target``.

    ``output`` is clamped to [0, 1] first; identical images return 100 dB.
    """
    clamped = output.clamp(0.0, 1.0)
    mse = torch.pow(target - clamped, 2).mean()
    if mse == 0:
        # Cap at 100 dB instead of log10(inf).
        return torch.Tensor([100.0])
    return 10 * torch.log10(max_val ** 2 / mse)
Example #18
Source File: utils.py From CAE-ADMM with MIT License | 5 votes |
def compute_psnr(x, y):
    """Mean PSNR (peak value 1.0) over a batch of images."""
    flat_x = x.view(x.shape[0], -1)
    flat_y = y.view(y.shape[0], -1)
    # Per-image RMSE, then PSNR averaged over the batch.
    rmse = torch.sqrt(torch.mean((flat_y - flat_x) ** 2, dim=1))
    return torch.mean(20. * torch.log10(1. / rmse))
Example #19
Source File: multiscaleloss.py From DAVANet with MIT License | 5 votes |
def PSNR(output, target, max_val = 1.0):
    """Peak signal-to-noise ratio (dB); identical inputs yield 100 dB.

    The prediction is clamped to [0, 1] before comparison.
    """
    mse = torch.pow(target - output.clamp(0.0, 1.0), 2).mean()
    # Guard against log10(inf) for a perfect reconstruction.
    return torch.Tensor([100.0]) if mse == 0 else 10 * torch.log10(max_val ** 2 / mse)
Example #20
Source File: metrics.py From srntt-pytorch with Apache License 2.0 | 5 votes |
def forward(self, x, y):
    """PSNR between two image batches; optionally computed on luminance only."""
    if self.mode == 'Y' and x.shape[1] == 3 and y.shape[1] == 3:
        # Compare only the Y (grayscale) channel for 3-channel inputs.
        x = kornia.color.rgb_to_grayscale(x)
        y = kornia.color.rgb_to_grayscale(y)
    mse = F.mse_loss(x, y, reduction='mean')
    # PSNR = 10 * log10(peak^2 / MSE).
    peak_power = self.max_val ** 2
    return 10 * torch.log10(peak_power / mse)
Example #21
Source File: pairwise_loss.py From sigmanet with MIT License | 5 votes |
def psnr(gt, pred, data_range=None, batch=True, reduce=True):
    """Peak signal-to-noise ratio in dB.

    :param gt: ground-truth image (torch.Tensor)
    :param pred: predicted image (torch.Tensor)
    :param data_range: peak value; if None, estimated as the per-image max of gt
    :param batch: treat dim 0 as a batch dimension
    :param reduce: return the batch mean instead of per-image values
    :return: (mean) psnr
    """
    n = gt.shape[0] if batch else 1
    # Flatten each image into a row.
    pred = pred.contiguous().view(n, -1)
    gt = gt.contiguous().view(n, -1)
    if data_range is None:
        # By default use the max, matching the fastMRI convention.
        data_range = gt.max(dim=1)[0]
    mse_err = (abs(gt - pred) ** 2).mean(1)
    psnr_val = 10 * torch.log10(data_range ** 2 / mse_err)
    return psnr_val.mean() if reduce else psnr_val
Example #22
Source File: sdr.py From asteroid with MIT License | 5 votes |
def forward(self, est_targets, targets):
    """Negative (SI-)SDR loss averaged over sources.

    Expects ``est_targets`` and ``targets`` of shape [batch, n_src, time].
    """
    assert targets.size() == est_targets.size()
    # Step 1. Optional zero-mean normalization along time.
    if self.zero_mean:
        targets = targets - torch.mean(targets, dim=2, keepdim=True)
        est_targets = est_targets - torch.mean(est_targets, dim=2, keepdim=True)
    # Step 2. Scale the target only for the scale-invariant variants.
    if self.sdr_type in ["sisdr", "sdsdr"]:
        dot = torch.sum(est_targets * targets, dim=2, keepdim=True)    # [batch, n_src, 1]
        energy = torch.sum(targets ** 2, dim=2, keepdim=True) + EPS    # [batch, n_src, 1]
        scaled_targets = dot * targets / energy                        # [batch, n_src, time]
    else:
        scaled_targets = targets                                       # [batch, n_src, time]
    # Noise term: plain difference for sdsdr/snr, projection residual otherwise.
    if self.sdr_type in ["sdsdr", "snr"]:
        e_noise = est_targets - targets
    else:
        e_noise = est_targets - scaled_targets
    ratio = torch.sum(scaled_targets ** 2, dim=2) / (
        torch.sum(e_noise ** 2, dim=2) + EPS)                          # [batch, n_src]
    if self.take_log:
        ratio = 10 * torch.log10(ratio + EPS)
    return - torch.mean(ratio, dim=-1)
# aliases
Example #23
Source File: losses.py From geoseg with MIT License | 5 votes |
def forward(self, output, target):
    """PSNR-style score (peak value 1.0) based on the wrapped criterion."""
    # self.criterion is expected to return an MSE-like scalar.
    mse = self.criterion(output, target)
    return 10 * torch.log10(1.0 / mse)
Example #24
Source File: sdr.py From asteroid with MIT License | 5 votes |
def forward(self, est_target, target):
    """Negative (SI-)SDR loss for single-source pairs.

    Expects ``est_target`` and ``target`` of shape [batch, time].
    """
    assert target.size() == est_target.size()
    # Step 1. Optional zero-mean normalization along time.
    if self.zero_mean:
        target = target - torch.mean(target, dim=1, keepdim=True)
        est_target = est_target - torch.mean(est_target, dim=1, keepdim=True)
    # Step 2. Scale the target only for the scale-invariant variants.
    if self.sdr_type in ["sisdr", "sdsdr"]:
        dot = torch.sum(est_target * target, dim=1, keepdim=True)   # [batch, 1]
        energy = torch.sum(target ** 2, dim=1, keepdim=True) + EPS  # [batch, 1]
        scaled_target = dot * target / energy                       # [batch, time]
    else:
        scaled_target = target                                      # [batch, time]
    # Noise term: plain difference for sdsdr/snr, projection residual otherwise.
    if self.sdr_type in ["sdsdr", "snr"]:
        e_noise = est_target - target
    else:
        e_noise = est_target - scaled_target
    losses = torch.sum(scaled_target ** 2, dim=1) / (
        torch.sum(e_noise ** 2, dim=1) + EPS)                       # [batch]
    if self.take_log:
        losses = 10 * torch.log10(losses + EPS)
    if self.reduction == 'mean':
        losses = losses.mean()
    return - losses
Example #25
Source File: util.py From colorization-pytorch with MIT License | 5 votes |
def calculate_psnr_np(img1, img2):
    """PSNR between two images in the 0-255 range, computed with numpy."""
    import numpy as np
    # Multiplying by 1. promotes integer inputs to float before squaring.
    mse = np.mean((1. * img1 - img2) ** 2)
    return 20 * np.log10(255. / np.sqrt(mse))
Example #26
Source File: util.py From colorization-pytorch with MIT License | 5 votes |
def calculate_psnr_torch(img1, img2):
    """PSNR between two images in the [0, 1] range."""
    # Multiplying by 1. promotes integer inputs to float before squaring.
    mse = torch.mean((1. * img1 - img2) ** 2)
    return 20 * torch.log10(1. / torch.sqrt(mse))
Example #27
Source File: sdr.py From asteroid with MIT License | 5 votes |
def forward(self, est_targets, targets):
    """Pair-wise negative (SI-)SDR matrix between all estimates and targets.

    Expects inputs of shape [batch, n_src, time]; returns [batch, n_src, n_src].
    """
    assert targets.size() == est_targets.size()
    # Step 1. Optional zero-mean normalization along time.
    if self.zero_mean:
        targets = targets - torch.mean(targets, dim=2, keepdim=True)
        est_targets = est_targets - torch.mean(est_targets, dim=2, keepdim=True)
    # Step 2. Reshape so broadcasting pairs every estimate with every target.
    s_target = torch.unsqueeze(targets, dim=1)        # [batch, 1, n_src, time]
    s_estimate = torch.unsqueeze(est_targets, dim=2)  # [batch, n_src, 1, time]
    if self.sdr_type in ["sisdr", "sdsdr"]:
        dot = torch.sum(s_estimate * s_target, dim=3, keepdim=True)   # [batch, n_src, n_src, 1]
        energy = torch.sum(s_target ** 2, dim=3, keepdim=True) + EPS  # [batch, 1, n_src, 1]
        pair_wise_proj = dot * s_target / energy                      # [batch, n_src, n_src, time]
    else:
        pair_wise_proj = s_target.repeat(1, s_target.shape[2], 1, 1)  # [batch, n_src, n_src, time]
    if self.sdr_type in ["sdsdr", "snr"]:
        e_noise = s_estimate - s_target
    else:
        e_noise = s_estimate - pair_wise_proj
    pair_wise_sdr = torch.sum(pair_wise_proj ** 2, dim=3) / (
        torch.sum(e_noise ** 2, dim=3) + EPS)                         # [batch, n_src, n_src]
    if self.take_log:
        pair_wise_sdr = 10 * torch.log10(pair_wise_sdr + EPS)
    return - pair_wise_sdr
Example #28
Source File: ops.py From tntorch with GNU Lesser General Public License v3.0 | 5 votes |
def log10(t):
    """
    Element-wise base-10 logarithm computed using cross-approximation; see PyTorch's `log10()`.

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """
    # torch.log10 is already a unary callable — no lambda wrapper needed.
    return tn.cross(torch.log10, tensors=t, verbose=False)
Example #29
Source File: transforms.py From asteroid with MIT License | 5 votes |
def ebased_vad(mag_spec, th_db=40):
    """Energy-based voice activity detection on a magnitude spectrogram.

    Args:
        mag_spec (torch.Tensor): spectrogram of shape (batch, *, freq, time).
            The mask is computed independently over all leading dimensions
            up to the last two; the ordering of the last two does not matter.
        th_db (int): threshold in dB below the per-utterance maximum under
            which a TF-bin is considered silent.

    Returns:
        torch.BoolTensor: the VAD mask.

    Examples:
        >>> import torch
        >>> mag_spec = torch.abs(torch.randn(10, 2, 65, 16))
        >>> batch_src_mask = ebased_vad(mag_spec)
    """
    log_mag = 20 * torch.log10(mag_spec)
    # Per-utterance maximum over the flattened (freq, time) plane.
    flat_shape = list(mag_spec.shape[:-2]) + [1, -1]
    peak = torch.max(log_mag.view(flat_shape), -1, keepdim=True)[0]
    return log_mag > (peak - th_db)
Example #30
Source File: Conceptor.py From EchoTorch with GNU General Public License v3.0 | 5 votes |
def plot_delta_measure(self, start, end, steps=50): """ Plot delta measure :param start: :param end: :return: """ # Gamma values gamma_values = torch.logspace(start=start, end=end, steps=steps) # Log10 of gamma values gamma_log_values = torch.log10(gamma_values) # Delta measures C_norms = torch.zeros(steps) delta_scores = torch.zeros(steps) # For each gamma measure for i, gamma in enumerate(gamma_values): delta_scores[i], C_norms[i] = self.delta_measure(float(gamma), epsilon=0.1) # end for # Plot plt.plot(gamma_log_values.numpy(), delta_scores.numpy()) plt.plot(gamma_log_values.numpy(), C_norms.numpy()) plt.show() # end plot_delta_measure # Compute Delta measure