Python torch.var() Examples

The following are 30 code examples of torch.var(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the torch module.
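Before the project examples, here is a minimal standalone sketch (not taken from any project on this page) of the call patterns that recur below: reducing over all elements, reducing over a tuple of dimensions with keepdim, and switching off Bessel's correction with unbiased=False.

import torch

x = torch.randn(4, 3, 8, 8)                              # (N, C, H, W)

total_var = torch.var(x)                                  # unbiased variance over every element
channel_var = torch.var(x, dim=(0, 2, 3), keepdim=True)   # per-channel variance, shape (1, 3, 1, 1)
pop_var = torch.var(x, dim=(0, 2, 3), unbiased=False)     # population (biased) variance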
Example #1
Source Project: torch-toolbox   Author: PistonY   File: functional.py    License: BSD 3-Clause "New" or "Revised" License
def evo_norm(x, prefix, running_var, v, weight, bias,
             training, momentum, eps=0.1, groups=32):
    if prefix == 'b0':
        if training:
            var = torch.var(x, dim=(0, 2, 3), keepdim=True)
            running_var.mul_(momentum)
            running_var.add_((1 - momentum) * var)
        else:
            var = running_var
        if v is not None:
            den = torch.max((var + eps).sqrt(), v * x + instance_std(x, eps))
            x = x / den * weight + bias
        else:
            x = x * weight + bias
    else:
        if v is not None:
            x = x * torch.sigmoid(v * x) / group_std(x,
                                                     groups, eps) * weight + bias
        else:
            x = x * weight + bias

    return x 
Example #2
Source Project: JEM   Author: wgrathwohl   File: norms.py    License: Apache License 2.0
def forward(self, x, y):
        means = torch.mean(x, dim=(2, 3))
        m = torch.mean(means, dim=-1, keepdim=True)
        v = torch.var(means, dim=-1, keepdim=True)
        means = (means - m) / (torch.sqrt(v + 1e-5))
        h = self.instance_norm(x)

        if self.bias:
            gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
        else:
            gamma, alpha = self.embed(y).chunk(2, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h
        return out 
Example #3
Source Project: nsf   Author: bayesiains   File: normalization.py    License: MIT License
def forward(self, inputs, context=None):
        if inputs.dim() != 2:
            raise ValueError('Expected 2-dim inputs, got inputs of shape: {}'.format(inputs.shape))

        if self.training:
            mean, var = inputs.mean(0), inputs.var(0)
            self.running_mean.mul_(1 - self.momentum).add_(mean * self.momentum)
            self.running_var.mul_(1 - self.momentum).add_(var * self.momentum)
        else:
            mean, var = self.running_mean, self.running_var

        outputs = self.weight * ((inputs - mean) / torch.sqrt((var + self.eps))) + self.bias

        logabsdet_ = torch.log(self.weight) - 0.5 * torch.log(var + self.eps)
        logabsdet = torch.sum(logabsdet_) * torch.ones(inputs.shape[0])

        return outputs, logabsdet 
Example #4
Source Project: sentence-transformers   Author: UKPLab   File: WKPooling.py    License: Apache License 2.0
def unify_sentence(self, sentence_feature, one_sentence_embedding):
        """
            Unify Sentence By Token Importance
        """
        sent_len = one_sentence_embedding.size()[0]

        var_token = torch.zeros(sent_len, device=one_sentence_embedding.device)
        for token_index in range(sent_len):
            token_feature = sentence_feature[:, token_index, :]
            sim_map = self.cosine_similarity_torch(token_feature)
            var_token[token_index] = torch.var(sim_map.diagonal(-1))

        var_token = var_token / torch.sum(var_token)
        sentence_embedding = torch.mv(one_sentence_embedding.t(), var_token)

        return sentence_embedding 
Example #5
Source Project: ncsn   Author: ermongroup   File: cond_refinenet_dilated.py    License: GNU General Public License v3.0
def forward(self, x, y):
        means = torch.mean(x, dim=(2, 3))
        m = torch.mean(means, dim=-1, keepdim=True)
        v = torch.var(means, dim=-1, keepdim=True)
        means = (means - m) / (torch.sqrt(v + 1e-5))
        h = self.instance_norm(x)

        if self.bias:
            gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
        else:
            gamma, alpha = self.embed(y).chunk(2, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h
        return out 
Example #6
Source Project: CrypTen   Author: facebookresearch   File: test_crypten.py    License: MIT License
def test_rand(self):
        """Tests uniform random variable generation on [0, 1)"""
        for size in [(10,), (10, 10), (10, 10, 10)]:
            randvec = crypten.rand(*size)
            self.assertTrue(randvec.size() == size, "Incorrect size")
            tensor = randvec.get_plain_text()
            self.assertTrue(
                (tensor >= 0).all() and (tensor < 1).all(), "Invalid values"
            )

        randvec = crypten.rand(int(1e6)).get_plain_text()
        mean = torch.mean(randvec)
        var = torch.var(randvec)
        self.assertTrue(torch.isclose(mean, torch.Tensor([0.5]), rtol=1e-3, atol=1e-3))
        self.assertTrue(
            torch.isclose(var, torch.Tensor([1.0 / 12]), rtol=1e-3, atol=1e-3)
        ) 
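The closing assertions rest on the moments of the uniform distribution on [0, 1): mean 1/2 and variance 1/12 (about 0.0833). A plaintext sanity check of the same bounds, using plain torch.rand instead of crypten.rand, might look like this:

import torch

samples = torch.rand(int(1e6))   # uniform on [0, 1)
print(torch.mean(samples))       # ~0.5
print(torch.var(samples))        # ~1/12 ~= 0.0833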
Example #7
Source Project: torchgan   Author: torchgan   File: virtualbatchnorm.py    License: MIT License
def _normalize(self, x, mu, var):
        r"""Normalizes the tensor ``x`` using the statistics ``mu`` and ``var``.

        Args:
            x (torch.Tensor): The Tensor to be normalized.
            mu (torch.Tensor): Mean using which the Tensor is to be normalized.
            var (torch.Tensor): Variance used in the normalization of ``x``.

        Returns:
            Normalized Tensor ``x``.
        """
        std = torch.sqrt(self.eps + var)
        x = (x - mu) / std
        sizes = list(x.size())
        for dim, _ in enumerate(x.size()):
            if dim != 1:
                sizes[dim] = 1
        scale = self.scale.view(*sizes)
        bias = self.bias.view(*sizes)
        return x * scale + bias 
Example #8
Source Project: biva-pytorch   Author: vlievin   File: linear.py    License: MIT License
def init_parameters(self, x, init_scale=0.05, eps=1e-8):
        if self.weightnorm:
            # initial values
            self.linear._parameters['weight_v'].data.normal_(mean=0, std=init_scale)
            self.linear._parameters['weight_g'].data.fill_(1.)
            self.linear._parameters['bias'].data.fill_(0.)
            init_scale = .01
            # data dependent init
            x = self.linear(x)
            m_init, v_init = torch.mean(x, 0), torch.var(x, 0)
            scale_init = init_scale / torch.sqrt(v_init + eps)
            self.linear._parameters['weight_g'].data = self.linear._parameters['weight_g'].data * scale_init.view(
                self.linear._parameters['weight_g'].data.size())
            self.linear._parameters['bias'].data = self.linear._parameters['bias'].data - m_init * scale_init
            self.initialized = True + self.initialized
            return scale_init[None, :] * (x - m_init[None, :]) 
Example #9
Source Project: biva-pytorch   Author: vlievin   File: linear.py    License: MIT License
def init_parameters(self, x, init_scale=0.05, eps=1e-8):
        if self.weightnorm:
            # initial values
            self.linear._parameters['weight_v'].data.normal_(mean=0, std=init_scale)
            self.linear._parameters['weight_g'].data.fill_(1.)
            self.linear._parameters['bias'].data.fill_(0.)
            init_scale = .01
            # data dependent init
            x = self.linear(x)
            m_init, v_init = torch.mean(x, 0), torch.var(x, 0)
            scale_init = init_scale / torch.sqrt(v_init + eps)
            self.linear._parameters['weight_g'].data = self.linear._parameters['weight_g'].data * scale_init.view(
                self.linear._parameters['weight_g'].data.size())
            self.linear._parameters['bias'].data = self.linear._parameters['bias'].data - m_init * scale_init
            self.initialized = True + self.initialized
            return scale_init[None, :] * (x - m_init[None, :]) 
Example #10
Source Project: pytorch-metric-learning   Author: KevinMusgrave   File: test_distance_weighted_miner.py    License: MIT License
def test_distance_weighted_miner(self):
        embedding_angles = torch.arange(0, 180)
        embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings
        labels = torch.randint(low=0, high=2, size=(180,))
        a,_,n = lmu.get_all_triplets_indices(labels)
        all_an_dist = torch.nn.functional.pairwise_distance(embeddings[a], embeddings[n], 2)
        min_an_dist = torch.min(all_an_dist)
        
        for non_zero_cutoff_int in range(5, 15):
            non_zero_cutoff = (float(non_zero_cutoff_int) / 10.) - 0.01
            miner = DistanceWeightedMiner(0, non_zero_cutoff)
            a, p, n = miner(embeddings, labels)
            anchors, positives, negatives = embeddings[a], embeddings[p], embeddings[n]
            an_dist = torch.nn.functional.pairwise_distance(anchors, negatives, 2)
            self.assertTrue(torch.max(an_dist)<=non_zero_cutoff)
            an_dist_var = torch.var(an_dist)
            an_dist_mean = torch.mean(an_dist)
            target_var = ((non_zero_cutoff - min_an_dist)**2) / 12 # variance formula for uniform distribution
            target_mean = (non_zero_cutoff - min_an_dist) / 2
            self.assertTrue(torch.abs(an_dist_var-target_var)/target_var < 0.1)
            self.assertTrue(torch.abs(an_dist_mean-target_mean)/target_mean < 0.1) 
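The target statistics above come from the uniform distribution: for U(a, b) the variance is (b - a)**2 / 12. A quick numeric check of that identity with illustrative endpoints (not values from the test):

import torch

a, b = 0.3, 0.9
samples = a + (b - a) * torch.rand(1_000_000)
print(torch.var(samples))      # ~(b - a)**2 / 12
print((b - a) ** 2 / 12)       # 0.03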
Example #11
Source Project: surreal   Author: SurrealAI   File: ppo.py    License: MIT License
def _value_loss(self, obs, returns):
        """
        Computes the loss with current data. also returns a dictionary of statistics
        which includes value loss and explained variance
        return: surreal.utils.pytorch.GPUVariable, dict
        Args:
            obs: batch of observations in form of (batch_size, obs_dim)
            returns: batch of N-step return estimate (batch_size,)
        Returns:
            loss: Variable for loss
            stats: dictionary of recorded statistics
        """
        values = self.model.forward_critic(obs, self.cells)
        if len(values.size()) == 3: values = values.squeeze(2)
        explained_var = 1 - torch.var(returns - values) / torch.var(returns)
        loss = (values - returns).pow(2).mean()

        stats = {
            '_val_loss': loss.item(),
            '_val_explained_var': explained_var.item()
        }
        return loss, stats 
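The explained variance computed here is 1 - Var(returns - values) / Var(returns): it approaches 1 when the critic tracks the returns closely and falls to 0 (or below) when its predictions are no better than a constant. A standalone sketch with made-up tensors:

import torch

returns = torch.randn(256)
values = returns + 0.1 * torch.randn(256)   # a reasonably accurate value estimate

explained_var = 1 - torch.var(returns - values) / torch.var(returns)
print(explained_var.item())                 # close to 1.0; near 0 for uninformative predictions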
Example #12
Source Project: torch-toolbox   Author: PistonY   File: functional.py    License: BSD 3-Clause "New" or "Revised" License
def instance_std(x, eps=1e-5):
    var = torch.var(x, dim=(2, 3), keepdim=True)
    std = torch.sqrt(var + eps)
    return std 
Example #13
Source Project: torch-toolbox   Author: PistonY   File: functional.py    License: BSD 3-Clause "New" or "Revised" License
def group_std(x: torch.Tensor, groups=32, eps=1e-5):
    n, c, h, w = x.size()
    x = torch.reshape(x, (n, groups, c // groups, h, w))
    var = torch.var(x, dim=(2, 3, 4), keepdim=True)
    std = torch.sqrt(var + eps)
    return torch.reshape(std, (n, c, h, w)) 
Example #14
Source Project: audio   Author: pytorch   File: utils.py    License: BSD 2-Clause "Simplified" License
def calc_mean_invstddev(feature):
    if len(feature.shape) != 2:
        raise ValueError("We expect the input feature to be 2-D tensor")
    mean = torch.mean(feature, dim=0)
    var = torch.var(feature, dim=0)
    # avoid division by ~zero
    if (var < sys.float_info.epsilon).any():
        return mean, 1.0 / (torch.sqrt(var) + sys.float_info.epsilon)
    return mean, 1.0 / torch.sqrt(var) 
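The mean/inverse-stddev pair returned above is typically applied as (feature - mean) * invstddev to produce zero-mean, unit-variance features per dimension. A self-contained sketch of that usage (omitting the near-zero-variance guard from the example):

import torch

feature = torch.randn(100, 40)                # (frames, feature_dim)
mean = torch.mean(feature, dim=0)
invstddev = 1.0 / torch.sqrt(torch.var(feature, dim=0))
normalized = (feature - mean) * invstddev     # per-dimension mean/variance normalization
print(torch.var(normalized, dim=0))           # ~1.0 in every dimension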
Example #15
Source Project: JEM   Author: wgrathwohl   File: norms.py    License: Apache License 2.0
def forward(self, x, y):
        if self.init:
            scale, bias = self.embed(y).chunk(2, dim=-1)
            return x * scale[:, :, None, None] + bias[:, :, None, None]
        else:
            m, v = torch.mean(x, dim=(0, 2, 3)), torch.var(x, dim=(0, 2, 3))
            std = torch.sqrt(v + 1e-5)
            scale_init = 1. / std
            bias_init = -1. * m / std
            self.embed.weight.data[:, :self.num_features] = scale_init[None].repeat(self.num_classes, 1)
            self.embed.weight.data[:, self.num_features:] = bias_init[None].repeat(self.num_classes, 1)
            self.init = True
            return self(x, y) 
Example #16
Source Project: Parsing-R-CNN   Author: soeaver   File: mixture_batchnorm.py    License: MIT License
def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avgpool(x)  # .view(b, c)
        var = torch.var(x, dim=(2, 3)).view(b, c, 1, 1)
        y *= (var + 1e-3).rsqrt()
        # y = torch.cat((y, var), dim=1)
        return self.attention(y).view(b, self.k)


# TODO: keep it to use FP32 always, need to figure out how to set it using apex ? 
Example #17
Source Project: residual-flows   Author: rtqichen   File: act_norm.py    License: MIT License
def forward(self, x, logpx=None):
        c = x.size(1)

        if not self.initialized:
            with torch.no_grad():
                # compute batch statistics
                x_t = x.transpose(0, 1).contiguous().view(c, -1)
                batch_mean = torch.mean(x_t, dim=1)
                batch_var = torch.var(x_t, dim=1)

                # for numerical issues
                batch_var = torch.max(batch_var, torch.tensor(0.2).to(batch_var))

                self.bias.data.copy_(-batch_mean)
                self.weight.data.copy_(-0.5 * torch.log(batch_var))
                self.initialized.fill_(1)

        bias = self.bias.view(*self.shape).expand_as(x)
        weight = self.weight.view(*self.shape).expand_as(x)

        y = (x + bias) * torch.exp(weight)

        if logpx is None:
            return y
        else:
            return y, logpx - self._logdetgrad(x) 
Example #18
Source Project: cnaps   Author: cambridge-mlg   File: normalization_layers.py    License: MIT License
def _normalize(self, x, mean, var):
        """
        Normalize activations.
        :param x: input activations
        :param mean: mean used to normalize
        :param var: var used to normalize
        :return: normalized activations
        """
        return (self.weight.view(1, -1, 1, 1) * (x - mean) / torch.sqrt(var + self.eps)) + self.bias.view(1, -1, 1, 1) 
Example #19
Source Project: cnaps   Author: cambridge-mlg   File: normalization_layers.py    License: MIT License
def _compute_batch_moments(x):
        """
        Compute conventional batch mean and variance.
        :param x: input activations
        :return: batch mean, batch variance
        """
        return torch.mean(x, dim=(0, 2, 3), keepdim=True), torch.var(x, dim=(0, 2, 3), keepdim=True) 
Example #20
Source Project: cnaps   Author: cambridge-mlg   File: normalization_layers.py    License: MIT License
def _compute_instance_moments(x):
        """
        Compute instance mean and variance.
        :param x: input activations
        :return: instance mean, instance variance
        """
        return torch.mean(x, dim=(2, 3), keepdim=True), torch.var(x, dim=(2, 3), keepdim=True) 
Example #21
Source Project: cnaps   Author: cambridge-mlg   File: normalization_layers.py    License: MIT License
def _compute_layer_moments(x):
        """
        Compute layer mean and variance.
        :param x: input activations
        :return: layer mean, layer variance
        """
        return torch.mean(x, dim=(1, 2, 3), keepdim=True), torch.var(x, dim=(1, 2, 3), keepdim=True) 
Example #22
Source Project: ncsn   Author: ermongroup   File: refinenet_dilated_baseline.py    License: GNU General Public License v3.0
def forward(self, x, y):
        means = torch.mean(x, dim=(2, 3))
        m = torch.mean(means, dim=-1, keepdim=True)
        v = torch.var(means, dim=-1, keepdim=True)
        means = (means - m) / (torch.sqrt(v + 1e-5))
        h = self.instance_norm(x)

        if self.bias:
            h = h + means[..., None, None] * self.alpha[..., None, None]
            out = self.gamma.view(-1, self.num_features, 1, 1) * h + self.beta.view(-1, self.num_features, 1, 1)
        else:
            h = h + means[..., None, None] * self.alpha[..., None, None]
            out = self.gamma.view(-1, self.num_features, 1, 1) * h
        return out 
Example #23
Source Project: backpack   Author: f-dangel   File: implementation_autograd.py    License: MIT License
def variance(self):
        batch_grad = self.batch_gradients()
        variances = [torch.var(g, dim=0, unbiased=False) for g in batch_grad]
        return variances 
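unbiased=False makes torch.var divide by the batch size N instead of N - 1, i.e. it computes the population variance of the per-sample gradients. A toy illustration of the flag, with a random tensor standing in for one parameter's per-sample gradients:

import torch

batch_grad = torch.randn(32, 10)                        # (batch, parameter numel)
biased = torch.var(batch_grad, dim=0, unbiased=False)   # divides by N
unbiased = torch.var(batch_grad, dim=0)                 # divides by N - 1 (default)
print(torch.allclose(biased * 32 / 31, unbiased))       # True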
Example #24
Source Project: DualResidualNetworks   Author: liu-vis   File: N_modules.py    License: MIT License
def forward(self, x):        
        flat_len = x.size(2)*x.size(3)
        vec = x.view(x.size(0), x.size(1), flat_len)
        mean = torch.mean(vec, 2).unsqueeze(2).unsqueeze(3).expand_as(x)
        var = torch.var(vec, 2).unsqueeze(2).unsqueeze(3).expand_as(x) * ((flat_len - 1)/float(flat_len))
        scale_broadcast = self.scale.unsqueeze(1).unsqueeze(1).unsqueeze(0)
        scale_broadcast = scale_broadcast.expand_as(x)
        shift_broadcast = self.shift.unsqueeze(1).unsqueeze(1).unsqueeze(0)
        shift_broadcast = shift_broadcast.expand_as(x)
        out = (x - mean) / torch.sqrt(var+self.eps)
        out = out * scale_broadcast + shift_broadcast
        return out 
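The trailing (flat_len - 1) / float(flat_len) factor converts torch.var's default unbiased estimate into the biased (population) variance that instance normalization uses; the same result can be obtained by passing unbiased=False. A small equivalence check (not part of the project's code):

import torch

vec = torch.randn(2, 3, 64 * 64)               # (N, C, H*W)
n = vec.size(2)

corrected = torch.var(vec, 2) * ((n - 1) / float(n))
population = torch.var(vec, 2, unbiased=False)
print(torch.allclose(corrected, population))   # True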
Example #25
Source Project: Conditional-Batch-Norm   Author: ap229997   File: cbn.py    License: MIT License
def forward(self, feature, lstm_emb):
        self.batch_size, self.channels, self.height, self.width = feature.data.shape


        # get delta values
        delta_betas, delta_gammas = self.create_cbn_input(lstm_emb)

        betas_cloned = self.betas.clone()
        gammas_cloned = self.gammas.clone()

        # update the values of beta and gamma
        betas_cloned += delta_betas
        gammas_cloned += delta_gammas

        # get the mean and variance for the batch norm layer
        batch_mean = torch.mean(feature)
        batch_var = torch.var(feature)

        # extend the betas and gammas of each channel across the height and width of feature map
        betas_expanded = torch.stack([betas_cloned]*self.height, dim=2)
        betas_expanded = torch.stack([betas_expanded]*self.width, dim=3)

        gammas_expanded = torch.stack([gammas_cloned]*self.height, dim=2)
        gammas_expanded = torch.stack([gammas_expanded]*self.width, dim=3)

        # normalize the feature map
        feature_normalized = (feature-batch_mean)/torch.sqrt(batch_var+self.eps)

        # get the normalized feature map with the updated beta and gamma values
        out = torch.mul(feature_normalized, gammas_expanded) + betas_expanded

        return out, lstm_emb 
Example #26
Source Project: torchgan   Author: torchgan   File: virtualbatchnorm.py    License: MIT License
def _batch_stats(self, x):
        r"""Computes the statistics of the batch ``x``.

        Args:
            x (torch.Tensor): Tensor whose statistics need to be computed.

        Returns:
            A tuple of the mean and variance of the batch ``x``.
        """
        mu = torch.mean(x, dim=0, keepdim=True)
        var = torch.var(x, dim=0, keepdim=True)
        return mu, var 
Example #27
Source Project: PointFlow   Author: stevenygd   File: normalization.py    License: MIT License
def stable_var(x, mean=None, dim=1):
    if mean is None:
        mean = x.mean(dim, keepdim=True)
    mean = mean.view(-1, 1)
    res = torch.pow(x - mean, 2)
    max_sqr = torch.max(res, dim, keepdim=True)[0]
    var = torch.mean(res / max_sqr, 1, keepdim=True) * max_sqr
    var = var.view(-1)
    # change nan to zero
    var[var != var] = 0
    return var 
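stable_var rescales the squared deviations by their per-row maximum before averaging, which guards against overflow on extreme values; since mean(res / max_sqr) * max_sqr equals mean(res), it matches the ordinary population variance on well-scaled data. A self-contained check of that identity (inlining the same computation rather than calling the function above):

import torch

x = torch.randn(8, 1024)

res = (x - x.mean(1, keepdim=True)) ** 2
max_sqr = res.max(1, keepdim=True)[0]
stable = (torch.mean(res / max_sqr, 1, keepdim=True) * max_sqr).view(-1)

print(torch.allclose(stable, torch.var(x, dim=1, unbiased=False)))   # True up to float error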
Example #28
Source Project: uncertainty_estimation_deep_learning   Author: mattiasegu   File: eval.py    License: MIT License
def compute_preds(net, inputs, use_adf=False, use_mcdo=False):
    
    model_variance = None
    data_variance = None
    
    def keep_variance(x, min_variance):
        return x + min_variance

    keep_variance_fn = lambda x: keep_variance(x, min_variance=args.min_variance)
    softmax = nn.Softmax(dim=1)
    adf_softmax = adf.Softmax(dim=1, keep_variance_fn=keep_variance_fn)
    
    net.eval()
    if use_mcdo:
        net = set_training_mode_for_dropout(net, True)
        outputs = [net(inputs) for i in range(args.num_samples)]
        
        if use_adf:
            outputs = [adf_softmax(*outs) for outs in outputs]
            outputs_mean = [mean for (mean, var) in outputs]
            data_variance = [var for (mean, var) in outputs]
            data_variance = torch.stack(data_variance)
            data_variance = torch.mean(data_variance, dim=0)
        else:
            outputs_mean = [softmax(outs) for outs in outputs]
            
        outputs_mean = torch.stack(outputs_mean)
        model_variance = torch.var(outputs_mean, dim=0)
        # Compute MCDO prediction
        outputs_mean = torch.mean(outputs_mean, dim=0)
    else:
        outputs = net(inputs)
        if use_adf:
            outputs_mean, data_variance = adf_softmax(*outputs)
        else:
            outputs_mean = outputs
        
    net = set_training_mode_for_dropout(net, False)
    
    return outputs_mean, data_variance, model_variance 
Example #29
Source Project: pydlt   Author: dmarnerides   File: misc.py    License: BSD 3-Clause Clear License
def moving_var(x, width=5):
    """Performes moving variance of a one dimensional Tensor or Array

    Args:
        x (Tensor or Array): 1D Tensor or array.
        width (int, optional): Width of the kernel.
    """
    if len(x) >= width:
        if is_array(x):
            return np.var(slide_window_(x, width, 1), -1)
        else:
            return torch.var(slide_window_(x, width, 1), -1)
    else:
        return x.var() 
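slide_window_ is pydlt's own helper; on a plain 1-D tensor a similar moving variance can be sketched with Tensor.unfold, which builds overlapping windows of the given width and stride (a rough equivalent, not pydlt's implementation):

import torch

x = torch.arange(10, dtype=torch.float)
width = 5

windows = x.unfold(0, width, 1)       # shape (len(x) - width + 1, width)
moving = torch.var(windows, dim=-1)   # unbiased variance of each window
print(moving)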
Example #30
Source Project: pydlt   Author: dmarnerides   File: misc.py    License: BSD 3-Clause Clear License
def sub_var(x, width=5):
    """Calculates variance of a one dimensional Tensor or Array every `width` elements.

    Args:
        x (Tensor or Array): 1D Tensor or array.
        width (int, optional): Width of the kernel.
    """
    if len(x) >= width:
        if is_array(x):
            return np.var(slide_window_(x, width, width), -1)
        else:
            return torch.var(slide_window_(x, width, width), -1)
    else:
        return x.var()