Python torch.std() Examples

The following are 25 code examples of torch.std(). Each is taken from an open-source project; the source project, author, file, and license are listed above each example. You may also want to check out the other available functions and classes of the torch module.
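Before the project examples, here is a minimal sketch of the function's basic behavior (shapes and values are illustrative). Note that torch.std is unbiased by default, i.e. it divides by N - 1:

import torch

x = torch.randn(4, 5)

# Scalar standard deviation over all elements.
print(torch.std(x))

# Per-column std; keepdim=True keeps the reduced axis for broadcasting.
sigma = torch.std(x, dim=0, keepdim=True)              # shape: (1, 5)
z = (x - torch.mean(x, dim=0, keepdim=True)) / sigma   # column-wise z-scores
print(z)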
Example #1
Source Project: pyfilter   Author: tingiskhan   File: filters.py   License: MIT License
def test_SDE(self):
        def f(x, a, s):
            return -a * x

        def g(x, a, s):
            return s

        em = AffineEulerMaruyama((f, g), (0.02, 0.15), Normal(0., 1.), Normal(0., 1.), dt=1e-2, num_steps=10)
        model = LinearGaussianObservations(em, scale=1e-3)

        x, y = model.sample_path(500)

        for filt in [SISR(model, 500, proposal=Bootstrap()), UKF(model)]:
            filt = filt.initialize().longfilter(y)

            means = filt.result.filter_means
            if isinstance(filt, UKF):
                means = means[:, 0]

            self.assertLess(torch.std(x - means), 5e-2) 
Example #2
Source Project: self-attentive-parser   Author: nikitakit   File: parse_nk.py   License: MIT License
def forward(self, z):
        if z.size(-1) == 1:
            return z

        mu = torch.mean(z, keepdim=True, dim=-1)
        sigma = torch.std(z, keepdim=True, dim=-1)
        ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
        if self.affine:
            ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)

        # NOTE(nikita): the t2t code does the following instead, with eps=1e-6
        # However, I currently have no reason to believe that this difference in
        # implementation matters.
        # mu = torch.mean(z, keepdim=True, dim=-1)
        # variance = torch.mean((z - mu.expand_as(z))**2, keepdim=True, dim=-1)
        # ln_out = (z - mu.expand_as(z)) * torch.rsqrt(variance + self.eps).expand_as(z)
        # ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)

        return ln_out

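The NOTE above is easy to check numerically; a small sketch (eps value arbitrary) comparing the two formulations:

import torch

z = torch.randn(3, 8)
eps = 1e-6

mu = torch.mean(z, keepdim=True, dim=-1)
# Variant used in forward(): divide by (std + eps).
out1 = (z - mu) / (torch.std(z, keepdim=True, dim=-1) + eps)
# t2t-style variant: multiply by rsqrt(variance + eps).
variance = torch.mean((z - mu) ** 2, keepdim=True, dim=-1)
out2 = (z - mu) * torch.rsqrt(variance + eps)

# Close but not identical: torch.std is unbiased (divides by N - 1),
# while the variance above divides by N.
print((out1 - out2).abs().max())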
Example #3
Source Project: sodeep   Author: technicolor-research   File: model.py   License: BSD 3-Clause Clear License
def comp(self, inpu):
        # Differentiable (soft) ranking: pairwise differences are passed through
        # sigmoids whose slopes are scaled by the differences' standard deviation.
        in_mat1 = torch.triu(inpu.repeat(inpu.size(0), 1), diagonal=1)
        in_mat2 = torch.triu(inpu.repeat(inpu.size(0), 1).t(), diagonal=1)

        comp_first = (in_mat1 - in_mat2)
        comp_second = (in_mat2 - in_mat1)

        std1 = torch.std(comp_first).item()
        std2 = torch.std(comp_second).item()

        comp_first = torch.sigmoid(comp_first * (6.8 / std1))
        comp_second = torch.sigmoid(comp_second * (6.8 / std2))

        comp_first = torch.triu(comp_first, diagonal=1)
        comp_second = torch.triu(comp_second, diagonal=1)

        return (torch.sum(comp_first, 1) + torch.sum(comp_second, 0) + 1) / inpu.size(0) 
Example #4
Source Project: torchsupport   Author: mjendrusch   File: transforms.py   License: MIT License
def __call__(self, x):
        if not self.auto:
            for idx in range(x.shape[0]):
                xmean = torch.mean(x[idx, :, :])
                xstd = torch.std(x[idx, :, :])
                x[idx, :, :] = (x[idx, :, :] - xmean) / xstd
                if xstd == 0:
                    x[idx, :, :] = 0.0
        else:
            view = x.view(x.shape[0], -1)
            mean = view.mean(dim=1)
            var = view.var(dim=1)
            # Online update of the running per-channel mean and variance.
            self.var = var / (self.count + 1) + self.count / (self.count + 1) * self.var
            self.var += self.count / ((self.count + 1) ** 2) * (self.mean - mean) ** 2
            self.mean = (self.count * self.mean + mean) / (self.count + 1)
            for idx in range(x.shape[0]):
                # Standardize with the running statistics; zero out any channel
                # whose running variance is zero.
                x[idx, :, :] = (x[idx, :, :] - self.mean[idx]) / torch.sqrt(self.var[idx])
                if self.var[idx] == 0:
                    x[idx, :, :] = 0.0
        return x 
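The else branch above maintains running statistics across calls. For comparison, a minimal sketch of the textbook Welford-style online update it approximates (all names here are illustrative, not from torchsupport):

import torch

def welford_update(count, mean, M2, new_value):
    # One step of Welford's online mean/variance algorithm.
    count += 1
    delta = new_value - mean
    mean = mean + delta / count
    M2 = M2 + delta * (new_value - mean)
    return count, mean, M2

count, mean, M2 = 0, torch.zeros(3), torch.zeros(3)
for sample in torch.randn(1000, 3):
    count, mean, M2 = welford_update(count, mean, M2, sample)
print(mean, M2 / (count - 1))  # running mean and unbiased variance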
Example #5
Source Project: nispat   Author: amarquand   File: NP.py   License: GNU General Public License v3.0
def forward(self, x_context, y_context, x_all=None, y_all=None, n = 10):
        y_sigma = None
        z_context = self.xy_to_z_params(x_context, y_context)
        if self.training:
            z_all = self.xy_to_z_params(x_all, y_all)
            z_sample = self.reparameterise(z_all)
            y_hat = self.decoder.forward(z_sample, x_all)
        else:  
            z_all = z_context
            if self.type == 'ST':
                temp = torch.zeros([n,y_context.shape[0], y_context.shape[2]], device = 'cpu')
            elif self.type == 'MT':
                temp = torch.zeros([n,y_context.shape[0],1,y_context.shape[2],y_context.shape[3],
                                y_context.shape[4]], device = 'cpu')                                
            for i in range(n):
                z_sample = self.reparameterise(z_all)
                temp[i,:] = self.decoder.forward(z_sample, x_context)
            y_hat = torch.mean(temp, dim=0).to(self.device)
            if n > 1:
                y_sigma = torch.std(temp, dim=0).to(self.device)
        return y_hat, z_all, z_context, y_sigma
    
############################################################################### 
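In the eval branch above, torch.std over the sample dimension turns n stochastic decodes into a per-point uncertainty estimate. A stripped-down sketch of that pattern (decode is a stand-in for the decoder):

import torch

def decode(z):
    # Stand-in for a stochastic decoder.
    return z + 0.1 * torch.randn_like(z)

z = torch.zeros(500)
samples = torch.stack([decode(z) for _ in range(10)])  # shape: (n, 500)
y_hat = torch.mean(samples, dim=0)    # Monte Carlo point estimate
y_sigma = torch.std(samples, dim=0)   # predictive spread, as in forward()
print(y_sigma.mean())                 # roughly 0.1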
Example #6
Source Project: nispat   Author: amarquand   File: NPR.py   License: GNU General Public License v3.0
def forward(self, x_context, y_context, x_all=None, y_all=None, n = 10):
        y_sigma = None
        y_sigma_84 = None
        z_context = self.xy_to_z_params(x_context, y_context)
        if self.training:
            z_all = self.xy_to_z_params(x_all, y_all)
            z_sample = self.reparameterise(z_all)
            y_hat, y_hat_84 = self.decoder.forward(z_sample)
        else:  
            z_all = z_context
            temp = torch.zeros([n,y_context.shape[0], y_context.shape[2]], device = self.device)
            temp_84 = torch.zeros([n,y_context.shape[0], y_context.shape[2]], device = self.device)
            for i in range(n):
                z_sample = self.reparameterise(z_all)
                temp[i,:], temp_84[i,:] = self.decoder.forward(z_sample)
            y_hat = torch.mean(temp, dim=0).to(self.device)
            y_hat_84 = torch.mean(temp_84, dim=0).to(self.device)
            if n > 1:
                y_sigma = torch.std(temp, dim=0).to(self.device)
                y_sigma_84 = torch.std(temp_84, dim=0).to(self.device)
        return y_hat, y_hat_84, z_all, z_context, y_sigma, y_sigma_84
    
############################################################################### 
Example #7
Source Project: parser   Author: yzhangcs   File: field.py   License: MIT License
def build(self, corpus, min_freq=1, embed=None):
        sequences = getattr(corpus, self.name)
        counter = Counter(token for sequence in sequences
                          for token in self.transform(sequence))
        self.vocab = Vocab(counter, min_freq, self.specials)

        if not embed:
            self.embed = None
        else:
            tokens = self.transform(embed.tokens)
            # if the `unk` token already exists in the pretrained vocab,
            # replace it with the self-defined one
            if embed.unk:
                tokens[embed.unk_index] = self.unk

            self.vocab.extend(tokens)
            self.embed = torch.zeros(len(self.vocab), embed.dim)
            self.embed[self.vocab.token2id(tokens)] = embed.vectors
            self.embed /= torch.std(self.embed) 
Example #8
Source Project: torch-light   Author: ne7ermore   File: model.py   License: MIT License
def __init__(self, n_head, d_model, dropout):
        super().__init__()
        self.n_head = n_head
        self.d_v = self.d_k = d_k = d_model // n_head

        for name in ["w_qs", "w_ks", "w_vs"]:
            self.__setattr__(name,
                             nn.Parameter(torch.FloatTensor(n_head, d_model, d_k)))

        self.attention = ScaledDotProductAttention(d_k, dropout)
        self.lm = LayerNorm(d_model)
        self.w_o = nn.Linear(d_model, d_model, bias=False)
        self.dropout = nn.Dropout(dropout)

        self.w_qs.data.normal_(std=const.INIT_RANGE)
        self.w_ks.data.normal_(std=const.INIT_RANGE)
        self.w_vs.data.normal_(std=const.INIT_RANGE) 
Example #9
Source Project: torch-light   Author: ne7ermore   File: model.py   License: MIT License
def __init__(self, n_head, d_model, dropout=0.5):
        super().__init__()
        self.n_head = n_head
        self.d_v = self.d_k = d_k = d_model // n_head

        for name in ["w_qs", "w_ks", "w_vs"]:
            self.__setattr__(name,
                             nn.Parameter(torch.FloatTensor(n_head, d_model, d_k)))

        self.attention = ScaledDotProductAttention(d_k, dropout)
        self.lm = LayerNorm(d_model)
        self.w_o = nn.Linear(d_model, d_model, bias=False)
        self.dropout = nn.Dropout(dropout)

        self.w_qs.data.normal_(std=const.INIT_RANGE)
        self.w_ks.data.normal_(std=const.INIT_RANGE)
        self.w_vs.data.normal_(std=const.INIT_RANGE) 
Example #10
Source Project: mmdetection   Author: open-mmlab   File: reppoints_head.py   License: Apache License 2.0
def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.reppoints_cls_conv, std=0.01)
        normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
        normal_init(self.reppoints_pts_init_conv, std=0.01)
        normal_init(self.reppoints_pts_init_out, std=0.01)
        normal_init(self.reppoints_pts_refine_conv, std=0.01)
        normal_init(self.reppoints_pts_refine_out, std=0.01) 
Example #11
Source Project: SEDST   Author: AuCson   File: rnn_net.py   License: MIT License
def forward(self, z):
        if z.size(1) == 1:
            return z
        mu = torch.mean(z, keepdim=True, dim=-1)
        sigma = torch.std(z, keepdim=True, dim=-1)
        ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
        ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)
        return ln_out 
Example #12
Source Project: pase   Author: santi-pdp   File: make_trainset_statistics.py   License: MIT License
def extract_stats(opts):
    dset = build_dataset_providers(opts)
    collater_keys = dset[-1]
    dset = dset[0]
    collater = DictCollater()
    collater.batching_keys.extend(collater_keys)
    dloader = DataLoader(dset, batch_size = 100,
                         shuffle=True, collate_fn=collater,
                         num_workers=opts.num_workers)
    # Estimate the number of batches per epoch (bpe). Since chunks are sampled
    # randomly, an epoch is counted once at least
    # total_train_wav_dur // chunk_size chunks have been seen.
    bpe = (dset.total_wav_dur // opts.chunk_size) // 500
    data = {}
    # run one epoch of training data to extract z-stats of minions
    for bidx, batch in enumerate(dloader, start=1):
        print('Bidx: {}/{}'.format(bidx, bpe))
        for k, v in batch.items():
            if k in opts.exclude_keys:
                continue
            if k not in data:
                data[k] = []
            data[k].append(v)

        if bidx >= opts.max_batches:
            break

    stats = {}
    data = dict((k, torch.cat(v)) for k, v in data.items())
    for k, v in data.items():
        stats[k] = {'mean':torch.mean(torch.mean(v, dim=2), dim=0),
                    'std':torch.std(torch.std(v, dim=2), dim=0)}
    with open(opts.out_file, 'wb') as stats_f:
        pickle.dump(stats, stats_f) 
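The pickled stats would typically be consumed at training time to z-normalize each feature. A hedged sketch of that consumer side (the file name is illustrative; the key layout mirrors the dict built above):

import pickle
import torch

with open('trainset_stats.pkl', 'rb') as f:  # illustrative path
    stats = pickle.load(f)

def znorm(feature, key, eps=1e-8):
    # stats[key]['mean'/'std'] are per-channel; broadcast over time.
    mean = stats[key]['mean'].unsqueeze(-1)
    std = stats[key]['std'].unsqueeze(-1)
    return (feature - mean) / (std + eps)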
Example #13
Source Project: torchsupport   Author: mjendrusch   File: transforms.py   License: MIT License
def __init__(self, mean=0.0, std=0.1):
        """Perturb an image by normally distributed additive noise."""
        self.mean = mean
        self.std = std 
Example #14
Source Project: torchsupport   Author: mjendrusch   File: transforms.py   License: MIT License
def __call__(self, x):
        noise = x.data.new(x.size()).normal_(
            self.mean, self.std
        ) if not isinstance(self.std, tuple) else \
        x.data.new(x.size()).normal_(
            np.random.uniform(*self.mean), np.random.uniform(*self.std)
        )
        x = x + noise
        return x 
Example #15
Source Project: torchsupport   Author: mjendrusch   File: transforms.py   License: MIT License
def __call__(self, x):
        scale = np.random.uniform(*self.scale)
        shift = np.random.uniform(*self.shift)
        return (x - x.mean()) / x.std() * scale + shift 
Example #16
Source Project: lightNLP   Author: smilelight   File: vocab.py   License: Apache License 2.0
def read_embeddings(self, embed, unk=None):
        words = embed.words
        # if the UNK token already exists in the pretrained vocab,
        # then replace it with the self-defined one
        if unk in embed:
            words[words.index(unk)] = self.UNK

        self.extend(words)
        self.embeddings = torch.zeros(self.n_words, embed.dim)

        for i, word in enumerate(self.words):
            if word in embed:
                self.embeddings[i] = embed[word]
        self.embeddings /= torch.std(self.embeddings) 
Example #17
Source Project: srl-zoo   Author: araffin   File: losses.py   License: MIT License
def mutualInformationLoss(states, rewards_st, weight, loss_manager):
    """
    TODO: the equation needs to be reworked for faster computation (the double loop below is O(N^2))
    Loss criterion to assess the mutual information between predicted states and rewards
    see: https://en.wikipedia.org/wiki/Mutual_information
    :param states: (th.Tensor)
    :param rewards_st: (th.Tensor)
    :param weight: coefficient to weight the loss (float)
    :param loss_manager: loss criterion needed to log the loss value
    :return: the weighted mutual information loss (th.Tensor)
    """
    X = states
    Y = rewards_st
    I = 0
    eps = 1e-10
    p_x = float(1 / np.sqrt(2 * np.pi)) * \
          th.exp(-th.pow(th.norm((X - th.mean(X, dim=0)) / (th.std(X, dim=0) + eps), 2, dim=1), 2) / 2) + eps
    p_y = float(1 / np.sqrt(2 * np.pi)) * \
          th.exp(-th.pow(th.norm((Y - th.mean(Y, dim=0)) / (th.std(Y, dim=0) + eps), 2, dim=1), 2) / 2) + eps
    for x in range(X.shape[0]):
        for y in range(Y.shape[0]):
            p_xy = float(1 / np.sqrt(2 * np.pi)) * \
                   th.exp(-th.pow(th.norm((th.cat([X[x], Y[y]]) - th.mean(th.cat([X, Y], dim=1), dim=0)) /
                                          (th.std(th.cat([X, Y], dim=1), dim=0) + eps), 2), 2) / 2) + eps
            I += p_xy * th.log(p_xy / (p_x[x] * p_y[y]))

    mutual_info_loss = th.exp(-I)
    loss_manager.addToLosses('mutual_info', weight, mutual_info_loss)
    return weight * mutual_info_loss 
Example #18
Source Project: tfm-franroldan-wav2pix   Author: franroldans   File: segan.py   License: GNU General Public License v3.0
def forward(self, activation):
        if len(activation.size()) == 3:
            ori_size = activation.size()
            activation = activation.view(-1, activation.size(-1))
        else:
            ori_size = None
        means = torch.mean(activation, dim=1, keepdim=True)
        stds = torch.std(activation, dim=1, keepdim=True)
        activation = (activation - means) / stds
        if ori_size is not None:
            activation = activation.view(ori_size)
        return activation 
Example #19
Source Project: RepPoints   Author: microsoft   File: reppoints_head.py   License: MIT License
def init_weights(self):
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.reppoints_cls_conv, std=0.01)
        normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
        normal_init(self.reppoints_pts_init_conv, std=0.01)
        normal_init(self.reppoints_pts_init_out, std=0.01)
        normal_init(self.reppoints_pts_refine_conv, std=0.01)
        normal_init(self.reppoints_pts_refine_out, std=0.01) 
Example #20
Source Project: GraphIE   Author: thomas0809   File: transformer.py   License: GNU General Public License v3.0
def forward(self, z):
        if z.size(1) == 1:
            return z

        mu = torch.mean(z, keepdim=True, dim=-1)
        sigma = torch.std(z, keepdim=True, dim=-1)
        ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
        ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)

        return ln_out 
Example #21
Source Project: reversible-rnn   Author: matthewmackay   File: UtilClass.py   License: MIT License
def forward(self, z):
        if z.size(1) == 1:
            return z
        mu = torch.mean(z, dim=1)
        sigma = torch.std(z, dim=1)
        # HACK. PyTorch is changing behavior
        if mu.dim() == 1:
            mu = mu.unsqueeze(1)
            sigma = sigma.unsqueeze(1)
        ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
        ln_out = ln_out.mul(self.a_2.expand_as(ln_out)) \
            + self.b_2.expand_as(ln_out)
        return ln_out 
Example #22
Source Project: DFGN-pytorch   Author: woshiyyya   File: utils.py   License: MIT License
def report_tensor(tensor, name, dim=-1, verbose=False):
    print('{}: shape={}, mean={}, std={}, min={}, max={}'.
          format(name, tensor.shape, torch.mean(tensor), torch.std(tensor), torch.min(tensor), torch.max(tensor)))

    if verbose and len(tensor.shape) > 1:
        matrix = tensor.view(tensor.shape[0], -1)
        # if dim is None:
        #     check_dim = -1 if len(tensor.shape) < 3 else tuple(range(1-len(tensor.shape), 0))
        # else:
        #     check_dim = dim
        # torch.min/torch.max along a dim return a (values, indices) pair;
        # only the values are reported here.
        print('details: mean={},\n\t\tstd={},\n\t\tmin={},\n\t\tmax={}'.
              format(torch.mean(matrix, dim=dim), torch.std(matrix, dim=dim),
                     torch.min(matrix, dim=dim).values, torch.max(matrix, dim=dim).values))
Example #23
Source Project: kaggle-kuzushiji-recognition   Author: tascj   File: reppoints_head.py   License: MIT License
def init_weights(self):
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.reppoints_cls_conv, std=0.01)
        normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
        normal_init(self.reppoints_pts_init_conv, std=0.01)
        normal_init(self.reppoints_pts_init_out, std=0.01)
        normal_init(self.reppoints_pts_refine_conv, std=0.01)
        normal_init(self.reppoints_pts_refine_out, std=0.01) 
Example #24
Source Project: affnet   Author: ducha-aiki   File: architectures.py   License: MIT License
def input_norm(self,x):
        flat = x.view(x.size(0), -1)
        mp = torch.mean(flat, dim=1)
        sp = torch.std(flat, dim=1) + 1e-7
        return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x) 
Example #25
Source Project: affnet   Author: ducha-aiki   File: architectures.py   License: MIT License
def input_norm(self,x):
        flat = x.view(x.size(0), -1)
        mp = torch.mean(flat, dim=1).detach()
        sp = torch.std(flat, dim=1).detach() + 1e-7
        return (x - mp.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x) 
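The chain of unsqueeze/expand_as calls in these input_norm variants can be written more compactly with indexing-based broadcasting; a behavior-preserving sketch, assuming 4-D input of shape (B, C, H, W):

import torch

def input_norm(x, eps=1e-7):
    # Per-sample standardization over all non-batch dimensions.
    flat = x.view(x.size(0), -1)
    mp = torch.mean(flat, dim=1).detach()
    sp = torch.std(flat, dim=1).detach() + eps
    return (x - mp[:, None, None, None]) / sp[:, None, None, None]

x = torch.randn(2, 1, 32, 32)
print(input_norm(x).view(2, -1).std(dim=1))  # close to 1 for each sample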