Python torch.normal() Examples

The following are 30 code examples of torch.normal(), collected from open-source projects. The source file and originating project for each example are listed above its code. You may also want to check out the other available functions and classes of the torch module.
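As a quick orientation before the examples (this snippet is a minimal sketch written for this page, not taken from any of the projects below), torch.normal() can be called either with tensor mean/std arguments, drawing one sample per element, or with scalar mean/std plus an explicit size in newer PyTorch versions:

import torch

# One draw per (mean, std) pair; the output has the same shape as the inputs.
mean = torch.zeros(2, 3)
std = torch.ones(2, 3)
sample_a = torch.normal(mean, std)              # shape (2, 3)

# Scalar mean/std with an explicit output shape (the form used in Example #1).
sample_b = torch.normal(0.0, 1.0, size=(2, 3))  # shape (2, 3)

Most of the examples below pass tensors for mean and std; a few use the scalar form with size.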
Example #1
Source File: reconstruct.py    From MelNet with MIT License
def inverse(self, melspectrogram, iters=1000):
        x = torch.normal(0, 1e-6, size=((melspectrogram.size(1) - 1) * self.hp.audio.hop_length, )).cuda().requires_grad_()
        optimizer = torch.optim.LBFGS([x], tolerance_change=1e-16)
        melspectrogram = self.post_spec(melspectrogram)

        def closure():
            optimizer.zero_grad()
            mel = self.get_mel(x)
            loss = self.criterion(mel, melspectrogram)
            loss.backward()
            return loss

        with tqdm(range(iters)) as pbar:
            for i in pbar:
                optimizer.step(closure=closure)
                pbar.set_postfix(loss=self.criterion(self.get_mel(x), melspectrogram).item())

        return x, self.pre_spec(self.get_mel(x)) 
Example #2
Source File: test_core.py    From ptstat with MIT License
def setUp(self):
        batch_size = 2
        rv_dimension = 5
        p = torch.normal(torch.zeros(batch_size, rv_dimension), torch.ones(batch_size, rv_dimension))
        p_pos = torch.abs(torch.normal(torch.zeros(batch_size, rv_dimension), torch.ones(batch_size, rv_dimension)))
        p_pos = torch.clamp(p_pos, 0.1, 0.9)
        if cuda:
            p = p.cuda()
            p_pos = p_pos.cuda()
        p = Variable(p)
        p_pos = Variable(p_pos)
        self.rv = [
            stat.Normal(size=(batch_size, rv_dimension), cuda=cuda),
            stat.Normal(p, p_pos),
            stat.Categorical(size=(batch_size, rv_dimension), cuda=cuda),
            stat.Categorical(p_pos / torch.sum(p_pos, 1).expand_as(p_pos)),
            stat.Bernoulli(size=(batch_size, rv_dimension), cuda=cuda),
            stat.Bernoulli(p_pos),
            stat.Uniform(size=(batch_size, rv_dimension), cuda=cuda)
        ] 
Example #3
Source File: semi_sup_net.py    From SEDST with MIT License
def get_sparse_input(x_input):
    """
    get a sparse matrix of x_input: [T,B,V] where x_sparse[i][j][k]=1, and others = 1e-8
    :param x_input: *Tensor* of [T,B]
    :return: *Tensor* in shape [B,T,V]
    """
    # indexes that will make no effect in copying
    sw = time.time()
    print('sparse input start: %s' % sw)
    ignore_index = [0]
    result = torch.normal(mean=0, std=torch.zeros(x_input.size(0), x_input.size(1), cfg.vocab_size))
    for t in range(x_input.size(0)):
        for b in range(x_input.size(1)):
            if x_input[t][b] not in ignore_index:
                result[t][b][x_input[t][b]] = 1.0
    print('sparse input end %s' % time.time())
    return result.transpose(0, 1) 
Example #4
Source File: mseloss.py    From backpack with MIT License
def _sqrt_hessian_sampled(self, module, g_inp, g_out, mc_samples=1):
        """A Monte-Carlo estimate of the square-root of the Hessian.

        Args:
            module: (torch.nn.MSELoss) module.
            g_inp: Gradient of loss w.r.t. input.
            g_out: Gradient of loss w.r.t. output.
            mc_samples: (int, optional) Number of MC samples to use. Default: 1.

        Returns:
            tensor: Monte-Carlo samples of the Hessian square root, of shape [mc_samples, N, D].
        """
        N, D = module.input0.shape
        samples = normal(0, 1, size=[mc_samples, N, D], device=module.input0.device)
        samples *= sqrt(2) / sqrt(mc_samples)

        if module.reduction == "mean":
            samples /= sqrt(module.input0.numel())

        return samples 
Example #5
Source File: torch_ard.py    From pytorch_ard with MIT License
def forward(self, input):
        """
        Forward with all regularized connections and random activations (Bayesian mode). Typically used during training.
        """
        if self.training == False:
            return F.conv2d(input, self.weights_clipped,
                self.bias, self.stride,
                self.padding, self.dilation, self.groups)
        eps = 1e-8
        W = self.weight
        zeros = torch.zeros_like(W)
        clip_mask = self.get_clip_mask()
        conved_mu = F.conv2d(input, W, self.bias, self.stride,
            self.padding, self.dilation, self.groups)
        log_alpha = self.clip(self.log_alpha)
        conved_si = torch.sqrt(eps + F.conv2d(input*input,
            torch.exp(log_alpha) * W * W, self.bias, self.stride,
            self.padding, self.dilation, self.groups))
        conved = conved_mu + \
            conved_si * torch.normal(torch.zeros_like(conved_mu), torch.ones_like(conved_mu))
        return conved 
Example #6
Source File: torch_ard.py    From pytorch_ard with MIT License
def forward(self, input):
        """
        Forward with all regularized connections and random activations (Bayesian mode). Typically used during training.
        """
        if self.training == False: return F.linear(input, self.weights_clipped, self.bias)

        clip_mask = self.get_clip_mask()
        W = self.weight
        zeros = torch.zeros_like(W)
        mu = input.matmul(W.t())
        eps = 1e-8
        log_alpha = self.clip(self.log_alpha)
        si = torch.sqrt((input * input) \
                        .matmul(((torch.exp(log_alpha) * self.weight * self.weight)+eps).t()))
        activation = mu + torch.normal(torch.zeros_like(mu), torch.ones_like(mu)) * si
        return activation + self.bias 
Example #7
Source File: models.py    From ARAE with BSD 3-Clause "New" or "Revised" License
def encode(self, indices, lengths, noise): 
        embeddings = self.embedding(indices)
        packed_embeddings = pack_padded_sequence(input=embeddings,
                                                 lengths=lengths,
                                                 batch_first=True)

        packed_output, state = self.encoder(packed_embeddings)
        hidden = state[0][-1]
        hidden = hidden / torch.norm(hidden, p=2, dim=1, keepdim=True)
        
        if noise and self.noise_r > 0:
            gauss_noise = torch.normal(means=torch.zeros(hidden.size()),
                                       std=self.noise_r)
            hidden = hidden + Variable(gauss_noise.cuda())

        return hidden 
Example #8
Source File: explain.py    From gnn-model-explainer with Apache License 2.0
def construct_edge_mask(self, num_nodes, init_strategy="normal", const_val=1.0):
        mask = nn.Parameter(torch.FloatTensor(num_nodes, num_nodes))
        if init_strategy == "normal":
            std = nn.init.calculate_gain("relu") * math.sqrt(
                2.0 / (num_nodes + num_nodes)
            )
            with torch.no_grad():
                mask.normal_(1.0, std)
                # mask.clamp_(0.0, 1.0)
        elif init_strategy == "const":
            nn.init.constant_(mask, const_val)

        if self.args.mask_bias:
            mask_bias = nn.Parameter(torch.FloatTensor(num_nodes, num_nodes))
            nn.init.constant_(mask_bias, 0.0)
        else:
            mask_bias = None

        return mask, mask_bias 
Example #9
Source File: models_deal.py    From NeuralDialog-LaRL with Apache License 2.0
def z2dec(self, last_h, requires_grad):
        p_mu, p_logvar = self.c2z(last_h)
        if requires_grad:
            sample_z = self.gauss_connector(p_mu, p_logvar)
            joint_logpz = None
        else:
            sample_z = th.normal(p_mu, th.sqrt(th.exp(p_logvar))).detach()
            logprob_sample_z = self.gaussian_logprob(p_mu, p_logvar, sample_z)
            joint_logpz = th.sum(logprob_sample_z.squeeze(0), dim=1)

        dec_init_state = self.z_embedding(sample_z)
        attn_context = None

        if self.config.dec_rnn_cell == 'lstm':
            dec_init_state = tuple([dec_init_state, dec_init_state])

        return dec_init_state, attn_context, joint_logpz 
Example #10
Source File: senti_unified.py    From transfer with MIT License
def train_advreg_mmd(iter_cnt, encoder, gan_g, gan_d, corpus_loader, args, optimizer_reg):
    encoder.train()
    gan_g.train()
    gan_d.train()

    # train gan_disc
    for batch, labels in corpus_loader:
        optimizer_reg.zero_grad()

        batch = Variable(batch.cuda())
        z_real_hidden = encoder(batch)
        z_gauss = torch.normal(means=torch.zeros(batch.size()),
                               std=args.noise_radius)
        z_gauss = Variable(z_gauss.cuda())
        z_gauss_hidden = gan_g(z_gauss)

        loss_ar = gan_d(z_real_hidden, z_gauss_hidden)

        loss_ar.backward()
        optimizer_reg.step() 
Example #11
Source File: medgan.py    From SDGym with MIT License
def sample(self, n):
        self.generator.eval()
        self.decoder.eval()

        steps = n // self.batch_size + 1
        data = []
        for i in range(steps):
            mean = torch.zeros(self.batch_size, self.random_dim)
            std = mean + 1
            noise = torch.normal(mean=mean, std=std).to(self.device)
            emb = self.generator(noise)
            fake = self.decoder(emb, self.transformer.output_info)
            fake = torch.sigmoid(fake)
            data.append(fake.detach().cpu().numpy())
        data = np.concatenate(data, axis=0)
        data = data[:n]
        return self.transformer.inverse_transform(data) 
Example #12
Source File: tvae.py    From SDGym with MIT License
def sample(self, samples):
        self.decoder.eval()

        steps = samples // self.batch_size + 1
        data = []
        for _ in range(steps):
            mean = torch.zeros(self.batch_size, self.embedding_dim)
            std = mean + 1
            noise = torch.normal(mean=mean, std=std).to(self.device)
            fake, sigmas = self.decoder(noise)
            fake = torch.tanh(fake)
            data.append(fake.detach().cpu().numpy())

        data = np.concatenate(data, axis=0)
        data = data[:samples]
        return self.transformer.inverse_transform(data, sigmas.detach().cpu().numpy()) 
Example #13
Source File: veegan.py    From SDGym with MIT License
def sample(self, n):
        self.generator.eval()

        output_info = self.transformer.output_info
        steps = n // self.batch_size + 1
        data = []
        for i in range(steps):
            mean = torch.zeros(self.batch_size, self.embedding_dim)
            std = mean + 1
            noise = torch.normal(mean=mean, std=std).to(self.device)
            fake = self.generator(noise, output_info)
            data.append(fake.detach().cpu().numpy())

        data = np.concatenate(data, axis=0)
        data = data[:n]
        return self.transformer.inverse_transform(data) 
Example #14
Source File: transformer.py    From attention-cnn with Apache License 2.0
def random_masking(self, batch_images, batch_mask, device):
        """
        with probability 10% we keep the image unchanged;
        with probability 10% we fill the mask region with samples from a normal distribution;
        with probability 80% we mask the region with 0.
        :param batch_images: image to be masked
        :param batch_mask: mask region
        :param device:
        :return: masked image
        """
        # TODO masking is currently disabled; the code below is unreachable
        return batch_images
        temp = random.random()
        if temp > 0.1:
            batch_images = batch_images * batch_mask.unsqueeze(1).float()
            if temp < 0.2:
                batch_images = batch_images + (
                    ((-batch_mask.unsqueeze(1).float()) + 1)
                    * torch.normal(mean=0.5, std=torch.ones(batch_images.shape)).to(device)
                )
        return batch_images 
Example #15
Source File: dmlab_model.py    From sample-factory with MIT License
def __init__(self, cfg, obs_space, timing):
        super().__init__(cfg, timing)

        self.basic_encoder = create_standard_encoder(cfg, obs_space, timing)
        self.encoder_out_size = self.basic_encoder.encoder_out_size

        # same as IMPALA paper
        self.embedding_size = 20
        self.instructions_lstm_units = 64
        self.instructions_lstm_layers = 1

        padding_idx = 0
        self.word_embedding = nn.Embedding(
            num_embeddings=DMLAB_VOCABULARY_SIZE,
            embedding_dim=self.embedding_size,
            padding_idx=padding_idx
        )

        self.instructions_lstm = nn.LSTM(
            input_size=self.embedding_size,
            hidden_size=self.instructions_lstm_units,
            num_layers=self.instructions_lstm_layers,
            batch_first=True,
        )

        # learnable initial state?
        # initial_hidden_values = torch.normal(0, 1, size=(self.instructions_lstm_units, ))
        # self.lstm_h0 = nn.Parameter(initial_hidden_values, requires_grad=True)
        # self.lstm_c0 = nn.Parameter(initial_hidden_values, requires_grad=True)

        self.encoder_out_size += self.instructions_lstm_units
        log.debug('Policy head output size: %r', self.encoder_out_size)

        self.cpu_device = torch.device('cpu') 
Example #16
Source File: train_ac_f18.py    From berkeleydeeprlcourse-homework-pytorch with MIT License
def sample_action(self, ob_no):
        """
            Build the method used for sampling action from the policy distribution
    
            arguments:
                ob_no: (batch_size, self.ob_dim)

            returns:
                sampled_ac: 
                    if discrete: (batch_size)
                    if continuous: (batch_size, self.ac_dim)

            Hint: for the continuous case, use the reparameterization trick:
                 The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
        
                      mu + sigma * z,         z ~ N(0, I)
        
                 This reduces the problem to just sampling z. (Hint: use torch.normal!)
        """
        ts_ob_no = torch.from_numpy(ob_no).float()
        
        raise NotImplementedError
        if self.discrete:
            ts_logits_na = self.policy_net(ts_ob_no)
            # YOUR HW2 CODE HERE
            ts_probs = None
            ts_sampled_ac = None
        else:
            ts_mean, ts_logstd = self.policy_net(ts_ob_no)
            # YOUR HW2 CODE HERE
            ts_sampled_ac = None

        sampled_ac = ts_sampled_ac.numpy()
            
        return sampled_ac 
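The continuous branch above is intentionally left as a homework stub (the None placeholders, and the same hint reappears in Example #17), so it is kept unfilled here. Purely as an illustrative sketch of the reparameterization trick the hint describes, and not the assignment's solution, sampling with torch.normal could look roughly like this, using hypothetical stand-in tensors for the policy network's outputs:

import torch

# Hypothetical stand-ins for the policy network's outputs.
ts_mean = torch.zeros(4, 2)     # (batch_size, ac_dim)
ts_logstd = torch.zeros(2)      # log std, broadcast over the batch

# Reparameterization trick: mu + sigma * z with z ~ N(0, I).
z = torch.normal(torch.zeros_like(ts_mean), torch.ones_like(ts_mean))
ts_sampled_ac = ts_mean + torch.exp(ts_logstd) * z   # shape (4, 2)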
Example #17
Source File: train_pg_f18.py    From berkeleydeeprlcourse-homework-pytorch with MIT License
def sample_action(self, ob_no):
        """
            Build the method used for sampling action from the policy distribution
    
            arguments:
                ob_no: (batch_size, self.ob_dim)

            returns:
                sampled_ac: 
                    if discrete: (batch_size)
                    if continuous: (batch_size, self.ac_dim)

            Hint: for the continuous case, use the reparameterization trick:
                 The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
        
                      mu + sigma * z,         z ~ N(0, I)
        
                 This reduces the problem to just sampling z. (Hint: use torch.normal!)
        """
        ts_ob_no = torch.from_numpy(ob_no).float()
        
        raise NotImplementedError
        if self.discrete:
            ts_logits_na = self.policy_net(ts_ob_no)
            # YOUR_CODE_HERE
            ts_sampled_ac = None
        else:
            ts_mean, ts_logstd = self.policy_net(ts_ob_no)
            # YOUR_CODE_HERE
            ts_sampled_ac = None

        sampled_ac = ts_sampled_ac.numpy()
        return sampled_ac

Example #18
Source File: CycleGAN_bayes_z.py    From Bayesian-CycleGAN with MIT License
def get_prior(self, parameters, dataset_size):
        prior_loss = Variable(torch.zeros((1))).cuda()
        for param in parameters:
            prior_loss += torch.mean(param * param)
        return prior_loss / dataset_size

    # def get_noise(self, parameters, alpha, dataset_size):
    #     noise_loss = Variable(torch.zeros((1))).cuda()
    #     noise_std = np.sqrt(2 * alpha)
    #     for param in parameters:
    #         noise = Variable(torch.normal(std=torch.ones(param.size()))).cuda()
    #         noise_loss += torch.sum(param*noise*noise_std)
    #     return noise_loss / dataset_size 
Example #19
Source File: trainer.py    From multi-categorical-gans with BSD 3-Clause "New" or "Revised" License
def add_noise_to_code(code, noise_radius):
    if noise_radius > 0:
        means = torch.zeros_like(code)
        gauss_noise = torch.normal(means, noise_radius)
        return code + to_cuda_if_available(Variable(gauss_noise))
    else:
        return code 
Example #20
Source File: ctgan.py    From SDGym with MIT License
def sample(self, n):
        self.generator.eval()

        output_info = self.transformer.output_info
        steps = n // self.batch_size + 1
        data = []
        for i in range(steps):
            mean = torch.zeros(self.batch_size, self.embedding_dim)
            std = mean + 1
            fakez = torch.normal(mean=mean, std=std).to(self.device)

            condvec = self.cond_generator.sample_zero(self.batch_size)
            if condvec is None:
                pass
            else:
                c1 = condvec
                c1 = torch.from_numpy(c1).to(self.device)
                fakez = torch.cat([fakez, c1], dim=1)

            fake = self.generator(fakez)
            fakeact = apply_activate(fake, output_info)
            data.append(fakeact.detach().cpu().numpy())

        data = np.concatenate(data, axis=0)
        data = data[:n]
        return self.transformer.inverse_transform(data, None) 
Example #21
Source File: pytorch_util.py    From oyster with MIT License
def normal(*args, **kwargs):
    return torch.normal(*args, **kwargs).to(device) 
Example #22
Source File: dense_transform.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def __call__(self, img):
        if self._alphastd == 0.:
            return img

        alpha = torch.normal(torch.zeros(3), self._alphastd)
        rgb = (self._eigvec * alpha * self._eigval).sum(dim=1)
        return img + rgb.view(3, 1, 1) 
Example #23
Source File: models.py    From cvpr2018-hnd with MIT License
def truncated_normal(model):
    std = math.sqrt(2./(model.in_features + model.out_features))
    if model.bias is not None:
        model.bias.data.zero_()
    model.weight.data.normal_(std=std)
    truncate_me = (model.weight.data > 2.*std) | (model.weight.data < -2.*std)
    while truncate_me.sum() > 0:
        model.weight.data[truncate_me] = torch.normal(std=std*torch.ones(truncate_me.sum()))
        truncate_me = (model.weight.data > 2.*std) | (model.weight.data < -2.*std)
    return model 
Example #24
Source File: synthesizer.py    From CTGAN with MIT License
def sample(self, n):
        """Sample data similar to the training data.

        Args:
            n (int):
                Number of rows to sample.

        Returns:
            numpy.ndarray or pandas.DataFrame
        """

        steps = n // self.batch_size + 1
        data = []
        for i in range(steps):
            mean = torch.zeros(self.batch_size, self.embedding_dim)
            std = mean + 1
            fakez = torch.normal(mean=mean, std=std).to(self.device)

            condvec = self.cond_generator.sample_zero(self.batch_size)
            if condvec is None:
                pass
            else:
                c1 = condvec
                c1 = torch.from_numpy(c1).to(self.device)
                fakez = torch.cat([fakez, c1], dim=1)

            fake = self.generator(fakez)
            fakeact = self._apply_activate(fake)
            data.append(fakeact.detach().cpu().numpy())

        data = np.concatenate(data, axis=0)
        data = data[:n]

        return self.transformer.inverse_transform(data, None) 
Example #25
Source File: distributions.py    From oyster with MIT License
def sample(self):
            return torch.normal(self.mean, self.std) 
Example #26
Source File: pytorch_util.py    From oac-explore with MIT License
def normal(*args, **kwargs):
    return torch.normal(*args, **kwargs).to(device) 
Example #27
Source File: rlmodule.py    From tatk with Apache License 2.0
def select_action(self, s, sample=True):
        """
        :param s: [s_dim]
        :return: [a_dim]
        """
        # forward to get action mean and log_std
        # [s_dim] => [a_dim]
        a_mean, a_log_std = self.forward(s)

        # randomly sample from a normal distribution whose mean and std come from the policy network.
        # [a_dim]
        a = torch.normal(a_mean, a_log_std.exp()) if sample else a_mean

        return a 
Example #28
Source File: rlmodule.py    From tatk with Apache License 2.0
def select_action(self, s, sample=True):
        """
        :param s: [s_dim]
        :return: [1]
        """
        # forward to get action probs
        # [s_dim] => [a_dim]
        a_weights = self.forward(s)
        a_probs = torch.softmax(a_weights, 0)

        # randomly sample from the categorical distribution defined by the policy network's action probabilities.
        # [a_dim] => [1]
        a = a_probs.multinomial(1) if sample else a_probs.argmax(0, True)

        return a 
Example #29
Source File: transforms.py    From deep-person-reid with MIT License
def __call__(self, tensor):
        if random.uniform(0, 1) > self.p:
            return tensor
        alpha = torch.normal(mean=torch.zeros_like(self.eig_val)) * 0.1
        quatity = torch.mm(self.eig_val * alpha, self.eig_vec)
        tensor = tensor + quatity.view(3, 1, 1)
        return tensor 
Example #30
Source File: model.py    From honk with MIT License
def truncated_normal(tensor, std_dev=0.01):
    tensor.zero_()
    tensor.normal_(std=std_dev)
    while torch.sum(torch.abs(tensor) > 2 * std_dev) > 0:
        t = tensor[torch.abs(tensor) > 2 * std_dev]
        t.zero_()
        tensor[torch.abs(tensor) > 2 * std_dev] = torch.normal(t, std=std_dev)