Python torch.randint_like() Examples

The following are 8 code examples of torch.randint_like(), drawn from open-source projects; each example notes its source file, project, and license. You may also want to check out the other available functions and classes of the module torch.
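Before the project examples, a minimal sketch of the call itself (shapes and values chosen arbitrarily): torch.randint_like(input, low=0, high) returns a tensor with the same shape, dtype, and device as input, filled with integers drawn uniformly from [low, high).

import torch

template = torch.zeros(2, 3, dtype=torch.int64)
a = torch.randint_like(template, 10)        # values in [0, 10)
b = torch.randint_like(template, -5, 5)     # values in [-5, 5)
# on a floating-point template the result holds float-valued integers
c = torch.randint_like(torch.zeros(4), 2)   # e.g. tensor([1., 0., 0., 1.])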
Example #1
Source File: train.py    From Hadamard-Matrix-for-hashing with MIT License
def Hash_center_multilables(labels, Hash_center): # label.shape: [batch_size, num_class], Hash_center.shape: [num_class, hash_bits]
    is_start = True
    for label in labels:
        one_labels = (label == 1).nonzero()  # find the position of 1 in label
        #if len(one_labels) == 0:    # in the nus_wide dataset, some images' labels are all zero; we ignore these images
            #Center_mean = torch.zeros((1, Hash_center.size(1))) # let its hash center be zero
        #else:
        one_labels = one_labels.squeeze(1)
        Center_mean = torch.mean(Hash_center[one_labels], dim=0)
        Center_mean[Center_mean<0] = -1
        Center_mean[Center_mean>0] = 1
        random_center = torch.randint_like(Hash_center[0], 2)  # a random binary vector in {0, 1} with the same shape as one hash center
        random_center[random_center==0] = -1   # the random binary vector becomes {-1, 1}
        Center_mean[Center_mean == 0] = random_center[Center_mean == 0]  # shape: [hash_bit]
        Center_mean = Center_mean.view(1, -1) # shape:[1,hash_bit]

        if is_start:  # the first time
            hash_center = Center_mean
            is_start = False
        else:
            hash_center = torch.cat((hash_center, Center_mean), 0)
            #hash_center = torch.stack((hash_center, Center_mean), dim=0)

    return hash_center 
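A hypothetical usage sketch for the function above (all tensor values made up): each row of labels selects which hash centers to average, and bit positions where the mean is exactly 0 are tie-broken with a random ±1 drawn via torch.randint_like.

labels = torch.tensor([[1., 0., 1.], [0., 1., 0.]])        # [batch_size=2, num_class=3]
Hash_center = torch.randint(0, 2, (3, 8)).float() * 2 - 1  # [num_class=3, hash_bits=8], entries in {-1, 1}
centers = Hash_center_multilables(labels, Hash_center)     # shape [2, 8]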
Example #2
Source File: utils.py    From NLP_Toolkit with Apache License 2.0
def word_dropout_raw(x, l, unk_drop_prob, rand_drop_prob, vocab):
    if not unk_drop_prob and not rand_drop_prob:
        return x

    assert unk_drop_prob + rand_drop_prob <= 1

    noise = torch.rand(x.size(), dtype=torch.float).to(x.device)
    pos_idx = torch.arange(x.size(1)).unsqueeze(0).expand_as(x).to(x.device)
    token_mask = pos_idx < l.unsqueeze(1)

    x2 = x.clone()
    
    # drop to <unk> token
    if unk_drop_prob:
        unk_idx = vocab.stoi['<unk>']
        unk_drop_mask = (noise < unk_drop_prob) & token_mask
        x2.masked_fill_(unk_drop_mask, unk_idx)

    # drop to a random token
    if rand_drop_prob:
        rand_drop_mask = (noise > 1 - rand_drop_prob) & token_mask
        rand_tokens = torch.randint_like(x, len(vocab))
        rand_tokens.masked_fill_(~rand_drop_mask, 0)  # ~mask, since boolean masks do not support 1 - mask in recent PyTorch
        x2.masked_fill_(rand_drop_mask, 0)
        x2 = x2 + rand_tokens
    
    return x2 
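A hedged usage sketch; vocab stands for a torchtext-style vocabulary exposing len(vocab) and vocab.stoi['<unk>'], whose exact type depends on the project.

x = torch.randint(1, 100, (2, 6))  # token ids, shape [batch_size, seq_len]
l = torch.tensor([6, 4])           # true lengths of the two sequences
x_noisy = word_dropout_raw(x, l, unk_drop_prob=0.1, rand_drop_prob=0.1, vocab=vocab)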
Example #3
Source File: utils.py    From NLP_Toolkit with Apache License 2.0
def rand_dropout_(x, l, drop_prob, vocab_size):
    noise = torch.rand(x.size(), dtype=torch.float).to(x.device)
    pos_idx = torch.arange(x.size(1)).unsqueeze(0).expand_as(x).to(x.device)
    token_mask = pos_idx < l.unsqueeze(1)
    rand_drop_mask = (noise < drop_prob) & token_mask
    rand_tokens = torch.randint_like(x, vocab_size)
    rand_tokens.masked_fill_(~rand_drop_mask, 0)  # ~mask, not 1 - mask, for boolean tensors
    x.masked_fill_(rand_drop_mask, 0)
    x += rand_tokens 
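This is the same replacement trick as Example #2, applied in place: dropped positions are zeroed in x, rand_tokens is zeroed everywhere else, and the final addition writes a random token id into exactly the dropped positions.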
Example #4
Source File: hessian.py    From PyHessian with GNU General Public License v3.0
def trace(self, maxIter=100, tol=1e-3):
    """
    Compute the trace of the Hessian using Hutchinson's method.
    maxIter: maximum number of iterations used to compute the trace
    tol: relative tolerance used to decide convergence
    """

    device = self.device
    trace_vhv = []
    trace = 0.

    for i in range(maxIter):
        self.model.zero_grad()
        # generate Rademacher random vectors: draw {0, 1} entries with
        # torch.randint_like, then map 0 -> -1 to obtain {-1, 1}
        v = [
            torch.randint_like(p, high=2, device=device)
            for p in self.params
        ]
        for v_i in v:
            v_i[v_i == 0] = -1

        if self.full_dataset:
            _, Hv = self.dataloader_hv_product(v)
        else:
            Hv = hessian_vector_product(self.gradsH, self.params, v)
        trace_vhv.append(group_product(Hv, v).cpu().item())
        # stop once the running mean of v^T H v stabilizes
        if abs(np.mean(trace_vhv) - trace) / (trace + 1e-6) < tol:
            return trace_vhv
        else:
            trace = np.mean(trace_vhv)

    return trace_vhv
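For intuition, a minimal self-contained sketch of the same Hutchinson estimator on an explicit matrix (not part of PyHessian): tr(A) is approximated by averaging v^T A v over Rademacher vectors v built with torch.randint_like.

import torch

A = torch.randn(10, 10)
A = A + A.t()                                  # symmetric test matrix
template = torch.empty(10)
estimates = []
for _ in range(1000):
    v = torch.randint_like(template, high=2)   # entries in {0, 1}
    v[v == 0] = -1                             # Rademacher: entries in {-1, 1}
    estimates.append(v @ (A @ v))              # unbiased estimate of tr(A)
print(torch.stack(estimates).mean().item(), torch.trace(A).item())  # close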
Example #5
Source File: get_neg_batch.py    From lightKG with Apache License 2.0
def get_neg_batch(head, tail, entity_num):
    neg_head = head.clone()
    neg_tail = tail.clone()
    if random.random() > 0.5:
        # corrupt the heads: shift each entity id by a random offset, modulo entity_num
        offset_tensor = torch.randint_like(neg_head, entity_num)
        neg_head = (neg_head + offset_tensor) % entity_num
    else:
        # corrupt the tails the same way
        offset_tensor = torch.randint_like(neg_tail, entity_num)
        neg_tail = (neg_tail + offset_tensor) % entity_num
    return neg_head, neg_tail
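A hypothetical usage sketch (entity ids made up; the function assumes import random and import torch are in scope). Exactly one side of each (head, tail) pair is perturbed by a random offset, possibly 0, modulo entity_num.

head = torch.tensor([0, 5, 7])  # head entity ids of a triple batch
tail = torch.tensor([2, 3, 9])  # tail entity ids
neg_head, neg_tail = get_neg_batch(head, tail, entity_num=10)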
Example #6
Source File: test_deterministic.py    From ignite with BSD 3-Clause "New" or "Revised" License
def __getitem__(self, i):
    dp = self.data[i]
    r = torch.randint_like(dp, -100, 100) if self.enabled else 0.0
    return dp + r * 0.01
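Here the integer noise drawn in [-100, 100) is scaled by 0.01, so each data point is jittered by roughly ±1 at most; the test toggles self.enabled to switch the perturbation on and off.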
Example #7
Source File: eps_greedy.py    From vel with MIT License
def forward(self, actions, batch_info=None):
    if batch_info is None:
        # Just take the final value if there is no batch info
        epsilon = self.epsilon_schedule.value(1.0)
    else:
        epsilon = self.epsilon_schedule.value(batch_info['progress'])

    random_samples = torch.randint_like(actions, self.action_space.n)
    selector = torch.rand_like(random_samples, dtype=torch.float32)

    # Actions with noise applied
    noisy_actions = torch.where(selector > epsilon, actions, random_samples)

    return noisy_actions
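A minimal standalone sketch of the same epsilon-greedy selection with a fixed epsilon (action ids and the action count are made up):

import torch

actions = torch.tensor([3, 1, 2, 0])                     # greedy action ids
n_actions, epsilon = 4, 0.1
random_samples = torch.randint_like(actions, n_actions)  # uniform random action ids
selector = torch.rand_like(random_samples, dtype=torch.float32)
# keep the greedy action with probability 1 - epsilon, else act randomly
noisy = torch.where(selector > epsilon, actions, random_samples)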
Example #8
Source File: test_scvi.py    From scVI with MIT License
def test_zinb_distribution():
    theta = 100.0 + torch.rand(size=(2,))
    mu = 15.0 * torch.ones_like(theta)
    pi = torch.randn_like(theta)
    x = torch.randint_like(mu, high=20)
    log_p_ref = log_zinb_positive(x, mu, theta, pi)

    dist = ZeroInflatedNegativeBinomial(mu=mu, theta=theta, zi_logits=pi)
    log_p_zinb = dist.log_prob(x)
    assert (log_p_ref - log_p_zinb).abs().max().item() <= 1e-8

    torch.manual_seed(0)
    s1 = dist.sample((100,))
    assert s1.shape == (100, 2)
    s2 = dist.sample(sample_shape=(4, 3))
    assert s2.shape == (4, 3, 2)

    log_p_ref = log_nb_positive(x, mu, theta)
    dist = NegativeBinomial(mu=mu, theta=theta)
    log_p_nb = dist.log_prob(x)
    assert (log_p_ref - log_p_nb).abs().max().item() <= 1e-8

    s1 = dist.sample((1000,))
    assert s1.shape == (1000, 2)
    assert (s1.mean(0) - mu).abs().mean() <= 1e0
    assert (s1.std(0) - (mu + mu * mu / theta) ** 0.5).abs().mean() <= 1e0

    size = (50, 3)
    theta = 100.0 + torch.rand(size=size)
    mu = 15.0 * torch.ones_like(theta)
    pi = torch.randn_like(theta)
    x = torch.randint_like(mu, high=20)
    dist1 = ZeroInflatedNegativeBinomial(mu=mu, theta=theta, zi_logits=pi)
    dist2 = NegativeBinomial(mu=mu, theta=theta)
    assert dist1.log_prob(x).shape == size
    assert dist2.log_prob(x).shape == size

    with pytest.raises(ValueError):
        ZeroInflatedNegativeBinomial(mu=-mu, theta=theta, zi_logits=pi)
    with pytest.warns(UserWarning):
        dist1.log_prob(-x)  # ensures neg values raise warning
    with pytest.warns(UserWarning):
        dist2.log_prob(0.5 * x)  # ensures float values raise warning
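Note that torch.randint_like(mu, high=20) inherits mu's floating dtype, so x holds float-valued integer counts; 0.5 * x then produces genuinely non-integer values, which is exactly what the final warning check exercises.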