Python torch.randint() Examples

The following are 30 code examples of torch.randint(), drawn from open-source projects. Each example lists its source file, project, and license. You may also want to check out all available functions and classes of the module torch.
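Before the examples, here is a minimal sketch of the basic signature (variable names are illustrative): torch.randint(low=0, high, size) returns a tensor of the given size filled with integers drawn uniformly from [low, high), so high is exclusive, and the default dtype is torch.int64.

import torch

# 2x3 tensor of integers in [0, 10); high is exclusive, default dtype is int64
x = torch.randint(0, 10, (2, 3))

# low defaults to 0, so this is equivalent to torch.randint(0, 5, (4,))
y = torch.randint(5, (4,))

# dtype (and device/generator) can be set explicitly
z = torch.randint(0, 2, (8,), dtype=torch.float32)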
Example #1
Source File: main.py    From Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch with MIT License (7 votes)
def get_batch(source, i, train):
    if train:
        i = torch.randint(low=0, high=(len(source) - args.bptt), size=(1,)).long().item()
        seq_len = args.bptt
        target = source[i + 1:i + 1 + seq_len].t()
    else:
        seq_len = min(args.bptt, len(source) - 1 - i)
        target = source[i + seq_len, :]

    data = source[i:i + seq_len].t()

    data_mask = (data != pad).unsqueeze(-2)
    target_mask = make_std_mask(data.long())

    # reshape target to match what cross_entropy expects
    target = target.contiguous().view(-1)

    return data, target, data_mask, target_mask 
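In the training branch, torch.randint picks a random starting offset so each call trains on a different bptt-length window (args, pad, and make_std_mask are module-level objects in the original file). A minimal standalone sketch of that random-offset idea, with stand-ins for the example's globals:

import torch

bptt = 35
source = torch.randint(0, 100, (1000, 4))              # fake corpus: 1000 steps, batch of 4
i = torch.randint(0, len(source) - bptt, (1,)).item()  # random window start
data = source[i:i + bptt]                               # input window
target = source[i + 1:i + 1 + bptt]                     # targets shifted by one step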
Example #2
Source File: test_gnn_explainer.py    From pytorch_geometric with MIT License (6 votes)
def test_gnn_explainer():
    model = Net()
    explainer = GNNExplainer(model, log=False)
    assert explainer.__repr__() == 'GNNExplainer()'

    x = torch.randn(8, 3)
    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7],
                               [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6]])
    y = torch.randint(0, 6, (8, ), dtype=torch.long)

    node_feat_mask, edge_mask = explainer.explain_node(2, x, edge_index)
    assert node_feat_mask.size() == (x.size(1), )
    assert node_feat_mask.min() >= 0 and node_feat_mask.max() <= 1
    assert edge_mask.size() == (edge_index.size(1), )
    assert edge_mask.min() >= 0 and edge_mask.max() <= 1

    explainer.visualize_subgraph(2, edge_index, edge_mask, threshold=None)
    explainer.visualize_subgraph(2, edge_index, edge_mask, threshold=0.5)
    explainer.visualize_subgraph(2, edge_index, edge_mask, y=y, threshold=None)
    explainer.visualize_subgraph(2, edge_index, edge_mask, y=y, threshold=0.5) 
Example #3
Source File: test_roberta_embedding.py    From fastNLP with Apache License 2.0 (6 votes)
def test_save_load(self):
        bert_save_test = 'roberta_save_test'
        try:
            os.makedirs(bert_save_test, exist_ok=True)
            vocab = Vocabulary().add_word_lst("this is a test . [SEP] NotInBERT".split())
            embed = RobertaEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_roberta',
                                     word_dropout=0.1,
                                     auto_truncate=True)
            embed.save(bert_save_test)
            load_embed = RobertaEmbedding.load(bert_save_test)
            words = torch.randint(len(vocab), size=(2, 20))
            embed.eval(), load_embed.eval()
            self.assertEqual((embed(words) - load_embed(words)).sum(), 0)
        finally:
            import shutil
            shutil.rmtree(bert_save_test) 
Example #4
Source File: test_sparse_rsgd.py    From geoopt with Apache License 2.0 (6 votes)
def test_adam_poincare(params):
    torch.manual_seed(44)
    manifold = geoopt.PoincareBall()
    ideal = manifold.random(10, 2)
    start = manifold.random(10, 2)
    start = geoopt.ManifoldParameter(start, manifold=manifold)

    def closure():
        idx = torch.randint(10, size=(3,))
        start_select = torch.nn.functional.embedding(idx, start, sparse=True)
        ideal_select = torch.nn.functional.embedding(idx, ideal, sparse=True)
        optim.zero_grad()
        loss = manifold.dist2(start_select, ideal_select).sum()
        loss.backward()
        assert start.grad.is_sparse
        return loss.item()

    optim = geoopt.optim.SparseRiemannianSGD([start], **params)

    for _ in range(2000):
        optim.step(closure)
    np.testing.assert_allclose(start.data, ideal, atol=1e-5, rtol=1e-5) 
Example #5
Source File: CBP.py    From fast-MPN-COV with MIT License (6 votes)
def __init__(self, thresh=1e-8, projDim=8192, input_dim=512):
         super(CBP, self).__init__()
         self.thresh = thresh
         self.projDim = projDim
         self.input_dim = input_dim
         self.output_dim = projDim
         torch.manual_seed(1)
         self.h_ = [
                 torch.randint(0, self.output_dim, (self.input_dim,),dtype=torch.long),
                 torch.randint(0, self.output_dim, (self.input_dim,),dtype=torch.long)
         ]
         self.weights_ = [
             (2 * torch.randint(0, 2, (self.input_dim,)) - 1).float(),
             (2 * torch.randint(0, 2, (self.input_dim,)) - 1).float()
         ]

         indices1 = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                               self.h_[0].reshape(1, -1)), dim=0)
         indices2 = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                               self.h_[1].reshape(1, -1)), dim=0)

         self.sparseM = [
             torch.sparse.FloatTensor(indices1, self.weights_[0], torch.Size([self.input_dim, self.output_dim])).to_dense(),
             torch.sparse.FloatTensor(indices2, self.weights_[1], torch.Size([self.input_dim, self.output_dim])).to_dense(),
         ] 
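Here torch.randint supplies both the random hash indices (h_) and the random ±1 signs (weights_) for the count-sketch projection. Note that torch.sparse.FloatTensor is the legacy constructor; a sketch of building the same sparse matrix with the current API, reusing the indices1 and weights_ defined above, would be:

torch.sparse_coo_tensor(indices1, self.weights_[0],
                        size=(self.input_dim, self.output_dim)).to_dense()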
Example #6
Source File: test_sparse_adam.py    From geoopt with Apache License 2.0 (6 votes)
def test_adam_poincare(params):
    torch.manual_seed(44)
    manifold = geoopt.PoincareBall()
    ideal = manifold.random(10, 2)
    start = manifold.random(10, 2)
    start = geoopt.ManifoldParameter(start, manifold=manifold)

    def closure():
        idx = torch.randint(10, size=(3,))
        start_select = torch.nn.functional.embedding(idx, start, sparse=True)
        ideal_select = torch.nn.functional.embedding(idx, ideal, sparse=True)
        optim.zero_grad()
        loss = manifold.dist2(start_select, ideal_select).sum()
        loss.backward()
        assert start.grad.is_sparse
        return loss.item()

    optim = geoopt.optim.SparseRiemannianAdam([start], **params)

    for _ in range(2000):
        optim.step(closure)
    np.testing.assert_allclose(start.data, ideal, atol=1e-5, rtol=1e-5) 
Example #7
Source File: test_bert_embedding.py    From fastNLP with Apache License 2.0 (6 votes)
def test_save_load(self):
        bert_save_test = 'bert_save_test'
        try:
            os.makedirs(bert_save_test, exist_ok=True)
            vocab = Vocabulary().add_word_lst("this is a test . [SEP] NotInBERT".split())
            embed = BertEmbedding(vocab, model_dir_or_name='test/data_for_tests/embedding/small_bert', word_dropout=0.1,
                                  auto_truncate=True)

            embed.save(bert_save_test)
            load_embed = BertEmbedding.load(bert_save_test)
            words = torch.randint(len(vocab), size=(2, 20))
            embed.eval(), load_embed.eval()
            self.assertEqual((embed(words) - load_embed(words)).sum(), 0)

        finally:
            import shutil
            shutil.rmtree(bert_save_test) 
Example #8
Source File: metapath2vec.py    From pytorch_geometric with MIT License (6 votes)
def neg_sample(self, batch):
        batch = batch.repeat(self.walks_per_node * self.num_negative_samples)

        rws = [batch]
        for i in range(self.walk_length):
            keys = self.metapath[i % len(self.metapath)]
            batch = torch.randint(0, self.num_nodes_dict[keys[-1]],
                                  (batch.size(0), ), dtype=torch.long)
            rws.append(batch)

        rw = torch.stack(rws, dim=-1)
        rw.add_(self.offset.view(1, -1))

        walks = []
        num_walks_per_rw = 1 + self.walk_length + 1 - self.context_size
        for j in range(num_walks_per_rw):
            walks.append(rw[:, j:j + self.context_size])
        return torch.cat(walks, dim=0) 
Example #9
Source File: test_deep_graph_infomax.py    From pytorch_geometric with MIT License (6 votes)
def test_deep_graph_infomax():
    def corruption(z):
        return z + 1

    model = DeepGraphInfomax(
        hidden_channels=16,
        encoder=lambda x: x,
        summary=lambda z, *args: z.mean(dim=0),
        corruption=lambda x: x + 1)

    assert model.__repr__() == 'DeepGraphInfomax(16)'

    x = torch.ones(20, 16)

    pos_z, neg_z, summary = model(x)
    assert pos_z.size() == (20, 16) and neg_z.size() == (20, 16)
    assert summary.size() == (16, )

    loss = model.loss(pos_z, neg_z, summary)
    assert 0 <= loss.item()

    acc = model.test(
        torch.ones(20, 16), torch.randint(10, (20, )), torch.ones(20, 16),
        torch.randint(10, (20, )))
    assert 0 <= acc and acc <= 1 
Example #10
Source File: test_node2vec.py    From pytorch_geometric with MIT License (6 votes)
def test_node2vec():
    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])

    model = Node2Vec(edge_index, embedding_dim=16, walk_length=2,
                     context_size=2)
    assert model.__repr__() == 'Node2Vec(3, 16)'

    z = model(torch.arange(3))
    assert z.size() == (3, 16)

    pos_rw, neg_rw = model.sample(torch.arange(3))

    loss = model.loss(pos_rw, neg_rw)
    assert 0 <= loss.item()

    acc = model.test(torch.ones(20, 16), torch.randint(10, (20, )),
                     torch.ones(20, 16), torch.randint(10, (20, )))
    assert 0 <= acc and acc <= 1 
Example #11
Source File: test_native.py    From PySyft with Apache License 2.0 (6 votes)
def test_encrypt_decrypt(workers):
    bob, alice, james = (workers["bob"], workers["alice"], workers["james"])

    x = torch.randint(10, (1, 5), dtype=torch.float32)
    x_encrypted = x.encrypt(workers=[bob, alice], crypto_provider=james, base=10)
    x_decrypted = x_encrypted.decrypt()
    assert torch.all(torch.eq(x_decrypted, x))

    x = torch.randint(10, (1, 5), dtype=torch.float32)
    x_encrypted = x.encrypt(workers=[bob, alice], crypto_provider=james)
    x_decrypted = x_encrypted.decrypt()
    assert torch.all(torch.eq(x_decrypted, x))

    x = torch.randint(10, (1, 5), dtype=torch.float32)
    public, private = syft.frameworks.torch.he.paillier.keygen()
    x_encrypted = x.encrypt(protocol="paillier", public_key=public)
    x_decrypted = x_encrypted.decrypt(protocol="paillier", private_key=private)
    assert torch.all(torch.eq(x_decrypted, x)) 
Example #12
Source File: primitives.py    From PySyft with Apache License 2.0 (6 votes)
def build_fss_keys(self, type_op):
        """
        The builder to generate functional keys for Function Secret Sharing (FSS)
        """
        if type_op == "eq":
            fss_class = sy.frameworks.torch.mpc.fss.DPF
        elif type_op == "comp":
            fss_class = sy.frameworks.torch.mpc.fss.DIF
        else:
            raise ValueError(f"type_op {type_op} not valid")

        n = sy.frameworks.torch.mpc.fss.n

        def build_separate_fss_keys(n_party, n_instances=100):
            assert (
                n_party == 2
            ), f"The FSS protocol only works for 2 workers, {n_party} were provided."
            alpha, s_00, s_01, *CW = fss_class.keygen(n_values=n_instances)
            # simulate sharing TODO clean this
            mask = th.randint(0, 2 ** n, alpha.shape)
            return [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]

        return build_separate_fss_keys 
Example #13
Source File: test.py    From centerpose with MIT License (6 votes)
def example_mdpooling():
    input = torch.randn(2, 32, 64, 64).cuda()
    input.requires_grad = True
    batch_inds = torch.randint(2, (20, 1)).cuda().float()
    x = torch.randint(256, (20, 1)).cuda().float()
    y = torch.randint(256, (20, 1)).cuda().float()
    w = torch.randint(64, (20, 1)).cuda().float()
    h = torch.randint(64, (20, 1)).cuda().float()
    rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)

    # mdformable pooling (V2)
    dpooling = DCNPooling(spatial_scale=1.0 / 4,
                          pooled_size=7,
                          output_dim=32,
                          no_trans=False,
                          group_size=1,
                          trans_std=0.1,
                          deform_fc_dim=1024).cuda()

    dout = dpooling(input, rois)
    target = dout.new(*dout.size())
    target.data.uniform_(-0.1, 0.1)
    error = (target - dout).mean()
    error.backward()
    print(dout.shape) 
Example #14
Source File: pointnet_util.py    From Pointnet_Pointnet2_pytorch with MIT License (6 votes)
def farthest_point_sample(xyz, npoint):
    """
    Input:
        xyz: pointcloud data, [B, N, 3]
        npoint: number of samples
    Return:
        centroids: sampled pointcloud index, [B, npoint]
    """
    device = xyz.device
    B, N, C = xyz.shape
    centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
    distance = torch.ones(B, N).to(device) * 1e10
    farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
    batch_indices = torch.arange(B, dtype=torch.long).to(device)
    for i in range(npoint):
        centroids[:, i] = farthest
        centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
        dist = torch.sum((xyz - centroid) ** 2, -1)
        mask = dist < distance
        distance[mask] = dist[mask]
        farthest = torch.max(distance, -1)[1]
    return centroids 
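In this example torch.randint picks a random initial point per batch, and the loop then greedily adds the point farthest from the already-selected set. A minimal usage sketch of the function above (shapes are illustrative):

import torch

xyz = torch.rand(4, 1024, 3)           # B=4 clouds, N=1024 points, C=3 coords
idx = farthest_point_sample(xyz, 128)  # indices of 128 sampled points per cloud
print(idx.shape)                       # torch.Size([4, 128])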
Example #15
Source File: model.py    From LiteralE with Apache License 2.0 (6 votes)
def forward_attr(self, e, mode='left'):
        assert mode == 'left' or mode == 'right'

        e_emb = self.emb_e(e.view(-1))

        # Sample one numerical literal for each entity
        e_attr = self.numerical_literals[e.view(-1)]
        m = len(e_attr)
        idxs = torch.randint(self.n_num_lit, size=(m,)).cuda()
        attr_emb = self.emb_attr(idxs)

        inputs = torch.cat([e_emb, attr_emb], dim=1)
        pred = self.attr_net_left(inputs) if mode == 'left' else self.attr_net_right(inputs)
        target = e_attr[range(m), idxs]

        return pred, target 
Example #16
Source File: test_cutout.py    From torchbearer with MIT License (6 votes)
def test_cutmix(self):
        random_image = torch.rand(5, 3, 100, 100)
        state = {torchbearer.X: random_image, torchbearer.Y_TRUE: torch.randint(10, (5,)).long(), torchbearer.DEVICE: 'cpu'}
        torch.manual_seed(7)
        co = CutMix(0.25, classes=10)
        co.on_sample(state)
        reg_img = state[torchbearer.X].view(-1)

        x = [72, 83, 18, 96, 40]
        y = [8, 17, 62, 30, 66]
        perm = [0, 4, 3, 2, 1]
        sz = 3

        rnd = random_image.clone().numpy()
        known_cut = random_image.clone().numpy()
        known_cut[0, :, y[0]-sz//2:y[0]+sz//2, x[0]-sz//2:x[0]+sz//2] = rnd[perm[0], :, y[0]-sz//2:y[0]+sz//2, x[0]-sz//2:x[0]+sz//2]
        known_cut[1, :, y[1]-sz//2:y[1]+sz//2, x[1]-sz//2:x[1]+sz//2] = rnd[perm[1], :, y[1]-sz//2:y[1]+sz//2, x[1]-sz//2:x[1]+sz//2]
        known_cut[2, :, y[2]-sz//2:y[2]+sz//2, x[2]-sz//2:x[2]+sz//2] = rnd[perm[2], :, y[2]-sz//2:y[2]+sz//2, x[2]-sz//2:x[2]+sz//2]
        known_cut[3, :, y[3]-sz//2:y[3]+sz//2, x[3]-sz//2:x[3]+sz//2] = rnd[perm[3], :, y[3]-sz//2:y[3]+sz//2, x[3]-sz//2:x[3]+sz//2]
        known_cut[4, :, y[4]-sz//2:y[4]+sz//2, x[4]-sz//2:x[4]+sz//2] = rnd[perm[4], :, y[4]-sz//2:y[4]+sz//2, x[4]-sz//2:x[4]+sz//2]
        known_cut = torch.from_numpy(known_cut)
        known_cut = known_cut.view(-1)

        diff = (torch.abs(known_cut-reg_img) > 1e-4).any()
        self.assertTrue(diff.item() == 0) 
Example #17
Source File: denoising_dataset.py    From fairseq with MIT License (6 votes)
def add_insertion_noise(self, tokens, p):
        if p == 0.0:
            return tokens

        num_tokens = len(tokens)
        n = int(math.ceil(num_tokens * p))

        noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
        noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
        noise_mask[noise_indices] = 1
        result = torch.LongTensor(n + len(tokens)).fill_(-1)

        num_random = int(math.ceil(n * self.random_ratio))
        result[noise_indices[num_random:]] = self.mask_idx
        result[noise_indices[:num_random]] = torch.randint(low=1, high=len(self.vocab), size=(num_random,))

        result[~noise_mask] = tokens

        assert (result >= 0).all()
        return result 
Example #18
Source File: fps.py    From dgl with Apache License 2.0 (6 votes)
def forward(self, pos):
        r"""Memory allocation and sampling

        Parameters
        ----------
        pos : tensor
            The positional tensor of shape (B, N, C)

        Returns
        -------
        tensor of shape (B, self.npoints)
            The sampled indices in each batch.
        """
        device = pos.device
        B, N, C = pos.shape
        pos = pos.reshape(-1, C)
        dist = th.zeros((B * N), dtype=pos.dtype, device=device)
        start_idx = th.randint(0, N - 1, (B, ), dtype=th.long, device=device)
        result = th.zeros((self.npoints * B), dtype=th.long, device=device)
        farthest_point_sampler(pos, B, self.npoints, dist, start_idx, result)
        return result.reshape(B, self.npoints) 
Example #19
Source File: inside_cnns.py    From torchbearer with MIT License (5 votes)
def _targets_hot(self, state):
        targets = torch.randint(high=self.nclasses, size=(1, 1)).long().to(state[torchbearer.DEVICE])
        if self.target is not RANDOM:
            targets[0][0] = self.target
        for key in self._target_keys:
            state[key] = targets
        targets_hot = torch.zeros(1, self.nclasses).to(state[torchbearer.DEVICE])
        targets_hot.scatter_(1, targets, 1)
        targets_hot = targets_hot.ge(0.5)
        return targets_hot 
Example #20
Source File: denoising_dataset.py    From fairseq with MIT License (5 votes)
def add_rolling_noise(self, tokens):
        offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1)
        tokens = torch.cat(
            (tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]),
            dim=0,
        )
        return tokens 
Example #21
Source File: cutout.py    From torchbearer with MIT License (5 votes)
def __call__(self, img):
        """

        Args:
            img (Tensor): Tensor image of size (B, C, H, W).
        Returns:
            Tensor: Image with n_holes of dimension length x length cut out of it.
        """
        b = img.size(0)
        c = img.size(1)
        h = img.size(-2)
        w = img.size(-1)

        mask = torch.ones((b, h, w), device=img.device)

        for n in range(self.n_holes):
            y = torch.randint(h, (b,)).long()
            x = torch.randint(w, (b,)).long()

            y1 = (y - self.height // 2).clamp(0, h).int()
            y2 = (y + self.height // 2).clamp(0, h).int()
            x1 = (x - self.width // 2).clamp(0, w).int()
            x2 = (x + self.width // 2).clamp(0, w).int()

            for batch in range(b):
                mask[batch, y1[batch]: y2[batch], x1[batch]: x2[batch]] = 0

        mask = mask.unsqueeze(1).repeat(1, c, 1, 1)

        return mask 
Example #22
Source File: test_lstm.py    From metal with Apache License 2.0 (5 votes)
def test_lstm_embeddings_freeze(self):
        """Confirm that if embeddings are frozen, they do not change during training"""
        X = torch.randint(1, MAX_INT + 1, (n, SEQ_LEN)).long()
        Y = torch.zeros(n).long()
        needles = np.random.randint(1, SEQ_LEN - 1, n)
        for i in range(n):
            X[i, needles[i]] = MAX_INT + 1
            Y[i] = X[i, needles[i] + 1]

        Xs = self._split_dataset(X)
        Ys = self._split_dataset(Y)

        embed_size = 4
        hidden_size = 10

        for freeze_embs in [True, False]:
            lstm_module = LSTMModule(
                embed_size,
                hidden_size,
                verbose=False,
                encoder_class=EmbeddingsEncoder,
                encoder_kwargs={"vocab_size": MAX_INT + 2, "freeze": freeze_embs},
            )
            em = EndModel(
                k=MAX_INT,
                input_module=lstm_module,
                layer_out_dims=[hidden_size * 2, MAX_INT],
                verbose=False,
            )

            before = lstm_module.encoder.embeddings.weight.clone()
            em.train_model(
                (Xs[0], Ys[0]), valid_data=(Xs[1], Ys[1]), n_epochs=15, verbose=False
            )
            after = lstm_module.encoder.embeddings.weight.clone()

            if freeze_embs:
                self.assertEqual(torch.abs(before - after).sum().item(), 0.0)
            else:
                self.assertNotEqual(torch.abs(before - after).sum().item(), 0.0) 
Example #23
Source File: test_sequence_generator.py    From fairseq with MIT License (5 votes)
def setUp(self):
        self.task, self.parser = get_dummy_task_and_parser()
        eos = self.task.tgt_dict.eos()
        src_tokens = torch.randint(3, 50, (2, 10)).long()
        src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1)
        src_lengths = torch.LongTensor([2, 10])
        self.sample = {
            "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
        }
        TransformerModel.add_args(self.parser)
        args = self.parser.parse_args([])
        args.encoder_layers = 2
        args.decoder_layers = 1
        self.transformer_model = TransformerModel.build_model(args, self.task) 
Example #24
Source File: em.py    From fairseq with MIT License (5 votes)
def initialize_centroids(self):
        """
        Initializes the centroids by sampling random columns from W.
        """

        in_features, out_features = self.W.size()
        indices = torch.randint(
            low=0, high=out_features, size=(self.n_centroids,)
        ).long()
        self.centroids = self.W[:, indices].t()  # (n_centroids x in_features) 
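Because torch.randint samples with replacement, the same column of W can be picked more than once. If distinct centroids were required, a sketch of sampling without replacement (an alternative, not fairseq's code) would be:

indices = torch.randperm(out_features)[:self.n_centroids]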
Example #25
Source File: test.py    From centerpose with MIT License (5 votes)
def check_gradient_dpooling():
    input = torch.randn(2, 3, 5, 5).cuda() * 0.01
    N = 4
    batch_inds = torch.randint(2, (N, 1)).cuda().float()
    x = torch.rand((N, 1)).cuda().float() * 15
    y = torch.rand((N, 1)).cuda().float() * 15
    w = torch.rand((N, 1)).cuda().float() * 10
    h = torch.rand((N, 1)).cuda().float() * 10
    rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
    offset = torch.randn(N, 2, 3, 3).cuda()
    input.requires_grad = True
    offset.requires_grad = True

    spatial_scale = 1.0 / 4
    pooled_size = 3
    output_dim = 3
    no_trans = 0
    group_size = 1
    trans_std = 0.0
    sample_per_part = 4
    part_size = pooled_size

    print('check_gradient_dpooling:',
          gradcheck(dcn_v2_pooling, (input, rois, offset,
                                     spatial_scale,
                                     pooled_size,
                                     output_dim,
                                     no_trans,
                                     group_size,
                                     part_size,
                                     sample_per_part,
                                     trans_std),
                    eps=1e-4)) 
Example #26
Source File: testing.py    From funsor with Apache License 2.0 (5 votes)
def randint(low, high, size):
    backend = get_backend()
    if backend == "torch":
        import torch

        return torch.randint(low, high, size=size)
    else:
        return np.random.randint(low, high, size=size) 
Example #27
Source File: eMixPseudoLabelv1.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License (5 votes)
def create_soft_pslab(self, n_samples, n_classes, dtype='rand'):
        if dtype=='rand': 
            rlabel = torch.randint(0, n_classes, (n_samples,)).long()
            pslab  = one_hot(rlabel, n_classes)
        elif dtype=='zero':
            pslab = torch.zeros(n_samples, n_classes)
        else:
            raise ValueError('Unknown pslab dtype: {}'.format(dtype))
        return pslab.to(self.device) 
Example #28
Source File: test.py    From centerpose with MIT License (5 votes)
def example_dpooling():
    input = torch.randn(2, 32, 64, 64).cuda()
    batch_inds = torch.randint(2, (20, 1)).cuda().float()
    x = torch.randint(256, (20, 1)).cuda().float()
    y = torch.randint(256, (20, 1)).cuda().float()
    w = torch.randint(64, (20, 1)).cuda().float()
    h = torch.randint(64, (20, 1)).cuda().float()
    rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
    offset = torch.randn(20, 2, 7, 7).cuda()
    input.requires_grad = True
    offset.requires_grad = True

    # normal roi_align
    pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
                           pooled_size=7,
                           output_dim=32,
                           no_trans=True,
                           group_size=1,
                           trans_std=0.1).cuda()

    # deformable pooling
    dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
                            pooled_size=7,
                            output_dim=32,
                            no_trans=False,
                            group_size=1,
                            trans_std=0.1).cuda()

    out = pooling(input, rois, offset)
    dout = dpooling(input, rois, offset)
    print(out.shape)
    print(dout.shape)

    target_out = out.new(*out.size())
    target_out.data.uniform_(-0.01, 0.01)
    target_dout = dout.new(*dout.size())
    target_dout.data.uniform_(-0.01, 0.01)
    e = (target_out - out).mean()
    e.backward()
    e = (target_dout - dout).mean()
    e.backward() 
Example #29
Source File: primitives.py    From PySyft with Apache License 2.0 (5 votes)
def build_xor_add_couple(n_party, n_instances=100):
        assert (
            n_party == 2
        ), f"build_xor_add_couple is only implemented for 2 workers, {n_party} were provided."
        r = th.randint(2, size=(n_instances,))
        mask1 = th.randint(2, size=(n_instances,))
        mask2 = th.randint(2, size=(n_instances,))

        return [(r ^ mask1, r - mask2), (mask1, mask2)] 
Example #30
Source File: iTempensv1.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License (5 votes)
def create_soft_pslab(self, n_samples, n_classes, dtype='rand'):
        if dtype=='rand': 
             pslab = torch.randint(0, n_classes, (n_samples,n_classes))
        elif dtype=='zero':
             pslab = torch.zeros(n_samples, n_classes)
        else:
             raise ValueError('Unknown pslab dtype: {}'.format(dtype))
        return pslab.to(self.device)