Python torch.rand() Examples

The following are 30 code examples of torch.rand(). Each example is excerpted from an open-source project; the source file, originating project, and license are noted above it. You may also want to check out all available functions and classes of the torch module.
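torch.rand() returns a tensor filled with random numbers drawn uniformly from [0, 1). As a quick reference before the project examples, here is a minimal sketch of the common call patterns (standard PyTorch API):

import torch

torch.manual_seed(0)                       # seed the RNG for reproducible draws

x = torch.rand(2, 3)                       # shape as separate ints -> 2x3 tensor
y = torch.rand((2, 3))                     # shape as a tuple works the same way
z = torch.rand(2, 3, dtype=torch.float64)  # optional dtype/device keyword args

# scale to an arbitrary range [a, b): a + (b - a) * torch.rand(...)
r = -1 + 2 * torch.rand(5)                 # uniform on [-1, 1)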
Example #1
Source File: util.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_tf2torch(tf_model, torch_model, input_shape, num_rand_inp=10, precision=10**-2):
    """
    Checks consistency of torch and tf models before generating attacks
    :param tf_model: copied tf model
    :param torch_model: torch model to be transferred to tf
    :param input_shape: format Channels x Height x Width
    :param num_rand_inp: number of random inputs to test consistency on
    :param precision: per-input tolerance on the relative error of the outputs
    :return: raises error if the outputs are not consistent
    """
    torch_model.eval()
    rand_x = torch.rand(num_rand_inp, input_shape[0], input_shape[1], input_shape[2])
    tf_op = tf_model.predict(rand_x.numpy())
    torch_op = F.softmax(torch_model(Variable(rand_x))).data.numpy()
    assert tf_op.shape == torch_op.shape, "Mismatch of dimensions of the outputs from tf and torch models"
    assert np.linalg.norm(torch_op - tf_op) / np.linalg.norm(torch_op) <= num_rand_inp * precision, \
        "Outputs of the torch and tensorflow models do not agree"
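A portability note on the call above: on PyTorch 0.4 and later, the Variable wrapper is a no-op and F.softmax expects an explicit dim argument (implicit dimension choice is deprecated), so the torch output is usually computed as in this sketch (dim=1 assumes N x C outputs, as tested here):

torch_op = F.softmax(torch_model(rand_x), dim=1).detach().numpy()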
Example #2
Source File: test_librosa_compatibility.py    From audio with BSD 2-Clause "Simplified" License
def test_griffinlim(self):
        # NOTE: This test is flaky without a fixed random seed
        # See https://github.com/pytorch/audio/issues/382
        torch.random.manual_seed(42)
        tensor = torch.rand((1, 1000))

        n_fft = 400
        ws = 400
        hop = 100
        window = torch.hann_window(ws)
        normalize = False
        momentum = 0.99
        n_iter = 8
        length = 1000
        rand_init = False
        init = 'random' if rand_init else None

        specgram = F.spectrogram(tensor, 0, window, n_fft, hop, ws, 2, normalize).sqrt()
        ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1, normalize,
                              n_iter, momentum, length, rand_init)
        lr_out = librosa.griffinlim(specgram.squeeze(0).numpy(), n_iter=n_iter, hop_length=hop,
                                    momentum=momentum, init=init, length=length)
        lr_out = torch.from_numpy(lr_out).unsqueeze(0)

        self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5) 
Example #3
Source File: data_processor_test.py    From neural-pipeline with MIT License
def test_prediction_output(self):
        model = SimpleModel()
        dp = DataProcessor(model=model)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': torch.rand(1, 3)})
        self.assertIs(type(res), torch.Tensor)

        model = NonStandardIOModel()
        dp = DataProcessor(model=model)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': {'data1': torch.rand(1, 3), 'data2': torch.rand(1, 3)}})
        self.assertIs(type(res), dict)
        self.assertIn('res1', res)
        self.assertIs(type(res['res1']), torch.Tensor)
        self.assertIn('res2', res)
        self.assertIs(type(res['res2']), torch.Tensor) 
Example #4
Source File: attention.py    From TaskBot with GNU General Public License v3.0
def __init__(self, hidden_size, method="concat"):
        """

        Args:
            hidden_size: <int>, hidden size
                         previous hidden state size of decoder
            method: <str>, {"concat"}
                        Attention method

        Notes:
            we use the GRU outputs instead of using encoder t-step
            hidden sates for attention, because the pytorch-GRU hidden_n only
            contains the last time step information.
        """
        super(Attn, self).__init__()
        self.method = method
        self.hidden_size = hidden_size
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        # v is created with torch.rand and then re-initialized in place from
        # N(0, stdv^2), so the uniform values only serve to size the parameter
        self.v = nn.Parameter(torch.rand(hidden_size))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.normal_(mean=0, std=stdv)
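For context, self.v in a "concat" attention module is typically used to reduce the tanh energies to scalar scores, Bahdanau-style. Below is a minimal sketch of how such a score is usually computed; the function name and tensor layout are assumptions for illustration, not part of the project's code:

import torch
import torch.nn as nn

def concat_score(attn: nn.Linear, v: torch.Tensor,
                 hidden: torch.Tensor, encoder_outputs: torch.Tensor) -> torch.Tensor:
    # hidden: [B, H] decoder state; encoder_outputs: [B, T, H] GRU outputs
    seq_len = encoder_outputs.size(1)
    h = hidden.unsqueeze(1).expand(-1, seq_len, -1)                    # [B, T, H]
    energy = torch.tanh(attn(torch.cat((h, encoder_outputs), dim=2)))  # [B, T, H]
    return energy.matmul(v)                                            # [B, T] scores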
Example #5
Source File: data_processor_test.py    From neural-pipeline with MIT License
def test_prediction_train_output(self):
        model = SimpleModel()
        train_config = TrainConfig(model, [], torch.nn.Module(), torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': torch.rand(1, 3)}, is_train=True)
        self.assertIs(type(res), torch.Tensor)

        model = NonStandardIOModel()
        train_config = TrainConfig(model, [], torch.nn.Module(), torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': {'data1': torch.rand(1, 3), 'data2': torch.rand(1, 3)}}, is_train=True)
        self.assertIs(type(res), dict)
        self.assertIn('res1', res)
        self.assertIs(type(res['res1']), torch.Tensor)
        self.assertIn('res2', res)
        self.assertIs(type(res['res2']), torch.Tensor)

        self.assertTrue(model.training)
        self.assertFalse(res['res1'].requires_grad)
        self.assertIsNone(res['res1'].grad)
        self.assertFalse(res['res2'].requires_grad)
        self.assertIsNone(res['res2'].grad) 
Example #6
Source File: data_processor_test.py    From neural-pipeline with MIT License
def test_train(self):
        model = SimpleModel().train()
        train_config = TrainConfig(model, [], torch.nn.Module(), torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)

        self.assertFalse(model.fc.weight.is_cuda)
        self.assertTrue(model.training)
        res = dp.predict({'data': torch.rand(1, 3)}, is_train=True)
        self.assertTrue(model.training)
        self.assertTrue(res.requires_grad)
        self.assertIsNone(res.grad)

        with self.assertRaises(NotImplementedError):
            dp.process_batch({'data': torch.rand(1, 3), 'target': torch.rand(1)}, is_train=True)

        loss = SimpleLoss()
        train_config = TrainConfig(model, [], loss, torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)
        res = dp.process_batch({'data': torch.rand(1, 3), 'target': torch.rand(1)}, is_train=True)
        self.assertTrue(model.training)
        self.assertTrue(loss.module.requires_grad)
        self.assertIsNotNone(loss.module.grad)
        self.assertTrue(np.array_equal(res, loss.res.data.numpy())) 
Example #7
Source File: data_processor_test.py    From neural-pipeline with MIT License
def test_prediction_notrain_output(self):
        model = SimpleModel()
        train_config = TrainConfig(model, [], torch.nn.Module(), torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': torch.rand(1, 3)}, is_train=False)
        self.assertIs(type(res), torch.Tensor)

        model = NonStandardIOModel()
        train_config = TrainConfig(model, [], torch.nn.Module(), torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': {'data1': torch.rand(1, 3), 'data2': torch.rand(1, 3)}}, is_train=False)
        self.assertIs(type(res), dict)
        self.assertIn('res1', res)
        self.assertIs(type(res['res1']), torch.Tensor)
        self.assertIn('res2', res)
        self.assertIs(type(res['res2']), torch.Tensor)

        self.assertFalse(model.training)
        self.assertFalse(res['res1'].requires_grad)
        self.assertIsNone(res['res1'].grad)
        self.assertFalse(res['res2'].requires_grad)
        self.assertIsNone(res['res2'].grad) 
Example #8
Source File: train_config_test.py    From neural-pipeline with MIT License
def test_metrics_group_calculation(self):
        metrics_group_lv1 = MetricsGroup('lvl').add(SimpleMetric())
        metrics_group_lv2 = MetricsGroup('lv2').add(SimpleMetric())
        metrics_group_lv1.add(metrics_group_lv2)

        values = []
        for i in range(10):
            output, target = torch.rand(1, 3), torch.rand(1, 3)
            metrics_group_lv1.calc(output, target)
            values.append(np.linalg.norm(output.numpy() - target.numpy()))

        for metrics_group in [metrics_group_lv1, metrics_group_lv2]:
            for m in metrics_group.metrics():
                for v1, v2 in zip(values, m.get_values()):
                    self.assertAlmostEqual(v1, v2, delta=1e-5)

        metrics_group_lv1.reset()
        self.assertEqual(metrics_group_lv1.metrics()[0].get_values().size, 0)
        self.assertEqual(metrics_group_lv2.metrics()[0].get_values().size, 0) 
Example #9
Source File: auto_crop.py    From L3C-PyTorch with GNU General Public License v3.0
def test_auto_crop():
    import torch
    import pytorch_ext as pe

    for H, W, num_crops_expected in [(10000, 6000, 64),
                                     (4928, 3264, 16),
                                     (2048, 2048, 4),
                                     (1024, 1024, 1),
                                     ]:
        img = (torch.rand(1, 3, H, W) * 255).round().long()
        print(img.shape)
        if num_crops_expected > 1:
            assert needs_crop(img)
            crops = list(iter_crops(img, 2048 * 1024))
            assert len(crops) == num_crops_expected
            pe.assert_equal(stitch(crops), img)
        else:
            pe.assert_equal(next(iter_crops(img, 2048 * 1024)), img) 
Example #10
Source File: train_test.py    From neural-pipeline with MIT License
def test_lr_decaying(self):
        fsm = FileStructManager(base_dir=self.base_dir, is_continue=False)
        model = SimpleModel()
        metrics_processor = MetricsProcessor()
        stages = [TrainStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                for _ in list(range(20))]]), metrics_processor),
                  ValidationStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                     for _ in list(range(20))]]), metrics_processor)]
        trainer = Trainer(TrainConfig(model, stages, SimpleLoss(), torch.optim.SGD(model.parameters(), lr=0.1)),
                          fsm).set_epoch_num(10)

        def target_value_clbk() -> float:
            return 1

        trainer.enable_lr_decaying(0.5, 3, target_value_clbk)
        trainer.train()

        self.assertAlmostEqual(trainer.data_processor().get_lr(), 0.1 * (0.5 ** 3), delta=1e-6) 
Example #11
Source File: test_models.py    From audio with BSD 2-Clause "Simplified" License
def test_waveform(self):
        """Validate the output dimensions of a _UpsampleNetwork block.
        """

        upsample_scales = [5, 5, 8]
        n_batch = 2
        n_time = 200
        n_freq = 100
        n_output = 256
        n_res_block = 10
        n_hidden = 128
        kernel_size = 5

        total_scale = 1
        for upsample_scale in upsample_scales:
            total_scale *= upsample_scale

        model = _UpsampleNetwork(upsample_scales, n_res_block, n_freq, n_hidden, n_output, kernel_size)

        x = torch.rand(n_batch, n_freq, n_time)
        out1, out2 = model(x)

        assert out1.size() == (n_batch, n_freq, total_scale * (n_time - kernel_size + 1))
        assert out2.size() == (n_batch, n_output, total_scale * (n_time - kernel_size + 1)) 
Example #12
Source File: generate_rotations.py    From pointnet-registration-framework with MIT License
def main(args):
    # dataset
    testset = get_datasets(args)
    batch_size = len(testset)

    amp = args.deg * math.pi / 180.0
    w = torch.randn(batch_size, 3)
    w = w / w.norm(p=2, dim=1, keepdim=True) * amp
    t = torch.rand(batch_size, 3) * args.max_trans

    if args.format == 'wv':
        # the output: twist vectors.
        R = ptlk.so3.exp(w) # (N, 3) --> (N, 3, 3)
        G = torch.zeros(batch_size, 4, 4)
        G[:, 3, 3] = 1
        G[:, 0:3, 0:3] = R
        G[:, 0:3, 3] = t

        x = ptlk.se3.log(G) # --> (N, 6)
    else:
        # rotation-vector and translation-vector
        x = torch.cat((w, t), dim=1)

    numpy.savetxt(args.outfile, x, delimiter=',') 
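The example draws rotation directions with torch.randn (standard normal, then normalized to a fixed magnitude) and translations with torch.rand (uniform on [0, 1), then scaled). A minimal sketch of the two generators and the scaling idiom, with a hypothetical max_trans of 0.5:

import torch

w = torch.randn(4, 3)        # standard normal; normalized above to unit directions
t = torch.rand(4, 3) * 0.5   # uniform on [0, 0.5), like `t` above with max_trans = 0.5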
Example #13
Source File: test_librosa_compatibility.py    From audio with BSD 2-Clause "Simplified" License
def test_amplitude_to_DB(self):
        spec = torch.rand((6, 201))

        amin = 1e-10
        db_multiplier = 0.0
        top_db = 80.0

        # Power to DB
        multiplier = 10.0

        ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
        lr_out = librosa.core.power_to_db(spec.numpy())
        lr_out = torch.from_numpy(lr_out)

        self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)

        # Amplitude to DB
        multiplier = 20.0

        ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
        lr_out = librosa.core.amplitude_to_db(spec.numpy())
        lr_out = torch.from_numpy(lr_out)

        self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5) 
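For reference, amplitude_to_DB computes (up to the optional top_db clipping) roughly multiplier * log10(clamp(x, min=amin)) - multiplier * db_multiplier, which is why multiplier is 10.0 for power spectrograms and 20.0 for amplitude spectrograms. A minimal sketch, assuming that formula:

import torch

def amplitude_to_db_sketch(x, multiplier, amin=1e-10, db_multiplier=0.0):
    # 10 * log10 for power, 20 * log10 for amplitude; amin guards against log(0)
    db = multiplier * torch.log10(torch.clamp(x, min=amin))
    return db - multiplier * db_multiplier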
Example #14
Source File: torchscript_consistency_impl.py    From audio with BSD 2-Clause "Simplified" License
def test_griffinlim(self):
        def func(tensor):
            n_fft = 400
            ws = 400
            hop = 200
            window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
            power = 2.
            normalize = False
            momentum = 0.99
            n_iter = 32
            length = 1000
            rand_int = False
            return F.griffinlim(tensor, window, n_fft, hop, ws, power, normalize, n_iter, momentum, length, rand_int)

        tensor = torch.rand((1, 201, 6))
        self._assert_consistency(func, tensor) 
Example #15
Source File: torchscript_consistency_impl.py    From audio with BSD 2-Clause "Simplified" License
def test_flanger(self):
        torch.random.manual_seed(40)
        waveform = torch.rand(2, 100) - 0.5

        def func(tensor):
            delay = 0.8
            depth = 0.88
            regen = 3.0
            width = 0.23
            speed = 1.3
            phase = 60.
            sample_rate = 44100
            return F.flanger(tensor, sample_rate, delay, depth, regen, width, speed,
                             phase, modulation='sinusoidal', interpolation='linear')

        self._assert_consistency(func, waveform) 
Example #16
Source File: net_utils.py    From cascade-rcnn_Pytorch with MIT License
def vis_det_and_mask(im, class_name, dets, masks, thresh=0.8):
    """Visual debugging of detections."""
    num_dets = np.minimum(10, dets.shape[0])
    colors_mask = random_colors(num_dets)
    colors_bbox = np.round(np.random.rand(num_dets, 3) * 255)
    # sort rois according to the coordinates, draw upper bbox first
    draw_mask = np.zeros(im.shape[:2], dtype=np.uint8)

    for i in range(1):  # NOTE: only the first detection is drawn
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        mask = masks[i, :, :]
        full_mask = unmold_mask(mask, bbox, im.shape)

        score = dets[i, -1]
        if score > thresh:
            word_width = len(class_name)
            cv2.rectangle(im, bbox[0:2], bbox[2:4], colors_bbox[i], 2)
            cv2.rectangle(im, bbox[0:2], (bbox[0] + 18 + word_width*8, bbox[1]+15), colors_bbox[i], thickness=cv2.FILLED)
            apply_mask(im, full_mask, draw_mask, colors_mask[i], 0.5)
            draw_mask += full_mask
            cv2.putText(im, '%s' % (class_name), (bbox[0] + 5, bbox[1] + 12),
                        cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), thickness=1)
    return im 
Example #17
Source File: utils.py    From cvpr2018-hnd with MIT License
def relabel_batch(rate, labels, T):
    root = T['root']
    parents = T['parents']
    relabel_rate = rate / 100.
    relabels = labels.clone()
    relabel_me = (relabels != root)
    while relabel_me.sum():
        relabel_me &= (torch.rand(relabels.size(0)) < relabel_rate)
        for i in relabel_me.nonzero().view(-1):
            k = relabels[i]
            if len(parents[k]) == 0:
                relabel_me[i] = False
            elif len(parents[k]) == 1:
                relabels[i] = parents[k][0]
            else:
                relabels[i] = parents[k][int(torch.rand(1)*len(parents[k]))]
    return relabels 
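The int(torch.rand(1) * n) idiom above draws a uniform random index in [0, n); the same draw is usually written with torch.randint, as in this small sketch:

import torch

n = 4                               # e.g. the number of parents of a label
idx = int(torch.rand(1) * n)        # the idiom used in relabel_batch above
idx2 = int(torch.randint(n, (1,)))  # idiomatic equivalent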
Example #18
Source File: train_test.py    From neural-pipeline with MIT License
def test_saving_states(self):
        fsm = FileStructManager(base_dir=self.base_dir, is_continue=False)
        model = SimpleModel()
        metrics_processor = MetricsProcessor()
        stages = [TrainStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                for _ in list(range(20))]]), metrics_processor)]
        trainer = Trainer(TrainConfig(model, stages, SimpleLoss(), torch.optim.SGD(model.parameters(), lr=0.1)),
                          fsm).set_epoch_num(3)

        checkpoint_file = os.path.join(self.base_dir, 'checkpoints', 'last', 'last_checkpoint.zip')

        def on_epoch_end():
            self.assertTrue(os.path.exists(checkpoint_file))
            os.remove(checkpoint_file)

        trainer.add_on_epoch_end_callback(on_epoch_end)
        trainer.train() 
Example #19
Source File: torchscript_consistency_impl.py    From audio with BSD 2-Clause "Simplified" License
def test_MelSpectrogram(self):
        tensor = torch.rand((1, 1000))
        self._assert_consistency(T.MelSpectrogram(), tensor) 
Example #20
Source File: torchscript_consistency_impl.py    From audio with BSD 2-Clause "Simplified" License
def test_MFCC(self):
        tensor = torch.rand((1, 1000))
        self._assert_consistency(T.MFCC(), tensor) 
Example #21
Source File: torchscript_consistency_impl.py    From audio with BSD 2-Clause "Simplified" License
def test_amplitude_to_DB(self):
        def func(tensor):
            multiplier = 10.0
            amin = 1e-10
            db_multiplier = 0.0
            top_db = 80.0
            return F.amplitude_to_DB(tensor, multiplier, amin, db_multiplier, top_db)

        tensor = torch.rand((6, 201))
        self._assert_consistency(func, tensor) 
Example #22
Source File: torchscript_consistency_impl.py    From audio with BSD 2-Clause "Simplified" License
def test_MelScale(self):
        spec_f = torch.rand((1, 6, 201))
        self._assert_consistency(T.MelScale(), spec_f) 
Example #23
Source File: torchscript_consistency_impl.py    From audio with BSD 2-Clause "Simplified" License
def test_GriffinLim(self):
        tensor = torch.rand((1, 201, 6))
        self._assert_consistency(T.GriffinLim(length=1000, rand_init=False), tensor) 
Example #24
Source File: torchscript_consistency_impl.py    From audio with BSD 2-Clause "Simplified" License
def test_Spectrogram(self):
        tensor = torch.rand((1, 1000))
        self._assert_consistency(T.Spectrogram(), tensor) 
Example #25
Source File: transforms.py    From pointnet-registration-framework with MIT License
def __call__(self, tensor):
        # tensor: [N, 3]
        w = torch.Tensor([0, 0, 1]).view(1, 3) * torch.rand(1) * self.mag

        g = so3.exp(w).to(tensor) # [1, 3, 3]

        p1 = so3.transform(g, tensor)
        return p1 
Example #26
Source File: transforms.py    From pointnet-registration-framework with MIT License
def generate_transform(self):
        # return: a twist-vector
        amp = self.mag
        if self.randomly:
            amp = torch.rand(1, 1) * self.mag
        x = torch.randn(1, 6)
        x = x / x.norm(p=2, dim=1, keepdim=True) * amp

        return x # [1, 6] 
Example #27
Source File: functional_impl.py    From audio with BSD 2-Clause "Simplified" License
def test_simple(self):
        """
        Create a very basic signal,
        Then make a simple 4th order delay
        The output should be same as the input but shifted
        """

        torch.random.manual_seed(42)
        waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)
        b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)
        a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)
        output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)

        self.assertEqual(output_waveform[:, 3:], waveform[:, 0:-3], atol=1e-5, rtol=1e-5) 
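For intuition: with a_coeffs = [1, 0, 0, 0] and b_coeffs = [0, 0, 0, 1], the filter's difference equation y[n] = sum_k b[k] * x[n - k] reduces to y[n] = x[n - 3], a pure three-sample delay, which is exactly what the final assert checks. A minimal sketch of the expected output:

import torch

x = torch.rand(1, 10)
y = torch.zeros_like(x)
y[:, 3:] = x[:, :-3]   # three-sample delay, matching output_waveform[:, 3:] == waveform[:, 0:-3]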
Example #28
Source File: test_batch_consistency.py    From audio with BSD 2-Clause "Simplified" License
def test_griffinlim(self):
        n_fft = 400
        ws = 400
        hop = 200
        window = torch.hann_window(ws)
        power = 2
        normalize = False
        momentum = 0.99
        n_iter = 32
        length = 1000
        tensor = torch.rand((1, 201, 6))
        self.assert_batch_consistencies(
            F.griffinlim, tensor, window, n_fft, hop, ws, power, normalize, n_iter, momentum, length, 0, atol=5e-5
        ) 
Example #29
Source File: datasets.py    From pointnet-registration-framework with MIT License
def generate_perturbations(batch_size, mag, randomly=False):
        if randomly:
            amp = torch.rand(batch_size, 1) * mag
        else:
            amp = mag
        x = torch.randn(batch_size, 6)
        x = x / x.norm(p=2, dim=1, keepdim=True) * amp
        return x.numpy() 
Example #30
Source File: torchscript_consistency_impl.py    From audio with BSD 2-Clause "Simplified" License
def test_gain(self):
        def func(tensor):
            gainDB = 2.0
            return F.gain(tensor, gainDB)

        tensor = torch.rand((1, 1000))
        self._assert_consistency(func, tensor)