Python torch.rand() Examples

The following are 30 code examples of torch.rand(), collected from open-source projects. You can follow the link above each example to the original project or source file. You may also want to check out all available functions and classes of the torch module.
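torch.rand returns a tensor filled with numbers drawn uniformly from [0, 1); the shape is passed either as separate integers or as a single tuple, and dtype/device can be set by keyword. A minimal sketch of the basic call patterns:

import torch

x = torch.rand(2, 3)                       # 2x3 tensor, uniform on [0, 1)
y = torch.rand((4,), dtype=torch.float64)  # shape given as a tuple, explicit dtype
z = 10 * torch.rand(5)                     # rescaled to [0, 10)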
Example #1
Source Project: neural-fingerprinting   Author: StephanZheng   File: util.py   License: BSD 3-Clause "New" or "Revised" License
def test_tf2torch(tf_model, torch_model, input_shape, num_rand_inp=10, precision=10**-2):
    """
    Checks consistency of torch and tf models before generating attacks
    :param tf_model: copied tf model
    :param torch_model: torch model to be transferred to tf
    :param input_shape: format Channels x Height x Width
    :param num_rand_inp: number of random inputs to test consistency on
    :return: raises error if the outputs are not consistent
    """
    torch_model.eval()
    rand_x = torch.rand(num_rand_inp, input_shape[0], input_shape[1], input_shape[2])
    tf_op = tf_model.predict(rand_x.numpy())
    torch_op = F.softmax(torch_model(Variable(rand_x))).data.numpy()
    assert tf_op.shape == torch_op.shape, "Mismatch of dimensions of the outputs from tf and torch models"
    assert np.linalg.norm(torch_op - tf_op) / np.linalg.norm(torch_op) <= num_rand_inp * precision, \
        "Outputs of the torch and tensorflow models do not agree"
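The closing assertion compares the two outputs by relative L2 error, so the tolerance scales with the magnitude of the reference output. The same check works for any pair of arrays; a minimal standalone sketch (outputs_agree is a hypothetical helper):

import numpy as np

def outputs_agree(a, b, tol=1e-2):
    # relative L2 error between two arrays, measured against b
    return np.linalg.norm(a - b) / np.linalg.norm(b) <= tol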
Example #2
Source Project: neural-pipeline   Author: toodef   File: data_processor_test.py   License: MIT License
def test_prediction_output(self):
        model = SimpleModel()
        dp = DataProcessor(model=model)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': torch.rand(1, 3)})
        self.assertIs(type(res), torch.Tensor)

        model = NonStandardIOModel()
        dp = DataProcessor(model=model)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': {'data1': torch.rand(1, 3), 'data2': torch.rand(1, 3)}})
        self.assertIs(type(res), dict)
        self.assertIn('res1', res)
        self.assertIs(type(res['res1']), torch.Tensor)
        self.assertIn('res2', res)
        self.assertIs(type(res['res2']), torch.Tensor) 
Example #3
Source Project: neural-pipeline   Author: toodef   File: data_processor_test.py   License: MIT License
def test_prediction_train_output(self):
        model = SimpleModel()
        train_config = TrainConfig(model, [], torch.nn.Module(), torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': torch.rand(1, 3)}, is_train=True)
        self.assertIs(type(res), torch.Tensor)

        model = NonStandardIOModel()
        train_config = TrainConfig(model, [], torch.nn.Module(), torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': {'data1': torch.rand(1, 3), 'data2': torch.rand(1, 3)}}, is_train=True)
        self.assertIs(type(res), dict)
        self.assertIn('res1', res)
        self.assertIs(type(res['res1']), torch.Tensor)
        self.assertIn('res2', res)
        self.assertIs(type(res['res2']), torch.Tensor)

        self.assertTrue(model.training)
        self.assertFalse(res['res1'].requires_grad)
        self.assertIsNone(res['res1'].grad)
        self.assertFalse(res['res2'].requires_grad)
        self.assertIsNone(res['res2'].grad) 
Example #4
Source Project: neural-pipeline   Author: toodef   File: data_processor_test.py   License: MIT License
def test_prediction_notrain_output(self):
        model = SimpleModel()
        train_config = TrainConfig(model, [], torch.nn.Module(), torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': torch.rand(1, 3)}, is_train=False)
        self.assertIs(type(res), torch.Tensor)

        model = NonStandardIOModel()
        train_config = TrainConfig(model, [], torch.nn.Module(), torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)
        self.assertFalse(model.fc.weight.is_cuda)
        res = dp.predict({'data': {'data1': torch.rand(1, 3), 'data2': torch.rand(1, 3)}}, is_train=False)
        self.assertIs(type(res), dict)
        self.assertIn('res1', res)
        self.assertIs(type(res['res1']), torch.Tensor)
        self.assertIn('res2', res)
        self.assertIs(type(res['res2']), torch.Tensor)

        self.assertFalse(model.training)
        self.assertFalse(res['res1'].requires_grad)
        self.assertIsNone(res['res1'].grad)
        self.assertFalse(res['res2'].requires_grad)
        self.assertIsNone(res['res2'].grad) 
Example #5
Source Project: neural-pipeline   Author: toodef   File: data_processor_test.py   License: MIT License
def test_train(self):
        model = SimpleModel().train()
        train_config = TrainConfig(model, [], torch.nn.Module(), torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)

        self.assertFalse(model.fc.weight.is_cuda)
        self.assertTrue(model.training)
        res = dp.predict({'data': torch.rand(1, 3)}, is_train=True)
        self.assertTrue(model.training)
        self.assertTrue(res.requires_grad)
        self.assertIsNone(res.grad)

        with self.assertRaises(NotImplementedError):
            dp.process_batch({'data': torch.rand(1, 3), 'target': torch.rand(1)}, is_train=True)

        loss = SimpleLoss()
        train_config = TrainConfig(model, [], loss, torch.optim.SGD(model.parameters(), lr=0.1))
        dp = TrainDataProcessor(train_config=train_config)
        res = dp.process_batch({'data': torch.rand(1, 3), 'target': torch.rand(1)}, is_train=True)
        self.assertTrue(model.training)
        self.assertTrue(loss.module.requires_grad)
        self.assertIsNotNone(loss.module.grad)
        self.assertTrue(np.array_equal(res, loss.res.data.numpy())) 
Example #6
Source Project: neural-pipeline   Author: toodef   File: train_config_test.py   License: MIT License
def test_metrics_group_calculation(self):
        metrics_group_lv1 = MetricsGroup('lvl').add(SimpleMetric())
        metrics_group_lv2 = MetricsGroup('lv2').add(SimpleMetric())
        metrics_group_lv1.add(metrics_group_lv2)

        values = []
        for i in range(10):
            output, target = torch.rand(1, 3), torch.rand(1, 3)
            metrics_group_lv1.calc(output, target)
            values.append(np.linalg.norm(output.numpy() - target.numpy()))

        for metrics_group in [metrics_group_lv1, metrics_group_lv2]:
            for m in metrics_group.metrics():
                for v1, v2 in zip(values, m.get_values()):
                    self.assertAlmostEqual(v1, v2, delta=1e-5)

        metrics_group_lv1.reset()
        self.assertEqual(metrics_group_lv1.metrics()[0].get_values().size, 0)
        self.assertEqual(metrics_group_lv2.metrics()[0].get_values().size, 0) 
Example #7
Source Project: neural-pipeline   Author: toodef   File: train_test.py   License: MIT License
def test_lr_decaying(self):
        fsm = FileStructManager(base_dir=self.base_dir, is_continue=False)
        model = SimpleModel()
        metrics_processor = MetricsProcessor()
        stages = [TrainStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                for _ in list(range(20))]]), metrics_processor),
                  ValidationStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                     for _ in list(range(20))]]), metrics_processor)]
        trainer = Trainer(TrainConfig(model, stages, SimpleLoss(), torch.optim.SGD(model.parameters(), lr=0.1)),
                          fsm).set_epoch_num(10)

        def target_value_clbk() -> float:
            return 1

        trainer.enable_lr_decaying(0.5, 3, target_value_clbk)
        trainer.train()

        self.assertAlmostEqual(trainer.data_processor().get_lr(), 0.1 * (0.5 ** 3), delta=1e-6) 
Example #8
Source Project: neural-pipeline   Author: toodef   File: train_test.py   License: MIT License
def test_saving_states(self):
        fsm = FileStructManager(base_dir=self.base_dir, is_continue=False)
        model = SimpleModel()
        metrics_processor = MetricsProcessor()
        stages = [TrainStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                for _ in list(range(20))]]), metrics_processor)]
        trainer = Trainer(TrainConfig(model, stages, SimpleLoss(), torch.optim.SGD(model.parameters(), lr=0.1)),
                          fsm).set_epoch_num(3)

        checkpoint_file = os.path.join(self.base_dir, 'checkpoints', 'last', 'last_checkpoint.zip')

        def on_epoch_end():
            self.assertTrue(os.path.exists(checkpoint_file))
            os.remove(checkpoint_file)

        trainer.add_on_epoch_end_callback(on_epoch_end)
        trainer.train() 
Example #9
Source Project: cvpr2018-hnd   Author: kibok90   File: utils.py   License: MIT License
def relabel_batch(rate, labels, T):
    root = T['root']
    parents = T['parents']
    relabel_rate = rate / 100.
    relabels = labels.clone()
    relabel_me = (relabels != root)
    while relabel_me.sum():
        relabel_me &= (torch.rand(relabels.size(0)) < relabel_rate)
        for i in relabel_me.nonzero().view(-1):
            k = relabels[i]
            if len(parents[k]) == 0:
                relabel_me[i] = False
            elif len(parents[k]) == 1:
                relabels[i] = parents[k][0]
            else:
                relabels[i] = parents[k][int(torch.rand(1)*len(parents[k]))]
    return relabels 
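Comparing torch.rand against a probability, as in the loop above, draws a Bernoulli mask: each element is True with probability relabel_rate. A minimal sketch of the idiom:

import torch

torch.manual_seed(0)
p = 0.3
mask = torch.rand(5) < p  # each element is True with probability p
# equivalent: torch.bernoulli(torch.full((5,), p)).bool()
# picking a random index, as in the last branch: int(torch.rand(1) * n) ~ torch.randint(n, (1,))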
Example #10
Source Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: net_utils.py   License: MIT License
def vis_det_and_mask(im, class_name, dets, masks, thresh=0.8):
    """Visual debugging of detections."""
    num_dets = np.minimum(10, dets.shape[0])
    colors_mask = random_colors(num_dets)
    colors_bbox = np.round(np.random.rand(num_dets, 3) * 255)
    # sort rois according to the coordinates, draw upper bbox first
    draw_mask = np.zeros(im.shape[:2], dtype=np.uint8)

    for i in range(1):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        mask = masks[i, :, :]
        full_mask = unmold_mask(mask, bbox, im.shape)

        score = dets[i, -1]
        if score > thresh:
            word_width = len(class_name)
            cv2.rectangle(im, bbox[0:2], bbox[2:4], colors_bbox[i], 2)
            cv2.rectangle(im, bbox[0:2], (bbox[0] + 18 + word_width*8, bbox[1]+15), colors_bbox[i], thickness=cv2.FILLED)
            apply_mask(im, full_mask, draw_mask, colors_mask[i], 0.5)
            draw_mask += full_mask
            cv2.putText(im, '%s' % (class_name), (bbox[0] + 5, bbox[1] + 12), cv2.FONT_HERSHEY_PLAIN,
                        1.0, (255, 255, 255), thickness=1)
    return im 
Example #11
Source Project: audio   Author: pytorch   File: test_librosa_compatibility.py   License: BSD 2-Clause "Simplified" License
def test_griffinlim(self):
        # NOTE: This test is flaky without a fixed random seed
        # See https://github.com/pytorch/audio/issues/382
        torch.random.manual_seed(42)
        tensor = torch.rand((1, 1000))

        n_fft = 400
        ws = 400
        hop = 100
        window = torch.hann_window(ws)
        normalize = False
        momentum = 0.99
        n_iter = 8
        length = 1000
        rand_init = False
        init = 'random' if rand_init else None

        specgram = F.spectrogram(tensor, 0, window, n_fft, hop, ws, 2, normalize).sqrt()
        ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1, normalize,
                              n_iter, momentum, length, rand_init)
        lr_out = librosa.griffinlim(specgram.squeeze(0).numpy(), n_iter=n_iter, hop_length=hop,
                                    momentum=momentum, init=init, length=length)
        lr_out = torch.from_numpy(lr_out).unsqueeze(0)

        self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5) 
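torch.rand draws from the global random generator, so the manual_seed call at the top of the test makes the random input reproducible across runs. A minimal sketch:

import torch

torch.manual_seed(42)
a = torch.rand(3)
torch.manual_seed(42)
b = torch.rand(3)
assert torch.equal(a, b)  # same seed, same samples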
Example #12
Source Project: audio   Author: pytorch   File: test_librosa_compatibility.py   License: BSD 2-Clause "Simplified" License
def test_amplitude_to_DB(self):
        spec = torch.rand((6, 201))

        amin = 1e-10
        db_multiplier = 0.0
        top_db = 80.0

        # Power to DB
        multiplier = 10.0

        ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
        lr_out = librosa.core.power_to_db(spec.numpy())
        lr_out = torch.from_numpy(lr_out)

        self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)

        # Amplitude to DB
        multiplier = 20.0

        ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
        lr_out = librosa.core.amplitude_to_db(spec.numpy())
        lr_out = torch.from_numpy(lr_out)

        self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5) 
Example #13
Source Project: audio   Author: pytorch   File: torchscript_consistency_impl.py   License: BSD 2-Clause "Simplified" License
def test_griffinlim(self):
        def func(tensor):
            n_fft = 400
            ws = 400
            hop = 200
            window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
            power = 2.
            normalize = False
            momentum = 0.99
            n_iter = 32
            length = 1000
            rand_init = False
            return F.griffinlim(tensor, window, n_fft, hop, ws, power, normalize, n_iter, momentum, length, rand_init)

        tensor = torch.rand((1, 201, 6))
        self._assert_consistency(func, tensor) 
Example #14
Source Project: audio   Author: pytorch   File: torchscript_consistency_impl.py   License: BSD 2-Clause "Simplified" License
def test_flanger(self):
        torch.random.manual_seed(40)
        waveform = torch.rand(2, 100) - 0.5

        def func(tensor):
            delay = 0.8
            depth = 0.88
            regen = 3.0
            width = 0.23
            speed = 1.3
            phase = 60.
            sample_rate = 44100
            return F.flanger(tensor, sample_rate, delay, depth, regen, width, speed,
                             phase, modulation='sinusoidal', interpolation='linear')

        self._assert_consistency(func, waveform) 
Example #15
Source Project: audio   Author: pytorch   File: test_models.py   License: BSD 2-Clause "Simplified" License
def test_waveform(self):
        """Validate the output dimensions of a _UpsampleNetwork block.
        """

        upsample_scales = [5, 5, 8]
        n_batch = 2
        n_time = 200
        n_freq = 100
        n_output = 256
        n_res_block = 10
        n_hidden = 128
        kernel_size = 5

        total_scale = 1
        for upsample_scale in upsample_scales:
            total_scale *= upsample_scale

        model = _UpsampleNetwork(upsample_scales, n_res_block, n_freq, n_hidden, n_output, kernel_size)

        x = torch.rand(n_batch, n_freq, n_time)
        out1, out2 = model(x)

        assert out1.size() == (n_batch, n_freq, total_scale * (n_time - kernel_size + 1))
        assert out2.size() == (n_batch, n_output, total_scale * (n_time - kernel_size + 1)) 
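With the values above, total_scale = 5 * 5 * 8 = 200 and the kernel trims the time axis to n_time - kernel_size + 1 = 196, so both outputs span 200 * 196 = 39200 time steps: out1 has shape (2, 100, 39200) and out2 has shape (2, 256, 39200).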
Example #16
Source Project: L3C-PyTorch   Author: fab-jul   File: auto_crop.py   License: GNU General Public License v3.0
def test_auto_crop():
    import torch
    import pytorch_ext as pe

    for H, W, num_crops_expected in [(10000, 6000, 64),
                                     (4928, 3264, 16),
                                     (2048, 2048, 4),
                                     (1024, 1024, 1),
                                     ]:
        img = (torch.rand(1, 3, H, W) * 255).round().long()
        print(img.shape)
        if num_crops_expected > 1:
            assert needs_crop(img)
            crops = list(iter_crops(img, 2048 * 1024))
            assert len(crops) == num_crops_expected
            pe.assert_equal(stitch(crops), img)
        else:
            pe.assert_equal(next(iter_crops(img, 2048 * 1024)), img) 
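Scaling torch.rand by 255 and rounding, as above, yields integers in [0, 255] that are only approximately uniform (the endpoints 0 and 255 are half as likely after rounding); torch.randint draws exactly uniform integers directly. A minimal sketch:

import torch

img = torch.randint(0, 256, (1, 3, 64, 64))  # uniform integers in [0, 255], dtype int64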
Example #17
Source Project: TaskBot   Author: EvilPsyCHo   File: attention.py   License: GNU General Public License v3.0
def __init__(self, hidden_size, method="concat"):
        """

        Args:
            hidden_size: <int>, hidden size
                         previous hidden state size of decoder
            method: <str>, {"concat"}
                        Attention method

        Notes:
            we use the GRU outputs instead of using encoder t-step
            hidden states for attention, because the pytorch-GRU hidden_n only
            contains the last time step information.
        """
        super(Attn, self).__init__()
        self.method = method
        self.hidden_size = hidden_size
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        self.v = nn.Parameter(torch.rand(hidden_size))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.normal_(mean=0, std=stdv) 
Example #18
Source Project: pointnet-registration-framework   Author: vinits5   File: generate_rotations.py   License: MIT License
def main(args):
    # dataset
    testset = get_datasets(args)
    batch_size = len(testset)

    amp = args.deg * math.pi / 180.0
    w = torch.randn(batch_size, 3)
    w = w / w.norm(p=2, dim=1, keepdim=True) * amp
    t = torch.rand(batch_size, 3) * args.max_trans

    if args.format == 'wv':
        # the output: twist vectors.
        R = ptlk.so3.exp(w) # (N, 3) --> (N, 3, 3)
        G = torch.zeros(batch_size, 4, 4)
        G[:, 3, 3] = 1
        G[:, 0:3, 0:3] = R
        G[:, 0:3, 3] = t

        x = ptlk.se3.log(G) # --> (N, 6)
    else:
        # rotation-vector and translation-vector
        x = torch.cat((w, t), dim=1)

    numpy.savetxt(args.outfile, x, delimiter=',') 
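Here torch.rand(batch_size, 3) * args.max_trans rescales the uniform samples from [0, 1) to [0, max_trans); the general recipe for an arbitrary interval [a, b) is a + (b - a) * torch.rand(...). A minimal sketch:

import torch

a, b = -1.0, 1.0
u = a + (b - a) * torch.rand(4, 3)  # uniform samples in [a, b)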
Example #19
Source Project: mmdetection   Author: open-mmlab   File: test_sampler.py   License: Apache License 2.0
def test_ohem_sampler_empty_gt():

    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.LongTensor([])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)

    context = _context_for_ohem()

    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)

    feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]

    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)

    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) 
Example #20
Source Project: mmdetection   Author: open-mmlab   File: test_sampler.py   License: Apache License 2.0
def test_ohem_sampler_empty_pred():
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_labels = torch.LongTensor([1, 2, 2, 3])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)

    context = _context_for_ohem()

    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)

    feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]

    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)

    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) 
Example #21
Source Project: mmdetection   Author: open-mmlab   File: test_heads.py   License: Apache License 2.0
def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels):
    """Create sample results that can be passed to BBoxHead.get_targets."""
    num_imgs = 1
    feat = torch.rand(1, 1, 3, 3)
    assign_config = dict(
        type='MaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        min_pos_iou=0.5,
        ignore_iof_thr=-1)
    sampler_config = dict(
        type='RandomSampler',
        num=512,
        pos_fraction=0.25,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    bbox_assigner = build_assigner(assign_config)
    bbox_sampler = build_sampler(sampler_config)
    gt_bboxes_ignore = [None for _ in range(num_imgs)]
    sampling_results = []
    for i in range(num_imgs):
        assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i],
                                             gt_bboxes_ignore[i], gt_labels[i])
        sampling_result = bbox_sampler.sample(
            assign_result,
            proposal_list[i],
            gt_bboxes[i],
            gt_labels[i],
            feats=feat)
        sampling_results.append(sampling_result)

    return sampling_results 
Example #22
Source Project: deep-learning-note   Author: wdxtub   File: 14_mlp_module.py   License: MIT License
def __init__(self, **kwargs):
        super(FancyMLP, self).__init__(**kwargs)

        # constant weight that is not trained
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20) 
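rand_weight here is a plain tensor attribute, so optimizers ignore it, but model.to(device) will not move it either; registering it as a buffer keeps a constant tensor in step with the module's device and state_dict. A minimal sketch (FixedWeightMLP is a hypothetical module for illustration):

import torch
import torch.nn as nn

class FixedWeightMLP(nn.Module):
    def __init__(self):
        super().__init__()
        # constant random weight: saved in state_dict, moved by .to()/.cuda(), never trained
        self.register_buffer('rand_weight', torch.rand(20, 20))
        self.linear = nn.Linear(20, 20)

    def forward(self, x):
        return self.linear(x) @ self.rand_weight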
Example #23
Source Project: neural-pipeline   Author: toodef   File: data_processor_test.py   License: MIT License
def dummy_input():
        return torch.rand(3) 
Example #24
Source Project: neural-pipeline   Author: toodef   File: data_processor_test.py   License: MIT License
def test_predict(self):
        model = SimpleModel().train()
        dp = DataProcessor(model=model)
        self.assertFalse(model.fc.weight.is_cuda)
        self.assertTrue(model.training)
        res = dp.predict({'data': torch.rand(1, 3)})
        self.assertFalse(model.training)
        self.assertFalse(res.requires_grad)
        self.assertIsNone(res.grad) 
Example #25
Source Project: neural-pipeline   Author: toodef   File: train_config_test.py   License: MIT License
def test_metric(self):
        metric = SimpleMetric()

        for i in range(10):
            output, target = torch.rand(1, 3), torch.rand(1, 3)
            res = metric.calc(output, target)[0]
            self.assertAlmostEqual(res, np.linalg.norm(output.numpy() - target.numpy()), delta=1e-5)

        vals = metric.get_values()
        self.assertEqual(vals.size, 0)

        values = []
        for i in range(10):
            output, target = torch.rand(1, 3), torch.rand(1, 3)
            metric._calc(output, target)
            values.append(np.linalg.norm(output.numpy() - target.numpy()))

        vals = metric.get_values()
        self.assertEqual(vals.size, len(values))
        for v1, v2 in zip(values, vals):
            self.assertAlmostEqual(v1, v2, delta=1e-5)

        metric.reset()
        self.assertEqual(metric.get_values().size, 0)

        self.assertEqual(metric.name(), "SimpleMetric") 
Example #26
Source Project: neural-pipeline   Author: toodef   File: train_config_test.py   License: MIT License
def test_metrics_processor_calculation(self):
        metrics_group_lv11 = MetricsGroup('lvl').add(SimpleMetric())
        metrics_group_lv21 = MetricsGroup('lv2').add(SimpleMetric())
        metrics_group_lv11.add(metrics_group_lv21)
        metrics_processor = MetricsProcessor()
        metrics_group_lv12 = MetricsGroup('lvl').add(SimpleMetric())
        metrics_group_lv22 = MetricsGroup('lv2').add(SimpleMetric())
        metrics_group_lv12.add(metrics_group_lv22)
        metrics_processor.add_metrics_group(metrics_group_lv11)
        metrics_processor.add_metrics_group(metrics_group_lv12)
        m1, m2 = SimpleMetric(), SimpleMetric()
        metrics_processor.add_metric(m1)
        metrics_processor.add_metric(m2)

        values = []
        for i in range(10):
            output, target = torch.rand(1, 3), torch.rand(1, 3)
            metrics_processor.calc_metrics(output, target)
            values.append(np.linalg.norm(output.numpy() - target.numpy()))

        for metrics_group in [metrics_group_lv11, metrics_group_lv21, metrics_group_lv12, metrics_group_lv22]:
            for m in metrics_group.metrics():
                for v1, v2 in zip(values, m.get_values()):
                    self.assertAlmostEqual(v1, v2, delta=1e-5)
        for m in [m1, m2]:
            for v1, v2 in zip(values, m.get_values()):
                self.assertAlmostEqual(v1, v2, delta=1e-5)

        metrics_processor.reset_metrics()
        self.assertEqual(metrics_group_lv11.metrics()[0].get_values().size, 0)
        self.assertEqual(metrics_group_lv21.metrics()[0].get_values().size, 0)
        self.assertEqual(metrics_group_lv12.metrics()[0].get_values().size, 0)
        self.assertEqual(metrics_group_lv22.metrics()[0].get_values().size, 0)
        self.assertEqual(m1.get_values().size, 0)
        self.assertEqual(m2.get_values().size, 0) 
Example #27
Source Project: neural-pipeline   Author: toodef   File: train_config_test.py   License: MIT License
def test_train_stage(self):
        data_producer = DataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)} for _ in list(range(20))]])
        metrics_processor = FakeMetricsProcessor()
        train_stage = TrainStage(data_producer, metrics_processor).enable_hard_negative_mining(0.1)

        fsm = FileStructManager(base_dir=self.base_dir, is_continue=False)
        model = SimpleModel()
        Trainer(TrainConfig(model, [train_stage], SimpleLoss(), torch.optim.SGD(model.parameters(), lr=1)), fsm) \
            .set_epoch_num(1).train()

        self.assertEqual(metrics_processor.call_num, len(data_producer)) 
Example #28
Source Project: neural-pipeline   Author: toodef   File: train_test.py   License: MIT License
def test_train(self):
        fsm = FileStructManager(base_dir=self.base_dir, is_continue=False)
        model = SimpleModel()
        metrics_processor = MetricsProcessor()
        stages = [TrainStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                for _ in list(range(20))]]), metrics_processor),
                  ValidationStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                     for _ in list(range(20))]]), metrics_processor)]
        Trainer(TrainConfig(model, stages, SimpleLoss(), torch.optim.SGD(model.parameters(), lr=1)), fsm) \
            .set_epoch_num(1).train() 
Example #29
Source Project: neural-pipeline   Author: toodef   File: train_test.py   License: MIT License
def test_saving_best_states(self):
        fsm = FileStructManager(base_dir=self.base_dir, is_continue=False)
        model = SimpleModel()
        metrics_processor = MetricsProcessor()
        stages = [TrainStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                for _ in list(range(20))]]), metrics_processor)]
        trainer = Trainer(TrainConfig(model, stages, SimpleLoss(), torch.optim.SGD(model.parameters(), lr=0.1)),
                          fsm).set_epoch_num(3).enable_best_states_saving(lambda: np.mean(stages[0].get_losses()))

        checkpoint_file = os.path.join(self.base_dir, 'checkpoints', 'last', 'last_checkpoint.zip')
        best_checkpoint_file = os.path.join(self.base_dir, 'checkpoints', 'best', 'best_checkpoint.zip')

        class Val:
            def __init__(self):
                self.v = None

        first_val = Val()

        def on_epoch_end(val):
            if val.v is not None and np.mean(stages[0].get_losses()) < val.v:
                self.assertTrue(os.path.exists(best_checkpoint_file))
                os.remove(best_checkpoint_file)
                val.v = np.mean(stages[0].get_losses())
                return

            val.v = np.mean(stages[0].get_losses())

            self.assertTrue(os.path.exists(checkpoint_file))
            self.assertFalse(os.path.exists(best_checkpoint_file))
            os.remove(checkpoint_file)

        trainer.add_on_epoch_end_callback(lambda: on_epoch_end(first_val))
        trainer.train() 
Example #30
Source Project: neural-pipeline   Author: toodef   File: predict_test.py   License: MIT License
def test_predict(self):
        model = SimpleModel()
        fsm = FileStructManager(base_dir=self.base_dir, is_continue=False)

        metrics_processor = MetricsProcessor()
        stages = [TrainStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                for _ in list(range(20))]]), metrics_processor),
                  ValidationStage(TestDataProducer([[{'data': torch.rand(1, 3), 'target': torch.rand(1)}
                                                     for _ in list(range(20))]]), metrics_processor)]
        Trainer(TrainConfig(model, stages, SimpleLoss(), torch.optim.SGD(model.parameters(), lr=1)), fsm)\
            .set_epoch_num(1).train()

        fsm = FileStructManager(base_dir=self.base_dir, is_continue=True)
        Predictor(model, fsm).predict({'data': torch.rand(1, 3)})