Python torch.set_default_dtype() Examples

The following are 17 code examples of torch.set_default_dtype(), collected from open-source projects. Each example lists its source file, project, and license.
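torch.set_default_dtype() sets the dtype given to newly created floating-point tensors (integer tensors are unaffected, and only floating-point dtypes are accepted). A minimal illustration:

import torch

torch.set_default_dtype(torch.float64)
print(torch.tensor([1.0]).dtype)   # torch.float64
print(torch.get_default_dtype())   # torch.float64

torch.set_default_dtype(torch.float32)  # restore PyTorch's usual default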
Example #1
Source File: nonlinear.py    From notears with Apache License 2.0
import numpy as np
import torch

import notears.utils as ut
# NotearsMLP and notears_nonlinear are defined earlier in nonlinear.py.


def main():
    torch.set_default_dtype(torch.double)  # run the whole pipeline in float64
    np.set_printoptions(precision=3)

    ut.set_random_seed(123)

    n, d, s0, graph_type, sem_type = 200, 5, 9, 'ER', 'mim'  # samples, nodes, edges, graph type, SEM type
    B_true = ut.simulate_dag(d, s0, graph_type)
    np.savetxt('W_true.csv', B_true, delimiter=',')

    X = ut.simulate_nonlinear_sem(B_true, n, sem_type)
    np.savetxt('X.csv', X, delimiter=',')

    model = NotearsMLP(dims=[d, 10, 1], bias=True)
    W_est = notears_nonlinear(model, X, lambda1=0.01, lambda2=0.01)
    assert ut.is_dag(W_est)
    np.savetxt('W_est.csv', W_est, delimiter=',')
    acc = ut.count_accuracy(B_true, W_est != 0)
    print(acc) 
Example #2
Source File: locally_connected.py    From notears with Apache License 2.0
import numpy as np
import torch

# LocallyConnected is defined earlier in locally_connected.py.


def main():
    n, d, m1, m2 = 2, 3, 5, 7

    # numpy reference implementation
    input_numpy = np.random.randn(n, d, m1)
    weight = np.random.randn(d, m1, m2)
    output_numpy = np.zeros([n, d, m2])
    for j in range(d):
        # [n, m2] = [n, m1] @ [m1, m2]
        output_numpy[:, j, :] = input_numpy[:, j, :] @ weight[j, :, :]

    # torch
    torch.set_default_dtype(torch.double)
    input_torch = torch.from_numpy(input_numpy)
    locally_connected = LocallyConnected(d, m1, m2, bias=False)
    locally_connected.weight.data[:] = torch.from_numpy(weight)
    output_torch = locally_connected(input_torch)

    # compare
    print(torch.allclose(output_torch, torch.from_numpy(output_numpy))) 
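For reference, the per-index loop can also be expressed as one batched contraction; an equivalent sketch reusing the tensors defined above (not part of the original example):

    # contract over m1 within each block d: [n, d, m2] = [n, d, m1] x [d, m1, m2]
    output_einsum = torch.einsum('ndi,dio->ndo', input_torch, torch.from_numpy(weight))
    print(torch.allclose(output_einsum, output_torch))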
Example #3
Source File: spinup_sac_tests.py    From cherry with Apache License 2.0
def setUp(self):
        torch.set_default_tensor_type(torch.DoubleTensor)
        torch.set_default_dtype(torch.float64) 
Example #4
Source File: lbfgsb_scipy.py    From notears with Apache License 2.0
import torch
import torch.nn as nn

# LBFGSBScipy is defined earlier in lbfgsb_scipy.py.


def main():
    # torch.set_default_dtype(torch.double)  # optional: run the fit in float64

    n, d, out, j = 10000, 3000, 10, 0
    input = torch.randn(n, d)
    w_true = torch.rand(d, out)
    w_true[j, :] = 0
    target = torch.matmul(input, w_true)
    linear = nn.Linear(d, out)
    linear.weight.bounds = [(0, None)] * d * out  # hack: per-element bounds consumed by LBFGSBScipy
    for m in range(out):
        linear.weight.bounds[m * d + j] = (0, 0)  # pin weights for input feature j to zero
    criterion = nn.MSELoss()
    optimizer = LBFGSBScipy(linear.parameters())
    print(list(linear.parameters()))

    def closure():
        optimizer.zero_grad()
        output = linear(input)
        loss = criterion(output, target)
        print('loss:', loss.item())
        loss.backward()
        return loss
    optimizer.step(closure)
    print(list(linear.parameters()))  # fitted weights; compare with w_true.t() below
    print(w_true.t())
Example #5
Source File: pytorch_backend.py    From pyhf with Apache License 2.0
def __init__(self, **kwargs):
        self.name = 'pytorch'
        self.precision = kwargs.get('precision', '32b')
        self.dtypemap = {
            'float': torch.float64 if self.precision == '64b' else torch.float32,
            'int': torch.int64 if self.precision == '64b' else torch.int32,
            'bool': torch.bool,
        }
        torch.set_default_dtype(self.dtypemap["float"]) 
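A hypothetical instantiation of this backend (the class name is assumed from the source file above; the surrounding pyhf plumbing is omitted):

import torch

backend = pytorch_backend(precision='64b')
print(torch.get_default_dtype())   # torch.float64
print(backend.dtypemap['int'])     # torch.int64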
Example #6
Source File: spinup_ppo_tests.py    From cherry with Apache License 2.0
def tearDown(self):
        torch.set_default_tensor_type(torch.FloatTensor)
        torch.set_default_dtype(torch.float32) 
Example #7
Source File: spinup_ppo_tests.py    From cherry with Apache License 2.0
def setUp(self):
        torch.set_default_tensor_type(torch.DoubleTensor)
        torch.set_default_dtype(torch.float64) 
Example #8
Source File: spinup_vpg_tests.py    From cherry with Apache License 2.0
def setUp(self):
        torch.set_default_tensor_type(torch.DoubleTensor)
        torch.set_default_dtype(torch.float64) 
Example #9
Source File: spinup_sac_tests.py    From cherry with Apache License 2.0
def tearDown(self):
        torch.set_default_tensor_type(torch.FloatTensor)
        torch.set_default_dtype(torch.float32) 
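Examples #3, #6, #7, #8, #9, #11, and #12 are all instances of one pattern: force float64 in setUp and restore float32 in tearDown so each test is isolated. A minimal sketch of the full skeleton (class and test names illustrative; note that recent PyTorch releases deprecate set_default_tensor_type in favor of set_default_dtype):

import unittest
import torch

class DoublePrecisionTestCase(unittest.TestCase):
    def setUp(self):
        torch.set_default_tensor_type(torch.DoubleTensor)
        torch.set_default_dtype(torch.float64)

    def tearDown(self):
        torch.set_default_tensor_type(torch.FloatTensor)
        torch.set_default_dtype(torch.float32)

    def test_default_is_double(self):
        self.assertEqual(torch.zeros(1).dtype, torch.float64)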
Example #10
Source File: test_rhmc.py    From geoopt with Apache License 2.0
def withdtype():
    # Used as a test fixture in the original file (decorator not shown):
    # run the test body in float64, then restore float32 even on failure.
    torch.set_default_dtype(torch.float64)
    try:
        yield
    finally:
        torch.set_default_dtype(torch.float32) 
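The same try/finally pattern generalizes to a reusable context manager; a minimal sketch (the helper name default_dtype is illustrative):

import contextlib
import torch

@contextlib.contextmanager
def default_dtype(dtype):
    # Temporarily install `dtype` as the global default, restoring the old one on exit.
    saved = torch.get_default_dtype()
    torch.set_default_dtype(dtype)
    try:
        yield
    finally:
        torch.set_default_dtype(saved)

Used as: with default_dtype(torch.float64): ...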
Example #11
Source File: spinup_ddpg_tests.py    From cherry with Apache License 2.0
def tearDown(self):
        torch.set_default_tensor_type(torch.FloatTensor)
        torch.set_default_dtype(torch.float32) 
Example #12
Source File: spinup_ddpg_tests.py    From cherry with Apache License 2.0
def setUp(self):
        torch.set_default_tensor_type(torch.DoubleTensor)
        torch.set_default_dtype(torch.float64) 
Example #13
Source File: test_utilities.py    From safe-exploration with MIT License
import torch

# SetTorchDtype is the context manager from safe-exploration's utilities.py;
# its __enter__/__exit__ appear in Examples #14 and #15.


def test_set_torch_dtype():
    """Test dtype context manager."""
    dtype = torch.get_default_dtype()  # remember the ambient default

    torch.set_default_dtype(torch.float32)
    with SetTorchDtype(torch.float64):
        a = torch.zeros(1)

    assert a.dtype is torch.float64
    b = torch.zeros(1)
    assert b.dtype is torch.float32

    torch.set_default_dtype(dtype) 
Example #14
Source File: utilities.py    From safe-exploration with MIT License
def __exit__(self, *args):
        """Restor old dtype."""
        torch.set_default_dtype(self.old_dtype) 
Example #15
Source File: utilities.py    From safe-exploration with MIT License
def __enter__(self):
        """Set new dtype."""
        self.old_dtype = torch.get_default_dtype()
        torch.set_default_dtype(self.new_dtype) 
Example #16
Source File: test_manifold_basic.py    From geoopt with Apache License 2.0
def use_floatX(request):
    # Parametrized pytest fixture in the original file (decorator not shown);
    # request.param supplies the dtype for the current run.
    dtype_old = torch.get_default_dtype()
    torch.set_default_dtype(request.param)
    yield request.param
    torch.set_default_dtype(dtype_old) 
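A test consumes the fixture by declaring it as an argument; a hypothetical usage sketch (test name illustrative):

import torch

def test_zeros_pick_up_default(use_floatX):
    # tensors created inside the test use the dtype installed by the fixture
    assert torch.zeros(3).dtype == use_floatX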
Example #17
Source File: test_train_distilled_image.py    From dataset-distillation with MIT License
def _test_backward(self, state, eps=2e-8, atol=1e-5, rtol=1e-3, max_num_per_param=5):
        # Check analytical gradients against central finite differences, in float64
        # to keep the numerical estimate stable.
        @contextlib.contextmanager
        def double_prec():
            saved_dtype = torch.get_default_dtype()
            torch.set_default_dtype(torch.double)
            yield
            torch.set_default_dtype(saved_dtype)

        with double_prec():
            models = [m.to(torch.double) for m in networks.get_networks(state, 1)]
            trainer = Trainer(state, models)

            model = trainer.models[0]

            rdata, rlabel = next(iter(state.train_loader))
            rdata = rdata.to(state.device, torch.double, non_blocking=True)
            rlabel = rlabel.to(state.device, non_blocking=True)
            steps = trainer.get_steps()

            l, saved = trainer.forward(model, rdata, rlabel, steps)
            grad_info = trainer.backward(model, rdata, rlabel, steps, saved)
            trainer.accumulate_grad([grad_info])

            with torch.no_grad():
                for p_idx, p in enumerate(trainer.params):
                    pdata = p.data
                    N = p.numel()
                    for flat_i in np.random.choice(N, min(N, max_num_per_param), replace=False):
                        # unravel the flat index into a multi-dimensional index
                        i = []
                        for s in reversed(p.size()):
                            i.insert(0, flat_i % s)
                            flat_i //= s
                        i = tuple(i)
                        ag = p.grad[i].item()
                        orig = pdata[i].item()
                        pdata[i] -= eps
                        steps = trainer.get_steps()
                        lm, _ = trainer.forward(model, rdata, rlabel, steps)
                        pdata[i] += eps * 2
                        steps = trainer.get_steps()
                        lp, _ = trainer.forward(model, rdata, rlabel, steps)
                        ng = (lp - lm).item() / (2 * eps)
                        pdata[i] = orig
                        rel_err = abs(ag - ng) / (atol + rtol * abs(ng))  # <= 1 iff |ag - ng| <= atol + rtol * |ng|
                        info_msg = "testing param {} with shape [{}] at ({}):\trel_err={:.4f}\t" \
                                   "analytical={:+.6f}\tnumerical={:+.6f}".format(
                                       p_idx, format_intlist(p.size()),
                                       format_intlist(i), rel_err, ag, ng)
                        if unittest_verbosity() > 0:
                            print(info_msg)
                        self.assertTrue(rel_err <= 1, "gradcheck failed when " + info_msg)
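The numerical estimate above is the standard symmetric (central) difference. A standalone sketch of the same idea for a loss function returning a Python float (helper name illustrative):

import torch

def central_difference(loss_fn, param, idx, eps=2e-8):
    # Approximate d loss / d param[idx] as (loss(x + eps) - loss(x - eps)) / (2 * eps).
    with torch.no_grad():
        orig = param[idx].item()
        param[idx] = orig - eps
        lm = loss_fn()
        param[idx] = orig + eps
        lp = loss_fn()
        param[idx] = orig  # restore the perturbed entry
    return (lp - lm) / (2 * eps)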