Python theano.tensor.vector() Examples

The following are 30 code examples of theano.tensor.vector(), drawn from open source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module theano.tensor, or try the search function.
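Before the examples, here is a minimal, self-contained sketch of the typical pattern they all share (the variable name v and the doubling expression are illustrative only, not taken from any example below): theano.tensor.vector() creates a symbolic 1-D variable whose dtype defaults to theano.config.floatX, which you combine into an expression and compile with theano.function.

import numpy
import theano
import theano.tensor as T

v = T.vector('v')                # symbolic 1-D variable, dtype defaults to floatX
f = theano.function([v], 2 * v)  # compile a callable that doubles its input
print(f(numpy.asarray([1, 2, 3], dtype=theano.config.floatX)))  # [2. 4. 6.]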
Example #1
Source File: test_nnet.py    From D-VAE with MIT License
def test_broadcast(self):
        # Test that we don't raise an error during optimization for no good
        # reason, as softmax_with_bias doesn't correctly support some/all
        # broadcasted input patterns.
        initial_W = numpy.asarray([[0.1, 0.1, 0.1],
                                   [0.1, 0.1, 0.1],
                                   [0.1, 0.1, 0.1]],
                                  dtype=theano.config.floatX)
        W = theano.shared(value=initial_W, name='W')
        vbias = theano.shared(value=0.1, name='vbias')  # 0.01
        hid = T.vector('hid')
        f = theano.function([hid],
                            T.nnet.softmax_op(T.dot(hid, W.T) + vbias))
        ops = [node.op for node in f.maker.fgraph.toposort()]
        assert softmax_with_bias not in ops
        assert softmax_op in ops

        f([0, 1, 0])
        # print f.maker.fgraph.toposort() 
Example #2
Source File: test_basic.py    From D-VAE with MIT License
def test_csc_dense(self):
        x = theano.sparse.csc_matrix('x')
        y = theano.tensor.matrix('y')
        v = theano.tensor.vector('v')

        for (x, y, x_v, y_v) in [(x, y, self.x_csc, self.y),
                                 (x, v, self.x_csc, self.v_100),
                                 (v, x, self.v_10, self.x_csc)]:

            f_a = theano.function([x, y], theano.sparse.dot(x, y))
            f_b = lambda x, y: x * y

            utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v))

            # Test infer_shape
            self._compile_and_check([x, y], [theano.sparse.dot(x, y)],
                                    [x_v, y_v],
                                    (Dot, Usmm, UsmmCscDense)) 
Example #3
Source File: test_activations.py    From CAPTCHA-breaking with MIT License
def test_relu():
    '''
    Relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''

    from keras.activations import relu as r

    assert r(5) == 5
    assert r(-5) == 0
    assert r(-0.1) == 0
    assert r(0.1) == 0.1

    x = T.vector()
    exp = r(x)
    f = theano.function([x], exp)

    test_values = get_standard_values()
    result = f(test_values)

    list_assert_equal(result, test_values) # because no negatives in test values 
Example #4
Source File: test_activations.py    From CAPTCHA-breaking with MIT License
def test_softmax():

    from keras.activations import softmax as s

    # Test using a reference implementation of softmax
    def softmax(values):
        m = max(values)
        values = numpy.array(values)
        e = numpy.exp(values - m)
        dist = list(e / numpy.sum(e))

        return dist

    x = T.vector()
    exp = s(x)
    f = theano.function([x], exp)
    test_values = get_standard_values()

    result = f(test_values)
    expected = softmax(test_values)

    print(str(result))
    print(str(expected))

    list_assert_equal(result, expected) 
Example #5
Source File: test_activations.py    From CAPTCHA-breaking with MIT License
def test_tanh():

    from keras.activations import tanh as t
    test_values = get_standard_values()

    x = T.vector()
    exp = t(x)
    f = theano.function([x], exp)

    result = f(test_values)
    expected = [math.tanh(v) for v in test_values]

    print(result)
    print(expected)

    list_assert_equal(result, expected) 
Example #6
Source File: preprocessing.py    From Projects with MIT License
def __init__(self):
        X_in = T.matrix('X_in')
        u = T.matrix('u')
        s = T.vector('s')
        eps = T.scalar('eps')

        X_ = X_in - T.mean(X_in, 0)
        sigma = T.dot(X_.T, X_) / X_.shape[0]
        self.sigma = theano.function([X_in], sigma, allow_input_downcast=True)

        Z = T.dot(T.dot(u, T.nlinalg.diag(1. / T.sqrt(s + eps))), u.T)
        X_zca = T.dot(X_, Z.T)
        self.compute_zca = theano.function([X_in, u, s, eps], X_zca, allow_input_downcast=True)

        self._u = None
        self._s = None 
Example #7
Source File: variable.py    From python-mle with MIT License
def var(name, label=None, observed=False, const=False, vector=False, lower=None, upper=None):
    if vector and not observed:
        raise ValueError('Currently, only observed variables can be vectors')

    if observed and const:
        raise ValueError('Observed variables are automatically const')

    if vector:
        var = T.vector(name)
    else:
        var = T.scalar(name)

    var._name = name
    var._label = label
    var._observed = observed
    var._const = observed or const
    var._lower = lower or -np.inf
    var._upper = upper or np.inf

    return var 
Example #8
Source File: test_basic.py    From D-VAE with MIT License
def test_csr_dense(self):
        x = theano.sparse.csr_matrix('x')
        y = theano.tensor.matrix('y')
        v = theano.tensor.vector('v')

        for (x, y, x_v, y_v) in [(x, y, self.x_csr, self.y),
                                 (x, v, self.x_csr, self.v_100),
                                 (v, x, self.v_10, self.x_csr)]:
            f_a = theano.function([x, y], theano.sparse.dot(x, y))
            f_b = lambda x, y: x * y

            utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v))

            # Test infer_shape
            self._compile_and_check([x, y], [theano.sparse.dot(x, y)],
                                    [x_v, y_v],
                                    (Dot, Usmm, UsmmCscDense)) 
Example #9
Source File: test_basic.py    From D-VAE with MIT License
def test_csm_grad(self):
        for sparsetype in ('csr', 'csc'):
            x = tensor.vector()
            y = tensor.ivector()
            z = tensor.ivector()
            s = tensor.ivector()
            call = getattr(sp, sparsetype + '_matrix')
            spm = call(random_lil((300, 400), config.floatX, 5))
            out = tensor.grad(dense_from_sparse(
                CSM(sparsetype)(x, y, z, s)
            ).sum(), x)
            self._compile_and_check([x, y, z, s],
                                    [out],
                                    [spm.data, spm.indices, spm.indptr,
                                     spm.shape],
                                    (CSMGrad, CSMGradC))
Example #10
Source File: test_opt.py    From D-VAE with MIT License
def test_local_csm_properties_csm():
    data = tensor.vector()
    indices, indptr, shape = (tensor.ivector(), tensor.ivector(),
                              tensor.ivector())
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_csm_properties_csm")
    for CS, cast in [(sparse.CSC, sp.csc_matrix),
                     (sparse.CSR, sp.csr_matrix)]:
        f = theano.function([data, indices, indptr, shape],
                            sparse.csm_properties(
                                CS(data, indices, indptr, shape)),
                            mode=mode)
        assert not any(
            isinstance(node.op, (sparse.CSM, sparse.CSMProperties))
            for node in f.maker.fgraph.toposort())
        v = cast(random_lil((10, 40),
                            config.floatX, 3))
        f(v.data, v.indices, v.indptr, v.shape) 
Example #11
Source File: test_opt.py    From D-VAE with MIT License
def test_local_mul_s_v():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_mul_s_v")

    for sp_format in ['csr']:  # Not implemented for other formats
        inputs = [getattr(theano.sparse, sp_format + '_matrix')(),
                  tensor.vector()]

        f = theano.function(inputs,
                            sparse.mul_s_v(*inputs),
                            mode=mode)

        assert not any(isinstance(node.op, sparse.MulSV) for node
                       in f.maker.fgraph.toposort()) 
Example #12
Source File: test_opt.py    From D-VAE with MIT License
def test_local_csm_grad_c():
    raise SkipTest("Opt disabled as it don't support unsorted indices")
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    data = tensor.vector()
    indices, indptr, shape = (tensor.ivector(), tensor.ivector(),
                              tensor.ivector())
    mode = theano.compile.mode.get_default_mode()

    if theano.config.mode == 'FAST_COMPILE':
        mode = theano.compile.Mode(linker='c|py', optimizer='fast_compile')

    mode = mode.including("specialize", "local_csm_grad_c")
    for CS, cast in [(sparse.CSC, sp.csc_matrix), (sparse.CSR, sp.csr_matrix)]:
        cost = tensor.sum(sparse.DenseFromSparse()(CS(data, indices, indptr, shape)))
        f = theano.function(
            [data, indices, indptr, shape],
            tensor.grad(cost, data),
            mode=mode)
        assert not any(isinstance(node.op, sparse.CSMGrad) for node
                       in f.maker.fgraph.toposort())
        v = cast(random_lil((10, 40),
                            config.floatX, 3))
        f(v.data, v.indices, v.indptr, v.shape) 
Example #13
Source File: aa.py    From D-VAE with MIT License
def __init__(self):
        super(M, self).__init__()

        x = T.matrix('x') # input, target
        self.w = module.Member(T.matrix('w')) # weights
        self.a = module.Member(T.vector('a')) # hid bias
        self.b = module.Member(T.vector('b')) # output bias

        self.hid = T.tanh(T.dot(x, self.w) + self.a)
        hid = self.hid

        self.out = T.tanh(T.dot(hid, self.w.T) + self.b)
        out = self.out

        self.err = 0.5 * T.sum((out - x)**2)
        err = self.err

        params = [self.w, self.a, self.b]

        gparams = T.grad(err, params)

        updates = [(p, p - 0.01 * gp) for p, gp in zip(params, gparams)]

        self.step = module.Method([x], err, updates=dict(updates)) 
Example #14
Source File: test_basic.py    From D-VAE with MIT License
def test_mul_s_v(self):
        sp_types = {'csc': sp.csc_matrix,
                    'csr': sp.csr_matrix}

        for format in ['csr', 'csc']:
            for dtype in ['float32', 'float64']:
                x = theano.sparse.SparseType(format, dtype=dtype)()
                y = tensor.vector(dtype=dtype)
                f = theano.function([x, y], mul_s_v(x, y))

                spmat = sp_types[format](random_lil((4, 3), dtype, 3))
                mat = numpy.asarray(numpy.random.rand(3), dtype=dtype)

                out = f(spmat, mat)

                utt.assert_allclose(spmat.toarray() * mat, out.toarray()) 
Example #15
Source File: test_basic.py    From D-VAE with MIT License
def test_structured_add_s_v(self):
        sp_types = {'csc': sp.csc_matrix,
                    'csr': sp.csr_matrix}

        for format in ['csr', 'csc']:
            for dtype in ['float32', 'float64']:
                x = theano.sparse.SparseType(format, dtype=dtype)()
                y = tensor.vector(dtype=dtype)
                f = theano.function([x, y], structured_add_s_v(x, y))

                spmat = sp_types[format](random_lil((4, 3), dtype, 3))
                spones = spmat.copy()
                spones.data = numpy.ones_like(spones.data)
                mat = numpy.asarray(numpy.random.rand(3), dtype=dtype)

                out = f(spmat, mat)

                utt.assert_allclose(as_ndarray(spones.multiply(spmat + mat)),
                                    out.toarray()) 
Example #16
Source File: test_function_module.py    From D-VAE with MIT License
def __init__(self):
        a = T.scalar()  # the a is for 'anonymous' (un-named).
        x, s = T.scalars('xs')
        v = T.vector('v')

        self.s = s
        self.x = x
        self.v = v

        self.e = a * x + s

        self.f1 = function([x, In(a, value=1.0, name='a'),
                            In(s, value=0.0, update=s + a * x, mutable=True)],
                           s + a * x)

        self.f2 = function([x, In(a, value=1.0, name='a'),
                            In(s, value=self.f1.container[s], update=s + a * x,
                               mutable=True)],
                           s + a * x) 
Example #17
Source File: test_nnet.py    From D-VAE with MIT License
def test_softmax_optimizations_w_bias_vector(self):
        x = tensor.vector('x')
        b = tensor.vector('b')
        one_of_n = tensor.lvector('one_of_n')
        op = crossentropy_categorical_1hot
        fgraph = gof.FunctionGraph(
                [x, b, one_of_n],
                [op(softmax_op(x + b), one_of_n)])
        assert fgraph.outputs[0].owner.op == op
        # print 'BEFORE'
        # for node in fgraph.toposort():
        #    print node.op
        # print printing.pprint(node.outputs[0])
        # print '----'

        theano.compile.mode.optdb.query(
                theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)
        # print 'AFTER'
        # for node in fgraph.toposort():
        #    print node.op
        # print '===='
        assert len(fgraph.toposort()) == 3
        assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
        assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
                crossentropy_softmax_argmax_1hot_with_bias) 
Example #18
Source File: test_function_module.py    From D-VAE with MIT License
def test_free(self):
        """
        Make test on free() function
        """
        x = T.vector('x')
        func = function([x], x + 1)
        func.fn.allow_gc = False
        func([1])

        check_list = []
        for key, val in iteritems(func.fn.storage_map):
            if not isinstance(key, theano.gof.Constant):
                check_list.append(val)
        assert any([val[0] for val in check_list])

        func.free()

        for key, val in iteritems(func.fn.storage_map):
            if not isinstance(key, theano.gof.Constant):
                assert (val[0] is None) 
Example #19
Source File: test_nnet.py    From D-VAE with MIT License
def test_1d_basic(self):
        # this should be a softmax, but of a one-row matrix
        c = T.vector()
        p_y = T.exp(c) / T.exp(c).sum()

        # test that function contains softmax and no div.
        f = theano.function([c], p_y)
        hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
        # printing.debugprint(f)

        # test that function contains softmax and no div.
        backup = config.warn.sum_div_dimshuffle_bug
        config.warn.sum_div_dimshuffle_bug = False
        try:
            g = theano.function([c], T.grad(p_y.sum(), c))
            hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
        finally:
            config.warn.sum_div_dimshuffle_bug = backup
        # printing.debugprint(g)
        raise SkipTest('Optimization not enabled for the moment')

    # REPEAT 3 CASES in presence of log(softmax) with the advanced indexing
    # etc. 
Example #20
Source File: test_sigm.py    From D-VAE with MIT License
def test_is_1pexp(self):
        backup = config.warn.identify_1pexp_bug
        config.warn.identify_1pexp_bug = False
        try:
            x = tensor.vector('x')
            exp = tensor.exp
            assert is_1pexp(1 + exp(x)) == (False, x)
            assert is_1pexp(exp(x) + 1) == (False, x)
            for neg, exp_arg in imap(is_1pexp, [(1 + exp(-x)), (exp(-x) + 1)]):
                assert not neg and theano.gof.graph.is_same_graph(exp_arg, -x)
            assert is_1pexp(1 - exp(x)) is None
            assert is_1pexp(2 + exp(x)) is None
            assert is_1pexp(exp(x) + 2) is None
            assert is_1pexp(exp(x) - 1) is None
            assert is_1pexp(-1 + exp(x)) is None
            assert is_1pexp(1 + 2 * exp(x)) is None
        finally:
            config.warn.identify_1pexp_bug = backup 
Example #21
Source File: test_pool.py    From D-VAE with MIT License
def test_DownsampleFactorMax_hessian(self):
        # Example provided by Frans Cronje, see
        # https://groups.google.com/d/msg/theano-users/qpqUy_3glhw/JMwIvlN5wX4J
        x_vec = tensor.vector('x')
        z = tensor.dot(x_vec.dimshuffle(0, 'x'),
                       x_vec.dimshuffle('x', 0))
        y = pool_2d(input=z, ds=(2, 2), ignore_border=True)
        C = tensor.exp(tensor.sum(y))

        grad_hess = tensor.hessian(cost=C, wrt=x_vec)
        fn_hess = function(inputs=[x_vec], outputs=grad_hess)

        # The value has been manually computed from the theoretical gradient,
        # and confirmed by the implementation.

        assert numpy.allclose(fn_hess([1, 2]), [[0., 0.], [0., 982.7667]]) 
Example #22
Source File: test_extra_ops.py    From D-VAE with MIT License
def test_basic_vector(self):
        """
        Basic test for a vector.
        Done by using the op and checking that it returns the right answer.
        """
        x = theano.tensor.vector()
        inp = np.asarray([2, 1, 3, 2], dtype=config.floatX)
        list_outs_expected = [[np.unique(inp)],
                              np.unique(inp, True),
                              np.unique(inp, False, True),
                              np.unique(inp, True, True)]
        if bool(numpy_ver >= [1, 9]):
            list_outs_expected.extend([
                np.unique(inp, False, False, True),
                np.unique(inp, True, False, True),
                np.unique(inp, False, True, True),
                np.unique(inp, True, True, True)])
        for op, outs_expected in zip(self.ops, list_outs_expected):
            f = theano.function(inputs=[x], outputs=op(x, return_list=True))
            outs = f(inp)
            # Compare the result computed to the expected value.
            for out, out_exp in zip(outs, outs_expected):
                utt.assert_allclose(out, out_exp) 
Example #23
Source File: test_inc_subtensor.py    From D-VAE with MIT License
def test_wrong_broadcast(self):
        a = tt.col()
        increment = tt.vector()

        # These symbolic graphs are legitimate as long as increment has
        # exactly one element, so they should fail at runtime, not at
        # compile time.
        rng = numpy.random.RandomState(utt.fetch_seed())

        def rng_randX(*shape):
            return rng.rand(*shape).astype(theano.config.floatX)

        for op in (tt.set_subtensor, tt.inc_subtensor):
            for base in (a[:], a[0]):
                out = op(base, increment)
                f = theano.function([a, increment], out)
                # This one should work
                f(rng_randX(3, 1), rng_randX(1))
                # These ones should not
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(2))
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(3))
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(0)) 
Example #24
Source File: test_blas.py    From D-VAE with MIT License
def test_dot_vm(self):
        ''' Test vector dot matrix '''
        rng = numpy.random.RandomState(unittest_tools.fetch_seed())
        v = theano.shared(numpy.array(rng.uniform(size=(2,)), dtype='float32'))
        m = theano.shared(numpy.array(rng.uniform(size=(2, 3)),
             dtype='float32'))
        f = theano.function([], theano.dot(v, m), mode=mode_blas_opt)

        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, T.dot)
        self.assertFunctionContains1(f, Gemv(True))

        # Assert they produce the same output
        assert numpy.allclose(f(), numpy.dot(v.get_value(), m.get_value()))
        # Assert it works when m has no contiguous dimension
        m.set_value(
                m.get_value(borrow=True)[::-1, ::-1],
                borrow=True)
        assert numpy.allclose(f(), numpy.dot(v.get_value(), m.get_value())) 
Example #25
Source File: test_blas.py    From D-VAE with MIT License
def test_dot_mv(self):
        ''' Test matrix dot vector '''
        rng = numpy.random.RandomState(unittest_tools.fetch_seed())
        v = theano.shared(numpy.array(rng.uniform(size=(2,)), dtype='float32'))
        m = theano.shared(numpy.array(rng.uniform(size=(3, 2)),
                                       dtype='float32'))
        f = theano.function([], theano.dot(m, v), mode=mode_blas_opt)

        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, T.dot)
        self.assertFunctionContains1(f, Gemv(True))

        # Assert they produce the same output
        assert numpy.allclose(f(), numpy.dot(m.get_value(), v.get_value()))
        # Assert it works when m has no contiguous dimension
        m.set_value(
                m.get_value(borrow=True)[::-1, ::-1],
                borrow=True)
        assert numpy.allclose(f(), numpy.dot(m.get_value(), v.get_value())) 
Example #26
Source File: test_elemwise.py    From D-VAE with MIT License
def setUp(self):
        self.test_vals = [numpy.array(x, dtype=config.floatX) for x in [
            0,
            1,
            numpy.nan,
            numpy.inf,
            -numpy.inf,
            [numpy.nan, numpy.inf, -numpy.inf, 0, 1, -1],
            ]]
        self.scalar = tensor.scalar()
        self.vector = tensor.vector()
        self.mode = get_default_mode()
        if isinstance(self.mode, theano.compile.debugmode.DebugMode):
            # Disable the check preventing usage of NaN / Inf values.
            self.mode = copy(self.mode)
            self.mode.check_isfinite = False 
Example #27
Source File: test_nlinalg.py    From D-VAE with MIT License
def test_diag(self):
        # test that it builds a matrix with given diagonal when using
        # vector inputs
        x = theano.tensor.vector()
        y = diag(x)
        assert y.owner.op.__class__ == AllocDiag

        # test that it extracts the diagonal when using matrix input
        x = theano.tensor.matrix()
        y = extract_diag(x)
        assert y.owner.op.__class__ == ExtractDiag

        # other types should raise error
        x = theano.tensor.tensor3()
        ok = False
        try:
            y = extract_diag(x)
        except TypeError:
            ok = True
        assert ok

    # not testing the view=True case since it is not used anywhere. 
Example #28
Source File: test_nlinalg.py    From D-VAE with MIT License
def test_trace():
    rng = numpy.random.RandomState(utt.fetch_seed())
    x = theano.tensor.matrix()
    g = trace(x)
    f = theano.function([x], g)

    for shp in [(2, 3), (3, 2), (3, 3)]:
        m = rng.rand(*shp).astype(config.floatX)
        v = numpy.trace(m)
        assert v == f(m)

    xx = theano.tensor.vector()
    ok = False
    try:
        trace(xx)
    except TypeError:
        ok = True
    assert ok 
Example #29
Source File: test_nlinalg.py    From D-VAE with MIT License
def test_numpy_compare(self):
        rng = numpy.random.RandomState(utt.fetch_seed())

        M = tensor.matrix("A", dtype=theano.config.floatX)
        V = tensor.vector("V", dtype=theano.config.floatX)

        a = rng.rand(4, 4).astype(theano.config.floatX)
        b = rng.rand(4).astype(theano.config.floatX)

        A = (   [None, 'fro', 'inf', '-inf', 1, -1, None, 'inf', '-inf', 0, 1, -1, 2, -2],
                [M, M, M, M, M, M, V, V, V, V, V, V, V, V],
                [a, a, a, a, a, a, b, b, b, b, b, b, b, b],
                [None, 'fro', inf, -inf, 1, -1, None, inf, -inf, 0, 1, -1, 2, -2])

        for i in range(0, 14):
            f = function([A[1][i]], norm(A[1][i], A[0][i]))
            t_n = f(A[2][i])
            n_n = numpy.linalg.norm(A[2][i], A[3][i])
            assert _allclose(n_n, t_n) 
Example #30
Source File: test_shared_randomstreams.py    From D-VAE with MIT License
def test_permutation(self):
        """Test that RandomStreams.permutation generates the same results as numpy"""
        # Check over two calls to see if the random state is correctly updated.
        random = RandomStreams(utt.fetch_seed())
        fn = function([], random.permutation((20,), 10), updates=random.updates())

        fn_val0 = fn()
        fn_val1 = fn()

        rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)
        rng = numpy.random.RandomState(int(rng_seed))  # int() is for 32bit

        # rng.permutation outputs one vector at a time, so we iterate.
        numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])
        numpy_val1 = numpy.asarray([rng.permutation(10) for i in range(20)])

        assert numpy.all(fn_val0 == numpy_val0)
        assert numpy.all(fn_val1 == numpy_val1)