Python theano.grad() Examples

The following are 30 code examples of theano.grad(), all extracted from the open-source D-VAE project. You may also want to check out the other available functions and classes of the theano module.
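For orientation, here is a minimal, self-contained sketch of the basic theano.grad() call (the variable names are illustrative and not taken from any of the examples below):

import theano
import theano.tensor as T

# Build a scalar cost from a symbolic vector input.
x = T.vector('x')
cost = (x ** 2).sum()

# theano.grad returns a symbolic expression for d(cost)/dx.
g = theano.grad(cost, x)

# Compile and evaluate: the gradient of sum(x ** 2) is 2 * x.
f = theano.function([x], g)
print(f([1., 2., 3.]))  # expected output: [2. 4. 6.]
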
Example #1
Source File: test_subtensor.py    From D-VAE with MIT License
def test_grad_advanced_inc_subtensor(self):
        def inc_slice(*s):
            def just_numeric_args(a, b):
                cost = (a[s] + b).sum()
                cost_wrt_a = theano.tensor.grad(cost, a)
                cost_wrt_b = theano.tensor.grad(cost, b)
                grads = cost_wrt_a.sum() + cost_wrt_b.sum()
                return grads
            return just_numeric_args

        # vector
        utt.verify_grad(
            inc_slice(slice(2, 4, None)),
            (numpy.asarray([0, 1, 2, 3, 4, 5.]), numpy.asarray([9, 9.]),))

        # matrix
        utt.verify_grad(
            inc_slice(slice(1, 2, None), slice(None, None, None)),
            (numpy.asarray([[0, 1], [2, 3], [4, 5.]]),
             numpy.asarray([[9, 9.]]),))

        # single element
        utt.verify_grad(
            inc_slice(2, 1),
            (numpy.asarray([[0, 1], [2, 3], [4, 5.]]), numpy.asarray(9.),)) 
Example #2
Source File: test_gradient.py    From D-VAE with MIT License
def test_1in_1out(self):
        """Test grad is called correctly for a 1-to-1 op"""
        gval = theano.tensor.matrix()

        class O(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.matrix()]
                outputs = [theano.tensor.matrix()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                return gval,
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval) 
Example #3
Source File: test_gradient.py    From D-VAE with MIT License
def test_grad_constant(self):

        # Test that the gradient handles Constants and consider_constant variables
        # consistently

        x = theano.tensor.scalar()
        y = theano.tensor.scalar()
        z_x = x + y
        z_one = one + y
        g_x = theano.tensor.grad(z_x, x, consider_constant=[x])
        g_one = theano.tensor.grad(z_one, one)

        f = theano.function([x, y], [g_x, g_one])

        g_x, g_one = f(1, .5)

        if not np.allclose(g_x, g_one):
            raise AssertionError("Gradient using consider constant is " +
                                 str(g_x) +
                                 " but gradient with respect to the same Constant is " +
                                 str(g_one)) 
Example #4
Source File: test_gradient.py    From D-VAE with MIT License
def test_1in_Nout(self):
        """Test grad is called correctly for a 1-to-many op"""
        gval = theano.tensor.matrix()

        class O(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.matrix()]
                outputs = [theano.tensor.scalar(), theano.tensor.scalar()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                x, = inp
                gz1, gz2 = grads
                return gval,
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval) 
Example #5
Source File: test_gradient.py    From D-VAE with MIT License
def test_Nin_1out(self):
        """Test grad is called correctly for a many-to-1 op"""
        gval0 = theano.tensor.scalar()
        gval1 = theano.tensor.scalar()

        class O(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.scalar(), theano.tensor.scalar()]
                outputs = [theano.tensor.matrix()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                x0, x1 = inp
                gz, = grads
                return (gval0, gval1)
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval0)
        self.assertTrue(g[a1.inputs[1]] is gval1) 
Example #6
Source File: test_gradient.py    From D-VAE with MIT License
def test_downcast_dtype(self):
        # Test that the gradient of a cost wrt a float32 variable does not
        # get upcasted to float64.
        # x has dtype float32, regardless of the value of floatX
        x = theano.tensor.fscalar('x')
        y = x * 2
        z = theano.tensor.lscalar('z')

        c = y + z
        dc_dx, dc_dy, dc_dz, dc_dc = theano.grad(c, [x, y, z, c])
        # The dtype of dc_dy and dc_dz can be either float32 or float64,
        # that might depend on floatX, but is not specified.
        assert dc_dc.dtype in ('float32', 'float64')
        assert dc_dz.dtype in ('float32', 'float64')
        assert dc_dy.dtype in ('float32', 'float64')

        # When the output gradient of y is passed to op.grad, it should
        # be downcasted to float32, so dc_dx should also be float32
        assert dc_dx.dtype == 'float32' 
Example #7
Source File: test_gradient.py    From D-VAE with MIT License
def test_grad_disconnected(self):

        # tests corner cases of gradient for shape and alloc

        x = theano.tensor.vector(name='x')
        total = x.sum()
        total.name = 'total'
        num_elements = x.shape[0]
        num_elements.name = 'num_elements'
        silly_vector = theano.tensor.alloc(total / num_elements, num_elements)
        silly_vector.name = 'silly_vector'
        cost = silly_vector.sum()
        cost.name = 'cost'
        # note that cost simplifies to be the same as "total"
        g = gradient.grad(cost, x, add_names=False)
        # we still need to pass in x because it determines the shape of
        # the output
        f = theano.function([x], g)
        rng = np.random.RandomState([2012, 9, 5])
        x = np.cast[x.dtype](rng.randn(3))
        g = f(x)
        assert np.allclose(g, np.ones(x.shape, dtype=x.dtype)) 
Example #8
Source File: test_gradient.py    From D-VAE with MIT License
def test_Nin_Nout(self):
        """Test grad is called correctly for a many-to-many op"""
        gval0 = theano.tensor.matrix()
        gval1 = theano.tensor.matrix()

        class O(gof.op.Op):
            __props__ = ()

            def make_node(self):
                inputs = [theano.tensor.matrix(), theano.tensor.matrix()]
                outputs = [theano.tensor.matrix(), theano.tensor.matrix()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inp, grads):
                return gval0, gval1
        a1 = O().make_node()
        g = grad_sources_inputs([(a1.outputs[0], one)], None)
        self.assertTrue(g[a1.inputs[0]] is gval0)
        self.assertTrue(g[a1.inputs[1]] is gval1) 
Example #9
Source File: test_blocksparse.py    From D-VAE with MIT License
def test_sparseblockgemv_grad_shape(self):
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
        go = theano.grad(o.sum(), [b, W, h])

        f = theano.function([W, h, iIdx, b, oIdx], go, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        # just make sure that it runs correctly and all the shapes are ok.
        b_g, W_g, h_g = f(W_val, h_val, iIdx_val, b_val, oIdx_val)

        assert b_g.shape == b_val.shape
        assert h_g.shape == h_val.shape
        assert W_g.shape == W_val.shape 
Example #10
Source File: test_gradient.py    From D-VAE with MIT License
def test_wrong_rval_len1(self):
        """Test that it is not ok to return the wrong number of gradient terms
        """
        class retOne(gof.op.Op):
            __props__ = ()

            def make_node(self, *inputs):
                outputs = [theano.tensor.vector()]
                return gof.Apply(self, inputs, outputs)

            def grad(self, inputs, grads):
                return [inputs[0].zeros_like()]

        i = theano.tensor.vector()
        j = theano.tensor.vector()
        a1 = retOne().make_node(i)
        grad_sources_inputs([(a1.out, one)], None)
        a2 = retOne().make_node(i, j)
        self.assertRaises(ValueError, grad_sources_inputs, [(a2.out, one)], None) 
Example #11
Source File: test_nnet.py    From D-VAE with MIT License
def test_grad(self):
        c = T.matrix()
        p_y = T.exp(c) / T.exp(c).sum(axis=1).dimshuffle(0, 'x')

        # test that function contains softmax and softmaxgrad
        w = T.matrix()
        backup = config.warn.sum_div_dimshuffle_bug
        config.warn.sum_div_dimshuffle_bug = False
        try:
            g = theano.function([c, w], T.grad((p_y * w).sum(), c))
            hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
        finally:
            config.warn.sum_div_dimshuffle_bug = backup
        g_ops = [n.op for n in g.maker.fgraph.toposort()]
        # print '--- g ='
        # printing.debugprint(g)
        # print '==='

        raise SkipTest('Optimization not enabled for the moment')
        assert len(g_ops) == 2
        assert softmax_op in g_ops
        assert softmax_grad in g_ops
        g(self.rng.rand(3, 4), self.rng.uniform(.5, 1, (3, 4))) 
Example #12
Source File: test_nnet.py    From D-VAE with MIT License
def test_transpose_basic(self):
        # this should be a transposed softmax
        c = T.matrix()
        p_y = T.exp(c) / T.exp(c).sum(axis=0)

        # test that function contains softmax and no div.
        f = theano.function([c], p_y)
        # printing.debugprint(f)

        # test that function contains softmax and no div.
        backup = config.warn.sum_div_dimshuffle_bug
        config.warn.sum_div_dimshuffle_bug = False
        try:
            g = theano.function([c], T.grad(p_y.sum(), c))
            hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
        finally:
            config.warn.sum_div_dimshuffle_bug = backup
        # printing.debugprint(g)
        raise SkipTest('Optimization not enabled for the moment') 
Example #13
Source File: test_nnet.py    From D-VAE with MIT License
def test_1d_basic(self):
        # this should be a softmax, but of a one-row matrix
        c = T.vector()
        p_y = T.exp(c) / T.exp(c).sum()

        # test that function contains softmax and no div.
        f = theano.function([c], p_y)
        hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
        # printing.debugprint(f)

        # test that function contains softmax and no div.
        backup = config.warn.sum_div_dimshuffle_bug
        config.warn.sum_div_dimshuffle_bug = False
        try:
            g = theano.function([c], T.grad(p_y.sum(), c))
            hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
        finally:
            config.warn.sum_div_dimshuffle_bug = backup
        # printing.debugprint(g)
        raise SkipTest('Optimization not enabled for the moment')

    # REPEAT 3 CASES in presence of log(softmax) with the advanced indexing
    # etc. 
Example #14
Source File: test_elemwise.py    From D-VAE with MIT License
def test_other_grad_tests(self):
        x = theano.tensor.dmatrix()
        x_val1 = numpy.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]],
                             dtype='float32')
        x_val2 = numpy.array([[1, 2, 0], [0, 5, 6], [7, 8, 9], [9, 10, 0]],
                             dtype='float32')
        rng = numpy.random.RandomState(43)

        p = Prod(axis=1)
        grad_p = theano.tensor.grad(p(x).sum(), x)
        grad_fn = theano.function([x], grad_p, mode=self.mode)
        assert numpy.allclose(grad_fn(x_val1),
                              [[6., 3., 2.], [30., 0., 0.], [0., 0., 0.]])
        assert numpy.allclose(grad_fn(x_val2),
                              [[0., 0., 2.], [30., 0., 0.],
                               [72., 63., 56.], [0., 0., 90.]])

        p_axis0 = Prod(axis=0)
        grad_p_axis0 = theano.tensor.grad(p_axis0(x).sum(), x)
        grad_fn_axis0 = theano.function([x], grad_p_axis0, mode=self.mode)
        assert numpy.allclose(grad_fn_axis0(x_val2),
                              [[0., 400., 0.], [63., 160., 0.],
                               [0., 100., 0.], [0., 80., 0.]])

        tensor.verify_grad(p, [x_val1], rng=rng, mode=self.mode) 
Example #15
Source File: test_basic.py    From D-VAE with MIT License
def test_csm_grad(self):
        for sparsetype in ('csr', 'csc'):
            x = tensor.vector()
            y = tensor.ivector()
            z = tensor.ivector()
            s = tensor.ivector()
            call = getattr(sp, sparsetype + '_matrix')
            spm = call(random_lil((300, 400), config.floatX, 5))
            out = tensor.grad(dense_from_sparse(
                CSM(sparsetype)(x, y, z, s)
            ).sum(), x)
            self._compile_and_check([x, y, z, s],
                                    [out],
                                    [spm.data, spm.indices, spm.indptr,
                                     spm.shape],
                                    (CSMGrad, CSMGradC)
                                   ) 
Example #16
Source File: test_subtensor.py    From D-VAE with MIT License
def test_inc_adv_subtensor_with_broadcasting(self):
        if inplace_increment is None:
            raise inplace_increment_missing

        inc = dscalar()
        a = inc_subtensor(self.m[self.ix1, self.ix12], inc)
        g_inc = tensor.grad(a.sum(), inc)

        assert a.type == self.m.type, (a.type, self.m.type)
        f = theano.function([self.m, self.ix1, self.ix12, inc], [a, g_inc],
                            allow_input_downcast=True)
        aval, gval = f([[.4, .9, .1],
                        [5, 6, 7],
                        [.5, .3, .15]],
                       [1, 2, 1],
                       [0, 1, 0],
                       2.1)
        assert numpy.allclose(aval,
                              [[.4, .9, .1],
                               [5 + 2.1 * 2, 6, 7],
                               [.5, .3 + 2.1, .15]]), aval
        assert numpy.allclose(gval, 3.0), gval 
Example #17
Source File: test_subtensor.py    From D-VAE with MIT License
def test_inc_adv_subtensor1_with_broadcasting(self):
        if inplace_increment is None:
            raise inplace_increment_missing

        inc = dscalar()
        a = inc_subtensor(self.m[self.ix1], inc)
        g_inc = tensor.grad(a.sum(), inc)

        assert a.type == self.m.type, (a.type, self.m.type)
        f = theano.function([self.m, self.ix1, inc], [a, g_inc],
                            allow_input_downcast=True)
        aval, gval = f([[.4, .9, .1],
                        [5, 6, 7],
                        [.5, .3, .15]],
                       [0, 1, 0],
                       2.1)
        assert numpy.allclose(aval,
                              [[.4 + 2.1 * 2, .9 + 2.1 * 2, .1 + 2.1 * 2],
                               [5 + 2.1, 6 + 2.1, 7 + 2.1],
                               [.5, .3, .15]]), aval
        assert numpy.allclose(gval, 9.0), gval 
Example #18
Source File: test_subtensor.py    From D-VAE with MIT License
def test_err_bound_list(self):
        n = self.shared(numpy.ones((2, 3), dtype=self.dtype) * 5)
        l = lvector()
        t = n[l]
        # We test against AdvancedSubtensor1, as the data gets transferred to the CPU.
        self.assertTrue(isinstance(t.owner.op, tensor.AdvancedSubtensor1))

        f = self.function([l], t, op=self.adv_sub1)

        # the grad
        g = self.function([l],
                          inc_subtensor(t, numpy.asarray([[1.]], self.dtype)),
                          op=self.adv_incsub1)

        for shp in [[0, 4], [0, -3], [-10]]:
            self.assertRaises(IndexError, f, shp)
            self.assertRaises(IndexError, g, shp) 
Example #19
Source File: gradient.py    From D-VAE with MIT License
def grad_not_implemented(op, x_pos, x, comment=""):
    """
    Return an un-computable symbolic variable of type `x.type`.

    If any call to tensor.grad results in an expression containing this
    un-computable variable, an exception (NotImplementedError) will be
    raised indicating that the gradient on the
    `x_pos`'th input of `op` has not been implemented. Likewise if
    any call to theano.function involves this variable.

    Optionally adds a comment to the exception explaining why this
    gradient is not implemented.
    """

    return (NullType((
        "This variable is Null because the grad method for "
        "input %s (%s) of the %s op is not implemented. %s"
    ) % (x_pos, x, op, comment)))() 
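As a usage note, the following is a hypothetical sketch (not part of the D-VAE source) of how an Op's grad method typically returns grad_not_implemented for an input whose gradient it does not provide:

import theano
from theano import gof

class IdentityNoGrad(gof.Op):
    __props__ = ()

    def make_node(self, x):
        return gof.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        output_storage[0][0] = inputs[0].copy()

    def grad(self, inputs, output_grads):
        x, = inputs
        # Any call to tensor.grad whose result contains this variable,
        # or any theano.function compiled from it, raises NotImplementedError.
        return [theano.gradient.grad_not_implemented(
            self, 0, x, "gradient intentionally left unimplemented")]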
Example #20
Source File: test_subtensor.py    From D-VAE with MIT License
def test_grad_0d(self):
        data = numpy.asarray(rand(2, 3), dtype=self.dtype)
        n = self.shared(data)
        t = n[1, 0]
        gn = theano.tensor.grad(theano.tensor.sum(theano.tensor.exp(t)), n)
        f = self.function([], gn)
        topo = f.maker.fgraph.toposort()
        topo_ = [node for node in topo if not isinstance(node.op,
             self.ignore_topo)]
        if not self.fast_compile:
            assert len(topo_) == 6
        assert numpy.sum([isinstance(node.op, self.inc_sub)
             for node in topo_]) == 1
        assert numpy.sum([isinstance(node.op, self.sub)
             for node in topo_]) == 1

        gval = f()
        good = numpy.zeros_like(data)
        good[1, 0] = numpy.exp(data[1, 0])
        self.assertTrue(numpy.allclose(gval, good), (gval, good)) 
Example #21
Source File: test_subtensor.py    From D-VAE with MIT License
def test_grad_2d_inc_set_subtensor(self):
        for n_shape, m_shape in [
            [(2, 3), (2, 2)],
            [(3, 2), (2, 2)],
            [(3, 2), (1, 2)],
            [(3, 2), (2,)],
        ]:
            for op in [inc_subtensor, set_subtensor]:
                subi = 2
                data = numpy.asarray(rand(*n_shape), dtype=self.dtype)
                n = self.shared(data)
                z = scal.constant(subi)
                m = matrix('m', dtype=self.dtype)
                mv = numpy.asarray(rand(*m_shape), dtype=self.dtype)

                t = op(n[:z, :z], m)
                gn, gm = theano.tensor.grad(theano.tensor.sum(t), [n, m])
                utt.verify_grad(lambda m: op(n[:z, :z], m), [mv])
                utt.verify_grad(lambda nn: op(nn[:z, :z], mv), [data]) 
Example #22
Source File: test_subtensor.py    From D-VAE with MIT License
def test_grad_1d(self):
        subi = 0
        data = numpy.asarray(rand(2, 3), dtype=self.dtype)
        n = self.shared(data)
        z = scal.constant(subi)
        t = n[z:, z]
        gn = theano.tensor.grad(theano.tensor.sum(theano.tensor.exp(t)), n)

        f = inplace_func([], gn, mode=self.mode)
        topo = f.maker.fgraph.toposort()
        topo_ = [node for node in topo if not isinstance(node.op,
                                                         self.ignore_topo)]
        if not self.fast_compile:
            assert len(topo_) == 6
        assert numpy.sum([isinstance(node.op, self.inc_sub)
                          for node in topo_]) == 1
        assert numpy.sum([isinstance(node.op, self.sub)
                          for node in topo_]) == 1
        gval = f()

        good = numpy.zeros_like(data)
        good[subi:, subi] = numpy.exp(data[subi:, subi])
        self.assertTrue(numpy.allclose(gval, good), (gval, good)) 
Example #23
Source File: gradient.py    From D-VAE with MIT License
def grad_undefined(op, x_pos, x, comment=""):
    """
    Return an un-computable symbolic variable of type `x.type`.

    If any call to tensor.grad results in an expression containing this
    un-computable variable, an exception (GradUndefinedError) will be
    raised indicating that the gradient on the
    `x_pos`'th input of `op` is mathematically undefined. Likewise if
    any call to theano.function involves this variable.

    Optionally adds a comment to the exception explaining why this
    gradient is not defined.
    """

    return (NullType(
        (
            "This variable is Null because the grad method for "
            "input %s (%s) of the %s op is mathematically undefined. %s"
        ) % (x_pos, x, op, comment)))() 
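Correspondingly, a hypothetical grad method (again, not from the source above) returns grad_undefined for an input, such as an integer index, whose gradient is mathematically undefined; here the op is assumed to act as the identity on x:

def grad(self, inputs, output_grads):
    x, index = inputs
    gz, = output_grads
    # Assuming the op is the identity in x, the gradient wrt x is gz;
    # the gradient wrt the discrete index input is mathematically undefined.
    return [gz,
            theano.gradient.grad_undefined(
                self, 1, index,
                "The gradient with respect to an integer index is undefined.")]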
Example #24
Source File: test_elemwise.py    From D-VAE with MIT License
def test_gt_grad():
    """A user test that failed.

    Something about it made Elemwise.grad return something that was
    too complicated for get_scalar_constant_value to recognize as being 0, so
    gradient.grad reported that it was not a valid gradient of an
    integer.

    """
    floatX = config.floatX
    T = theano.tensor

    input_ = T.vector(dtype=floatX)
    random_values = numpy.random.RandomState(1234).uniform(
                                                low=-1, high=1, size=(2, 2))
    W_values = numpy.asarray(random_values, dtype=floatX)
    W = theano.shared(value=W_values, name='weights')
    correct_score = T.dot(input_, W)
    wrong_input = T.vector(dtype=floatX)
    wrong_score = theano.clone(correct_score, {input_: wrong_input})
    # Hinge loss

    scores = T.ones_like(correct_score) - correct_score + wrong_score
    cost = (scores * (scores > 0)).sum()
    T.grad(cost, input_) 
Example #25
Source File: test_conv.py    From D-VAE with MIT License
def test_broadcast_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    sigma_data = 20
    window_radius = 3

    filter_1d = T.arange(-window_radius, window_radius+1)
    filter_1d = filter_1d.astype(theano.config.floatX)
    filter_1d = T.exp(-0.5*filter_1d**2/sigma**2)
    filter_1d = filter_1d / filter_1d.sum()

    filter_W = filter_1d.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    theano.grad(y.sum(), sigma) 
Example #26
Source File: test_gradient.py    From D-VAE with MIT License
def test_grad_duplicate_input(self):

        # test that the grad works when a variable
        # appears in more than one place in a node's input list

        def output(x):
            return (x * x)

        rng = np.random.RandomState([2012, 8, 28])

        vx = rng.randn(2)

        theano.tests.unittest_tools.verify_grad(output, [vx]) 
Example #27
Source File: test_gradient.py    From D-VAE with MIT License
def test_undefined_grad_grad(self):
        # tests that undefined grads are caught in the grad method

        V = theano.tensor.TensorType(dtype=config.floatX,
                                     broadcastable=(False, False, False, False, False))()
        W = theano.tensor.TensorType(dtype=config.floatX,
                                     broadcastable=(False, False, False, False, False))()
        b = theano.tensor.vector()
        d = theano.tensor.ivector()

        Z = conv3D(V, W, b, d)

        self.assertRaises(TypeError, theano.gradient.grad, Z.sum(), d) 
Example #28
Source File: test_gradient.py    From D-VAE with MIT License
def test_grad_name(self):
        A = theano.tensor.matrix('A')
        x = theano.tensor.vector('x')
        f = theano.tensor.dot(x, theano.tensor.dot(A, x))
        f.name = 'f'
        g = theano.tensor.grad(f, x)
        assert g.name == '(df/dx)' 
Example #29
Source File: test_subtensor.py    From D-VAE with MIT License
def test_adv_sub1_idx_broadcast(self):
        # The idx can be a broadcastable vector.
        ones = numpy.ones((4, 3), dtype=self.dtype)
        n = self.shared(ones * 5)
        idx = tensor.TensorType(dtype='int64', broadcastable=(True,))()
        assert idx.type.broadcastable == (True,)
        t = n[idx]
        self.assertTrue(isinstance(t.owner.op, tensor.AdvancedSubtensor1))

        f = self.function([idx], t, op=self.adv_sub1)
        topo = f.maker.fgraph.toposort()
        topo_ = [node for node in topo if not isinstance(node.op,
                                                         self.ignore_topo)]
        assert len(topo_) == 1
        self.assertTrue(isinstance(topo_[0].op, self.adv_sub1))
        f_0 = f([0])
        self.assertTrue(f_0.shape == (1, 3))
        self.assertTrue(numpy.allclose(f_0, 5))

        # Test the gradient
        c = t.sum()
        gn = theano.grad(c, n)
        g = self.function([idx], gn, op=self.adv_incsub1)
        g_0 = g([0])
        self.assertTrue(g_0.shape == (4, 3))
        self.assertTrue(numpy.allclose(g_0[0], 1))
        self.assertTrue(numpy.allclose(g_0[1:], 0)) 
Example #30
Source File: test_gradient.py    From D-VAE with MIT License
def test_grad_grad_quadratic(self):

        # test the gradient on a graph constructed using the gradient

        def output(x, A):
            orig_cost = theano.tensor.dot(x, theano.tensor.dot(A, x))
            return theano.gradient.grad(orig_cost, x)

        rng = np.random.RandomState([2012, 8, 28])

        vx = rng.randn(2)
        vA = rng.randn(2, 2)

        theano.tests.unittest_tools.verify_grad(output, [vx, vA])