Python theano.tensor.Elemwise() Examples

The following are 30 code examples of theano.tensor.Elemwise(), drawn from the open-source projects named above each example. You may also want to check out all available functions/classes of the module theano.tensor, or try the search function.
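As quick orientation before the examples: Elemwise wraps a scalar op so that it applies elementwise over tensors, broadcasting dimensions of size 1. Below is a minimal sketch of that idea (written for this page, not taken from any of the projects that follow); it assumes Theano is installed.

import numpy
import theano
import theano.scalar as scal
import theano.tensor as T

# Wrap scalar addition so it maps elementwise over tensors.
add_elemwise = T.Elemwise(scal.add)

row = T.frow('row')  # shape (1, n): dim 0 is broadcastable
col = T.fcol('col')  # shape (m, 1): dim 1 is broadcastable
out = add_elemwise(row, col)

f = theano.function([row, col], out)
r = numpy.arange(3, dtype='float32').reshape(1, 3)
c = numpy.arange(2, dtype='float32').reshape(2, 1)
print(f(r, c).shape)  # (2, 3): the inputs broadcast against each other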
Example #1
Source File: theano_test.py    From OpenDeep with Apache License 2.0
def main():
    vlen = 10 * 30 * 768  # 10 x #cores x # threads per core
    iters = 1000

    rng = numpy.random.RandomState(22)
    x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
    f = function([], T.exp(x))
    print(f.maker.fgraph.toposort())
    t0 = time.time()
    for i in range(iters):
        r = f()
    t1 = time.time()
    print('Looping %d times took' % iters, t1 - t0, 'seconds')
    print('Result is', r)
    if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
        print('Used the cpu')
    else:
        print('Used the gpu') 
Example #2
Source File: test_basic_ops.py    From attention-lvcsr with MIT License
def test_elemwise4():
    """ Test that two vectors can be broadcast to form an outer
    product (by performing rank-1 matrix update"""

    shape = (3, 4)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    c = tensor.fvector()
    f = pfunc([b, c], [],
              updates=[(a, (a + b.dimshuffle('x', 0) * c.dimshuffle(0, 'x')))],
              mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(4), dtype='float32'),
      theano._asarray(numpy.random.rand(3), dtype='float32')) 
Example #3
Source File: test_basic_ops.py    From attention-lvcsr with MIT License
def test_elemwise3():
    """ Several kinds of elemwise expressions with dimension
    permutations and broadcasting"""

    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    new_val = (a + b).dimshuffle([2, 0, 3, 1])
    new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])
    f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(6), dtype='float32')) 
Example #4
Source File: test_opt.py    From attention-lvcsr with MIT License
def test_incsubtensor_mixed():

    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
    # The result is defined to be float32, so it is OK
    # to downcast the float64 increment in order to
    # transfer it to the GPU.
    # The bug was that the optimization called GpuFromHost
    # without casting first, causing the optimization to
    # fail.
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    packed, = f.maker.fgraph.inputs[1].clients
    client, idx = packed
    print(client)
    assert isinstance(client.op, tensor.Elemwise)
    assert isinstance(client.op.scalar_op, theano.scalar.Cast)
    packed, = client.outputs[0].clients
    client, idx = packed
    assert isinstance(client.op, cuda.GpuFromHost) 
Example #5
Source File: test_elemwise.py    From attention-lvcsr with MIT License
def test_gt_grad():
    """A user test that failed.

    Something about it made Elemwise.grad return something that was
    too complicated for get_scalar_constant_value to recognize as being 0, so
    gradient.grad reported that it was not a valid gradient of an
    integer.

    """
    floatX = config.floatX
    T = theano.tensor

    input_ = T.vector(dtype=floatX)
    random_values = numpy.random.RandomState(1234).uniform(
                                                low=-1, high=1, size=(2, 2))
    W_values = numpy.asarray(random_values, dtype=floatX)
    W = theano.shared(value=W_values, name='weights')
    correct_score = T.dot(input_, W)
    wrong_input = T.vector(dtype=floatX)
    wrong_score = theano.clone(correct_score, {input_: wrong_input})
    # Hinge loss

    scores = T.ones_like(correct_score) - correct_score + wrong_score
    cost = (scores * (scores > 0)).sum()
    T.grad(cost, input_) 
Example #6
Source File: test_elemwise.py    From attention-lvcsr with MIT License
def test_infer_shape(self):

        for s_left, s_right in [((5, 6), (5, 6)),
                                ((5, 6), (5, 1)),
                                ((5, 6), (1, 6)),
                                ((5, 1), (5, 6)),
                                ((1, 6), (5, 6)),
                                ((2, 3, 4, 5), (2, 3, 4, 5)),
                                ((2, 3, 4, 5), (2, 3, 1, 5)),
                                ((2, 3, 4, 5), (1, 3, 4, 5)),
                                ((2, 1, 4, 5), (2, 3, 4, 5)),
                                ((2, 3, 4, 1), (2, 3, 4, 5))]:
            dtype = theano.config.floatX
            t_left = TensorType(dtype, [(entry == 1) for entry in s_left])()
            t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
            t_left_val = numpy.zeros(s_left, dtype=dtype)
            t_right_val = numpy.zeros(s_right, dtype=dtype)
            self._compile_and_check([t_left, t_right],
                            [Elemwise(scalar.add)(t_left, t_right)],
                            [t_left_val, t_right_val], Elemwise) 
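The broadcastable-pattern trick in the test above is worth isolating: dimensions of size 1 are marked broadcastable in the TensorType, which is what lets Elemwise stretch them. A standalone sketch (not from the test suite):

import numpy
import theano
import theano.scalar as scalar
from theano.tensor import Elemwise, TensorType

s_left, s_right = (5, 1), (5, 6)
dtype = theano.config.floatX
# Mark dimensions of size 1 as broadcastable.
t_left = TensorType(dtype, [entry == 1 for entry in s_left])()
t_right = TensorType(dtype, [entry == 1 for entry in s_right])()

f = theano.function([t_left, t_right],
                    Elemwise(scalar.add)(t_left, t_right))
out = f(numpy.zeros(s_left, dtype=dtype),
        numpy.ones(s_right, dtype=dtype))
print(out.shape)  # (5, 6): the size-1 dimension was broadcast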
Example #7
Source File: test_basic_ops.py    From D-VAE with MIT License
def test_elemwise4():
    """ Test that two vectors can be broadcast to form an outer
    product (by performing rank-1 matrix update"""

    shape = (3, 4)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    c = tensor.fvector()
    f = pfunc([b, c], [],
              updates=[(a, (a + b.dimshuffle('x', 0) * c.dimshuffle(0, 'x')))],
              mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(4), dtype='float32'),
      theano._asarray(numpy.random.rand(3), dtype='float32')) 
Example #8
Source File: test_basic_ops.py    From D-VAE with MIT License
def test_elemwise3():
    """ Several kinds of elemwise expressions with dimension
    permutations and broadcasting"""

    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    new_val = (a + b).dimshuffle([2, 0, 3, 1])
    new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])
    f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(6), dtype='float32')) 
Example #9
Source File: test_opt.py    From D-VAE with MIT License
def test_incsubtensor_mixed():

    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
    # The result is defined to be float32, so it is OK
    # to downcast the float64 increment in order to
    # transfer it to the GPU.
    # The bug was that the optimization called GpuFromHost
    # without casting first, causing the optimization to
    # fail.
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    packed, = f.maker.fgraph.inputs[1].clients
    client, idx = packed
    print(client)
    assert isinstance(client.op, tensor.Elemwise)
    assert isinstance(client.op.scalar_op, theano.scalar.Cast)
    packed, = client.outputs[0].clients
    client, idx = packed
    assert isinstance(client.op, cuda.GpuFromHost) 
Example #10
Source File: test_if_using_gpu.py    From deep_disfluency with MIT License
def test_if_using_GPU(verbose=False):
    dtype = config.floatX  # @UndefinedVariable
    vlen = 10 * 30 * 768  # 10 x #cores x # threads per core
    iters = 100

    rng = numpy.random.RandomState(22)
    x = shared(numpy.asarray(rng.rand(vlen), dtype))
    f = function([], tensor.exp(x))
    # print(f.maker.fgraph.toposort())
    t0 = time.time()
    for _ in range(iters):
        r = f()
    t1 = time.time()
    dur = t1 - t0
    if verbose:
        print("Looping %d times took %f seconds" % (iters, dur))
        print("Result is %s" % (r,))
    if numpy.any([isinstance(x.op, tensor.Elemwise) and
                  ('Gpu' not in type(x.op).__name__)
                  for x in f.maker.fgraph.toposort()]):
        print('Using the cpu')
        return False
    else:
        print('Using the gpu')
        return True 
Example #11
Source File: test_elemwise.py    From D-VAE with MIT License
def test_gt_grad():
    """A user test that failed.

    Something about it made Elemwise.grad return something that was
    too complicated for get_scalar_constant_value to recognize as being 0, so
    gradient.grad reported that it was not a valid gradient of an
    integer.

    """
    floatX = config.floatX
    T = theano.tensor

    input_ = T.vector(dtype=floatX)
    random_values = numpy.random.RandomState(1234).uniform(
                                                low=-1, high=1, size=(2, 2))
    W_values = numpy.asarray(random_values, dtype=floatX)
    W = theano.shared(value=W_values, name='weights')
    correct_score = T.dot(input_, W)
    wrong_input = T.vector(dtype=floatX)
    wrong_score = theano.clone(correct_score, {input_: wrong_input})
    # Hinge loss

    scores = T.ones_like(correct_score) - correct_score + wrong_score
    cost = (scores * (scores > 0)).sum()
    T.grad(cost, input_) 
Example #12
Source File: theanotools.py    From pycog with MIT License
def get_processor_type():
    """
    Test whether the GPU is being used, based on the example in

      http://deeplearning.net/software/theano/tutorial/using_gpu.html

    """
    rng = np.random.RandomState(22)

    n = 10*30*768
    x = shared(rng.rand(n))
    f = function([], T.exp(x))

    if np.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
        return 'cpu'
    return 'gpu' 
Example #13
Source File: test_elemwise.py    From D-VAE with MIT License
def test_infer_shape(self):

        for s_left, s_right in [((5, 6), (5, 6)),
                                ((5, 6), (5, 1)),
                                ((5, 6), (1, 6)),
                                ((5, 1), (5, 6)),
                                ((1, 6), (5, 6)),
                                ((2, 3, 4, 5), (2, 3, 4, 5)),
                                ((2, 3, 4, 5), (2, 3, 1, 5)),
                                ((2, 3, 4, 5), (1, 3, 4, 5)),
                                ((2, 1, 4, 5), (2, 3, 4, 5)),
                                ((2, 3, 4, 1), (2, 3, 4, 5))]:
            dtype = theano.config.floatX
            t_left = TensorType(dtype, [(entry == 1) for entry in s_left])()
            t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
            t_left_val = numpy.zeros(s_left, dtype=dtype)
            t_right_val = numpy.zeros(s_right, dtype=dtype)
            self._compile_and_check([t_left, t_right],
                            [Elemwise(scalar.add)(t_left, t_right)],
                            [t_left_val, t_right_val], Elemwise) 
Example #14
Source File: test_elemwise.py    From attention-lvcsr with MIT License
def with_linker_inplace(self, linker, op, type, rand_val):
        for xsh, ysh in [((5, 5), (5, 5)),
                         ((5, 5), (1, 5)),
                         ((5, 5), (5, 1)),
                         ((1, 1), (1, 1)),
                         ((2, 3, 4, 5), (2, 3, 4, 5)),
                         ((2, 3, 4, 5), (1, 3, 1, 5)),
                         ((2, 3, 4, 5), (1, 1, 1, 1)),
                         ((), ())]:
            x = type('float64', [(entry == 1) for entry in xsh])('x')
            y = type('float64', [(entry == 1) for entry in ysh])('y')
            e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
            f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
            xv = rand_val(xsh)
            yv = rand_val(ysh)
            zv = xv + yv

            f(xv, yv)

            self.assertTrue((xv == zv).all())
            # test Elemwise.infer_shape
            # the Shape op doesn't implement c_code!
            if isinstance(linker, gof.PerformLinker):
                x = type('float64', [(entry == 1) for entry in xsh])('x')
                y = type('float64', [(entry == 1) for entry in ysh])('y')
                e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
                f = copy(linker).accept(FunctionGraph(
                    [x, y], [e.shape])).make_function()
                xv = rand_val(xsh)
                yv = rand_val(ysh)
                zv = xv + yv

                f(xv, yv)

                assert xv.shape == zv.shape 
Example #15
Source File: cuda.py    From spinn with MIT License
def local_gpua_row_switch(node):
    """
    Detects eligible Switch instances and replaces them with a GPU
    row switch.
    """

    # Bail out unless this is an Elemwise whose scalar op is a Switch.
    if (node.op.__class__ != T.Elemwise
            or node.op.scalar_op.__class__ != theano.scalar.Switch):
        return False

    cond, ift, iff = node.inputs
    out, = node.outputs

    # Only applies to Switch instances where a vector mask broadcasts over
    # matrices.
    bcast = cond.broadcastable
    if not bcast or not (not bcast[0] and all(bcast[1:])
                         and ift.ndim in [2, 3]):
        return False

    if not (ift.dtype == iff.dtype == "float32"):
        return False

    if cond.owner and isinstance(cond.owner.op, HostFromGpu):
        gpu_cond, = cond.owner.inputs
    else:
        gpu_cond = as_cuda_ndarray_variable(
                T.cast(cond.flatten(), "float32"))

    if ift.owner and isinstance(ift.owner.op, HostFromGpu):
        gpu_ift, = ift.owner.inputs
    else:
        gpu_ift = as_cuda_ndarray_variable(ift)

    if iff.owner and isinstance(iff.owner.op, HostFromGpu):
        gpu_iff, = iff.owner.inputs
    else:
        gpu_iff = as_cuda_ndarray_variable(iff)

    gpu_op = GpuRowSwitch()
    # Use the GPU-resident condition computed above.
    return [HostFromGpu()(gpu_op(gpu_cond, gpu_ift, gpu_iff))]
Example #16
Source File: test_basic_ops.py    From attention-lvcsr with MIT License
def test_elemwise2():
    """ Several kinds of elemwise expressions with dimension permutations """
    rng = numpy.random.RandomState(int(time.time()))
    shape = (3, 5)
    for pattern in [(0, 1), (1, 0)]:
        a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),
                                                   dtype='float32'), name=None)
        b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()
        f = pfunc([b], [], updates=[(a, (a + b).dimshuffle(pattern))],
                  mode=mode_with_gpu)
        has_elemwise = False
        for i, node in enumerate(f.maker.fgraph.toposort()):
            has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
        assert not has_elemwise
        # let debugmode catch errors
        f(theano._asarray(rng.rand(*shape), dtype='float32') * .3)

    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()
    f = pfunc([b], [], updates=[(a, (a + b).dimshuffle([2, 0, 3, 1]) *
        tensor.exp(b ** a).dimshuffle([2, 0, 3, 1]))], mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(rng.rand(*shape), dtype='float32')) 
Example #17
Source File: test_opt.py    From attention-lvcsr with MIT License
def test_erfinvgpu():
    """ Test that local_gpu_elemwise_0 replaces Erfinv with ErfinvGPU """
    x = tensor.fmatrix()
    f = theano.function([x], tensor.Elemwise(erfinv)(x), mode=mode_with_gpu)
    f2 = theano.function([x], tensor.Elemwise(erfinv)(x),
                         mode=mode_without_gpu)
    assert isinstance(f.maker.fgraph.toposort()[1].op, cuda.GpuElemwise)
    assert isinstance(f.maker.fgraph.toposort()[1].op.scalar_op,
                      cuda.elemwise.ErfinvGPU)
    xv = numpy.random.rand(7, 8).astype('float32')
    assert numpy.allclose(f(xv), f2(xv)) 
Example #18
Source File: test_elemwise.py    From D-VAE with MIT License
def with_linker(self, linker, op, type, rand_val):
        for xsh, ysh in [((3, 5), (3, 5)),
                         ((3, 5), (1, 5)),
                         ((3, 5), (3, 1)),
                         ((1, 5), (5, 1)),
                         ((1, 1), (1, 1)),
                         ((self.openmp_minsize,), (self.openmp_minsize,)),
                         ((self.openmp_minsize_sqrt,
                           self.openmp_minsize_sqrt),
                          (self.openmp_minsize_sqrt,
                           self.openmp_minsize_sqrt)),
                         ((2, 3, 4, 5), (2, 3, 4, 5)),
                         ((2, 3, 4, 5), (1, 3, 1, 5)),
                         ((2, 3, 4, 5), (1, 1, 1, 1)),
                         ((), ())]:
            x = type('float64', [(entry == 1) for entry in xsh])('x')
            y = type('float64', [(entry == 1) for entry in ysh])('y')
            e = op(scalar.add)(x, y)
            f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
            xv = rand_val(xsh)
            yv = rand_val(ysh)
            zv = xv + yv

            unittest_tools.assert_allclose(f(xv, yv), zv)

            # test Elemwise.infer_shape
            # the Shape op doesn't implement c_code!
            if isinstance(linker, gof.PerformLinker):
                x = type('float64', [(entry == 1) for entry in xsh])('x')
                y = type('float64', [(entry == 1) for entry in ysh])('y')
                e = op(scalar.add)(x, y)
                f = copy(linker).accept(FunctionGraph(
                    [x, y], [e.shape])).make_function()
                assert tuple(f(xv, yv)) == tuple(zv.shape) 
Example #19
Source File: opt.py    From attention-lvcsr with MIT License
def local_gpu_elemwise_1(node):
    """
    gpu_from_host(Elemwise) -> GpuElemwise(gpu_from_host(...))

    """
    if isinstance(node.op, GpuFromHost):
        host_i, = node.inputs
        if (host_i.owner and
                isinstance(host_i.owner.op, tensor.Elemwise) and
                len(host_i.clients) == 1 and
                dtype_in_elemwise_supported(node.op)):

            elemwise_node = host_i.owner
            # Don't set any inplace pattern.
            # gpu_inplace_elemwise_optimizer will do it later

            if isinstance(elemwise_node.op.scalar_op, Erfinv):
                new_op = GpuElemwise(erfinv_gpu)
            elif isinstance(elemwise_node.op.scalar_op, Erfcx):
                new_op = GpuElemwise(erfcx_gpu)
            else:
                try:
                    new_op = GpuElemwise(elemwise_node.op.scalar_op)
                except SupportCodeError:
                    # This happens when scalar_op requires support code
                    return False

            if all([i.dtype == 'float32' for i in elemwise_node.inputs]):
                gpu_elemwise = new_op(*[as_cuda_ndarray_variable(i)
                                        for i in elemwise_node.inputs])
                gpu_elemwise = split_huge_add_or_mul(gpu_elemwise.owner)
                if not gpu_elemwise:
                    return False
                return [gpu_elemwise.outputs[0]]
    return False 
Example #20
Source File: nnet.py    From TextDetector with GNU General Public License v3.0
def arg_of_sigmoid(Y_hat):
    """
    Given the output of a call to theano.tensor.nnet.sigmoid,
    returns the argument to the sigmoid (by tracing the Theano
    graph).

    Parameters
    ----------
    Y_hat : Variable
        T.nnet.sigmoid(Z)

    Returns
    -------
    Z : Variable
        The variable that was passed to T.nnet.sigmoid to create `Y_hat`.
        Raises an error if `Y_hat` is not actually the output of a theano
        sigmoid.
    """
    assert hasattr(Y_hat, 'owner')
    owner = Y_hat.owner
    assert owner is not None
    op = owner.op
    if isinstance(op, Print):
        assert len(owner.inputs) == 1
        Y_hat, = owner.inputs
        owner = Y_hat.owner
        op = owner.op
    success = False
    if isinstance(op, T.Elemwise):
        if isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid):
            success = True
    if not success:
        raise TypeError("Expected Y_hat to be the output of a sigmoid, "
                        "but it appears to be the output of " + str(op) +
                        " of type " + str(type(op)))
    z, = owner.inputs
    assert z.ndim == 2
    return z 
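A hedged usage sketch for arg_of_sigmoid (the variable names here are illustrative, not from TextDetector), assuming the function above and its imports are in scope:

import theano.tensor as T

Z = T.matrix('Z')
Y_hat = T.nnet.sigmoid(Z)

# Tracing back through the Elemwise(ScalarSigmoid) node recovers Z itself.
assert arg_of_sigmoid(Y_hat) is Z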
Example #21
Source File: opt.py    From attention-lvcsr with MIT License
def dtype_in_elemwise_supported(op):
    """
    Return True if the Elemwise op is supported on the GPU.
    Return False otherwise.

    Notes
    -----
    We need to check inside the Composite op.

    """
    def get_all_basic_scalar(composite_op):
        l = []
        for i in composite_op.fgraph.toposort():
            # Recurse into nested Composite ops.
            if isinstance(i.op, theano.scalar.Composite):
                l += get_all_basic_scalar(i.op)
            else:
                l.append(i)
        return l
    if isinstance(op, GpuElemwise) or isinstance(op, tensor.Elemwise):
        if isinstance(op.scalar_op, theano.scalar.Composite):
            scals = get_all_basic_scalar(op.scalar_op)
            for s in scals:
                if any([i.type.dtype not in elemwise_cuda_dtype_supported
                        for i in s.inputs + s.outputs]):
                    return False
    return True 
Example #22
Source File: test_elemwise.py    From attention-lvcsr with MIT License
def test_not_implemented_elemwise_grad():
    """
    Regression test for unimplemented gradient in an Elemwise Op.
    """

    class TestOp(scalar.ScalarOp):

        def __init__(self):
            self.output_types_preference = scalar.upgrade_to_float

        def impl(self, n, x):
            return x * n

        def grad(self, inputs, gout):
            (n, x) = inputs
            (gz,) = gout
            dy_dx = n
            return [theano.gradient.grad_not_implemented(self, 0, n),
                    gz * dy_dx]

    test_op = tensor.Elemwise(TestOp())
    x = tensor.scalar()
    # The call to `grad` used to crash.
    tensor.grad(test_op(2, x), x)
    # Verify that trying to use the not implemented gradient fails.
    try:
        tensor.grad(test_op(x, 2), x)
        assert False
    except theano.gradient.NullTypeGradError:
        pass 
Example #23
Source File: test_elemwise.py    From D-VAE with MIT License
def with_linker_inplace(self, linker, op, type, rand_val):
        for xsh, ysh in [((5, 5), (5, 5)),
                         ((5, 5), (1, 5)),
                         ((5, 5), (5, 1)),
                         ((1, 1), (1, 1)),
                         ((2, 3, 4, 5), (2, 3, 4, 5)),
                         ((2, 3, 4, 5), (1, 3, 1, 5)),
                         ((2, 3, 4, 5), (1, 1, 1, 1)),
                         ((), ())]:
            x = type('float64', [(entry == 1) for entry in xsh])('x')
            y = type('float64', [(entry == 1) for entry in ysh])('y')
            e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
            f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
            xv = rand_val(xsh)
            yv = rand_val(ysh)
            zv = xv + yv

            f(xv, yv)

            self.assertTrue((xv == zv).all())
            # test Elemwise.infer_shape
            # the Shape op doesn't implement c_code!
            if isinstance(linker, gof.PerformLinker):
                x = type('float64', [(entry == 1) for entry in xsh])('x')
                y = type('float64', [(entry == 1) for entry in ysh])('y')
                e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
                f = copy(linker).accept(FunctionGraph(
                    [x, y], [e.shape])).make_function()
                xv = rand_val(xsh)
                yv = rand_val(ysh)
                zv = xv + yv

                f(xv, yv)

                assert xv.shape == zv.shape 
Example #24
Source File: test_elemwise.py    From attention-lvcsr with MIT License
def with_linker(self, linker, op, type, rand_val):
        for xsh, ysh in [((3, 5), (3, 5)),
                         ((3, 5), (1, 5)),
                         ((3, 5), (3, 1)),
                         ((1, 5), (5, 1)),
                         ((1, 1), (1, 1)),
                         ((self.openmp_minsize,), (self.openmp_minsize,)),
                         ((self.openmp_minsize_sqrt,
                           self.openmp_minsize_sqrt),
                          (self.openmp_minsize_sqrt,
                           self.openmp_minsize_sqrt)),
                         ((2, 3, 4, 5), (2, 3, 4, 5)),
                         ((2, 3, 4, 5), (1, 3, 1, 5)),
                         ((2, 3, 4, 5), (1, 1, 1, 1)),
                         ((), ())]:
            x = type('float64', [(entry == 1) for entry in xsh])('x')
            y = type('float64', [(entry == 1) for entry in ysh])('y')
            e = op(scalar.add)(x, y)
            f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
            xv = rand_val(xsh)
            yv = rand_val(ysh)
            zv = xv + yv

            unittest_tools.assert_allclose(f(xv, yv), zv)

            # test Elemwise.infer_shape
            # the Shape op doesn't implement c_code!
            if isinstance(linker, gof.PerformLinker):
                x = type('float64', [(entry == 1) for entry in xsh])('x')
                y = type('float64', [(entry == 1) for entry in ysh])('y')
                e = op(scalar.add)(x, y)
                f = copy(linker).accept(FunctionGraph(
                    [x, y], [e.shape])).make_function()
                assert tuple(f(xv, yv)) == tuple(zv.shape) 
Example #25
Source File: test_elemwise.py    From D-VAE with MIT License
def test_not_implemented_elemwise_grad():
    """
    Regression test for unimplemented gradient in an Elemwise Op.
    """

    class TestOp(scalar.ScalarOp):

        def __init__(self):
            self.output_types_preference = scalar.upgrade_to_float

        def impl(self, n, x):
            return x * n

        def grad(self, inputs, gout):
            (n, x) = inputs
            (gz,) = gout
            dy_dx = n
            return [theano.gradient.grad_not_implemented(self, 0, n),
                    gz * dy_dx]

    test_op = tensor.Elemwise(TestOp())
    x = tensor.scalar()
    # The call to `grad` used to crash.
    tensor.grad(test_op(2, x), x)
    # Verify that trying to use the not implemented gradient fails.
    try:
        tensor.grad(test_op(x, 2), x)
        assert False
    except theano.gradient.NullTypeGradError:
        pass 
Example #26
Source File: test_basic_ops.py    From D-VAE with MIT License
def test_elemwise2():
    """ Several kinds of elemwise expressions with dimension permutations """
    rng = numpy.random.RandomState(int(time.time()))
    shape = (3, 5)
    for pattern in [(0, 1), (1, 0)]:
        a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),
                                                   dtype='float32'), name=None)
        b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()
        f = pfunc([b], [], updates=[(a, (a + b).dimshuffle(pattern))],
                  mode=mode_with_gpu)
        has_elemwise = False
        for i, node in enumerate(f.maker.fgraph.toposort()):
            has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
        assert not has_elemwise
        # let debugmode catch errors
        f(theano._asarray(rng.rand(*shape), dtype='float32') * .3)

    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()
    f = pfunc([b], [], updates=[(a, (a + b).dimshuffle([2, 0, 3, 1]) *
        tensor.exp(b ** a).dimshuffle([2, 0, 3, 1]))], mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(rng.rand(*shape), dtype='float32')) 
Example #27
Source File: test_opt.py    From D-VAE with MIT License
def test_erfinvgpu():
    """ Test that local_gpu_elemwise_0 replaces Erfinv with ErfinvGPU """
    x = tensor.fmatrix()
    f = theano.function([x], tensor.Elemwise(erfinv)(x), mode=mode_with_gpu)
    f2 = theano.function([x], tensor.Elemwise(erfinv)(x),
                         mode=mode_without_gpu)
    assert isinstance(f.maker.fgraph.toposort()[1].op, cuda.GpuElemwise)
    assert isinstance(f.maker.fgraph.toposort()[1].op.scalar_op,
                      cuda.elemwise.ErfinvGPU)
    xv = numpy.random.rand(7, 8).astype('float32')
    if imported_scipy_special:
        assert numpy.allclose(f(xv), f2(xv)) 
Example #28
Source File: opt.py    From D-VAE with MIT License
def dtype_in_elemwise_supported(op):
    """
    Return True if the Elemwise op is supported on the GPU.
    Return False otherwise.

    Notes
    -----
    We need to check inside the Composite op.

    """
    def get_all_basic_scalar(composite_op):
        l = []
        for i in composite_op.fgraph.toposort():
            # Recurse into nested Composite ops.
            if isinstance(i.op, theano.scalar.Composite):
                l += get_all_basic_scalar(i.op)
            else:
                l.append(i)
        return l
    if isinstance(op, GpuElemwise) or isinstance(op, tensor.Elemwise):
        if isinstance(op.scalar_op, theano.scalar.Composite):
            scals = get_all_basic_scalar(op.scalar_op)
            for s in scals:
                if any([i.type.dtype not in elemwise_cuda_dtype_supported
                        for i in s.inputs + s.outputs]):
                    return False
    return True 
Example #29
Source File: opt.py    From D-VAE with MIT License
def local_gpu_elemwise_1(node):
    """
    gpu_from_host(Elemwise) -> GpuElemwise(gpu_from_host(...))

    """
    if isinstance(node.op, GpuFromHost):
        host_i, = node.inputs
        if (host_i.owner and
                isinstance(host_i.owner.op, tensor.Elemwise) and
                len(host_i.clients) == 1 and
                dtype_in_elemwise_supported(node.op)):

            elemwise_node = host_i.owner
            # Don't set any inplace pattern.
            # gpu_inplace_elemwise_optimizer will do it later

            if isinstance(elemwise_node.op.scalar_op, Erfinv):
                new_op = GpuElemwise(erfinv_gpu)
            elif isinstance(elemwise_node.op.scalar_op, Erfcx):
                new_op = GpuElemwise(erfcx_gpu)
            else:
                try:
                    new_op = GpuElemwise(elemwise_node.op.scalar_op)
                except SupportCodeError:
                    # This happens when scalar_op requires support code
                    return False

            if all([i.dtype == 'float32' for i in elemwise_node.inputs]):
                gpu_elemwise = new_op(*[as_cuda_ndarray_variable(i)
                                        for i in elemwise_node.inputs])
                gpu_elemwise = split_huge_add_or_mul(gpu_elemwise.owner)
                if not gpu_elemwise:
                    return False
                return [gpu_elemwise.outputs[0]]
    return False 
Example #30
Source File: nnet.py    From TextDetector with GNU General Public License v3.0
def kl(Y, Y_hat, batch_axis):
    """
    Warning: This function expects a sigmoid nonlinearity in the
    output layer. Returns a batch (vector) of the mean KL divergence
    across units for each example, KL(P || Q), where P is defined by
    Y and Q is defined by Y_hat:

        p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)

    For binary p, the entropy terms p log p and (1-p) log (1-p)
    vanish, leaving

        - p log q - (1-p) log (1-q)

    Substituting q = sigmoid(z), and using 1 - sigmoid(z) = sigmoid(-z)
    and -log sigmoid(z) = softplus(-z), this becomes

        - p log sigmoid(z) - (1-p) log sigmoid(-z)
        = p softplus(-z) + (1-p) softplus(z)

    Parameters
    ----------
    Y : Variable
        targets for the sigmoid outputs. Currently Y must be purely binary.
        If it's not, you'll still get the right gradient, but the
        value in the monitoring channel will be wrong.
    Y_hat : Variable
        predictions made by the sigmoid layer. Y_hat must be generated by
        fprop, i.e., it must be a symbolic sigmoid.
    batch_axis : list
        list of axes to compute average kl divergence across.

    Returns
    -------
    ave : Variable
        average kl divergence between Y and Y_hat.
    """

    assert hasattr(Y_hat, 'owner')
    assert batch_axis is not None

    owner = Y_hat.owner
    assert owner is not None
    op = owner.op

    if not hasattr(op, 'scalar_op'):
        raise ValueError("Expected Y_hat to be generated by an Elemwise "
                         "op, got "+str(op)+" of type "+str(type(op)))
    assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)

    for Yv in get_debug_values(Y):
        if not (Yv.min() >= 0.0 and Yv.max() <= 1.0):
            raise ValueError("Expected Y to be between 0 and 1. Either Y"
                             + "< 0 or Y > 1 was found in the input.")

    z, = owner.inputs

    term_1 = Y * T.nnet.softplus(-z)
    term_2 = (1 - Y) * T.nnet.softplus(z)

    total = term_1 + term_2
    naxes = total.ndim
    axes_to_reduce = list(range(naxes))
    del axes_to_reduce[batch_axis]
    ave = total.mean(axis=axes_to_reduce)

    return ave
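A quick numeric sanity check (a standalone sketch, not part of pylearn2) of the identity the docstring above relies on, -p log sigmoid(z) - (1-p) log sigmoid(-z) = p softplus(-z) + (1-p) softplus(z):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def softplus(z):
    return np.log1p(np.exp(z))

z = np.linspace(-5, 5, 11)
p = 0.25
lhs = -p * np.log(sigmoid(z)) - (1 - p) * np.log(sigmoid(-z))
rhs = p * softplus(-z) + (1 - p) * softplus(z)
assert np.allclose(lhs, rhs)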