Python theano.tensor.scalar() Examples
The following are 30 code examples of theano.tensor.scalar().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module theano.tensor, or try the search function.
Example #1
Source File: test_blas.py From D-VAE with MIT License | 6 votes |
def test_inplace0():
    """gemm_inplace must not be inserted when it would create graph
    cycles, but must be inserted when the output can safely be
    overwritten in place."""
    X, Y, Z = T.matrix('X'), T.matrix('Y'), T.matrix('Z')
    a, b = T.scalar('a'), T.scalar('b')
    R, S = T.matrix('R'), T.matrix('S')
    c = T.scalar('c')

    def graph_ops(fn):
        # All ops appearing in the compiled function's graph.
        return [node.op for node in fn.maker.fgraph.apply_nodes]

    # Inserting gemm_inplace here would create cycles, so it must not
    # appear in the compiled graph.
    f = inplace_func([Z, b, R, S],
                     [Z * (Z + b * T.dot(R, S).T)],
                     mode='FAST_RUN')
    if gemm_inplace in graph_ops(f):
        print(pp(f.maker.fgraph.outputs[0]))
        raise Failure('gemm_inplace in graph')
    assert gemm_no_inplace in graph_ops(f)

    # Here the computation may safely work in-place on Z*c, so
    # gemm_inplace should be inserted.
    f = inplace_func([X, Y, Z, a, b, R, S, c],
                     [Z * (c * Z + a * T.dot(X, Y) + b * T.dot(R, S).T)],
                     mode='FAST_RUN')
    if gemm_inplace not in graph_ops(f):
        theano.printing.debugprint(f)
        raise Failure('no gemm_inplace in graph')
Example #2
Source File: test_function_module.py From D-VAE with MIT License | 6 votes |
def test_state_access(self):
    """Default inputs can be read and written through function
    indexing; a per-call keyword override does not permanently change
    the stored default."""
    a = T.scalar()  # anonymous (un-named) variable
    x, s = T.scalars('xs')
    f = function(
        [x,
         In(a, value=1.0, name='a'),
         In(s, value=0.0, update=s + a * x)],
        s + a * x)

    self.assertEqual(f[a], 1.0)
    self.assertEqual(f[s], 0.0)

    self.assertEqual(f(3.0), 3.0)
    self.assertEqual(f(3.0, a=2.0), 9.0)  # 3.0 + 2*3.0
    # The override of `a` on the previous call was transient.
    self.assertEqual(f[a], 1.0)
    self.assertEqual(f[s], 9.0)

    f[a] = 5.0
    self.assertEqual(f[a], 5.0)
    self.assertEqual(f(3.0), 24.0)  # 9 + 3*5
    self.assertEqual(f[s], 24.0)
Example #3
Source File: basic.py From D-VAE with MIT License | 6 votes |
def make_node(self, x, y):
    """Build an Apply node for a sparse/dense element-wise operation.

    `x` is coerced to a sparse variable (csr or csc) and `y` to a
    tensor variable; the output is sparse, with the upcast dtype of
    both inputs and the same format as `x`.
    """
    x = as_sparse_variable(x)
    y = tensor.as_tensor_variable(y)
    assert x.format in ["csr", "csc"]
    # Upcast so the output dtype can represent values of either input.
    # (Whether the sparse cast itself is implemented is a separate
    # question — see the original TODO.)
    out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)
    # scipy.sparse objects are always matrices (ndim == 2) and
    # broadcasting of the sparse operand is not supported; ndim == 0
    # is additionally allowed for the grad of SpSum().
    assert y.type.ndim in (0, 2)
    out_var = SparseType(dtype=out_dtype, format=x.type.format)()
    return gof.Apply(self, [x, y], [out_var])
Example #4
Source File: test_function_module.py From D-VAE with MIT License | 6 votes |
def test_shared_state0(self):
    """Two functions sharing one mutable state container stay in
    sync: updates through either are visible through both."""
    a = T.scalar()  # anonymous (un-named) variable
    x, s = T.scalars('xs')
    f = function(
        [x,
         In(a, value=1.0, name='a'),
         In(s, value=0.0, update=s + a * x, mutable=True)],
        s + a * x)
    # g shares f's container for s but applies the opposite update.
    g = function(
        [x,
         In(a, value=1.0, name='a'),
         In(s, value=f.container[s], update=s - a * x, mutable=True)],
        s + a * x)

    f(1, 2)
    self.assertEqual(f[s], 2)
    self.assertEqual(g[s], 2)
    g(1, 2)
    self.assertEqual(f[s], 0)
    self.assertEqual(g[s], 0)
Example #5
Source File: test_function_module.py From D-VAE with MIT License | 6 votes |
def test_shared_state1(self):
    """A function without its own update still observes state changes
    made through another function sharing the same container."""
    a = T.scalar()  # anonymous (un-named) variable
    x, s = T.scalars('xs')
    f = function(
        [x,
         In(a, value=1.0, name='a'),
         In(s, value=0.0, update=s + a * x, mutable=True)],
        s + a * x)
    # g reads f's container for s but never writes it.
    g = function(
        [x,
         In(a, value=1.0, name='a'),
         In(s, value=f.container[s])],
        s + a * x)

    f(1, 2)
    self.assertEqual(f[s], 2)
    self.assertEqual(g[s], 2)
    f(1, 2)
    g(1, 2)
    self.assertEqual(f[s], 4)
    self.assertEqual(g[s], 4)
Example #6
Source File: basic.py From D-VAE with MIT License | 6 votes |
def make_node(self, x, index):
    """Build an Apply node extracting a single scalar element of a
    sparse matrix.

    `index` must be a pair of scalar indices, each either a Python
    integer (wrapped into a theano constant) or a 0-d theano
    variable. Slices are rejected.

    Raises
    ------
    Exception
        If any index is a slice.
    NotImplementedError
        If an index is a theano variable with ndim > 0.
    """
    x = as_sparse_variable(x)
    assert x.format in ["csr", "csc"]
    assert len(index) == 2
    input_op = [x]
    for ind in index:
        if isinstance(ind, slice):
            raise Exception("GetItemScalar called with a slice as index!")
        # indexing with a plain int: wrap it into a theano constant
        elif isinstance(ind, integer_types):
            ind = theano.tensor.constant(ind)
            input_op += [ind]
        # indexing with a 0-d theano variable
        elif ind.ndim == 0:
            input_op += [ind]
        else:
            # BUGFIX: the original `raise NotImplemented()` raised a
            # TypeError (NotImplemented is a sentinel value, not an
            # exception class); raise the proper exception instead.
            raise NotImplementedError()
    return gof.Apply(self, input_op, [tensor.scalar(dtype=x.dtype)])
Example #7
Source File: test_function_module.py From D-VAE with MIT License | 6 votes |
def test_shared_state2(self):
    """With mutable=False, calls through the non-updating function
    leave the shared state untouched."""
    a = T.scalar()  # anonymous (un-named) variable
    x, s = T.scalars('xs')
    f = function(
        [x,
         In(a, value=1.0, name='a'),
         In(s, value=0.0, update=s + a * x, mutable=False)],
        s + a * x)
    g = function(
        [x,
         In(a, value=1.0, name='a'),
         In(s, value=f.container[s])],
        s + a * x)

    f(1, 2)
    self.assertEqual(f[s], 2)
    self.assertEqual(g[s], 2)
    f(1, 2)
    self.assertEqual(f[s], 4)
    self.assertEqual(g[s], 4)
    g(1, 2)  # has no effect on state
    self.assertEqual(f[s], 4)
    self.assertEqual(g[s], 4)
Example #8
Source File: test_plain_rnn.py From spinn with MIT License | 6 votes |
def _make_rnn(self, seq_length=4):
    """Build a small RNN over toy embeddings for testing.

    The composition function simply sums the previous hidden state
    and the current input embedding.
    """
    self.embedding_dim = embedding_dim = 3
    self.vocab_size = vocab_size = 10
    self.seq_length = seq_length

    def compose_network(h_prev, inp, embedding_dim, model_dim, vs,
                        name="compose"):
        # Adding the two embeddings == multiplying their concatenation
        # by two stacked identity matrices.
        stacked_eye = T.concatenate(
            [T.eye(model_dim), T.eye(model_dim)], axis=0)
        concatenated = T.concatenate([h_prev, inp], axis=1)
        return concatenated.dot(stacked_eye)

    X = T.imatrix("X")
    training_mode = T.scalar("training_mode")
    vs = VariableStore()
    # Row i of the embedding table is the constant vector [i, i, i].
    embeddings = np.arange(vocab_size).reshape(
        (vocab_size, 1)).repeat(embedding_dim, axis=1)
    self.model = RNN(
        embedding_dim, embedding_dim, vocab_size, seq_length,
        compose_network, IdentityLayer, training_mode, None, vs,
        X=X, make_test_fn=True, initial_embeddings=embeddings)
Example #9
Source File: variable.py From python-mle with MIT License | 6 votes |
def var(name, label=None, observed=False, const=False, vector=False,
        lower=None, upper=None):
    """Create a named theano variable with MLE metadata attached.

    Parameters
    ----------
    name : str
        Name of the variable.
    label : str, optional
        Display label.
    observed : bool
        Whether the variable is observed (data); observed variables
        are automatically const.
    const : bool
        Whether the variable is held constant during fitting.
    vector : bool
        Create a vector variable; only allowed for observed variables.
    lower, upper : float, optional
        Bounds; default to -inf / +inf when not given.

    Raises
    ------
    ValueError
        If `vector` is requested for an unobserved variable, or if
        both `observed` and `const` are passed.
    """
    if vector and not observed:
        raise ValueError('Currently, only observed variables can be vectors')
    if observed and const:
        raise ValueError('Observed variables are automatically const')
    if vector:
        var = T.vector(name)
    else:
        var = T.scalar(name)
    var._name = name
    var._label = label
    var._observed = observed
    var._const = observed or const
    # BUGFIX: `lower or -np.inf` discarded an explicit bound of 0
    # (0 is falsy) and the same for `upper`; compare against None so
    # zero bounds are honored.
    var._lower = lower if lower is not None else -np.inf
    var._upper = upper if upper is not None else np.inf
    return var
Example #10
Source File: preprocessing.py From Projects with MIT License | 6 votes |
def __init__(self):
    """Compile the theano functions used for ZCA whitening: one
    computing the covariance matrix, one applying the transform."""
    X_in = T.matrix('X_in')
    u = T.matrix('u')
    s = T.vector('s')
    eps = T.scalar('eps')
    # Center the data, then form the (biased) covariance matrix.
    centered = X_in - T.mean(X_in, 0)
    covariance = T.dot(centered.T, centered) / centered.shape[0]
    self.sigma = theano.function(
        [X_in], covariance, allow_input_downcast=True)
    # ZCA whitening matrix: u * diag(1/sqrt(s + eps)) * u^T
    inv_sqrt = 1. / T.sqrt(s + eps)
    whitener = T.dot(T.dot(u, T.nlinalg.diag(inv_sqrt)), u.T)
    whitened = T.dot(centered, whitener.T)
    self.compute_zca = theano.function(
        [X_in, u, s, eps], whitened, allow_input_downcast=True)
    self._u = None
    self._s = None
Example #11
Source File: basic.py From D-VAE with MIT License | 6 votes |
def structured_monoid(tensor_op):
    """Decorator factory applying `tensor_op` element-wise to the
    non-zero entries of a sparse matrix.

    The decorated function's first argument must be a sparse matrix
    (csr or csc); the remaining arguments must be scalars, forwarded
    to `tensor_op`.
    """
    def decorator(f):
        def wrapper(*args):
            sparse_arg = as_sparse_variable(args[0])
            assert sparse_arg.format in ["csr", "csc"]
            scalar_args = [scalar.as_scalar(a) for a in args[1:]]
            # Operate on the data vector only; the sparsity structure
            # (indices/indptr/shape) is passed through unchanged.
            data, ind, ptr, shape = csm_properties(sparse_arg)
            new_data = tensor_op(data, *scalar_args)
            return CSM(sparse_arg.format)(new_data, ind, ptr, shape)
        wrapper.__name__ = str(tensor_op.scalar_op)
        return wrapper
    return decorator
Example #12
Source File: basic.py From D-VAE with MIT License | 6 votes |
def make_node(self, a, b):
    """Build an Apply node for sparse `a` dot matrix `b`.

    The result is sparse when `b` is sparse and dense otherwise; its
    dtype is the upcast of both input dtypes.
    """
    a = as_sparse_variable(a)
    assert a.format in ["csr", "csc", "bsr"]
    if not _is_sparse_variable(a):
        raise TypeError('First argument must be of type SparseVariable '
                        'or SparseConstant')
    out_dtype = scalar.upcast(a.type.dtype, b.type.dtype)
    if b.type.ndim != 2:
        raise NotImplementedError('non-matrix b')
    if _is_sparse_variable(b):
        out_var = SparseType(a.type.format, out_dtype)()
        return gof.Apply(self, [a, b], [out_var])
    # Dense output: the first dim is never broadcastable and the
    # second follows b's column dimension.
    dense_out = tensor.tensor(out_dtype,
                              (False, b.type.broadcastable[1]))
    return gof.Apply(self, [a, b], [dense_out])
Example #13
Source File: basic.py From D-VAE with MIT License | 6 votes |
def perform(self, node, inputs, outputs):
    """Compute the gradient w.r.t. the stored values of a sparse
    matrix given its structure (indices/indptr), the dense operand
    `b`, and the output gradient `g_ab`.

    For each stored element at position i_idx (row index i, column
    group j), the gradient is the dot product of row i of `g_ab`
    with row j of `b`.
    """
    (a_indices, a_indptr, b, g_ab) = inputs
    (out,) = outputs
    g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
    # `range` is the Python 3 spelling; the original used `xrange`,
    # presumably imported from a py2/py3 compat shim at module level
    # — behavior is identical either way.
    for j in range(len(a_indptr) - 1):
        ind0 = a_indptr[j]
        ind1 = a_indptr[j + 1]
        for i_idx in range(ind0, ind1):
            i = a_indices[i_idx]
            # Depending on the type of g_ab and b (sparse or dense),
            # the following dot product can result in a scalar or
            # a (1, 1) sparse matrix.
            dot_val = numpy.dot(g_ab[i], b[j].T)
            if isinstance(dot_val, scipy.sparse.spmatrix):
                dot_val = dot_val[0, 0]
            g_a_data[i_idx] = dot_val
    out[0] = g_a_data
Example #14
Source File: basic.py From D-VAE with MIT License | 6 votes |
def make_node(self, alpha, x, y, z):
    """Build an Apply node for alpha * dot(x, y) + z where at least
    one of x, y is sparse (usmm-style fused multiply-add).

    The output is a dense matrix whose dtype is the upcast of all
    four input dtypes.
    """
    if not _is_sparse_variable(x) and not _is_sparse_variable(y):
        # If x and y are tensor, we don't want to use this class
        # We should use Dot22 and Gemm in that case.
        raise TypeError(x)
    dtype_out = scalar.upcast(alpha.type.dtype, x.type.dtype,
                              y.type.dtype, z.type.dtype)
    alpha = tensor.as_tensor_variable(alpha)
    z = tensor.as_tensor_variable(z)

    assert z.ndim == 2
    # alpha must be a (broadcastable) scalar-like tensor.
    assert alpha.type.broadcastable == (True,) * alpha.ndim
    if not _is_sparse_variable(x):
        x = tensor.as_tensor_variable(x)
        # x is dense here, so y must be the sparse operand
        # (guaranteed by the check at the top).
        assert y.format in ["csr", "csc"]
        assert x.ndim == 2
    if not _is_sparse_variable(y):
        y = tensor.as_tensor_variable(y)
        # y is dense here, so x must be the sparse operand.
        assert x.format in ["csr", "csc"]
        assert y.ndim == 2

    return gof.Apply(self, [alpha, x, y, z],
                     [tensor.tensor(dtype=dtype_out,
                                    broadcastable=(False, False))])
Example #15
Source File: test_function_module.py From D-VAE with MIT License | 6 votes |
def __init__(self):
    """Build two functions (f1, f2) that share the state container
    of `s`, plus the symbolic pieces used by the tests."""
    a = T.scalar()  # anonymous (un-named) variable
    x, s = T.scalars('xs')
    v = T.vector('v')
    # Keep the symbolic variables around for the tests.
    self.x = x
    self.s = s
    self.v = v
    self.e = a * x + s
    self.f1 = function(
        [x,
         In(a, value=1.0, name='a'),
         In(s, value=0.0, update=s + a * x, mutable=True)],
        s + a * x)
    # f2 shares f1's container for s, so updates are visible to both.
    self.f2 = function(
        [x,
         In(a, value=1.0, name='a'),
         In(s, value=self.f1.container[s], update=s + a * x,
            mutable=True)],
        s + a * x)
Example #16
Source File: test_blas.py From D-VAE with MIT License | 6 votes |
def test_gemm_nested():
    """Nested linear expressions should collapse into a bounded
    number of gemm nodes."""
    X, Y, Z = T.matrix('X'), T.matrix('Y'), T.matrix('Z')
    a, b = T.scalar('a'), T.scalar('b')
    R, S, U = T.matrix('R'), T.matrix('S'), T.matrix('U')
    c, d = T.scalar('c'), T.scalar('d')

    graph_inputs = [X, Y, Z, R, S, U, a, b, c, d]
    # Same shapes for all three checks: matrices then four scalars.
    shapes = [(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (2, 4),
              (), (), (), ()]

    just_gemm(graph_inputs,
              [a * Z - b * (c * T.dot(X, Y) + d * Z)],
              ishapes=list(shapes),
              max_graphlen=1)
    just_gemm(graph_inputs,
              [a * Z - b * (c * T.dot(X, Y) + d * Z + c * Z)],
              ishapes=list(shapes),
              max_graphlen=1)
    # The extra independent term c*U prevents full fusion.
    just_gemm(graph_inputs,
              [a * Z - b * (c * T.dot(X, Y) + d * Z + c * U)],
              ishapes=list(shapes),
              max_graphlen=3)
Example #17
Source File: test_elemwise.py From D-VAE with MIT License | 6 votes |
def test_c(self):
    """Exercise the C implementation of the reduction for several
    scalar ops and dtypes."""
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    for dtype in ["floatX", "complex64", "complex128", "int8", "uint8"]:
        for op in (scalar.add, scalar.mul):
            self.with_linker(gof.CLinker(), op, dtype=dtype)
    for dtype in ["floatX", "int8", "uint8"]:
        for op in (scalar.minimum, scalar.maximum):
            self.with_linker(gof.CLinker(), op, dtype=dtype)
        self.with_linker(gof.CLinker(), scalar.and_, dtype=dtype,
                         tensor_op=tensor.all)
        self.with_linker(gof.CLinker(), scalar.or_, dtype=dtype,
                         tensor_op=tensor.any)
    # Bitwise ops are only exercised on integer dtypes.
    for dtype in ["int8", "uint8"]:
        for op in (scalar.or_, scalar.and_, scalar.xor):
            self.with_linker(gof.CLinker(), op, dtype=dtype)
Example #18
Source File: test_elemwise.py From D-VAE with MIT License | 6 votes |
def test_infer_shape(self, dtype=None, pre_scalar_op=None):
    """Check shape inference of self.op(scalar.add) over self.cases.

    Each case is (input_shape, axes_to_reduce); dimensions of extent
    1 are marked broadcastable. When `pre_scalar_op` is given it is
    applied to the input first.
    """
    if dtype is None:
        dtype = theano.config.floatX
    for xsh, tosum in self.cases:
        x = self.type(dtype, [(entry == 1) for entry in xsh])('x')
        if pre_scalar_op is not None:
            x = pre_scalar_op(x)
        if tosum is None:
            # None means: reduce over all axes.
            tosum = list(range(len(xsh)))
        xv = numpy.asarray(numpy.random.rand(*xsh), dtype=dtype)
        d = {}
        if pre_scalar_op is not None:
            xv = x.eval({x.owner.inputs[0]: xv})
            d = {pre_scalar_op: pre_scalar_op}
        # NOTE(review): `*d` unpacks the dict's KEYS as positional
        # arguments, so pre_scalar_op (when set) is forwarded
        # positionally to self.op; looks intentional but worth
        # confirming against self.op's signature.
        self._compile_and_check([x],
                                [self.op(scalar.add, axis=tosum, *d)(x)],
                                [xv], self.op,
                                ["local_cut_useless_reduce"],
                                warn=0 not in xsh)
Example #19
Source File: test_extra_ops.py From D-VAE with MIT License | 6 votes |
def test_perform(self):
    """fill_diagonal writes `val` on the diagonal of 2d and 3d
    inputs and leaves everything else untouched."""
    x = tensor.matrix()
    y = tensor.scalar()
    f = function([x, y], fill_diagonal(x, y))
    for shp in [(8, 8), (5, 8), (8, 5)]:
        a = numpy.random.rand(*shp).astype(config.floatX)
        val = numpy.cast[config.floatX](numpy.random.rand())
        out = f(a, val)
        # We can't use numpy.fill_diagonal as it is bugged.
        assert numpy.allclose(numpy.diag(out), val)
        assert (out == val).sum() == min(a.shape)

    # 3d tensor: only the [k, k, k] entries get filled.
    a = numpy.random.rand(3, 3, 3).astype(config.floatX)
    x = tensor.tensor3()
    y = tensor.scalar()
    f = function([x, y], fill_diagonal(x, y))
    val = numpy.cast[config.floatX](numpy.random.rand() + 10)
    out = f(a, val)
    # We can't use numpy.fill_diagonal as it is bugged.
    for k in range(3):
        assert out[k, k, k] == val
    assert (out == val).sum() == min(a.shape)
Example #20
Source File: test_elemwise.py From D-VAE with MIT License | 6 votes |
def test_mean_default_dtype(self):
    """mean() keeps the input dtype, except for discrete inputs
    (with a real reduction axis), which become float64."""
    # Axis should not matter, so rotate through several choices.
    axes = [None, 0, 1, [], [0], [1], [0, 1]]
    for idx, dtype in enumerate(imap(str, theano.scalar.all_types)):
        axis = axes[idx % len(axes)]
        x = tensor.matrix(dtype=dtype)
        m = x.mean(axis=axis)
        if dtype in tensor.discrete_dtypes and axis != []:
            expected = 'float64'
        else:
            expected = dtype
        assert m.dtype == expected, (m, m.dtype, dtype)
        # Also check the compiled function runs on real data.
        f = theano.function([x], m)
        data = (numpy.random.rand(3, 4) * 10).astype(dtype)
        f(data)
Example #21
Source File: test_elemwise.py From D-VAE with MIT License | 6 votes |
def test_prod_without_zeros_default_dtype(self):
    """ProdWithoutZeros() upcasts small integer dtypes to 64-bit and
    keeps every other dtype unchanged."""
    # Axis should not matter, so rotate through several choices.
    axes = [None, 0, 1, [], [0], [1], [0, 1]]
    upcast_map = dict(
        int8='int64',
        int16='int64',
        int32='int64',
        uint8='uint64',
        uint16='uint64',
        uint32='uint64',
    )
    for idx, dtype in enumerate(imap(str, theano.scalar.all_types)):
        axis = axes[idx % len(axes)]
        x = ProdWithoutZeros(axis=axis)(tensor.matrix(dtype=dtype))
        assert x.dtype == upcast_map.get(dtype, dtype)
Example #22
Source File: test_conv.py From D-VAE with MIT License | 6 votes |
def test_broadcast_grad():
    """grad of a full-mode conv2d with a broadcasted (Gaussian)
    filter should build without error."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    sigma_data = 20
    window_radius = 3

    # Normalized 1-d Gaussian kernel parameterized by sigma.
    offsets = T.arange(-window_radius, window_radius + 1)
    offsets = offsets.astype(theano.config.floatX)
    kernel = T.exp(-0.5 * offsets ** 2 / sigma ** 2)
    kernel = kernel / kernel.sum()
    filter_W = kernel.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    # Building the gradient is the whole point of the test.
    theano.grad(y.sum(), sigma)
Example #23
Source File: test_elemwise.py From D-VAE with MIT License | 6 votes |
def test_prod_without_zeros_custom_dtype(self):
    """ProdWithoutZeros() honors an explicitly requested output
    dtype, for every (input, output) dtype combination."""
    # Axis should not matter, so rotate through several choices.
    axes = [None, 0, 1, [], [0], [1], [0, 1]]
    idx = 0
    for input_dtype in imap(str, theano.scalar.all_types):
        x = tensor.matrix(dtype=input_dtype)
        for output_dtype in imap(str, theano.scalar.all_types):
            axis = axes[idx % len(axes)]
            prod_woz_var = ProdWithoutZeros(
                axis=axis, dtype=output_dtype)(x)
            assert prod_woz_var.dtype == output_dtype
            idx += 1
            # Mixed complex/real casts are not exercised at runtime.
            if 'complex' in output_dtype or 'complex' in input_dtype:
                continue
            f = theano.function([x], prod_woz_var)
            data = (numpy.random.rand(2, 3) * 3).astype(input_dtype)
            f(data)
Example #24
Source File: test_elemwise.py From D-VAE with MIT License | 6 votes |
def test_infer_shape(self):
    """Shape inference for Elemwise(add) across a spread of
    broadcasting patterns."""
    shape_pairs = [((5, 6), (5, 6)),
                   ((5, 6), (5, 1)),
                   ((5, 6), (1, 6)),
                   ((5, 1), (5, 6)),
                   ((1, 6), (5, 6)),
                   ((2, 3, 4, 5), (2, 3, 4, 5)),
                   ((2, 3, 4, 5), (2, 3, 1, 5)),
                   ((2, 3, 4, 5), (1, 3, 4, 5)),
                   ((2, 1, 4, 5), (2, 3, 4, 5)),
                   ((2, 3, 4, 1), (2, 3, 4, 5))]
    dtype = theano.config.floatX
    for s_left, s_right in shape_pairs:
        # Dimensions of extent 1 are marked broadcastable.
        t_left = TensorType(dtype, [(entry == 1) for entry in s_left])()
        t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
        t_left_val = numpy.zeros(s_left, dtype=dtype)
        t_right_val = numpy.zeros(s_right, dtype=dtype)
        self._compile_and_check([t_left, t_right],
                                [Elemwise(scalar.add)(t_left, t_right)],
                                [t_left_val, t_right_val], Elemwise)
Example #25
Source File: basic.py From D-VAE with MIT License | 6 votes |
def perform(self, node, inputs, outputs):
    """Compute the gradient w.r.t. the stored values of a sparse
    matrix (row-major/CSR layout): for the stored element at
    (row i, column j), take the dot product of row i of `g_ab`
    with row j of `b`."""
    (a_indices, a_indptr, b, g_ab) = inputs
    (out,) = outputs
    g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
    # `range` is the Python 3 spelling; the original used `xrange`,
    # presumably imported from a py2/py3 compat shim at module level
    # — behavior is identical either way.
    for i in range(len(a_indptr) - 1):  # loop over rows
        ind0 = a_indptr[i]
        ind1 = a_indptr[i + 1]
        # loop over values in that row (columns)
        for j_idx in range(ind0, ind1):
            j = a_indices[j_idx]
            # grad is dot product of i-th row of gradient with
            # j-th row of b.
            # Depending on the type of g_ab and b (sparse or dense),
            # the following dot product can result in a scalar or
            # a (1, 1) sparse matrix.
            dot_val = numpy.dot(g_ab[i], b[j].T)
            if isinstance(dot_val, scipy.sparse.spmatrix):
                dot_val = dot_val[0, 0]
            g_a_data[j_idx] = dot_val
    out[0] = g_a_data
Example #26
Source File: test_function_module.py From D-VAE with MIT License | 5 votes |
def test_pickle(self):
    """Pickling a function: immutable defaults stay shared-by-value,
    mutable containers are copied, and the unpickled copy then
    evolves independently of the original."""
    a = T.scalar()  # the a is for 'anonymous' (un-named).
    x, s = T.scalars('xs')
    f = function([x, In(a, value=1.0, name='a'),
                  In(s, value=0.0, update=s + a * x, mutable=True)],
                 s + a * x)

    try:
        # Note that here we also test protocol 0 on purpose, since it
        # should work (even though one should not use it).
        g = pickle.loads(pickle.dumps(f, protocol=0))
        g = pickle.loads(pickle.dumps(f, protocol=-1))
    except NotImplementedError as e:
        # BUGFIX: exceptions are not indexable on Python 3 (`e[0]`
        # raised TypeError); read the message via e.args instead.
        if e.args[0].startswith('DebugMode is not picklable'):
            return
        else:
            raise

    # if they both return, assume that they return equivalent things.
    self.assertFalse(g.container[0].storage is f.container[0].storage)
    self.assertFalse(g.container[1].storage is f.container[1].storage)
    self.assertFalse(g.container[2].storage is f.container[2].storage)
    self.assertFalse(x in g.container)
    self.assertFalse(x in g.value)

    self.assertFalse(g.value[1] is f.value[1])  # should not have been copied
    self.assertFalse(g.value[2] is f.value[2])  # should have been copied because it is mutable.
    self.assertFalse((g.value[2] != f.value[2]).any())  # its contents should be identical

    self.assertTrue(f(2, 1) == g(2))  # they should be in sync, default value should be copied.
    self.assertTrue(f(2, 1) == g(2))  # they should be in sync, default value should be copied.
    f(1, 2)  # put them out of sync
    self.assertFalse(f(1, 2) == g(1, 2))  # they should not be equal anymore.
Example #27
Source File: test_sort.py From D-VAE with MIT License | 5 votes |
def test4(self):
    """sort() accepts the axis as a symbolic scalar input."""
    a = tensor.dmatrix()
    axis = tensor.scalar()
    f = theano.function([a, axis], sort(a, axis, "mergesort"))
    for axis_val in (0, 1):
        actual = f(self.m_val, axis_val)
        expected = np.sort(self.m_val, axis_val)
        assert np.allclose(actual, expected)
Example #28
Source File: test_elemwise.py From D-VAE with MIT License | 5 votes |
def test_perform_nan(self):
    """NaN propagation through the Python (perform) implementation
    of the reduction, for float and complex dtypes."""
    for dtype in ["floatX", "complex64", "complex128"]:
        for op in (scalar.add, scalar.mul,
                   scalar.maximum, scalar.minimum):
            self.with_linker(gof.PerformLinker(), op, dtype=dtype,
                             test_nan=True)
        self.with_linker(gof.PerformLinker(), scalar.or_, dtype=dtype,
                         test_nan=True, tensor_op=tensor.any)
        self.with_linker(gof.PerformLinker(), scalar.and_, dtype=dtype,
                         test_nan=True, tensor_op=tensor.all)
Example #29
Source File: test_elemwise.py From D-VAE with MIT License | 5 votes |
def with_linker_inplace(self, linker, op, type, rand_val):
    """Check that an inplace elementwise Add overwrites its first
    input with the sum; for PerformLinker, also check that the
    inferred shape matches the actual result shape."""
    shape_pairs = [((5, 5), (5, 5)), ((5, 5), (1, 5)),
                   ((5, 5), (5, 1)), ((1, 1), (1, 1)),
                   ((2, 3, 4, 5), (2, 3, 4, 5)),
                   ((2, 3, 4, 5), (1, 3, 1, 5)),
                   ((2, 3, 4, 5), (1, 1, 1, 1)),
                   ((), ())]

    def make_var(shape, name):
        # Dimensions of extent 1 are marked broadcastable.
        return type('float64', [(entry == 1) for entry in shape])(name)

    for xsh, ysh in shape_pairs:
        x = make_var(xsh, 'x')
        y = make_var(ysh, 'y')
        # The {0: 0} map aliases output 0 onto input 0 -> inplace on x.
        e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
        f = copy(linker).accept(
            FunctionGraph([x, y], [e])).make_function()
        xv = rand_val(xsh)
        yv = rand_val(ysh)
        zv = xv + yv
        f(xv, yv)
        # x must have been overwritten with the sum.
        self.assertTrue((xv == zv).all())

        # test Elemwise.infer_shape
        # (the Shape op doesn't implement c_code, so only exercise
        # it with the PerformLinker)
        if isinstance(linker, gof.PerformLinker):
            x = make_var(xsh, 'x')
            y = make_var(ysh, 'y')
            e = op(scalar.Add(scalar.transfer_type(0)), {0: 0})(x, y)
            f = copy(linker).accept(FunctionGraph(
                [x, y], [e.shape])).make_function()
            xv = rand_val(xsh)
            yv = rand_val(ysh)
            zv = xv + yv
            f(xv, yv)
            assert xv.shape == zv.shape
Example #30
Source File: test_elemwise.py From D-VAE with MIT License | 5 votes |
def test_c_nan(self):
    """NaN propagation through the C implementation of the
    reduction."""
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    for dtype in ["floatX", "complex64", "complex128"]:
        for op in (scalar.add, scalar.mul):
            self.with_linker(gof.CLinker(), op, dtype=dtype,
                             test_nan=True)
    # min/max are only checked on the real dtype.
    for dtype in ["floatX"]:
        for op in (scalar.minimum, scalar.maximum):
            self.with_linker(gof.CLinker(), op, dtype=dtype,
                             test_nan=True)