Python theano.tensor.constant() Examples

The following are 30 code examples of theano.tensor.constant(), extracted from open source projects. The source file and project for each example are listed above it. You may also want to check out all available functions/classes of the module theano.tensor, or try the search function.
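As a quick orientation before the examples: theano.tensor.constant() wraps a Python or NumPy value in an immutable TensorConstant that carries its value, dtype, and broadcastable pattern, and folds directly into compiled graphs. A minimal sketch:

import numpy as np
import theano
import theano.tensor as T

c = T.constant(np.arange(6, dtype='float32').reshape(2, 3))
print(c.dtype)           # 'float32'
print(c.broadcastable)   # (False, False)
print(c.data)            # the wrapped ndarray

# Constants need no inputs at call time; they are baked into the graph.
f = theano.function([], c * 2)
print(f())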
Example #1
Source File: test_base.py    From carl with BSD 3-Clause "New" or "Revised" License
def test_mixin_sklearn_params():
    # get_params
    p = Normal(mu=0.0, sigma=1.0)
    params = p.get_params()
    assert len(params) == 2
    assert "mu" in params
    assert "sigma" in params

    # for parameters, set_params should change the value contained
    old_mu = p.get_params()["mu"]
    p.set_params(mu=42.0)
    new_mu = p.get_params()["mu"]
    assert old_mu is new_mu
    assert new_mu.get_value() == 42.0

    # check errors
    p = Normal(mu=T.constant(0.0), sigma=1.0)
    assert_raises(ValueError, p.set_params, mu=1.0) 
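The error check above relies on the distinction between mutable shared variables and immutable constants. A minimal sketch of that difference in plain Theano, independent of carl's Normal class:

import theano
import theano.tensor as T

mu = theano.shared(0.0, name='mu')  # mutable: the same object can hold a new value
mu.set_value(42.0)
assert mu.get_value() == 42.0

c = T.constant(0.0)                 # immutable: constants expose no set_value
assert not hasattr(c, 'set_value')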
Example #2
Source File: test_basic.py    From D-VAE with MIT License
def test_csm_unsorted(self):
        """
        Test support for gradients of unsorted inputs.
        """
        sp_types = {'csc': sp.csc_matrix,
                    'csr': sp.csr_matrix}

        for format in ['csr', 'csc', ]:
            for dtype in ['float32', 'float64']:
                x = tensor.tensor(dtype=dtype, broadcastable=(False,))
                y = tensor.ivector()
                z = tensor.ivector()
                s = tensor.ivector()
                # Sparse advanced indexing produces unsorted sparse matrices
                a = sparse_random_inputs(format, (4, 3), out_dtype=dtype,
                                         unsorted_indices=True)[1][0]
                # Make sure it's unsorted
                assert not a.has_sorted_indices
                def my_op(x):
                    y = tensor.constant(a.indices)
                    z = tensor.constant(a.indptr)
                    s = tensor.constant(a.shape)
                    return tensor.sum(
                        dense_from_sparse(CSM(format)(x, y, z, s) * a))
                verify_grad_sparse(my_op, [a.data]) 
Example #3
Source File: cost.py    From kusanagi with MIT License
def huber_loss(mx, Sx, target, Q, width=1.0, *args, **kwargs):
    '''
        Huber loss
    '''
    if Sx is None:
        # deterministic case
        if mx.ndim == 1:
            mx = mx[None, :]
        delta = mx-target
        Q = tt.constant(Q) if isinstance(Q, np.ndarray) else Q
        deltaQ = delta.dot(Q)
        abs_deltaQ = abs(deltaQ)
        cost = tt.switch(
            abs_deltaQ <= width,
            0.5*deltaQ**2,
            width*(abs_deltaQ - width/2)).sum(-1)
        return cost
    else:
        # stochastic case (moment matching)
        raise NotImplementedError 
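A NumPy cross-check of the deterministic branch above; the inputs here are made up for illustration:

import numpy as np

def huber_np(mx, target, Q, width=1.0):
    delta = np.atleast_2d(mx) - target
    deltaQ = delta.dot(Q)
    abs_dQ = np.abs(deltaQ)
    return np.where(abs_dQ <= width,
                    0.5 * deltaQ**2,
                    width * (abs_dQ - width / 2)).sum(-1)

# quadratic inside |deltaQ| <= width, linear outside:
print(huber_np(np.array([0.5, 2.0]), np.zeros(2), np.eye(2)))  # [1.625]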
Example #4
Source File: test_mixture.py    From carl with BSD 3-Clause "New" or "Revised" License
def test_fit():
    p1 = Normal(mu=T.constant(0.0), sigma=T.constant(2.0))
    p2 = Normal(mu=T.constant(3.0), sigma=T.constant(2.0))
    p3 = Exponential(inverse_scale=T.constant(0.5))
    g = theano.shared(0.5)
    m = Mixture(components=[p1, p2, p3], weights=[g, g*g])

    X = np.concatenate([st.norm(loc=0.0, scale=2.0).rvs(300, random_state=0),
                        st.norm(loc=3.0, scale=2.0).rvs(100, random_state=1),
                        st.expon(scale=1. / 0.5).rvs(500, random_state=2)])
    X = X.reshape(-1, 1)
    s0 = m.score(X)

    m.fit(X)
    assert np.abs(g.eval() - 1. / 3.) < 0.05
    assert m.score(X) >= s0 
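The expected optimum for g can be checked by hand, assuming (as the test implies) that the last component's weight is one minus the sum of the others. The data mix 300 : 100 : 500 out of 900 samples:

# target weights are 1/3, 1/9, 5/9; weights = [g, g*g] fits exactly at g = 1/3
g = 1.0 / 3.0
print(g, g * g, 1.0 - g - g * g)   # 0.333..., 0.111..., 0.555...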
Example #5
Source File: layers.py    From kusanagi with MIT License
def sample_noise(self, input):
        # get noise_shape
        noise_shape = self.input_shape
        if any(s is None for s in noise_shape):
            noise_shape = input.shape

        # respect shared axes
        if self.shared_axes:
            shared_axes = tuple(a if a >= 0 else a + input.ndim
                                for a in self.shared_axes)
            noise_shape = tuple(1 if a in shared_axes else s
                                for a, s in enumerate(noise_shape))

        one = tt.constant(1)
        retain_prob = one - self.p
        noise = self._srng.binomial(noise_shape, p=retain_prob,
                                    dtype=floatX)

        if self.shared_axes:
            bcast = tuple(bool(s == 1) for s in noise_shape)
            noise = tt.patternbroadcast(noise, bcast)

        return noise 
Example #6
Source File: aa.py    From D-VAE with MIT License
def print_graph_linker(print_prog=True):
    if 1:
        imap = {None:'-'}
        def blah(i, node, thunk):
            imap[node] = str(i)
            if print_prog:# and node.op.__class__ is T.DimShuffle:
                if False and  node.op == T.DimShuffle((), ['x', 'x'], inplace = True):
                    print(node.op == T.DimShuffle((), ['x', 'x'],
                                                  inplace=True), end=' ')
                    print(node.inputs[0], type(node.inputs[0]), end=' ')
                    print(node.inputs[0].equals(T.constant(2)), end=' ')
                outputs = node.outputs
                inputs = theano.gof.graph.inputs(outputs)
                print('node ', i, node, end=' ')
                print(':'.join([imap[inp.owner] for inp in node.inputs]))
                #print theano.sandbox.pprint.pp.process_graph(inputs, outputs)
        return theano.sandbox.wraplinker.WrapLinkerMany(
                [theano.gof.OpWiseCLinker()],
                [theano.sandbox.wraplinker.run_all
                    ,blah
                    #,theano.sandbox.wraplinker.numpy_notall_isfinite
                    ])
    else:
        return theano.gof.OpWiseCLinker() 
Example #7
Source File: raw_random.py    From D-VAE with MIT License
def get_size(self, shape_info):
        # The size is that of the data, which has a constant size.
        state = numpy.random.RandomState().get_state()
        size = 0
        for elem in state:
            if isinstance(elem, str):
                size += len(elem)
            elif isinstance(elem, numpy.ndarray):
                size += elem.size * elem.itemsize
            elif isinstance(elem, int):
                size += numpy.dtype("int").itemsize
            elif isinstance(elem, float):
                size += numpy.dtype("float").itemsize
            else:
                raise NotImplementedError()
        return size 
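The tally can be checked against an actual MT19937 state tuple; a sketch (the exact total depends on the platform's int size; the figures below assume a typical 64-bit Linux build):

import numpy

state = numpy.random.RandomState().get_state()
print([type(e).__name__ for e in state])
# ['str', 'ndarray', 'int', 'int', 'float']
total = (len(state[0]) + state[1].size * state[1].itemsize
         + 2 * numpy.dtype("int").itemsize + numpy.dtype("float").itemsize)
print(total)  # 7 + 624*4 + 2*8 + 8 = 2527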
Example #8
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def stop_gradient(variables):
    """Returns `variables` but with zero gradient w.r.t. every other variable.

    # Arguments
        variables: tensor or list of tensors to consider constant with respect
            to any other variable.

    # Returns
        A single tensor or a list of tensors (depending on the passed argument)
            that has constant gradient with respect to any other variable.
    """
    if isinstance(variables, (list, tuple)):
        # list comprehension (not map) so Python 3 callers get a list back
        return [theano.gradient.disconnected_grad(v) for v in variables]
    else:
        return theano.gradient.disconnected_grad(variables)


# CONTROL FLOW 
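A small usage sketch of the underlying theano.gradient.disconnected_grad, showing the gradient being blocked through the wrapped branch:

import theano
import theano.tensor as T

x = T.scalar('x')
y = x * theano.gradient.disconnected_grad(x)  # second factor treated as constant
g = T.grad(y, x)
f = theano.function([x], g)
print(f(3.0))   # 3.0 rather than 6.0: no gradient flows through the wrapped x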
Example #9
Source File: raw_random.py    From D-VAE with MIT License
def infer_shape(self, node, i_shapes):
        r, shp = node.inputs[0:2]

        # if shp is a constant array of len 0, then it means 'automatic shape'
        unknown_shape = len(getattr(shp, 'data', [0, 1, 2])) == 0

        # if ndim_added == 0 and shape != () then shape
        if self.ndim_added == 0 and not unknown_shape:
            sample_shp = shp
        else:
            # if shape == () then it will depend on args
            # if ndim_added != 0 and shape != () then it will depend on args
            # Use the default infer_shape implementation.
            raise tensor.ShapeError()

        return [None, [sample_shp[i] for i in xrange(node.outputs[1].ndim)]] 
Example #10
Source File: basic.py    From attention-lvcsr with MIT License
def make_node(self, x, index):
        assert isinstance(x.type, TypedListType)
        if not isinstance(index, Variable):
            if isinstance(index, slice):
                index = Constant(SliceType(), index)
                return Apply(self, [x, index], [x.type()])
            else:
                index = T.constant(index, ndim=0, dtype='int64')
                return Apply(self, [x, index], [x.ttype()])
        if isinstance(index.type, SliceType):
            return Apply(self, [x, index], [x.type()])
        elif isinstance(index, T.TensorVariable) and index.ndim == 0:
            assert index.dtype == 'int64'
            return Apply(self, [x, index], [x.ttype()])
        else:
            raise TypeError('Expected scalar or slice as index.') 
Example #11
Source File: test_compute_test_value.py    From D-VAE with MIT License
def test_constant(self):
        orig_compute_test_value = theano.config.compute_test_value
        try:
            theano.config.compute_test_value = 'raise'

            x = T.constant(numpy.random.rand(2, 3), dtype=config.floatX)
            y = theano.shared(numpy.random.rand(3, 6).astype(config.floatX),
                              'y')

            # should work
            z = T.dot(x, y)
            assert hasattr(z.tag, 'test_value')
            f = theano.function([], z)
            assert _allclose(f(), z.tag.test_value)

            # this test should fail
            x = T.constant(numpy.random.rand(2, 4), dtype=config.floatX)
            self.assertRaises(ValueError, T.dot, x, y)
        finally:
            theano.config.compute_test_value = orig_compute_test_value 
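Outside the test harness, the same mechanism can be exercised directly; a minimal sketch (constants supply their own test values, while plain variables need one assigned by hand):

import numpy
import theano
import theano.tensor as T

theano.config.compute_test_value = 'raise'
x = T.constant(numpy.ones((2, 3)))   # a constant's data doubles as its test value
y = T.matrix('y')
y.tag.test_value = numpy.ones((3, 4))
z = T.dot(x, y)                      # shapes are checked eagerly at graph build time
print(z.tag.test_value.shape)        # (2, 4)
theano.config.compute_test_value = 'off'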
Example #12
Source File: test_opt.py    From D-VAE with MIT License
def test_gpualloc():
    '''
    This test tries to catch the scenario where, due to infer_shape,
    the input of the alloc changes from a tensor scalar to the constant
    1. In this case the originally constructed broadcastable pattern will
    have a False for that dimension, but the new broadcastable pattern
    inserted by gpualloc will have a True, since it knows the
    dimension is 1 and therefore broadcastable.
    '''

    x = theano.shared(numpy.ones(3, dtype='float32'), 'x')
    m = (x).dimshuffle(['x', 0])
    v = tensor.alloc(1., *m.shape)
    f = theano.function([], v + x,
                        mode=mode_with_gpu.excluding("local_elemwise_alloc"))
    l = f.maker.fgraph.toposort()
    assert numpy.any([isinstance(x.op, cuda.GpuAlloc) for x in l]) 
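The broadcastable-pattern mismatch described in the docstring can be seen without a GPU; a small sketch:

import theano.tensor as T

m = T.vector().dimshuffle(['x', 0])  # shape (1, n): first dim known broadcastable
v = T.alloc(1., *m.shape)
print(m.broadcastable)  # (True, False)
print(v.broadcastable)  # (False, False): alloc cannot prove the size is 1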
Example #13
Source File: test_opt.py    From attention-lvcsr with MIT License
def test_gpualloc():
    '''
    This test tries to catch the scenario where, due to infer_shape,
    the input of the alloc changes from a tensor scalar to the constant
    1. In this case the originally constructed broadcastable pattern will
    have a False for that dimension, but the new broadcastable pattern
    inserted by gpualloc will have a True, since it knows the
    dimension is 1 and therefore broadcastable.
    '''

    x = theano.shared(numpy.ones(3, dtype='float32'), 'x')
    m = (x).dimshuffle(['x', 0])
    v = tensor.alloc(1., *m.shape)
    f = theano.function([], v + x,
                        mode=mode_with_gpu.excluding("local_elemwise_alloc"))
    l = f.maker.fgraph.toposort()
    assert numpy.any([isinstance(x.op, cuda.GpuAlloc) for x in l]) 
Example #14
Source File: basic.py    From D-VAE with MIT License
def make_node(self, x, index):
        assert isinstance(x.type, TypedListType)
        if not isinstance(index, Variable):
            if isinstance(index, slice):
                index = Constant(SliceType(), index)
                return Apply(self, [x, index], [x.type()])
            else:
                index = T.constant(index, ndim=0, dtype='int64')
                return Apply(self, [x, index], [x.ttype()])
        if isinstance(index.type, SliceType):
            return Apply(self, [x, index], [x.type()])
        elif isinstance(index, T.TensorVariable) and index.ndim == 0:
            assert index.dtype == 'int64'
            return Apply(self, [x, index], [x.ttype()])
        else:
            raise TypeError('Expected scalar or slice as index.') 
Example #15
Source File: raw_random.py    From attention-lvcsr with MIT License
def get_size(self, shape_info):
        # The size is that of the data, which has a constant size.
        state = numpy.random.RandomState().get_state()
        size = 0
        for elem in state:
            if isinstance(elem, str):
                size += len(elem)
            elif isinstance(elem, numpy.ndarray):
                size += elem.size * elem.itemsize
            elif isinstance(elem, int):
                size += numpy.dtype("int").itemsize
            elif isinstance(elem, float):
                size += numpy.dtype("float").itemsize
            else:
                raise NotImplementedError()
        return size 
Example #16
Source File: custom_layers.py    From luna16 with BSD 2-Clause "Simplified" License
def get_output_for(self, input, deterministic=False, **kwargs):
        """
        Parameters
        ----------
        input : tensor
            output from the previous layer
        deterministic : bool
            If True, dropout and rescaling are disabled; see notes
        """
        if deterministic or self.p == 0:
            return input
        else:
            # Using theano constant to prevent upcasting
            one = T.constant(1)

            retain_prob = one - self.p
            if self.rescale:
                input /= retain_prob

            mask = _srng.binomial(input.shape[:2], p=retain_prob,
                                      dtype=theano.config.floatX)
            axes = [0, 1] + (['x'] * (input.ndim - 2))
            mask = mask.dimshuffle(*axes)

            return input * mask 
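What the dimshuffle accomplishes is per-feature-map ('spatial') dropout: one Bernoulli draw per (sample, channel) pair, broadcast over the remaining axes. A NumPy sketch of the same masking; the shapes here are made up:

import numpy as np

rng = np.random.RandomState(0)
x = np.ones((2, 3, 4, 4), dtype='float32')   # (batch, channels, H, W)
retain_prob = 0.5
mask = rng.binomial(1, retain_prob, size=x.shape[:2]).astype('float32')
mask = mask[:, :, None, None]                # like dimshuffle(0, 1, 'x', 'x')
out = (x / retain_prob) * mask               # whole feature maps kept or zeroed
print(out[:, :, 0, 0])                       # one scaled value per (sample, channel)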
Example #17
Source File: test_basic.py    From attention-lvcsr with MIT License
def test_csm_unsorted(self):
        """
        Test support for gradients of unsorted inputs.
        """
        sp_types = {'csc': sp.csc_matrix,
                    'csr': sp.csr_matrix}

        for format in ['csr', 'csc', ]:
            for dtype in ['float32', 'float64']:
                x = tensor.tensor(dtype=dtype, broadcastable=(False,))
                y = tensor.ivector()
                z = tensor.ivector()
                s = tensor.ivector()
                # Sparse advanced indexing produces unsorted sparse matrices
                a = sparse_random_inputs(format, (4, 3), out_dtype=dtype,
                                         unsorted_indices=True)[1][0]
                # Make sure it's unsorted
                assert not a.has_sorted_indices
                def my_op(x):
                    y = tensor.constant(a.indices)
                    z = tensor.constant(a.indptr)
                    s = tensor.constant(a.shape)
                    return tensor.sum(
                        dense_from_sparse(CSM(format)(x, y, z, s) * a))
                verify_grad_sparse(my_op, [a.data]) 
Example #18
Source File: theano_backend.py    From GraphicDesignPatternByPython with MIT License
def stop_gradient(variables):
    """Returns `variables` but with zero gradient w.r.t. every other variable.

    # Arguments
        variables: tensor or list of tensors to consider constant with respect
            to any other variable.

    # Returns
        A single tensor or a list of tensors (depending on the passed argument)
            that has constant gradient with respect to any other variable.
    """
    if isinstance(variables, (list, tuple)):
        # list comprehension (not map) so Python 3 callers get a list back
        return [theano.gradient.disconnected_grad(v) for v in variables]
    else:
        return theano.gradient.disconnected_grad(variables)


# CONTROL FLOW 
Example #19
Source File: aa.py    From attention-lvcsr with MIT License
def print_graph_linker(print_prog=True):
    if 1:
        imap = {None:'-'}
        def blah(i, node, thunk):
            imap[node] = str(i)
            if print_prog:# and node.op.__class__ is T.DimShuffle:
                if False and  node.op == T.DimShuffle((), ['x', 'x'], inplace = True):
                    print(node.op == T.DimShuffle((), ['x', 'x'],
                                                  inplace=True), end=' ')
                    print(node.inputs[0], type(node.inputs[0]), end=' ')
                    print(node.inputs[0].equals(T.constant(2)), end=' ')
                outputs = node.outputs
                inputs = theano.gof.graph.inputs(outputs)
                print('node ', i, node, end=' ')
                print(':'.join([imap[inp.owner] for inp in node.inputs]))
                #print theano.sandbox.pprint.pp.process_graph(inputs, outputs)
        return theano.sandbox.wraplinker.WrapLinkerMany(
                [theano.gof.OpWiseCLinker()],
                [theano.sandbox.wraplinker.run_all
                    ,blah
                    #,theano.sandbox.wraplinker.numpy_notall_isfinite
                    ])
    else:
        return theano.gof.OpWiseCLinker() 
Example #20
Source File: test_base.py    From carl with BSD 3-Clause "New" or "Revised" License
def test_mixin_constants():
    # Check with constants
    mu = T.constant(0.0)
    sigma = T.constant(1.0)
    p = Normal(mu=mu, sigma=sigma)
    assert len(p.parameters_) == 0
    assert len(p.constants_) == 2
    assert mu in p.constants_
    assert sigma in p.constants_ 
Example #21
Source File: models.py    From drmad with MIT License
def __init__(self, x, y, args):
        self.params_theta = []
        self.params_lambda = []
        self.params_weight = []
        if args.dataset == 'mnist':
            input_size = (None, 1, 28, 28)
        elif args.dataset == 'cifar10':
            input_size = (None, 3, 32, 32)
        else:
            raise AssertionError
        layers = [ll.InputLayer(input_size)]
        self.penalty = theano.shared(np.array(0.))

        #conv1
        layers.append(Conv2DLayerWithReg(args, layers[-1], 20, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
        #conv1
        layers.append(Conv2DLayerWithReg(args, layers[-1], 50, 5))
        self.add_params_to_self(args, layers[-1])
        layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
        #fc1
        layers.append(DenseLayerWithReg(args, layers[-1], num_units=500))
        self.add_params_to_self(args, layers[-1])
        #softmax
        layers.append(DenseLayerWithReg(args, layers[-1], num_units=10, nonlinearity=nonlinearities.softmax))
        self.add_params_to_self(args, layers[-1])

        self.layers = layers
        self.y = ll.get_output(layers[-1], x, deterministic=False)
        self.prediction = T.argmax(self.y, axis=1)
        # self.penalty = penalty if penalty != 0. else T.constant(0.)
        print(self.params_lambda)
        # time.sleep(20)
        # cost function
        self.loss = T.mean(categorical_crossentropy(self.y, y))
        self.lossWithPenalty = T.add(self.loss, self.penalty)
        print "loss and losswithpenalty", type(self.loss), type(self.lossWithPenalty) 
Example #22
Source File: test_raw_random.py    From attention-lvcsr with MIT License
def test_dtype_normal_uniform_687(self):
        # Regression test for #687.
        rng_R = random_state_type()
        assert uniform(rng_R, low=tensor.constant(0, dtype='float64'),
                       dtype='float32')[1].dtype == 'float32'

        assert normal(rng_R, avg=tensor.constant(0, dtype='float64'),
                      dtype='float32')[1].dtype == 'float32' 
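The regression hinges on the constant carrying dtype 'float64' while the caller requests 'float32'. For reference, a sketch of how tensor.constant assigns dtypes (small Python ints default to the narrowest integer type unless a dtype is given):

import theano.tensor as T

print(T.constant(0).dtype)                   # 'int8': narrowest type that fits
print(T.constant(0, dtype='float64').dtype)  # 'float64': explicit dtype wins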
Example #23
Source File: test_blas.py    From attention-lvcsr with MIT License
def test_inplace(self):
        A = self.shared(numpy.random.rand(4, 5).astype(self.dtype))
        f = self.function([self.x, self.y], [],
                          updates=[(A, A + T.constant(0.1, dtype=self.dtype) *
                                   T.outer(self.x, self.y))])
        self.assertFunctionContains(f, self.ger_destructive)
        f(numpy.random.rand(4).astype(self.dtype),
          numpy.random.rand(5).astype(self.dtype))

        A.set_value(
            A.get_value(borrow=True, return_internal_type=True)[::-1, ::-1],
            borrow=True)
        f(numpy.random.rand(4).astype(self.dtype),
          numpy.random.rand(5).astype(self.dtype)) 
Example #24
Source File: test_blas.py    From attention-lvcsr with MIT License
def test1(self):
        """Test that it fails on nonscalar constants"""
        a = T.constant(numpy.ones(5))
        self.assertTrue(None == _as_scalar(a))
        self.assertTrue(None == _as_scalar(T.DimShuffle([False], [0, 'x'])(a))) 
Example #25
Source File: test_blas.py    From attention-lvcsr with MIT License
def test_destroy_map4(self):
        """test that dot args can be aliased"""
        Z = shared(self.rand(2, 2), name='Z')
        A = shared(self.rand(2, 2), name='A')
        one = T.constant(1.0).astype(Z.dtype)
        f = inplace_func([], gemm_inplace(Z, one, A, A, one))
        f()
        f = inplace_func([], gemm_inplace(Z, one, A, A.T, one))
        f() 
Example #26
Source File: test_blas.py    From attention-lvcsr with MIT License
def test_factorised_scalar(self):
        a = T.matrix()
        b = T.matrix()
        c = T.matrix()
        s = theano.shared(numpy.zeros((5, 5)).astype(config.floatX))

        lr1 = T.constant(0.01).astype(config.floatX)
        lr2 = T.constant(2).astype(config.floatX)
        l2_reg = T.constant(0.0001).astype(config.floatX)

        # test constant merge with gemm
        f = theano.function([a, b], updates=[(s, lr1 * T.dot(a, b) +
                                                l2_reg * lr2 * s)],
                            mode=mode_not_fast_compile).maker.fgraph.toposort()
        #[Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
        # <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
        # 2e-06)]
        assert len(f) == 1
        assert f[0].op == gemm_inplace

        # test factored scalar with merge
        f = theano.function([a, b], updates=[(s, lr1 * (T.dot(a, b) -
                                                        l2_reg * s))],
                            mode=mode_not_fast_compile).maker.fgraph.toposort()
        #[Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
        # <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
        # -2e-06)]
        assert len(f) == 1
        assert f[0].op == gemm_inplace

        # test factored scalar with merge and neg
        f = theano.function([a, b],
                            updates=[(s, s - lr1 * (s * .0002 + T.dot(a, b)))],
                            mode=mode_not_fast_compile).maker.fgraph.toposort()
        #[Gemm{inplace}(<TensorType(float64, matrix)>, -0.01,
        # <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
        # 0.999998)]
        assert len(f) == 1
        assert f[0].op == gemm_inplace 
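The folded scalar coefficients quoted in the comments follow from constant merging; the arithmetic can be checked directly:

print(0.01 * 0.0001 * 2)   # 2e-06: lr1 * l2_reg * lr2 merged into one gemm scalar
print(1 - 0.01 * 0.0002)   # 0.999998: the merged multiplier on s in the last case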
Example #27
Source File: test_basic.py    From attention-lvcsr with MIT License
def setUp(self):
        super(AddSSDataTester, self).setUp()
        self.op_class = AddSSData

        for format in sparse.sparse_formats:
            variable = getattr(theano.sparse, format + '_matrix')

            rand = numpy.array(
                numpy.random.random_integers(3, size=(3, 4)) - 1,
                dtype=theano.config.floatX)
            constant = as_sparse_format(rand, format)

            self.x[format] = [variable() for t in range(2)]
            self.a[format] = [constant for t in range(2)] 
Example #28
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def constant(value, dtype=None, shape=None, name=None):
    if dtype is None:
        dtype = floatx()
    if shape is None:
        shape = ()
    np_value = value * np.ones(shape)
    const = T.constant(np_value,
                       dtype=dtype,
                       name=_prepare_name(name, 'constant'))
    const._keras_shape = shape
    const._uses_learning_phase = False
    return const 
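A usage sketch of the backend helper above, assuming Keras's default floatx() of 'float32':

c = constant(3.0, shape=(2, 2))
print(c.dtype)          # 'float32'
print(c._keras_shape)   # (2, 2)
print(c.eval())         # [[3. 3.] [3. 3.]]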
Example #29
Source File: utils.py    From LV_groundhog with BSD 3-Clause "New" or "Revised" License
def const(value):
    return TT.constant(numpy.asarray(value, dtype=theano.config.floatX)) 
Example #30
Source File: test_scan_utils.py    From attention-lvcsr with MIT License
def test_opfromgraph(self):
        # as with the scan tests above, insert foreign inputs into the
        # inner graph.
        outer = tensor.scalar("outer")
        shared = theano.shared(
            numpy.array(1., dtype=theano.config.floatX),
            name="shared")
        constant = tensor.constant(1., name="constant")
        z = outer * (shared + constant)

        # construct the inner graph
        a = tensor.scalar()
        b = tensor.scalar()
        r = a + b
        r.tag.replacement = z * (a - b)

        # construct the outer graph
        c = tensor.scalar()
        d = tensor.scalar()
        u = theano.OpFromGraph([a, b], [r])(c, d)
        t = z * u
        v, = map_variables(self.replacer, [t])
        t2 = z * v

        f = theano.function([c, d, outer], [t, t2])
        for m, n in itertools.combinations(range(10), 2):
            assert f(m, n, outer=0.5) == [m + n, m - n]

        # test that the unsupported case of replacement with a shared
        # variable with updates crashes
        shared.update = shared + 1
        self.assertRaises(NotImplementedError,
                          map_variables, self.replacer, [t])