Python theano.function() Examples

The following are 30 code examples showing how to use theano.function(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module theano, or try the search function.

Example 1
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0 6 votes vote down vote up
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    '''Normalize x with the given statistics, then scale by gamma and
    shift by beta.
    '''
    # Legacy Theano builds lack T.nnet.bn.batch_normalization_test;
    # fall back to the old implementation there.
    # TODO remove this fallback when such Theano versions are deprecated.
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if mean.ndim == 1:
        # Mirror TensorFlow's default: normalize over all but the last axis.
        axes = range(x.ndim - 1)
    else:
        # Normalize over every axis the mean broadcasts across.
        axes = [axis for axis in range(x.ndim) if mean.broadcastable[axis]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated 
Example 2
Project: Depth-Map-Prediction   Author: hjimce   File: pooling.py    License: GNU General Public License v3.0 6 votes vote down vote up
def test_cmrnorm():
    """Smoke-test cmrnorm's forward pass and gradient under DEBUG_MODE,
    then numerically verify the gradient."""
    from theano.tests.unittest_tools import verify_grad

    xtest = np.random.rand(2,8,3,4)
    xtest = xtest.astype(theano.config.floatX)

    x = T.tensor4('x', dtype=theano.config.floatX)
    x.tag.test_value = xtest

    # Forward pass in DEBUG_MODE catches graph/shape inconsistencies.
    y = cmrnorm(x, input_shape=xtest.shape[1:])
    f = theano.function([x], y, mode='DEBUG_MODE')
    f(xtest)

    # Gradient pass, also in DEBUG_MODE.
    f = theano.function([x], gpu_from_host(T.grad(T.sum(y), wrt=x)),
                        mode='DEBUG_MODE')
    f(xtest)
    theano.printing.debugprint(f)

    # BUGFIX: use the verify_grad imported above (previously it was
    # imported but unused, and T.verify_grad was called instead).
    verify_grad(lambda x: cmrnorm(x, input_shape=xtest.shape[1:]),
                (xtest,),
                rng=np.random.RandomState(0))

    # Use print() so the test works on both Python 2 and 3
    # (the original used the Python-2-only print statement).
    print('cmrnorm passed')
Example 3
Project: spinn   Author: stanfordnlp   File: classifier.py    License: MIT License 6 votes vote down vote up
def build_cost(logits, targets):
    """
    Construct the classification cost and accuracy.

    Returns a (cost, accuracy) pair: cost is the mean categorical
    cross-entropy of the softmax distribution over logits, accuracy is
    the fraction of correct argmax predictions.
    """
    # Clip gradients flowing back through the cost function.
    clipped = theano.gradient.grad_clip(
        logits, -1. * FLAGS.clipping_max_value, FLAGS.clipping_max_value)

    probs = T.nnet.softmax(clipped)
    cost = T.nnet.categorical_crossentropy(probs, targets).mean()

    predictions = T.argmax(clipped, axis=1)
    mistakes = T.cast(T.neq(predictions, targets), theano.config.floatX)
    acc = 1. - T.mean(mistakes)

    return cost, acc
Example 4
Project: spinn   Author: stanfordnlp   File: fat_classifier.py    License: MIT License 6 votes vote down vote up
def build_cost(logits, targets):
    """
    Build a classification cost function.

    Returns the mean categorical cross-entropy cost together with the
    accuracy of the argmax predictions.
    """
    # Limit the magnitude of gradients flowing back from the cost.
    logits = theano.gradient.grad_clip(
        logits, -1. * FLAGS.clipping_max_value, FLAGS.clipping_max_value)

    # Cross-entropy of the softmax distribution against the targets.
    xent = T.nnet.categorical_crossentropy(T.nnet.softmax(logits), targets)
    cost = xent.mean()

    # Accuracy is one minus the argmax error rate.
    wrong = T.neq(T.argmax(logits, axis=1), targets)
    acc = 1. - T.mean(T.cast(wrong, theano.config.floatX))

    return cost, acc
Example 5
Project: spinn   Author: stanfordnlp   File: test_stack.py    License: MIT License 6 votes vote down vote up
def test_speed(self):
        # Benchmark a full forward + backprop pass through the stack.
        # Take the top-of-stack representations for the last batch rows.
        top = self.stack.final_stack[-self.batch_size:]
        cost = self._make_cost(top)
        error_signal = T.grad(cost, top)

        # Build automatic backprop function.
        self.stack.make_backprop_scan(error_signal, [self.y],
                                      compute_embedding_gradients=False)
        # Compile a function returning the cost plus all stack gradients,
        # carrying along the scan/backprop-scan updates.
        f = theano.function(
            [self.X, self.transitions, self.y],
            [cost] + self.stack.gradients.values(),
            updates=self.stack.scan_updates + self.stack.bscan_updates)
        theano.printing.debugprint(f.maker.fgraph.outputs[1])

        # Run several batches so one-time compilation cost is amortized.
        for t in range(10):
            self._run_batch(f)
Example 6
Project: deeplearn_hsi   Author: hantek   File: dA.py    License: BSD 2-Clause "Simplified" License 6 votes vote down vote up
def get_corrupted_input(self, input, corruption_level):
        """Return ``input`` with a random subset of entries zeroed out.

        Keeps ``1 - corruption_level`` of the entries of the input
        unchanged and zeroes out a randomly selected subset of size
        ``corruption_level``.

        Note : the first argument of theano_rng.binomial is the shape
               (size) of the random numbers it should produce,
               the second argument is the number of trials,
               the third argument is the probability of success of any
               trial.

                This produces an array of 0s and 1s where 1 has a
                probability of ``1 - corruption_level`` and 0 has a
                probability of ``corruption_level``.

                The binomial function returns int64 by default.
                int64 multiplied by the input type (floatX) always
                returns float64.  To keep all data in floatX when
                floatX is float32, we set the dtype of the binomial
                to floatX.  As in our case the value of the binomial
                is always 0 or 1, this doesn't change the result.
                This is needed to allow the GPU to work correctly,
                as it only supports float32 for now.

        """
        return self.theano_rng.binomial(size=input.shape, n=1,
                                        p=1 - corruption_level,
                                        dtype=theano.config.floatX) * input
Example 7
Project: 3D-R2N2   Author: chrischoy   File: solver.py    License: MIT License 6 votes vote down vote up
def save(self, training_losses, save_dir, step):
        '''Persist the current network parameters under save_dir and point
        a "latest" symlink at them so the training function can easily
        load the newest model.'''
        weights_path = os.path.join(save_dir, 'weights.%d' % (step))
        self.net.save(weights_path)

        # Refresh the weights.npy symlink: remove a stale link first,
        # then point it at the parameters just written.
        latest_link = os.path.join(save_dir, 'weights.npy')
        if os.path.lexists(latest_link):
            os.remove(latest_link)
        os.symlink("%s.npy" % os.path.abspath(weights_path), latest_link)

        # Dump the per-step losses, one value per line.
        loss_path = os.path.join(save_dir, 'loss.%d.txt' % step)
        with open(loss_path, 'w') as f:
            f.write('\n'.join([str(l) for l in training_losses]))
Example 8
Project: adage   Author: greenelab   File: SdA_train.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def return_network(self):
        '''Collect the trained parameters of every hidden layer.

        Returns three parallel lists: the weight matrices, the bias
        vectors and the reconstruction-bias vectors of the final
        network, one entry per dA layer.'''
        weights, biases, biases_prime = [], [], []

        for layer in self.dA_layers:
            weights.append(layer.W.get_value(borrow=True))
            biases.append(layer.b.get_value(borrow=True))
            biases_prime.append(layer.b_prime.get_value(borrow=True))

        return weights, biases, biases_prime
Example 9
Project: CAPTCHA-breaking   Author: lllcho   File: test_activations.py    License: MIT License 6 votes vote down vote up
def test_softmax():
    """Compare keras' softmax against a plain numpy reference."""
    from keras.activations import softmax as s

    # Reference implementation of softmax.
    def reference_softmax(values):
        # Subtract the max for numerical stability before exponentiating.
        shifted = numpy.exp(numpy.array(values) - max(values))
        return list(shifted / numpy.sum(shifted))

    x = T.vector()
    f = theano.function([x], s(x))
    test_values = get_standard_values()

    result = f(test_values)
    expected = reference_softmax(test_values)

    print(str(result))
    print(str(expected))

    list_assert_equal(result, expected)
Example 10
Project: CAPTCHA-breaking   Author: lllcho   File: test_activations.py    License: MIT License 6 votes vote down vote up
def test_tanh():
    """Check keras' tanh against math.tanh on standard test values."""
    from keras.activations import tanh as t

    test_values = get_standard_values()

    inp = T.vector()
    compiled = theano.function([inp], t(inp))

    result = compiled(test_values)
    expected = [math.tanh(v) for v in test_values]

    print(result)
    print(expected)

    list_assert_equal(result, expected)
Example 11
Project: nmt   Author: arctic-nmt   File: nmt.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def adadelta(lr, tparams, grads, inp, cost):
    """Build a single compiled adadelta update function.

    @param:
        lr:      learning-rate variable (unused by adadelta itself but
                 accepted so callers can treat all optimizers alike)
        tparams: ordered dict of shared parameter variables
        grads:   gradient expressions, one per parameter
        inp:     list of input variables for the compiled function
        cost:    cost expression returned by the compiled function
    @return:
        f_update: function that computes the cost and applies the
        adadelta updates in one call.
    """
    # Running averages of squared updates and squared gradients.
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rup2'%k) for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2'%k) for k, p in tparams.iteritems()]

    rg2_new = [0.95 * rg2 + 0.05 * (g ** 2) for rg2, g in zip(running_grads2, grads)]
    rg2up = [(rg2, r_n) for rg2, r_n in zip(running_grads2, rg2_new)]

    # Adadelta step: gradient scaled by RMS(update) / RMS(gradient).
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(grads, running_up2, rg2_new)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]

    # BUGFIX: the original did `inp += [lr]`, which mutated the caller's
    # input list in place; build a new list instead.
    f_update = theano.function(inp + [lr], cost, updates=rg2up+ru2up+param_up, on_unused_input='ignore', profile=profile)

    return f_update
Example 12
Project: nmt   Author: arctic-nmt   File: nmt.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def debugging_adadelta(lr, tparams, grads, inp, cost):
    """Adadelta split into two compiled functions for easier debugging:
    one accumulates gradient statistics and returns the cost, the other
    applies the parameter updates."""
    # Shared gradient copies plus the adadelta running averages.
    grad_shared = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad'%k)
                   for k, p in tparams.iteritems()]
    upd_sq_avg = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rup2'%k)
                  for k, p in tparams.iteritems()]
    grad_sq_avg = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2'%k)
                   for k, p in tparams.iteritems()]

    # First function: stash the gradients and refresh the
    # squared-gradient running average, returning the cost.
    store_grads = [(gs, g) for gs, g in zip(grad_shared, grads)]
    refresh_gsq = [(avg, 0.95 * avg + 0.05 * (g ** 2))
                   for avg, g in zip(grad_sq_avg, grads)]
    f_grad_shared = theano.function(inp, cost, updates=store_grads + refresh_gsq,
                                    profile=profile)

    # Second function: compute the adadelta step from the stored
    # statistics and apply it to the parameters.
    step = [-tensor.sqrt(us + 1e-6) / tensor.sqrt(gs2 + 1e-6) * gs
            for gs, us, gs2 in zip(grad_shared, upd_sq_avg, grad_sq_avg)]
    refresh_usq = [(us, 0.95 * us + 0.05 * (s ** 2))
                   for us, s in zip(upd_sq_avg, step)]
    apply_step = [(p, p + s) for p, s in zip(itemlist(tparams), step)]
    f_update = theano.function([lr], [], updates=refresh_usq + apply_step,
                               on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
Example 13
Project: nmt   Author: arctic-nmt   File: nmt.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def rmsprop(lr, tparams, grads, inp, cost):
    """RMSProp with momentum, split into a gradient-accumulation
    function (returns the cost) and a parameter-update function."""
    grad_shared = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_grad'%k)
                   for k, p in tparams.iteritems()]
    grad_avg = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad'%k)
                for k, p in tparams.iteritems()]
    grad_sq_avg = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_rgrad2'%k)
                   for k, p in tparams.iteritems()]

    # Stash the raw gradients and refresh first/second moment averages.
    store = [(gs, g) for gs, g in zip(grad_shared, grads)]
    avg_up = [(ga, 0.95 * ga + 0.05 * g) for ga, g in zip(grad_avg, grads)]
    sq_up = [(gsa, 0.95 * gsa + 0.05 * (g ** 2))
             for gsa, g in zip(grad_sq_avg, grads)]
    f_grad_shared = theano.function(inp, cost, updates=store + avg_up + sq_up,
                                    profile=profile)

    # Momentum-smoothed update direction; the denominator is the
    # centered RMS of the gradients.
    updir = [theano.shared(p.get_value() * numpy.float32(0.), name='%s_updir'%k)
             for k, p in tparams.iteritems()]
    updir_new = [(ud, 0.9 * ud - 1e-4 * gs / tensor.sqrt(gsa - ga ** 2 + 1e-4))
                 for ud, gs, ga, gsa in zip(updir, grad_shared, grad_avg, grad_sq_avg)]
    param_up = [(p, new) for p, new in
                [(p, p + pair[1]) for p, pair in zip(itemlist(tparams), updir_new)]]
    f_update = theano.function([lr], [], updates=updir_new + param_up,
                               on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
Example 14
Project: Att-ChemdNER   Author: lingluodlut   File: model.py    License: Apache License 2.0 5 votes vote down vote up
def modelScore(self,tag_ids,scores,s_len):
    #{{{
        """
            NOTE: THIS FUNCTION IS SYMBOLIC PROGRAMMING — it builds a
            Theano expression rather than computing a value directly.
            It returns the score of our model for a fixed sentence label
            sequence.
        @param:
            scores:        the score matrix, the output of our model
            tag_ids:       a numpy array representing one sentence's labels
            s_len:         a scalar, the true length of the sentence.
                Sentence labels are padded to the maximum sentence length,
                so s_len is used to recover the original (unpadded) labels.
        @return: 
            a scalar, the score normalized by sentence length.
        """
    #{{{
        n_tags=self.output_dim;
        transitions=self.transitions;
        # Emission score: sum the score of the chosen tag at each position.
        real_path_score = scores[T.arange(s_len), tag_ids].sum()

        # Transition score: pad the tag sequence with synthetic
        # begin (n_tags) and end (n_tags + 1) tags, then sum the
        # transition weights between consecutive tags.
        b_id = theano.shared(value=np.array([n_tags], dtype=np.int32))
        e_id = theano.shared(value=np.array([n_tags + 1], dtype=np.int32))
        padded_tags_ids = T.concatenate([b_id, tag_ids, e_id], axis=0)
        real_path_score += transitions[
                padded_tags_ids[T.arange(s_len + 1)],
                padded_tags_ids[T.arange(s_len + 1) + 1]
            ].sum()
        # Normalize by length to prevent T.exp(real_path_score)
        # from overflowing to inf.
        #return real_path_score;
        return real_path_score/s_len;
    #}}}
    #}}}
Example 15
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0 5 votes vote down vote up
def arange(start, stop=None, step=1, dtype='int32'):
    '''Create a 1-D tensor holding a sequence of integers.

    Follows Theano's arange convention: when only one argument is
    given, it is interpreted as the "stop" value.  The returned
    tensor's dtype defaults to 'int32' to match TensorFlow's default.
    '''
    sequence = T.arange(start, stop=stop, step=step, dtype=dtype)
    return sequence
Example 16
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0 5 votes vote down vote up
def __init__(self, inputs, outputs, updates=[], **kwargs):
        # Theano rejects duplicate update targets, so keep only the
        # first new value supplied for each variable.
        deduped = {}
        for var, new_value in updates:
            deduped.setdefault(var, new_value)
        self.function = theano.function(inputs, outputs,
                                        updates=deduped.items(),
                                        allow_input_downcast=True,
                                        on_unused_input='ignore',
                                        **kwargs)
Example 17
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0 5 votes vote down vote up
def __call__(self, inputs):
        # Inputs must be a sequence; splat it into the compiled
        # Theano function's positional arguments.
        assert isinstance(inputs, (list, tuple))
        outputs = self.function(*inputs)
        return outputs
Example 18
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0 5 votes vote down vote up
def elu(x, alpha=1.0):
    """Exponential linear unit activation.

    # Arguments
        x: Tensor to compute the activation function for.
        alpha: scalar slope parameter for negative inputs.
    """
    # Older Theano releases lack T.nnet.elu; fail loudly if it's missing.
    _assert_has_capability(T.nnet, 'elu')
    return T.nnet.elu(x, alpha)
Example 19
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0 5 votes vote down vote up
def map_fn(fn, elems, name=None):
    '''Apply fn over the elements of elems and collect the outputs.

    # Arguments
        fn: callable invoked on each element of elems
        elems: tensor, at least 2 dimensional
        name: optional string name for the map node in the graph

    # Returns
        Tensor whose first dimension matches elems; the remaining
        dimensions depend on fn.
    '''
    # theano.map returns (outputs, updates); only the outputs are needed.
    mapped = theano.map(fn, elems, name=name)
    return mapped[0]
Example 20
Project: Recipes   Author: Lasagne   File: lstm_text_generation.py    License: MIT License 5 votes vote down vote up
def gen_data(p, batch_size=BATCH_SIZE, data=in_text, return_target=True):
    '''
    Produce one semi-redundant training batch starting at position p of data.

    Each of the batch_size samples is a window of SEQ_LENGTH one-hot
    encoded characters taken from the string; the target is the character
    immediately following the window.  Consecutive samples start one
    character apart, so windows overlap heavily (hence "semi-redundant").

    For example, with SEQ_LENGTH = 5, p = 0, BATCH_SIZE = 2 and the input
    "The quick brown fox jumps over the lazy dog.":
    sample 0: x encodes 'T','h','e',' ','q' and y encodes 'u'
    sample 1: x encodes 'h','e',' ','q','u' and y encodes 'i'

    Returns (x, y) with x of shape (batch_size, SEQ_LENGTH, vocab_size)
    and y an int32 vector of target character ids.
    '''
    x = np.zeros((batch_size, SEQ_LENGTH, vocab_size))
    y = np.zeros(batch_size)

    for sample in range(batch_size):
        window_start = p + sample
        for offset in range(SEQ_LENGTH):
            x[sample, offset, char_to_ix[data[window_start + offset]]] = 1.
        if return_target:
            y[sample] = char_to_ix[data[window_start + SEQ_LENGTH]]
    return x, np.array(y, dtype='int32')
Example 21
Project: Depth-Map-Prediction   Author: hjimce   File: thutil.py    License: GNU General Public License v3.0 5 votes vote down vote up
def theano_function(*vars_by_pos, **kwargs):
    '''theano function decorator.

    The decorated function's arguments are bound to the given Theano
    variables — positionally via *vars_by_pos or by argument name via
    **kwargs — and its return value becomes the compiled function's
    output.  A variable spec may be either a bare Theano variable or a
    (variable, test_value) pair.

    Recognized keyword options (popped before name binding):
        mode:        Theano compilation mode (default 'FAST_RUN')
        check_valid: forwarded to the underlying `function` helper
        checks:      forwarded to the underlying `function` helper
    '''
    mode = kwargs.pop('mode', 'FAST_RUN')
    check_valid = kwargs.pop('check_valid', False)
    checks = kwargs.pop('checks', ())
    vars_by_name = kwargs
    def compile_func(f):
        # NOTE: f.func_code / f.func_name are Python 2 attributes
        # (__code__ / __name__ in Python 3).
        argnames = f.func_code.co_varnames[:f.func_code.co_argcount]
        # A variable may be supplied positionally or by name, not both.
        if any([a in vars_by_name for a in argnames[:len(vars_by_pos)]]):
            raise ValueError('Argument supplied twice to %s' % f.func_name)
        varspec = dict(vars_by_name)
        varspec.update(zip(argnames[:len(vars_by_pos)], vars_by_pos))
        argvars = []
        for name in argnames:
            spec = varspec[name]
            # Each spec is either (variable, test_value) or a variable.
            if isinstance(spec, (tuple, list)):
                (var, test_val) = spec
            else:
                var = spec
                test_val = None
            assert isinstance(var, T.Variable)
            # Name the variable after the argument for readable graphs.
            var.name = name
            if test_val is not None:
                var.tag.test_value = test_val
            argvars.append(var)
        return function(argvars, f(*argvars),
                        check_valid=check_valid,
                        checks=checks,
                        mode=mode)
    return compile_func
Example 22
Project: Depth-Map-Prediction   Author: hjimce   File: thutil.py    License: GNU General Public License v3.0 5 votes vote down vote up
def __call__(self, *args, **kwargs):
        # Inputs arrive either positionally or, when input names were
        # declared, exclusively as keyword arguments looked up in
        # declaration order.
        if self.input_names:
            assert not args, \
                   'theano function with kw args cannot take positional args'
            values = [kwargs[name] for name in self.input_names]
        else:
            values = args

        raw_outputs = self.f(*values)

        # Optionally wrap the outputs for access by name.
        if self.output_names:
            return self._NamedOutputs(raw_outputs)
        return raw_outputs
Example 23
Project: Depth-Map-Prediction   Author: hjimce   File: thutil.py    License: GNU General Public License v3.0 5 votes vote down vote up
def __call__(self, *args, **kwargs):
        """Call the compiled function; on an assertion failure, rerun the
        same inputs through a debug-mode build of the function before
        re-raising."""
        try:
            return self.f(*args, **kwargs)
        except AssertionError:
            _log.exception('assertion failed in function %s' % self.f.name)
            # Build the debug-mode function lazily, only when first needed.
            if self.f_dbg is None:
                _log.info('creating debug function for %s' % self.f.name)
                self.f_dbg = theano.function(**self.dbg_kwargs)
            _log.error('calling debug function for %s' % self.f.name)
            self.f_dbg(*args, **kwargs)
            # BUGFIX: the original format string had no %s placeholder,
            # so applying % raised TypeError here instead of logging.
            _log.error('debug version of %s seems to have passed' % self.f.name)
            raise
Example 24
Project: spinn   Author: stanfordnlp   File: stack.py    License: MIT License 5 votes vote down vote up
def zero(self):
        # Lazily compile (JIT-prepare) the zeroing function on first use,
        # then invoke it to reset all registered shared variables.
        if self._zero is None:
            updates = {}
            for var in self._zero_updates:
                shape = var.get_value().shape
                updates[var] = np.zeros(shape, dtype=np.float32)
            self._zero = theano.function([], [], updates=updates)

        self._zero()
Example 25
Project: spinn   Author: stanfordnlp   File: classifier.py    License: MIT License 5 votes vote down vote up
def build_transition_cost(logits, targets, num_transitions):
    """
    Build a parse action prediction cost function.

    Returns (cost, acc): cost is the mean per-timestep cross-entropy
    over ALL transitions (padding included, deliberately — see below);
    acc is computed only over the real, unpadded transitions.
    """

    # swap seq_length dimension to front so that we can scan per timestep
    logits = T.swapaxes(logits, 0, 1)
    targets = targets.T

    def cost_t(logits, tgt, num_transitions):
        # Per-timestep cross-entropy and argmax error.
        # TODO(jongauthier): Taper down xent cost as we proceed through
        # sequence?
        predicted_dist = T.nnet.softmax(logits)
        cost = T.nnet.categorical_crossentropy(predicted_dist, tgt)

        pred = T.argmax(logits, axis=1)
        error = T.neq(pred, tgt)
        return cost, error

    results, _ = theano.scan(cost_t, [logits, targets], non_sequences=[num_transitions])
    costs, errors = results

    # Create a mask that selects only transitions that involve real data.
    # Timestep t counts as real when t+1 > unrolling_length -
    # num_transitions, i.e. padding sits at the front of each sequence
    # -- presumably; confirm against the callers' padding convention.
    unrolling_length = T.shape(costs)[0]
    padding = unrolling_length - num_transitions
    padding = T.reshape(padding, (1, -1))
    rng = T.arange(unrolling_length) + 1
    rng = T.reshape(rng, (-1, 1))
    mask = T.gt(rng, padding)

    # Compute acc using the mask
    acc = 1.0 - (T.sum(errors * mask, dtype=theano.config.floatX)
                 / T.sum(num_transitions, dtype=theano.config.floatX))

    # Compute cost directly, since we *do* want a cost incentive to get the padding
    # transitions right.
    cost = T.mean(costs)
    return cost, acc
Example 26
Project: spinn   Author: stanfordnlp   File: test_stack.py    License: MIT License 5 votes vote down vote up
def _test_backprop(self, sim_top, stack, X, transitions, y):
        # Compare the stack's custom backprop-scan gradients against a
        # reference graph differentiated with plain T.grad.
        # Trainable variables to compare; embeddings are handled
        # separately via compute_embedding_gradients.
        rel_vars = [(name, var) for name, var in self.vs.trainable_vars.iteritems()
                    if name != "embeddings"]

        # Reference path: cost and gradients from standard symbolic
        # differentiation of the simulated top representation.
        sim_cost = self._make_cost(sim_top)
        all_grads = [T.grad(sim_cost, var) for _, var in rel_vars]
        f_sim = theano.function(
            [self.X, self.y],
            [sim_top, sim_cost] + all_grads + [T.grad(sim_cost, stack.embeddings)])

        top = stack.final_stack[-self.batch_size:]
        cost = self._make_cost(top)
        error_signal = T.grad(cost, top)

        # Candidate path: gradients produced by the stack's backprop scan.
        stack.make_backprop_scan(error_signal, [self.y],
                                 compute_embedding_gradients=not self.skip_embeddings)
        outputs = [top, cost] + [stack.gradients[var] for _, var in rel_vars]
        if not self.skip_embeddings:
            outputs.append(stack.embedding_gradients)
        f = theano.function(
            [self.X, self.transitions, self.y], outputs,
            updates=stack.scan_updates + stack.bscan_updates)

        # Human-readable labels for the assertion error messages below.
        checks = ["top", "cost"] + ["d/%s" % name for name, _ in rel_vars]
        if not self.skip_embeddings:
            checks.append("d/embeddings")

        # Seed identically so any randomness matches across the two runs.
        util.theano_random.seed(1234)
        sim = f_sim(X, y)

        util.theano_random.seed(1234)
        real = f(X, transitions, y)

        for check, sim_i, real_i in zip(checks, sim, real):
            np.testing.assert_almost_equal(sim_i, real_i, err_msg=check,
                                           decimal=4, verbose=True)
Example 27
Project: deeplearn_hsi   Author: hantek   File: dA.py    License: BSD 2-Clause "Simplified" License 5 votes vote down vote up
def get_cost_updates(self, corruption_level, learning_rate):
        """Return (cost, updates) for one training step of the dA."""
        corrupted = self.get_corrupted_input(self.x, corruption_level)
        hidden = self.get_hidden_values(corrupted)
        reconstruction = self.get_reconstructed_input(hidden)

        # Per-example reconstruction loss: summing over axis=1 (the size
        # of a datapoint) yields one cross-entropy value per minibatch
        # example.
        per_example_loss = -T.sum(
            self.x * T.log(reconstruction)
            + (1 - self.x) * T.log(1 - reconstruction),
            axis=1)
        # Average over the minibatch to get the scalar cost.
        cost = T.mean(per_example_loss)

        # Gradients of the dA's cost w.r.t. its parameters, and one
        # plain SGD update per parameter.
        gparams = T.grad(cost, self.params)
        updates = [(param, param - learning_rate * gparam)
                   for param, gparam in zip(self.params, gparams)]

        return (cost, updates)
Example 28
Project: 3D-R2N2   Author: chrischoy   File: solver.py    License: MIT License 5 votes vote down vote up
def train_loss(self):
        # Lazily compile the training function on first access; later
        # calls reuse the cached compiled function.
        if self._train_loss is None:
            print('Compiling training function')
            self._train_loss = theano.function(
                [self.net.x, self.net.y], self.net.loss, updates=self.updates, profile=cfg.PROFILE)
        # Every access counts as one training iteration.
        self.iteration.set_value(self.iteration.get_value() + 1)
        return self._train_loss
Example 29
Project: 3D-R2N2   Author: chrischoy   File: solver.py    License: MIT License 5 votes vote down vote up
def test_output(self, x, y=None):
        '''Run the network forward, producing reconstruction, loss and
        activations.  The loss is evaluated (and returned) only when
        ground-truth y is supplied; otherwise zeros are fed in its place
        and the loss is dropped from the result.'''
        # Compile the evaluation function once and cache it.
        if self._test_output is None:
            print('Compiling testing function')
            self._test_output = theano.function([self.net.x, self.net.y],
                                                [self.net.output,
                                                 self.net.loss,
                                                 *self.net.activations])

        # Without ground truth, feed a zero tensor so the compiled
        # function's signature is satisfied; the resulting loss value
        # is meaningless and discarded below.
        return_loss = y is not None
        if return_loss:
            y_val = y
        else:
            n_vox = cfg.CONST.N_VOX
            y_val = np.zeros(
                (cfg.CONST.BATCH_SIZE, n_vox, 2, n_vox, n_vox)).astype(theano.config.floatX)

        # Split the flat result list into its named parts.
        results = self._test_output(x, y_val)
        prediction, loss = results[0], results[1]
        activations = results[2:]

        if return_loss:
            return prediction, loss, activations
        return prediction, activations
Example 30
Project: adversarial   Author: goodfeli   File: __init__.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def theano_parzen(data, mu, sigma):
    """
    Symbolic Parzen-window log-density estimate.

    Credit: Yann N. Dauphin
    """
    # Pairwise standardized differences between data points and kernel
    # centers — presumably shaped (n_data, n_centers, dim); confirm
    # against callers.
    diffs = (data.dimshuffle(0, 'x', 1) - mu.dimshuffle('x', 0, 1)) / sigma

    # Log-mean of the unnormalized Gaussian kernel values per data point.
    E = log_mean_exp(-0.5 * (diffs ** 2).sum(2))

    # Log normalization constant of the Gaussian kernel.
    Z = mu.shape[1] * T.log(sigma * numpy.sqrt(numpy.pi * 2))

    return E - Z