Python theano.tensor.minimum() Examples

The following are 26 code examples showing how to use theano.tensor.minimum(), extracted from open source projects. The project, author, source file, and license are listed above each example where available.

You may also want to check out all available functions and classes of the module theano.tensor.
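
As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any project below): T.minimum is an elementwise binary op, so it broadcasts and compiles like any other Theano tensor operation.

import theano
import theano.tensor as T

x = T.dvector('x')
y = T.dvector('y')

# compile an elementwise-minimum function of two symbolic vectors
f = theano.function([x, y], T.minimum(x, y))

print(f([1.0, 4.0, 3.0], [2.0, 2.0, 5.0]))  # => [ 1.  2.  3.]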

Example 1
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
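
In this CTC helper, T.minimum caps the advancing frontier active_next at the padded-label length log_p_curr.shape[0], while the inner T.maximum pushes it past any skip transitions. A stripped-down sketch of just that clamp-and-cast pattern (the names below are illustrative, not from the project):

import theano
import theano.tensor as T

active = T.iscalar('active')
length = T.iscalar('length')

# advance the frontier by one position, then clamp it to the sequence length
frontier = T.cast(T.minimum(T.maximum(active + 1, 0), length), 'int32')
step = theano.function([active, length], frontier)

print(step(3, 10))  # => 4  (advanced)
print(step(9, 10))  # => 10 (clamped to length)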
Example 2
Project: GraphicDesignPatternByPython   Author: Relph1119   File: theano_backend.py    License: MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example 3
Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: theano_backend.py    License: MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example 4
Project: deepy   Author: zomux   File: rprop.py    License: MIT License
def rprop_core(params, gradients, rprop_increase=1.01, rprop_decrease=0.99, rprop_min_step=0, rprop_max_step=100,
               learning_rate=0.01):
    """
    Rprop optimizer.
    See http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf.
    """
    for param, grad in zip(params, gradients):
        grad_tm1 = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_grad')
        step_tm1 = theano.shared(np.zeros_like(param.get_value()) + learning_rate, name=param.name + '_step')

        test = grad * grad_tm1
        same = T.gt(test, 0)
        diff = T.lt(test, 0)
        step = T.minimum(rprop_max_step, T.maximum(rprop_min_step, step_tm1 * (
            T.eq(test, 0) +
            same * rprop_increase +
            diff * rprop_decrease)))
        grad = grad - diff * grad
        yield param, param - T.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step 
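
Here T.minimum and T.maximum clamp every per-parameter step size into [rprop_min_step, rprop_max_step]. The generator yields (shared variable, update expression) pairs; a hedged sketch of feeding them to theano.function, with a toy quadratic loss assumed purely for illustration:

import numpy as np
import theano
import theano.tensor as T

w = theano.shared(np.array([3.0, -2.0]), name='w')
loss = T.sum(w ** 2)
grads = T.grad(loss, [w])

# rprop_core (defined above) yields (variable, new_value) update pairs
train = theano.function([], loss, updates=list(rprop_core([w], grads)))

for _ in range(3):
    print(train())  # the loss shrinks as each weight steps toward zero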
Example 5
Project: deepQuest   Author: sheffieldnlp   File: theano_backend.py    License: BSD 3-Clause "New" or "Revised" License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example 6
Project: keras-lambda   Author: sunilmallya   File: theano_backend.py    License: MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example 7
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0
def minimum(x, y):
    return T.minimum(x, y) 
Example 8
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0
def relu(x, alpha=0., max_value=None):
    _assert_has_capability(T.nnet, 'relu')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x 
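
Capping a rectifier with T.minimum is how bounded activations such as ReLU6 are built. A minimal sketch, assuming a Theano version that ships T.nnet.relu:

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
relu6 = theano.function([x], T.minimum(T.nnet.relu(x), 6.0))

print(relu6(np.array([-1.0, 3.0, 10.0])))  # => [ 0.  3.  6.]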
Example 9
Project: Depth-Map-Prediction   Author: hjimce   File: thutil.py    License: GNU General Public License v3.0
def minimum(x, y):
    if checkgrad:
        # gradient-check mode: substitute a smooth surrogate (x + y),
        # since minimum has tie points where the gradient is undefined
        return x + y
    return T.minimum(x, y) 
Example 10
def minimum(x, y):
    return T.minimum(x, y)


# SHAPE OPERATIONS 
Example 11
def relu(x, alpha=0., max_value=None):
    assert hasattr(T.nnet, 'relu'), ('It looks like your version of '
                                     'Theano is out of date. '
                                     'Install the latest version with:\n'
                                     'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x 
Example 12
Project: D-VAE   Author: muhanzhang   File: basic.py    License: MIT License
def infer_shape(self, nodes, shapes):
        return [(tensor.minimum(*shapes[0]), )] 
Example 13
Project: D-VAE   Author: muhanzhang   File: basic.py    License: MIT License
def structured_minimum(x, y):
    """
    Structured elemwise minimum of sparse matrix x by scalar y.

    """
    # see decorator for function body 
Example 14
Project: Deep_MRI_brain_extraction   Author: GUR9000   File: TransferFunctions.py    License: MIT License
def parse_transfer_function(string_identifier, slope_parameter = None):
    """ This function returns the appropriate activation function, as selected by the string argument.
    
    string_identifier: 
        possible values are tanh, ReLU/relu, sigmoid/sig, abs, maxout <number>, linear/lin
    
    RETURNS: 
        transfer_function(python/theano function), string_identifier (normalized), dict (for special cases)
            
    """
    cross_channel_pooling_groups=None
    
    
    if string_identifier=='tanh':
        Activation_f = T.tanh
    elif string_identifier in ['ReLU', 'relu']: #rectified linear unit
        string_identifier = "relu"
        Activation_f = lambda x: x*(x>0)
    elif string_identifier in ['sigmoid', 'sig']:
        string_identifier = "sigmoid"
        Activation_f = T.nnet.sigmoid
    elif string_identifier in ['abs', 'Abs', 'absolute']:
        string_identifier='abs'
        Activation_f = T.abs_
    elif string_identifier in ['plu','PLu','PLU','piecewise']: #piece-wise linear function
        string_identifier = "PLU"
        print "parse_transfer_function::Remember to optimize the 'slope_parameter'"
        assert slope_parameter is not None,"...and better pass it to this function, as well! (type: Theano.Tensor, shape: same as activation, unif. random values [-1,1] should be fine)"
        Activation_f = lambda x: T.maximum(0,x) + T.minimum(0,x) * slope_parameter
    elif "maxout" in string_identifier:
        r=int(string_identifier.split(" ")[1])
        assert r>=2
        cross_channel_pooling_groups = r
    elif string_identifier in ['linear',"lin"]:
        string_identifier = "linear"
        Activation_f = lambda x:x
    else:
        raise NotImplementedError()
    return Activation_f, string_identifier, {"cross_channel_pooling_groups":cross_channel_pooling_groups} 
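
A usage sketch for the parser above. The PLU branch is where T.minimum contributes the negative-slope term; the shared slope variable below is an illustrative assumption:

import numpy as np
import theano
import theano.tensor as T

slope = theano.shared(np.float32(0.25), name='slope')
act, name, extras = parse_transfer_function('PLU', slope_parameter=slope)

x = T.fvector('x')
f = theano.function([x], act(x))
print(name, f(np.array([-2.0, 3.0], dtype='float32')))  # PLU [-0.5  3. ]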
Example 15
Project: PyGame-Learning-Environment   Author: ntasfi   File: example_support.py    License: MIT License
def q_loss(self, y_true, y_pred):
        # assume clip_delta is 1.0
        # along with sum accumulator.
        diff = y_true - y_pred
        _quad = T.minimum(abs(diff), 1.0)
        _lin = abs(diff) - _quad
        loss = 0.5 * _quad ** 2 + _lin
        loss = T.sum(loss)

        return loss 
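
T.minimum(abs(diff), 1.0) splits each residual into a quadratic part (inside the clip radius) and a linear part (outside), which is exactly the Huber loss with delta = 1. A plain-NumPy check of the decomposition:

import numpy as np

diff = np.array([0.5, 2.0])
quad = np.minimum(np.abs(diff), 1.0)  # => [ 0.5  1. ]
lin = np.abs(diff) - quad             # => [ 0.   1. ]
print(0.5 * quad ** 2 + lin)          # => [ 0.125  1.5  ], i.e. Huber with delta=1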
Example 16
Project: Theano-Lights   Author: Ivaylo-Popov   File: toolbox.py    License: MIT License
def cliplin(X):
    return T.minimum(T.maximum(X, -2.), 2.) 
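
cliplin is equivalent to T.clip(X, -2., 2.); the nested minimum/maximum form just spells out the two bounds. A quick check, assuming cliplin from above is in scope:

import numpy as np
import theano
import theano.tensor as T

X = T.dvector('X')
f = theano.function([X], [cliplin(X), T.clip(X, -2., 2.)])
a, b = f(np.array([-5.0, 0.5, 7.0]))
print(a, b)  # both are [-2.   0.5  2. ]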
Example 17
Project: GraphicDesignPatternByPython   Author: Relph1119   File: theano_backend.py    License: MIT License
def minimum(x, y):
    return T.minimum(x, y) 
Example 18
Project: attention-lvcsr   Author: rizar   File: basic.py    License: MIT License
def infer_shape(self, nodes, shapes):
        return [(tensor.minimum(*shapes[0]), )] 
Example 19
Project: attention-lvcsr   Author: rizar   File: basic.py    License: MIT License
def structured_minimum(x, y):
    """
    Structured elemwise minimum of sparse matrix x by scalar y.

    """
    # see decorator for function body 
Example 20
Project: kaggle-galaxies   Author: benanne   File: layers.py    License: BSD 3-Clause "New" or "Revised" License
def rescaled_weights(self, c): # c is the maximal norm of the weight vector going into a single filter.
        norms = T.sqrt(T.sqr(self.W).mean(0, keepdims=True))
        scale_factors = T.minimum(c / norms, 1)
        return self.W * scale_factors 
Example 21
Project: kaggle-galaxies   Author: benanne   File: layers.py    License: BSD 3-Clause "New" or "Revised" License
def rescaled_weights(self, c): # c is the maximal norm of the weight vector going into a single filter.
        weights_shape = self.W.shape
        W_flat = self.W.reshape((weights_shape[0], T.prod(weights_shape[1:])))
        norms = T.sqrt(T.sqr(W_flat).mean(1))
        scale_factors = T.minimum(c / norms, 1)
        return self.W * scale_factors.dimshuffle(0, 'x', 'x', 'x') 
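
The T.minimum(c / norms, 1) pattern implements a max-norm constraint: filters whose norm already sits below c keep a scale factor of 1, while larger ones are shrunk onto the boundary. A plain-NumPy sketch of the scaling rule (the norms are made up for illustration):

import numpy as np

c = 2.0
norms = np.array([0.5, 4.0])        # per-filter norms
scale = np.minimum(c / norms, 1.0)  # => [ 1.   0.5]
print(norms * scale)                # => [ 0.5  2. ], capped at c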
Example 22
Project: punctuator2   Author: ottokart   File: models.py    License: MIT License
def PReLU(a, x):
    return T.maximum(0.0, x) + a * T.minimum(0.0, x) 
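
Since T.maximum(0.0, x) keeps the positive part of x and T.minimum(0.0, x) keeps the negative part, this is the standard PReLU: identity for x > 0, slope a for x < 0. A one-line NumPy check:

import numpy as np

a, x = 0.1, np.array([-3.0, 2.0])
print(np.maximum(0.0, x) + a * np.minimum(0.0, x))  # => [-0.3  2. ]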
Example 23
Project: downhill   Author: lmjohns3   File: adaptive.py    License: MIT License
def _get_updates_for(self, param, grad):
        grad_tm1 = util.shared_like(param, 'grad')
        step_tm1 = util.shared_like(param, 'step', self.learning_rate.eval())
        test = grad * grad_tm1
        diff = TT.lt(test, 0)
        steps = step_tm1 * (TT.eq(test, 0) +
                            TT.gt(test, 0) * self.step_increase +
                            diff * self.step_decrease)
        step = TT.minimum(self.max_step, TT.maximum(self.min_step, steps))
        grad = grad - diff * grad
        yield param, TT.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step 
Example 24
Project: downhill   Author: lmjohns3   File: base.py    License: MIT License
def _differentiate(self, params=None):
        '''Return a sequence of gradients for our parameters.

        If this optimizer has been configured with a gradient norm limit, or
        with elementwise gradient clipping, this method applies the appropriate
        rescaling and clipping operations before returning the gradient.

        Parameters
        ----------
        params : list of Theano variables, optional
            Return the gradient with respect to these parameters. Defaults to
            all parameters that the optimizer knows about.

        Yields
        ------
        pairs : (param, grad) tuples
            Generates a sequence of tuples representing each of the parameters
            requested and the corresponding Theano gradient expressions.
        '''
        if params is None:
            params = self._params
        for param, grad in zip(params, TT.grad(self._loss, params)):
            if self.max_gradient_elem > 0:
                limit = util.as_float(self.max_gradient_elem)
                yield param, TT.clip(grad, -limit, limit)
            elif self.max_gradient_norm > 0:
                norm = TT.sqrt((grad * grad).sum())
                limit = util.as_float(self.max_gradient_norm)
                yield param, grad * TT.minimum(1, limit / norm)
            else:
                yield param, grad 
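
In the norm-clipping branch, TT.minimum(1, limit / norm) leaves small gradients untouched and rescales large ones so their L2 norm equals the limit. A plain-NumPy sketch of that rescaling rule (values are illustrative):

import numpy as np

limit = 1.0
grad = np.array([3.0, 4.0])  # L2 norm = 5
norm = np.sqrt((grad * grad).sum())
clipped = grad * np.minimum(1.0, limit / norm)
print(clipped, np.linalg.norm(clipped))  # => [ 0.6  0.8] 1.0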
Example 25
Project: deep-learning-models   Author: kuleshov   File: convvae.py    License: MIT License
def create_objectives(self, deterministic=False):
    """ELBO objective with the analytic expectation trick"""
    # load network input
    X = self.inputs[0]

    # load network output
    if self.model == 'bernoulli':
      q_mu, q_logsigma, sample, _ \
          = lasagne.layers.get_output(self.network[2:], deterministic=deterministic)
    elif self.model in ('gaussian', 'svhn'):
      p_mu, p_logsigma, q_mu, q_logsigma, _, _ \
          = lasagne.layers.get_output(self.network, deterministic=deterministic)

    # first term of the ELBO: kl-divergence (using the closed form expression)
    kl_div = 0.5 * T.sum(1 + 2*q_logsigma - T.sqr(q_mu) 
                         - T.exp(2 * T.minimum(q_logsigma,50)), axis=1).mean()

    # second term: log-likelihood of the data under the model
    if self.model == 'bernoulli':
      logpxz = -lasagne.objectives.binary_crossentropy(sample, X.flatten(2)).sum(axis=1).mean()
    elif self.model in ('gaussian', 'svhn'):
      # def log_lik(x, mu, log_sig):
      #     return T.sum(-(np.float32(0.5 * np.log(2 * np.pi)) + log_sig)
      #                   - 0.5 * T.sqr(x - mu) / T.exp(2 * log_sig), axis=1)
      # logpxz = log_lik(X.flatten(2), p_mu, p_logsigma).mean()
      logpxz = log_normal2(X.flatten(2), p_mu, p_logsigma).sum(axis=1).mean()

    loss = -1 * (logpxz + kl_div)

    # we don't use the separate accuracy metric right now
    return loss, -kl_div 
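
The T.minimum(q_logsigma, 50) inside the KL term guards the exponential: without the cap, a large log-sigma would overflow T.exp. A small NumPy illustration of the guard:

import numpy as np

q_logsigma = np.array([0.5, 400.0])
print(np.exp(2 * q_logsigma))                  # => [ 2.71828183e+00  inf]  (overflows)
print(np.exp(2 * np.minimum(q_logsigma, 50)))  # => [ 2.71828183e+00  2.68811714e+43]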
Example 26
Project: kusanagi   Author: mcgillmrl   File: saturation.py    License: MIT License
def maxSat(u, e):
    return tt.minimum(tt.maximum(u, -e), e)
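
maxSat clamps the control u into [-e, e], i.e. it is tt.clip(u, -e, e) written as an explicit maximum/minimum pair. A quick check, assuming maxSat from above is in scope and tt aliases theano.tensor:

import numpy as np
import theano
import theano.tensor as tt

u = tt.dvector('u')
e = tt.dscalar('e')
f = theano.function([u, e], maxSat(u, e))
print(f(np.array([-3.0, 0.2, 5.0]), 1.0))  # => [-1.   0.2  1. ]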