Python theano.tensor.minimum() Examples

The following code examples show how theano.tensor.minimum() is used in open-source projects. The source file and originating project are noted above each example. Duplicate copies of the same snippet have been collapsed into a single example.
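theano.tensor.minimum(x, y) returns the element-wise minimum of two symbolic tensors (with broadcasting). As a quick orientation before the project examples, here is a minimal standalone sketch, not taken from any of the projects below, assuming only that Theano is installed:

import theano
import theano.tensor as T

a = T.dvector('a')
b = T.dvector('b')
f = theano.function([a, b], T.minimum(a, b))  # element-wise minimum
print(f([1.0, 5.0, -2.0], [3.0, 4.0, 0.0]))   # [ 1.  4. -2.]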
Example #1
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
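    # grow the active range (to cover skip transitions), clamped by T.minimum to the length of log_p_curr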
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Example #2
Source File: rprop.py    From deepy with MIT License
def rprop_core(params, gradients, rprop_increase=1.01, rprop_decrease=0.99, rprop_min_step=0, rprop_max_step=100,
               learning_rate=0.01):
    """
    Rprop optimizer.
    See http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf.
    """
    for param, grad in zip(params, gradients):
        grad_tm1 = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_grad')
        step_tm1 = theano.shared(np.zeros_like(param.get_value()) + learning_rate, name=param.name+ '_step')

        test = grad * grad_tm1
        same = T.gt(test, 0)
        diff = T.lt(test, 0)
        step = T.minimum(rprop_max_step, T.maximum(rprop_min_step, step_tm1 * (
            T.eq(test, 0) +
            same * rprop_increase +
            diff * rprop_decrease)))
        grad = grad - diff * grad
        yield param, param - T.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step 
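rprop_core is a generator of (variable, new_value) update pairs, which can be passed directly as the updates argument of theano.function. A hypothetical usage sketch follows; the names and setup are illustrative and not from the deepy project:

import numpy as np
import theano
import theano.tensor as T

w = theano.shared(np.array([1.0, -2.0, 3.0]), name='w')
loss = T.sum(w ** 2)
grads = T.grad(loss, [w])

updates = list(rprop_core([w], grads, learning_rate=0.1))
train = theano.function([], loss, updates=updates)
for _ in range(5):
    print(train())   # loss decreases as w is stepped toward zero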
Example #3
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def relu(x, alpha=0., max_value=None):
    _assert_has_capability(T.nnet, 'relu')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x 
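Here T.minimum simply caps the activation at max_value. A standalone sketch of the same pattern (illustrative only, not from the project, since the backend's _assert_has_capability helper is not shown here):

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
capped = T.minimum(T.nnet.relu(x, 0.1), 6.0)   # leaky ReLU clipped at 6 ("ReLU6"-style)
f = theano.function([x], capped)
print(f(np.array([-2.0, 3.0, 10.0], dtype=theano.config.floatX)))  # approx [-0.2  3.  6.]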
Example #4
Source File: ctc.py    From chamanti_ocr with Apache License 2.0
def _log_add(x, y):
    maxx = tt.maximum(x, y)
    minn = tt.minimum(x, y)
    return maxx + tt.log(1 + tt.exp(minn - maxx)) 
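The helper computes log(exp(x) + exp(y)) without overflow or underflow via the identity log(exp(x) + exp(y)) = max(x, y) + log(1 + exp(min(x, y) - max(x, y))). A quick NumPy check of that identity (illustrative, not part of the project):

import numpy as np

x, y = -1000.0, -1001.0
naive = np.log(np.exp(x) + np.exp(y))                         # underflows to -inf
stable = max(x, y) + np.log1p(np.exp(min(x, y) - max(x, y)))
print(naive, stable)                                          # -inf vs. approx -999.687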
Example #5
Source File: ctc.py    From rnn_ctc with Apache License 2.0
def logadd_advanced(x, y):
    maxx = tt.maximum(x, y)
    minn = tt.minimum(x, y)
    return maxx + tt.log(1 + tt.exp(minn - maxx)) 
Example #6
Source File: ctc.py    From rnn_ctc with Apache License 2.0
def safe_exp(x):
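    # NOTE: epsinv is a module-level clipping threshold defined elsewhere in the original ctc.py (not shown in this snippet)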
    return tt.exp(tt.minimum(x, epsinv).astype(theano.config.floatX)) 
Example #7
Source File: example_support.py    From humanRL_prior_games with MIT License
def q_loss(self, y_true, y_pred):
    # assume clip_delta is 1.0
    # along with sum accumulator.
    diff = y_true - y_pred
    _quad = T.minimum(abs(diff), 1.0)
    _lin = abs(diff) - _quad
    loss = 0.5 * _quad ** 2 + _lin
    loss = T.sum(loss)

    return loss
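This is the Huber loss with the clipping threshold fixed at 1.0: quadratic for |diff| <= 1, linear beyond. A small worked check (illustrative, in plain NumPy rather than the project's Theano graph):

import numpy as np

diff = 2.5
quad = np.minimum(abs(diff), 1.0)        # 1.0
lin = abs(diff) - quad                   # 1.5
print(0.5 * quad ** 2 + lin)             # 2.0, i.e. 0.5 + (2.5 - 1.0)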
Example #8
Source File: theano_backend.py    From KerasNeuralFingerprint with MIT License
def relu(x, alpha=0., max_value=None):
    assert hasattr(T.nnet, 'relu'), ('It looks like like your version of '
                                     'Theano is out of date. '
                                     'Install the latest version with:\n'
                                     'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        x = T.minimum(x, max_value)
    return x 
Example #9
Source File: activations.py    From deepy with MIT License
def get_activation(act=None):
    def compose(a, b):
        c = lambda z: b(a(z))
        c.__theanets_name__ = '%s(%s)' % (b.__theanets_name__, a.__theanets_name__)
        return c
    if '+' in act:
        return functools.reduce(
            compose, (get_activation(a) for a in act.split('+')))
    options = {
        'tanh': T.tanh,
        'linear': lambda z: z,
        'logistic': T.nnet.sigmoid,
        'sigmoid': T.nnet.sigmoid,
        'hard_sigmoid': T.nnet.hard_sigmoid,
        'softplus': T.nnet.softplus,
        'softmax': softmax,
        'theano_softmax': T.nnet.softmax,

        # shorthands
        'relu': lambda z: T.nnet.relu(z),
        'leaky_relu': lambda z: T.nnet.relu(z, 0.01),
        'trel': lambda z: z * (z > 0) * (z < 1),
        'trec': lambda z: z * (z > 1),
        'tlin': lambda z: z * (abs(z) > 1),

        # modifiers
        'rect:max': lambda z: T.minimum(1, z),
        'rect:min': lambda z: T.maximum(0, z),

        # normalization
        'norm:dc': lambda z: (z.T - z.mean(axis=1)).T,
        'norm:max': lambda z: (z.T / T.maximum(1e-10, abs(z).max(axis=1))).T,
        'norm:std': lambda z: (z.T / T.maximum(1e-10, T.std(z, axis=1))).T,
    }
    for k, v in options.items():
        v.__theanets_name__ = k
    try:
        return options[act]
    except KeyError:
        raise KeyError('unknown activation %r' % act)
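T.minimum appears here as the 'rect:max' modifier, which caps activations at 1. A hypothetical usage sketch (it assumes the module-level imports of the original activations.py, e.g. functools and Theano, are in scope):

import numpy as np
import theano
import theano.tensor as T

act = get_activation('rect:min+rect:max')   # composes max(0, z) then min(1, z): clamp to [0, 1]
x = T.vector('x')
f = theano.function([x], act(x))
print(f(np.array([-0.5, 0.3, 2.0], dtype=theano.config.floatX)))  # approx [ 0.   0.3  1. ]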
Example #10
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def minimum(x, y):
    return T.minimum(x, y) 