Python theano.tensor.ge() Examples

The following are code examples of theano.tensor.ge(), collected from open-source projects; each example lists the source file, project, and license it was taken from. You may also want to check out the other available functions and classes of the module theano.tensor.
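In Theano, T.ge(a, b) builds the symbolic elementwise comparison a >= b; like the other comparison operators it returns an int8 tensor of zeros and ones rather than booleans. As a quick orientation before the examples, here is a minimal sketch (the variable names are illustrative, not taken from any project below):

import theano
import theano.tensor as T

x = T.dvector('x')
y = T.dvector('y')
f = theano.function([x, y], T.ge(x, y))

print(f([1., 2., 3.], [2., 2., 2.]))  # [0 1 1], dtype int8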
Example #1
Source File: cost.py    From attention-lvcsr with MIT License
def apply(self, y, y_hat):
        # Support checkpoints that predate self.top_k
        top_k = getattr(self, 'top_k', 1)
        if top_k == 1:
            mistakes = tensor.neq(y, y_hat.argmax(axis=1))
        else:
            row_offsets = theano.tensor.arange(0, y_hat.flatten().shape[0],
                                               y_hat.shape[1])
            truth_score = y_hat.flatten()[row_offsets + y]
            # We use greater than _or equals_ here so that the model
            # _must_ have its guess in the top k, and cannot extend
            # its effective "list of predictions" by tying lots of things
            # for k-th place.
            higher_scoring = tensor.ge(y_hat, truth_score.dimshuffle(0, 'x'))
            # Because we used greater-than-or-equal we have to correct for
            # counting the true label.
            num_higher = higher_scoring.sum(axis=1) - 1
            mistakes = tensor.ge(num_higher, top_k)
        return mistakes.mean(dtype=theano.config.floatX) 
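The counting trick in apply() can be verified with plain numpy; the sketch below is illustrative and not part of the original file:

import numpy as np

y_hat = np.array([[0.3, 0.3, 0.4],
                  [0.5, 0.2, 0.3]])
y = np.array([2, 1])
truth_score = y_hat[np.arange(len(y)), y]
# >= also counts the true label itself, hence the -1
num_higher = (y_hat >= truth_score[:, None]).sum(axis=1) - 1
print(num_higher >= 2)  # top_k = 2 -> [False  True]: only row 1 is a mistake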
Example #2
Source File: test_basic_ops.py    From attention-lvcsr with MIT License
def test_elemwise_comparaison_cast():
    """
    Test that an elemwise comparison followed by a cast to float32 is
    pushed to the GPU.
    """

    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:

        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)

        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()]) 
Example #3
Source File: Model4DistancePrediction.py    From RaptorX-Contact with GNU General Public License v3.0
def errors4one(self, z, out, weight=None, distLabelType='12C'):
    distBins = config.distCutoffs[distLabelType]
    label8 = DistanceUtils.LabelsOfOneDistance(config.ContactDefinition, distBins)
    label15 = DistanceUtils.LabelsOfOneDistance(config.InteractionLimit, distBins)

    # coarsen the fine-grained distance labels into 3 classes (0, 1, 2)
    # by thresholding at the two cutoff labels
    z3C = T.cast( T.ge(z, label8), 'int32') + T.cast( T.ge(z, label15), 'int32')
    o3C = T.cast( T.ge(out, label8), 'int32') + T.cast( T.ge(out, label15), 'int32')

    if weight is not None:
        err = T.sum( T.mul(weight, T.neq(o3C, z3C) ) )*1./T.sum(weight)
    else:
        err = T.mean( T.neq(o3C, z3C) )

    ## err is a scalar; convert it to a tensor with ndim=1
    return T.stack([err])

    ## this function returns a vector of errors; its size equals the sum of ValueDims over all the responses
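The cascaded T.ge casts above coarsen a fine-grained distance label into one of three classes; a numpy illustration with made-up threshold labels:

import numpy as np

z = np.array([0, 3, 7, 11])  # fine-grained distance-bin labels
label8, label15 = 4, 9       # hypothetical cutoff labels
z3C = (z >= label8).astype('int32') + (z >= label15).astype('int32')
print(z3C)                   # [0 0 1 2]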
Example #4
Source File: optimizers.py    From sentence_classification with MIT License
def SGD(tparams, cost, inps, lr,clip_norm=5):
    """ default: lr=0.01 """
    
    grads = tensor.grad(cost, tparams.values())
    norm = tensor.sqrt(sum([tensor.sum(g**2) for g in grads]))
    # tensor.ge builds a symbolic comparison, so the rescaling must be
    # selected inside the graph with tensor.switch; a Python `if` cannot
    # branch on a symbolic expression
    grads = [tensor.switch(tensor.ge(norm, clip_norm), g*clip_norm/norm, g)
             for g in grads]
        
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) 
                for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    f_grad_shared = theano.function(inps, cost, updates=gsup)
    
    updates = []

    for p, g in zip(tparams.values(), gshared):       
        updated_p = p - lr * g
        updates.append((p, updated_p))
    
    f_update = theano.function([lr], [], updates=updates)
    
    return f_grad_shared, f_update 
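Both returned functions are meant to be called once per minibatch; a hedged usage sketch in which tparams, cost, inps, and minibatches are assumed to be defined elsewhere:

# hypothetical training loop
lr = tensor.scalar('lr')
f_grad_shared, f_update = SGD(tparams, cost, inps, lr)
for x_batch, y_batch in minibatches:
    cost_val = f_grad_shared(x_batch, y_batch)  # forward/backward, stores grads
    f_update(0.01)                              # applies the step with lr=0.01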
Example #5
Source File: core.py    From starry with MIT License
def flux(self, xo, yo, zo, ro, u):
        """Compute the light curve."""
        # Initialize flat light curve
        flux = tt.ones_like(xo)

        # Compute the occultation mask
        b = tt.sqrt(xo ** 2 + yo ** 2)
        b_occ = tt.invert(tt.ge(b, 1.0 + ro) | tt.le(zo, 0.0) | tt.eq(ro, 0.0))
        i_occ = tt.arange(b.size)[b_occ]

        # Get the Agol `c` coefficients
        c = self._get_cl(u)
        if self.udeg == 0:
            c_norm = c / (np.pi * c[0])
        else:
            c_norm = c / (np.pi * (c[0] + 2 * c[1] / 3))

        # Compute the occultation flux
        los = zo[i_occ]
        r = ro * tt.ones_like(los)
        flux = tt.set_subtensor(
            flux[i_occ], self._limbdark(c_norm, b[i_occ], r, los)[0]
        )
        return flux 
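The occultation mask keeps only the samples where the occultor can block the source; an equivalent numpy sketch with made-up inputs:

import numpy as np

xo = np.array([0.0, 2.5]); yo = np.zeros(2)
zo = np.array([1.0, 1.0]); ro = 0.1
b = np.sqrt(xo**2 + yo**2)
# occulted iff the disks overlap (b < 1 + ro), the occultor is in front
# (zo > 0), and it has a finite size (ro != 0)
b_occ = ~((b >= 1.0 + ro) | (zo <= 0.0) | (ro == 0.0))
print(np.arange(b.size)[b_occ])  # [0]: only the first sample is occulted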
Example #6
Source File: optimizers.py    From sentence_classification with MIT License
def Adagrad(tparams, cost, inps, lr, epsilon=1e-6,clip_norm=5):
    """ default: lr=0.01 """
    
    grads = tensor.grad(cost, tparams.values())
    norm = tensor.sqrt(sum([tensor.sum(g**2) for g in grads]))
    # as in the SGD example above, keep the norm test inside the graph
    grads = [tensor.switch(tensor.ge(norm, clip_norm), g*clip_norm/norm, g)
             for g in grads]
        
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) 
                for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    f_grad_shared = theano.function(inps, cost, updates=gsup)    
    
    updates = []
    
    for p, g in zip(tparams.values(), gshared):
        acc = theano.shared(p.get_value() * 0.)
        acc_t = acc + g ** 2
        updates.append((acc, acc_t))
        p_t = p - (lr / tensor.sqrt(acc_t + epsilon)) * g
        updates.append((p, p_t))
    
    f_update = theano.function([lr], [], updates=updates)
    
    return f_grad_shared, f_update 
Example #7
Source File: gradient_clipping.py    From TextDetector with GNU General Public License v3.0
def get_gradients(self, model, data, **kwargs):
        gradients, updates = self.cost.get_gradients(model, data, **kwargs)

        norm = tensor.sqrt(tensor.sum(
            [tensor.sum(param_gradient ** 2) for param, param_gradient
             in six.iteritems(gradients)
             if param.name not in self.exclude_params]
        ))

        clipped_gradients = OrderedDict()
        for param, param_gradient in six.iteritems(gradients):
            if param.name not in self.exclude_params:
                clipped_gradients[param] = tensor.switch(
                    tensor.ge(norm, self.clipping_value),
                    param_gradient / norm * self.clipping_value,
                    param_gradient
                )
        gradients.update(clipped_gradients)
        return gradients, updates 
Example #8
Source File: theano_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def in_top_k(predictions, targets, k):
    """Returns whether the `targets` are in the top `k` `predictions`.

    # Arguments
        predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
        targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
        k: An `int`, number of top elements to consider.

    # Returns
        A 1D tensor of length `batch_size` and type `bool`.
        `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
        values of `predictions[i]`.
    """
    # handle k < 1 and k >= predictions.shape[1] cases to match TF behavior
    if k < 1:
        # dtype='bool' is only available since Theano 0.9.0
        try:
            return T.zeros_like(targets, dtype='bool')
        except TypeError:
            return T.zeros_like(targets, dtype='int8')

    if k >= int_shape(predictions)[1]:
        try:
            return T.ones_like(targets, dtype='bool')
        except TypeError:
            return T.ones_like(targets, dtype='int8')

    predictions_k = T.sort(predictions)[:, -k]
    targets_values = predictions[T.arange(targets.shape[0]), targets]
    return T.ge(targets_values, predictions_k)
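A quick numpy check of the ranking logic with concrete, made-up scores:

import numpy as np

predictions = np.array([[0.1, 0.7, 0.2],
                        [0.6, 0.3, 0.1]], dtype='float32')
targets = np.array([1, 2])
k = 2
predictions_k = np.sort(predictions)[:, -k]          # k-th largest per row
targets_values = predictions[np.arange(2), targets]
print(targets_values >= predictions_k)               # [ True False]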


Example #9
Source File: theano_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def greater_equal(x, y):
    return T.ge(x, y) 
Example #10
Source File: format.py    From deep-learning-multipliers with GNU General Public License v2.0
def overflow(vector, NOB, NOIB):

    # compute the max value of the fixed-point representation, i.e. the
    # saturation value (NOB = number of bits, NOIB = number of integer bits)
    max_val = ((2.**NOB)-1)/(2.**(NOB - NOIB))

    # compute the overflow rate: the fraction of elements whose magnitude
    # reaches the saturation value
    return T.mean(T.switch(T.ge(T.abs_(vector), max_val), 1., 0.))
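As a worked example, an 8-bit format with 4 integer bits saturates at (2**8 - 1) / 2**(8 - 4) = 255/16:

NOB, NOIB = 8, 4
print(((2.**NOB) - 1) / (2.**(NOB - NOIB)))  # 15.9375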
Example #11
Source File: lstm_seqlabel_optimizer.py    From neural_wfst with MIT License
def clip_gradients(stack_config, grad_param):
    ''' Rescale the gradients of parameters marked for clipping so that
    their joint norm does not exceed the configured threshold.
    Params
    ------
    stack_config : configuration dict; 'clipping_value' holds the threshold.
    grad_param   : list of (gradient, parameter) pairs.
    '''
    threshold = stack_config['clipping_value']
    print 'clip_gradients threshold', threshold
    if threshold > 0:
        gradients_to_clip = []
        gradients_not_to_clip = []
        for (g, p) in grad_param:
            if (hasattr(p, 'clip_gradient') and p.clip_gradient):
                gradients_to_clip.append((g, p))
                print p.name, 'gradient is being clipped in optimizer.clip_gradients'
            else:
                gradients_not_to_clip.append((g, p))

        if len(gradients_to_clip) == 0:
            return grad_param

        total_grad_norm = tensor.sqrt(tensor.sum(
            [tensor.sum(g * g) for (g, _) in gradients_to_clip]))
        grad_norm_ge_threshold = tensor.ge(total_grad_norm, threshold)
        grad_thresholder = lambda _g: (tensor.switch(
            grad_norm_ge_threshold,
            _g * (threshold / total_grad_norm),
            _g))
        clipped_grad_param = []
        for (g, p) in gradients_to_clip:
            cg = grad_thresholder(g)
            cg.wrt_name = g.wrt_name
            clipped_grad_param.append((cg, p))
        clipped_grad_param += gradients_not_to_clip
        return clipped_grad_param
    else:
        return grad_param 
Example #12
Source File: xnor_net.py    From theano-xnor-net with MIT License
def binarize_conv_filters(W):
    """Binarize convolution weights and find the weight scaling factor
    W : theano tensor : convolution layer weight of dimension no_filters x no_feat_maps x h x w
    """
    # symbolic binary weight
    Wb = T.cast(T.switch(T.ge(W, 0), 1, -1), theano.config.floatX)
    # BinaryNet method
    #Wb = T.cast(T.switch(T.round(hard_sigmoid(W)), 1, -1), theano.config.floatX)

    # weight scaling factor
    # FIXME: directly compute the mean along axis 1,2,3 instead of reshaping    
    alpha = T.mean( T.reshape(T.abs_(W), (W.shape[0], W.shape[1]*W.shape[2]*W.shape[3])), axis=1)

    return Wb, alpha 
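The binarization and scaling can be mirrored in numpy; a sketch with random weights (the shapes are illustrative):

import numpy as np

W = np.random.randn(2, 3, 4, 4).astype('float32')  # filters x maps x h x w
Wb = np.where(W >= 0, 1., -1.).astype('float32')
alpha = np.abs(W).reshape(W.shape[0], -1).mean(axis=1)
# alpha[i] * Wb[i] is the XNOR-Net scaled-sign approximation of filter W[i]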
Example #13
Source File: xnor_net.py    From theano-xnor-net with MIT License
def binarize_fc_weights(W):
    # symbolic binary weight
    Wb = T.cast(T.switch(T.ge(W, 0),1,-1), theano.config.floatX)
    # BinaryNet method
    #Wb = T.cast(T.switch(T.round(hard_sigmoid(W)),1,-1), theano.config.floatX)

    alpha = T.mean(T.abs_(W), axis=0)
    return Wb, alpha 
Example #14
Source File: xnornet_layers.py    From theano-xnor-net with MIT License
def SignTheano(x):
    return T.cast(2.*T.ge(x,0)-1., theano.config.floatX) 
Example #15
Source File: layers_lscnn.py    From ShapeNet with GNU General Public License v3.0
def cubicBSpline(self, L):
    b = T.zeros_like(L)

    idx4 = T.ge(L, 0) * T.lt(L, 1)
    idx3 = T.ge(L, 1) * T.lt(L, 2)
    idx2 = T.ge(L, 2) * T.lt(L, 3)
    idx1 = T.ge(L, 3) * T.le(L, 4)

    b = T.switch(T.eq(idx4, 1), T.pow(L, 3) / 6, b)
    b = T.switch(T.eq(idx3, 1), (-3*T.pow(L-1,3) + 3*T.pow(L-1,2) + 3*(L-1) + 1) / 6, b)
    b = T.switch(T.eq(idx2, 1), ( 3*T.pow(L-2,3) - 6*T.pow(L-2,2)           + 4) / 6, b)
    b = T.switch(T.eq(idx1, 1), (-  T.pow(L-3,3) + 3*T.pow(L-3,2) - 3*(L-3) + 1) / 6, b)
    
    return b.T # b is K x K' and thus, as we multiply from the right with
               # betas, we need to transpose it 
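The four branches are the polynomial pieces of the (unnormalized) cubic B-spline supported on [0, 4]; a numpy re-implementation for spot-checking the values:

import numpy as np

def cubic_bspline(L):
    L = np.asarray(L, dtype=float)
    b = np.zeros_like(L)
    b = np.where((L >= 0) & (L < 1), L**3 / 6, b)
    b = np.where((L >= 1) & (L < 2), (-3*(L-1)**3 + 3*(L-1)**2 + 3*(L-1) + 1) / 6, b)
    b = np.where((L >= 2) & (L < 3), (3*(L-2)**3 - 6*(L-2)**2 + 4) / 6, b)
    b = np.where((L >= 3) & (L <= 4), (-(L-3)**3 + 3*(L-3)**2 - 3*(L-3) + 1) / 6, b)
    return b

print(cubic_bspline([0., 1., 2., 3., 4.]))  # [0, 1/6, 2/3, 1/6, 0]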
Example #16
Source File: layers.py    From 3D-R2N2 with MIT License
def error(self, y, threshold=0.5):
        # NOTE: tensor.eq counts agreements, so despite its name this returns
        # the accuracy of the thresholded prediction; tensor.neq would give
        # the error rate
        return tensor.mean(tensor.eq(tensor.ge(self.prediction(), threshold), y))
Example #17
Source File: constraints.py    From CAPTCHA-breaking with MIT License
def __call__(self, p):
        # T.ge(p, 0) is 1 where p >= 0 and 0 elsewhere, so multiplying
        # enforces a non-negativity constraint on the parameter
        p *= T.ge(p, 0)
        return p
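In numpy terms (illustrative values), the constraint zeroes every negative entry:

import numpy as np

p = np.array([-0.5, 0.0, 2.0])
p *= (p >= 0)
print(p)  # [-0.  0.  2.] (negative entries zeroed)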
Example #18
Source File: optimizers.py    From sentence_classification with MIT License
def Momentum(tparams, cost, inps, lr, momentum=0.9,clip_norm=5):
    """ default: lr=0.01 """
    
    grads = tensor.grad(cost, tparams.values())
    norm = tensor.sqrt(sum([tensor.sum(g**2) for g in grads]))
    # as in the SGD example above, keep the norm test inside the graph
    grads = [tensor.switch(tensor.ge(norm, clip_norm), g*clip_norm/norm, g)
             for g in grads]
        
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad'%k) 
                for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    f_grad_shared = theano.function(inps, cost, updates=gsup) 
    
    updates = []

    for p, g in zip(tparams.values(), gshared): 
        m = theano.shared(p.get_value() * 0.)
        m_new = momentum * m - lr * g
        updates.append((m, m_new))        
        
        updated_p = p + m_new
        updates.append((p, updated_p))
    
    f_update = theano.function([lr], [], updates=updates)
    
    return f_grad_shared, f_update