Python tensorflow.keras.backend.max() Examples

The following are 30 code examples of tensorflow.keras.backend.max(), drawn from open-source projects. Each example notes its source file, project, and license. You may also want to check out all available functions and classes of the tensorflow.keras.backend module.
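
Before the examples, a minimal sketch of K.max itself (the input values are illustrative): it reduces a tensor to its maximum, either globally or along a given axis, optionally keeping the reduced dimension.

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.array([[1.0, 5.0, 3.0],
                         [4.0, 2.0, 6.0]]))
print(K.eval(K.max(x)))                         # 6.0 -- global maximum
print(K.eval(K.max(x, axis=-1)))                # [5. 6.] -- per-row maxima
print(K.eval(K.max(x, axis=0, keepdims=True)))  # [[4. 5. 6.]] -- reduced axis kept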
Example #1
Source File: backend_keras.py    From kapre with MIT License
import numpy as np
from tensorflow.keras import backend as K


def amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0):
    """[K] Convert (linear) amplitude to decibel: 10 * log10(x).

    Parameters
    ----------
    x: Keras *batch* tensor or variable. It has to be a batch because of the sample-wise `K.max()`.

    amin: minimum amplitude. Amplitudes smaller than `amin` are clipped to `amin`.

    dynamic_range: dynamic range in decibels; the output is clipped to `[-dynamic_range, 0]`.

    """
    log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx())
    if K.ndim(x) > 1:
        axis = tuple(range(K.ndim(x))[1:])
    else:
        axis = None

    log_spec = log_spec - K.max(log_spec, axis=axis, keepdims=True)  # [-?, 0]
    log_spec = K.maximum(log_spec, -1 * dynamic_range)  # [-80, 0]
    return log_spec 
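
A minimal usage sketch (the spectrogram shape here is a hypothetical stand-in):

import numpy as np
from tensorflow.keras import backend as K

spec = K.constant(np.random.rand(2, 128, 44).astype("float32") + 1e-3)  # hypothetical batch of spectrograms
db = amplitude_to_decibel(spec)
print(K.eval(K.max(db)), K.eval(K.min(db)))  # max is 0.0 by construction; min is >= -80.0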
Example #2
Source File: backend.py    From DeepPoseKit with Apache License 2.0
from tensorflow.keras import backend as K


def _find_maxima(x, coordinate_scale=1, confidence_scale=255.0):
    """Find the (col, row, confidence) of each channel's maximum in a batch of 2D maps."""
    x = K.cast(x, K.floatx())

    col_max = K.max(x, axis=1)
    row_max = K.max(x, axis=2)

    maxima = K.max(col_max, 1)
    maxima = K.expand_dims(maxima, -2) / confidence_scale

    cols = K.cast(K.argmax(col_max, -2), K.floatx())
    rows = K.cast(K.argmax(row_max, -2), K.floatx())
    cols = K.expand_dims(cols, -2) * coordinate_scale
    rows = K.expand_dims(rows, -2) * coordinate_scale

    maxima = K.concatenate([cols, rows, maxima], -2)

    return maxima 
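
A minimal usage sketch, assuming input maps of shape (batch, rows, cols, channels); the shapes are illustrative:

import numpy as np
from tensorflow.keras import backend as K

maps = K.constant(np.random.rand(2, 64, 64, 3).astype("float32"))  # hypothetical confidence maps
out = _find_maxima(maps)
print(K.int_shape(out))  # (2, 3, 3): rows are (cols, rows, confidence), one column per channel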
Example #3
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x
        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_b = inp_b * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_b = K.l2_normalize(outp_b, -1)
            outp = K.batch_dot(outp_a, outp_b, axes=[2, 2])
            outp = K.max(outp, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Example #4
Source File: utils.py    From neuron with GNU General Public License v3.0
from tensorflow.keras import backend as K


def _softmax(x, axis=-1, alpha=1):
    """
    Building on the Keras implementation, with an additional alpha parameter.

    Softmax activation function.
    # Arguments
        x : Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: scale factor applied to x before the softmax (acts as an inverse temperature).
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D') 
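A short sketch of the alpha parameter's effect (values are illustrative): larger alpha sharpens the distribution toward a one-hot.

import numpy as np
from tensorflow.keras import backend as K

logits = K.constant(np.array([[1.0, 2.0, 3.0]]))
print(K.eval(_softmax(logits)))            # ~[0.09, 0.24, 0.67]
print(K.eval(_softmax(logits, alpha=5)))   # ~[0.00, 0.01, 0.99]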
Example #5
Source File: matching.py    From fancy-nlp with GNU General Public License v3.0
def call(self, inputs, **kwargs):
        sent1 = inputs[0]
        sent2 = inputs[1]

        v1 = K.expand_dims(sent1, -2) * self.kernel
        v2 = K.expand_dims(sent2, -2) * self.kernel
        v1 = K.l2_normalize(v1, axis=-1)
        v2 = K.l2_normalize(v2, axis=-1)
        matching = K.max(K.sum(K.expand_dims(v1, 2) * K.expand_dims(v2, 1), axis=-1), axis=-2)
        return matching 
Example #6
Source File: quantizers.py    From qkeras with Apache License 2.0
def max(self):
    """Get the maximum value that quantized_tanh can represent."""
    unsigned_bits = self.bits - 1
    if unsigned_bits > 0:
      return max(1.0, np.power(2.0, self.integer))
    else:
      return 1.0 
Example #7
Source File: quantizers.py    From qkeras with Apache License 2.0
def min(self):
    """Get the minimum value that quantized_tanh can represent."""
    unsigned_bits = self.bits - 1
    if unsigned_bits > 0:
      return -max(1.0, np.power(2.0, self.integer))
    else:
      return -1.0 
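
For intuition, a standalone sketch of the arithmetic these quantizer min/max methods share (the bits/integer values here are hypothetical stand-ins for the quantizer's configuration):

import numpy as np

bits, integer = 8, 3               # hypothetical quantizer configuration
unsigned_bits = bits - 1           # one bit is reserved for the sign
max_val = max(1.0, np.power(2.0, integer)) if unsigned_bits > 0 else 1.0
print(max_val, -max_val)           # 8.0 -8.0 -- the representable range endpoints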
Example #8
Source File: quantizers.py    From qkeras with Apache License 2.0
def max(self):
    """Get the maximum value that quantized_po2 can represent."""
    if self.max_value:
      return max(1.0, self.max_value)
    else:
      return max(1.0, 2**self._max_exp) 
Example #9
Source File: quantizers.py    From qkeras with Apache License 2.0
def min(self):
    """Get the minimum value that quantized_po2 can represent."""
    if self.max_value:
      return -max(1.0, self.max_value)
    else:
      return -max(1.0, 2**self._max_exp) 
Example #10
Source File: quantizers.py    From qkeras with Apache License 2.0
def max(self):
    """Get the maximum value that quantized_relu_po2 can represent."""
    if self.max_value:
      return max(1.0, self.max_value)
    else:
      return max(1.0, 2**self._max_exp) 
Example #11
Source File: losses.py    From ivis with GNU General Public License v2.0
from tensorflow.keras import backend as K


def _chebyshev_distance(x, y):
    # Chebyshev (L-infinity) distance: the largest absolute coordinate difference.
    return K.max(K.abs(x - y), axis=-1, keepdims=True)
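
A quick check with concrete values (illustrative):

import numpy as np
from tensorflow.keras import backend as K

a = K.constant(np.array([[0.0, 1.0, 4.0]]))
b = K.constant(np.array([[2.0, 1.0, 1.0]]))
print(K.eval(_chebyshev_distance(a, b)))  # [[3.]] == max(|0-2|, |1-1|, |4-1|)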
Example #12
Source File: losses.py    From ivis with GNU General Public License v2.0
import numpy as np


def consecutive_indexed(Y):
    """Assumes that Y is zero-indexed. Returns True if the labels in Y are consecutive (-1 entries are ignored)."""
    n_classes = len(np.unique(Y[Y != np.array(-1)]))
    if max(Y) >= n_classes:
        return False
    return True
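
Illustrative calls:

import numpy as np

print(consecutive_indexed(np.array([0, 1, 2, -1])))  # True: labels 0..2, -1 ignored
print(consecutive_indexed(np.array([0, 2, 2])))      # False: label 2 seen but only 2 classes present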
Example #13
Source File: training.py    From medaka with Mozilla Public License 2.0
def qscore(y_true, y_pred):
    """Keras metric function for calculating scaled error.

    :param y_true: tensor of true class labels.
    :param y_pred: class output scores from network.

    :returns: class error expressed as a phred score.
    """
    from tensorflow.keras import backend as K
    error = K.cast(K.not_equal(
        K.max(y_true, axis=-1), K.cast(K.argmax(y_pred, axis=-1), K.floatx())),
        K.floatx()
    )
    error = K.sum(error) / K.sum(K.ones_like(error))
    return -10.0 * 0.434294481 * K.log(error) 
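
The constant 0.434294481 is 1/ln(10), so the last line is -10 * log10(error), i.e. the standard phred scaling. A quick sanity check of the scaling (values are illustrative):

import numpy as np

error_rate = 0.01
print(-10.0 * np.log10(error_rate))              # 20.0 -- a 1% error rate is Q20
print(-10.0 * 0.434294481 * np.log(error_rate))  # same value, via the natural-log form above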
Example #14
Source File: quantizers.py    From qkeras with Apache License 2.0
def max(self):
    """Get the maximum value that quantized_ulaw can represent."""
    unsigned_bits = self.bits - 1

    if unsigned_bits > 0:
      return max(1.0, np.power(2.0, self.integer))
    else:
      return 1.0 
Example #15
Source File: bilstm_siamese_network.py    From DeepPavlov with Apache License 2.0
def _batch_hard_triplet_loss(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
        mask_anchor_positive = self._get_anchor_positive_triplet_mask(y_true, pairwise_dist)
        anchor_positive_dist = mask_anchor_positive * pairwise_dist
        hardest_positive_dist = K.max(anchor_positive_dist, axis=1, keepdims=True)
        mask_anchor_negative = self._get_anchor_negative_triplet_mask(y_true, pairwise_dist)
        anchor_negative_dist = mask_anchor_negative * pairwise_dist
        mask_anchor_negative = self._get_semihard_anchor_negative_triplet_mask(anchor_negative_dist,
                                                                               hardest_positive_dist,
                                                                               mask_anchor_negative)
        max_anchor_negative_dist = K.max(pairwise_dist, axis=1, keepdims=True)
        anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
        hardest_negative_dist = K.min(anchor_negative_dist, axis=1, keepdims=True)
        triplet_loss = K.clip(hardest_positive_dist - hardest_negative_dist + self.margin, 0.0, None)
        triplet_loss = K.mean(triplet_loss)
        return triplet_loss 
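
A standalone sketch of the hardest-positive step used above, on a hypothetical 3x3 pairwise-distance matrix and positive mask (the class's helper-mask methods are not reproduced here):

import numpy as np
from tensorflow.keras import backend as K

dist = K.constant(np.array([[0.0, 0.3, 0.9],
                            [0.3, 0.0, 0.7],
                            [0.9, 0.7, 0.0]], dtype="float32"))
pos_mask = K.constant(np.array([[1, 1, 0],
                                [1, 1, 0],
                                [0, 0, 1]], dtype="float32"))
hardest_positive = K.max(pos_mask * dist, axis=1, keepdims=True)
print(K.eval(hardest_positive))  # [[0.3] [0.3] [0.0]] -- farthest same-class sample per anchor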
Example #16
Source File: bilstm_siamese_network.py    From DeepPavlov with Apache License 2.0
def _get_semihard_anchor_negative_triplet_mask(self, negative_dist: Tensor,
                                                   hardest_positive_dist: Tensor,
                                                   mask_negative: Tensor) -> Tensor:
        # mask max(dist(a,p)) < dist(a,n)
        mask = K.greater(negative_dist, hardest_positive_dist)
        mask = K.cast(mask, K.dtype(negative_dist))
        mask_semihard = K.cast(K.expand_dims(K.greater(K.sum(mask, 1), 0.0), 1), K.dtype(negative_dist))
        mask = mask_negative * (1 - mask_semihard) + mask * mask_semihard
        return mask 
Example #17
Source File: morpho_tagger.py    From DeepPavlov with Apache License 2.0
def _build_word_cnn(self, inputs):
        """Builds word-level network
        """
        inputs = Lambda(K.one_hot, arguments={"num_classes": len(self.symbols)},
                        output_shape=lambda x: tuple(x) + (len(self.symbols),))(inputs)
        char_embeddings = Dense(self.char_embeddings_size, use_bias=False)(inputs)
        conv_outputs = []
        self.char_output_dim_ = 0
        for window_size, filters_number in zip(self.char_window_size, self.char_filters):
            curr_output = char_embeddings
            curr_filters_number = (min(self.char_filter_multiple * window_size, 200)
                                   if filters_number is None else filters_number)
            for _ in range(self.char_conv_layers - 1):
                curr_output = Conv2D(curr_filters_number, (1, window_size),
                                     padding="same", activation="relu",
                                     data_format="channels_last")(curr_output)
                if self.conv_dropout > 0.0:
                    curr_output = Dropout(self.conv_dropout)(curr_output)
            curr_output = Conv2D(curr_filters_number, (1, window_size),
                                 padding="same", activation="relu",
                                 data_format="channels_last")(curr_output)
            conv_outputs.append(curr_output)
            self.char_output_dim_ += curr_filters_number
        if len(conv_outputs) > 1:
            conv_output = Concatenate(axis=-1)(conv_outputs)
        else:
            conv_output = conv_outputs[0]
        highway_input = Lambda(K.max, arguments={"axis": -2})(conv_output)
        if self.intermediate_dropout > 0.0:
            highway_input = Dropout(self.intermediate_dropout)(highway_input)
        for i in range(self.char_highway_layers - 1):
            highway_input = Highway(activation="relu")(highway_input)
            if self.highway_dropout > 0.0:
                highway_input = Dropout(self.highway_dropout)(highway_input)
        highway_output = Highway(activation="relu")(highway_input)
        return highway_output 
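
The Lambda(K.max, arguments={"axis": -2}) layer above performs max-over-characters pooling. A minimal standalone sketch of that pattern (the shapes are hypothetical):

import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model

inp = Input(shape=(5, 4, 16))                        # hypothetical (words, chars, filters)
pooled = Lambda(K.max, arguments={"axis": -2})(inp)  # max over the character axis
model = Model(inp, pooled)
print(model.output_shape)                            # (None, 5, 16)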
Example #18
Source File: morpho_tagger.py    From DeepPavlov with Apache License 2.0
def _transform_batch(self, data, labels=None, transform_to_one_hot=True):
        data, additional_data = data[0], data[1:]
        L = max(len(x) for x in data)
        X = np.array([self._make_sent_vector(x, L) for x in data])
        X = [X] + [np.array(x) for x in additional_data]
        if labels is not None:
            Y = np.array([self._make_tags_vector(y, L) for y in labels])
            if transform_to_one_hot:
                Y = to_one_hot(Y, len(self.tags))
            return X, Y
        else:
            return X 
Example #19
Source File: postprocess.py    From keras-YOLOv3-model-set with MIT License
import tensorflow as tf
from tensorflow.keras import backend as K


def yolo2_filter_boxes(boxes, box_confidence, box_class_probs, threshold=.6):
    """Filter YOLOv2 boxes based on object and class confidence."""
    box_scores = box_confidence * box_class_probs
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)
    prediction_mask = box_class_scores >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    boxes = tf.boolean_mask(boxes, prediction_mask)
    scores = tf.boolean_mask(box_class_scores, prediction_mask)
    classes = tf.boolean_mask(box_classes, prediction_mask)
    return boxes, scores, classes 
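
A minimal usage sketch on a flat set of candidate boxes (shapes and values are illustrative; the real YOLO tensors carry extra grid dimensions):

import numpy as np
from tensorflow.keras import backend as K

boxes = K.constant(np.random.rand(4, 4).astype("float32"))                  # 4 candidate boxes
conf = K.constant(np.array([[0.9], [0.2], [0.8], [0.1]], dtype="float32"))  # objectness scores
probs = K.constant(np.random.rand(4, 3).astype("float32"))                  # 3 class probabilities
b, s, c = yolo2_filter_boxes(boxes, conf, probs, threshold=0.3)
print(K.eval(s), K.eval(c))  # scores and class indices of the surviving boxes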
Example #20
Source File: custom_activation.py    From Echo with MIT License
def call(self, inputs):
        # Reduce the input tensor to its single largest element.
        return K.max(inputs)
Example #21
Source File: quantizers.py    From qkeras with Apache License 2.0
def max(self):
    """Get the maximum value bernoulli class can represent."""
    if self.alpha is None or isinstance(self.alpha, six.string_types):
      return 1.0
    else:
      return max(1.0, self.alpha) 
Example #22
Source File: dqn.py    From keras-rl2 with MIT License
from tensorflow.keras import backend as K


def mean_q(y_true, y_pred):
    # Average, over the batch, of the highest predicted Q-value per state; y_true is unused.
    return K.mean(K.max(y_pred, axis=-1))
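
Illustrative call (y_true is ignored, so any placeholder works):

import numpy as np
from tensorflow.keras import backend as K

q = K.constant(np.array([[1.0, 3.0], [2.0, 5.0]]))  # hypothetical Q-values: 2 states, 2 actions
print(K.eval(mean_q(None, q)))                      # 4.0 == mean(max(1,3), max(2,5))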
Example #23
Source File: metrics.py    From neuron with GNU General Public License v3.0
from tensorflow.keras import backend as K


def _hard_max(tens, axis):
    """
    We can't use the argmax function in a loss, as it's not differentiable.
    We can use it in a metric, but not in a loss function.
    Therefore, we replace the 'hard max' operation (i.e. argmax + one-hot)
    with this approximation.
    """
    tensmax = K.max(tens, axis=axis, keepdims=True)
    eps_hot = K.maximum(tens - tensmax + K.epsilon(), 0)
    one_hot = eps_hot / K.epsilon()
    return one_hot 
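
A quick check (values are illustrative): the result is a one-hot at the max position, built entirely from differentiable ops.

import numpy as np
from tensorflow.keras import backend as K

t = K.constant(np.array([[0.1, 0.7, 0.2]]))
print(K.eval(_hard_max(t, axis=-1)))  # [[0. 1. 0.]]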
Example #24
Source File: utils.py    From neuron with GNU General Public License v3.0
def next_pred_label(model, data_generator, verbose=False):
    """
    predict the next sample batch from the generator, and compute max labels
    return sample, prediction, max_labels
    """
    sample = next(data_generator)
    with timer.Timer('prediction', verbose):
        pred = model.predict(sample[0])
    sample_input = sample[0] if not isinstance(sample[0], (list, tuple)) else sample[0][0]
    max_labels = pred_to_label(sample_input, pred)
    return (sample, pred) + max_labels 
Example #25
Source File: utils.py    From neuron with GNU General Public License v3.0
def next_label(model, data_generator):
    """
    predict the next sample batch from the generator, and compute max labels
    return max_labels
    """
    batch_proc = next_pred_label(model, data_generator)
    return (batch_proc[2], batch_proc[3]) 
Example #26
Source File: quantizers.py    From qkeras with Apache License 2.0
def max(self):
    """Get maximum value that quantized_bits class can represent."""
    unsigned_bits = self.bits - self.keep_negative

    if unsigned_bits > 0:
      return max(1.0, np.power(2.0, self.integer))
    else:
      return 1.0 
Example #27
Source File: quantizers.py    From qkeras with Apache License 2.0 5 votes vote down vote up
def min(self):
    """Get minimum value that quantized_bits class can represent."""
    if not self.keep_negative:
      return 0.0
    unsigned_bits = self.bits - self.keep_negative
    if unsigned_bits > 0:
      return -max(1.0, np.power(2.0, self.integer))
    else:
      return -1.0 
Example #28
Source File: quantizers.py    From qkeras with Apache License 2.0
def min(self):
    """Get the minimum value that quantized_ulaw can represent."""
    unsigned_bits = self.bits - 1

    if unsigned_bits > 0:
      return -max(1.0, np.power(2.0, self.integer))
    else:
      return -1.0 
Example #29
Source File: quantizers.py    From qkeras with Apache License 2.0
def max(self):
    """Get the maximum value that ternary can respresent."""
    if self.alpha is None or isinstance(self.alpha, six.string_types):
      return 1.0
    else:
      return max(1.0, self.alpha) 
Example #30
Source File: quantizers.py    From qkeras with Apache License 2.0
def min(self):
    """Get the minimum value that ternary can respresent."""
    if self.alpha is None or isinstance(self.alpha, six.string_types):
      return -1.0
    else:
      return -max(1.0, self.alpha)