Python keras.backend.gather() Examples

The following are 24 code examples of keras.backend.gather(). K.gather(reference, indices) retrieves the entries of reference along its first axis at the positions given by indices. Each example lists the project and source file it comes from. You may also want to check out all available functions/classes of the module keras.backend.
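As a quick orientation before the examples, here is a minimal sketch of the basic call, assuming a TensorFlow backend; the tensor names and values are illustrative only:

import numpy as np
from keras import backend as K

# reference: a (4, 3) tensor; indices: which rows to pick along the first axis
reference = K.variable(np.arange(12).reshape(4, 3))
indices = K.constant([0, 2], dtype='int32')

rows = K.gather(reference, indices)   # shape (2, 3)
print(K.eval(rows))                   # [[0. 1. 2.]
                                      #  [6. 7. 8.]]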
Example #1
Source File: region.py    From keras-yolo with MIT License
def _process_input(self, x):
        """Apply logistic and softmax activations to input tensor
        """
        logistic_activate = lambda x: 1.0/(1.0 + K.exp(-x))
        
        (batch, w, h, channels) = x.get_shape()
        x_temp = K.permute_dimensions(x, (3, 0, 1, 2))
        x_t = []
        for i in range(self.num):
            k = self._entry_index(i, 0)
            x_t.extend([
                logistic_activate(K.gather(x_temp, (k, k + 1))), # 0
                K.gather(x_temp, (k + 2, k + 3))])
            if self.background:
                x_t.append(K.gather(x_temp, (k + 4,)))
            else:
                x_t.append(logistic_activate(K.gather(x_temp, (k + 4,))))
                
            x_t.append(
                softmax(
                    K.gather(x_temp, tuple(range(k + 5, k + self.coords + self.classes + 1))),
                    axis=0))
        x_t = K.concatenate(x_t, axis=0)
        return K.permute_dimensions(x_t, (1, 2, 3, 0)) 
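K.gather always indexes along the first axis, which is why the example permutes the channel axis to the front before slicing out channel groups. A small sketch of that pattern, with illustrative shapes rather than the example's own:

import numpy as np
from keras import backend as K

x = K.variable(np.random.rand(2, 5, 5, 8))       # (batch, w, h, channels), toy shape
x_temp = K.permute_dimensions(x, (3, 0, 1, 2))   # channels first: (channels, batch, w, h)
first_two = K.gather(x_temp, (0, 1))             # channels 0 and 1, gathered along axis 0
print(K.eval(first_two).shape)                   # (2, 2, 5, 5)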
Example #2
Source File: backend_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_gather(self):
        shape = (10, 2, 3)
        ref = np.arange(np.prod(shape)).reshape(shape)
        inds = [1, 3, 7, 9]
        z_list = [k.eval(k.gather(k.variable(ref), k.variable(inds, dtype='int32')))
                  for k in BACKENDS]

        assert_list_pairwise(z_list)
        assert_list_keras_shape(z_list)

        # test theano shape inference when
        # input shape has None entries
        if K.backend() == 'theano':
            x = K.placeholder(shape=(None, 3, 4))
            indices = K.placeholder(shape=(5, 6), dtype='int32')
            y = K.gather(x, indices)
            assert y._keras_shape == (5, 6, 3, 4) 
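The final assertion reflects the general shape rule for gather: the output shape is indices.shape + reference.shape[1:], which is also how NumPy fancy indexing along the first axis behaves. A one-line NumPy check with the test's own shapes:

import numpy as np

ref = np.arange(60).reshape((10, 2, 3))
inds = np.array([1, 3, 7, 9])
print(ref[inds].shape)   # (4, 2, 3) == inds.shape + ref.shape[1:], matching K.gather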
Example #3
Source File: custom_loss.py    From mhcflurry with Apache License 2.0
def loss(self, y_true, y_pred):
        from keras import backend as K
        y_true = K.flatten(y_true)

        output_indices = y_true // 10
        updated_y_true = y_true - (10 * output_indices)

        # We index into y_pred using flattened indices since Keras backend
        # supports gather but has no equivalent of tf.gather_nd:
        ordinals = K.arange(K.shape(y_true)[0])
        flattened_indices = (
            ordinals * y_pred.shape[1] + K.cast(output_indices, "int32"))
        updated_y_pred = K.gather(K.flatten(y_pred), flattened_indices)

        # Alternative implementation using tensorflow, which could be used if
        # we drop support for other backends:
        # import tensorflow as tf
        # indexer = K.stack([
        #     ordinals,
        #     K.cast(output_indices, "int32")
        # ], axis=-1)
        #updated_y_pred = tf.gather_nd(y_pred, indexer)

        return MSEWithInequalities().loss(updated_y_true, updated_y_pred) 
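The flattened-index trick in this loss is worth spelling out: picking y_pred[i, output_indices[i]] for every row i is the same as indexing the flattened predictions at i * n_outputs + output_indices[i]. A NumPy check of that arithmetic with toy shapes (not mhcflurry's real encoding):

import numpy as np

y_pred = np.arange(12, dtype=float).reshape(3, 4)   # (batch, n_outputs)
output_indices = np.array([2, 0, 3])                # which output to keep per row
ordinals = np.arange(y_pred.shape[0])

via_flat = y_pred.reshape(-1)[ordinals * y_pred.shape[1] + output_indices]
via_nd = y_pred[ordinals, output_indices]           # what tf.gather_nd would return
assert np.array_equal(via_flat, via_nd)             # [2., 4., 11.] both ways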
Example #4
Source File: keras_yolov3.py    From perceptron-benchmark with Apache License 2.0
def _target_class_loss(
            self,
            target_class,
            box_scores,
            box_class_probs_logits):
        """ Evaluate target_class_loss w.r.t. the input.

        """
        box_scores = K.squeeze(box_scores, axis=0)
        box_class_probs_logits = K.squeeze(box_class_probs_logits, axis=0)
        import tensorflow as tf
        boi_idx = tf.where(box_scores[:, target_class] > self._score)
        loss_box_class_conf = tf.reduce_mean(
            tf.gather(box_class_probs_logits[:, target_class], boi_idx))

        # Avoid the propagation of nan
        return tf.cond(
            tf.is_nan(loss_box_class_conf),
            lambda: tf.constant(0.),
            lambda: loss_box_class_conf) 
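The pattern here is: select indices with tf.where, gather the matching logits, and guard the mean against NaN for the case where no box passes the threshold. A standalone toy sketch of the same steps, assuming TensorFlow 2.x eager execution (tf.is_nan is spelled tf.math.is_nan in newer releases):

import tensorflow as tf

scores = tf.constant([0.1, 0.8, 0.6])
logits = tf.constant([2.0, 5.0, 3.0])

idx = tf.where(scores > 0.5)                       # indices of boxes above the score threshold
mean_sel = tf.reduce_mean(tf.gather(logits, idx))  # mean logit over the selected boxes
safe = tf.cond(tf.math.is_nan(mean_sel),           # fall back to 0.0 when nothing is selected
               lambda: tf.constant(0.0),
               lambda: mean_sel)
print(float(safe))                                 # 4.0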
Example #5
Source File: resnet.py    From keras-CenterNet with Apache License 2.0
def decode(hm, wh, reg, max_objects=100, nms=True, flip_test=False, num_classes=20, score_threshold=0.1):
    if flip_test:
        hm = (hm[0:1] + hm[1:2, :, ::-1]) / 2
        wh = (wh[0:1] + wh[1:2, :, ::-1]) / 2
        reg = reg[0:1]
    scores, indices, class_ids, xs, ys = topk(hm, max_objects=max_objects)
    b = tf.shape(hm)[0]
    # (b, h * w, 2)
    reg = tf.reshape(reg, (b, -1, tf.shape(reg)[-1]))
    # (b, h * w, 2)
    wh = tf.reshape(wh, (b, -1, tf.shape(wh)[-1]))
    # (b, k, 2)
    topk_reg = tf.gather(reg, indices, batch_dims=1)
    # (b, k, 2)
    topk_wh = tf.cast(tf.gather(wh, indices, batch_dims=1), tf.float32)
    topk_cx = tf.cast(tf.expand_dims(xs, axis=-1), tf.float32) + topk_reg[..., 0:1]
    topk_cy = tf.cast(tf.expand_dims(ys, axis=-1), tf.float32) + topk_reg[..., 1:2]
    scores = tf.expand_dims(scores, axis=-1)
    class_ids = tf.cast(tf.expand_dims(class_ids, axis=-1), tf.float32)
    topk_x1 = topk_cx - topk_wh[..., 0:1] / 2
    topk_x2 = topk_cx + topk_wh[..., 0:1] / 2
    topk_y1 = topk_cy - topk_wh[..., 1:2] / 2
    topk_y2 = topk_cy + topk_wh[..., 1:2] / 2
    # (b, k, 6)
    detections = tf.concat([topk_x1, topk_y1, topk_x2, topk_y2, scores, class_ids], axis=-1)
    if nms:
        detections = tf.map_fn(lambda x: evaluate_batch_item(x[0],
                                                             num_classes=num_classes,
                                                             score_threshold=score_threshold),
                               elems=[detections],
                               dtype=tf.float32)
    return detections 
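tf.gather(..., batch_dims=1) gathers a different set of indices for every batch element, something plain K.gather cannot express directly. A small sketch of its semantics with illustrative shapes, assuming TensorFlow 2.x eager execution (batch_dims itself requires TF 1.14 or newer):

import tensorflow as tf

reg = tf.reshape(tf.range(16, dtype=tf.float32), (2, 4, 2))   # (b, h * w, 2)
indices = tf.constant([[0, 3], [1, 2]])                       # (b, k): top-k positions per image

topk_reg = tf.gather(reg, indices, batch_dims=1)              # (b, k, 2)
print(topk_reg.numpy())
# batch 0 takes its rows 0 and 3, batch 1 takes its rows 1 and 2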
Example #6
Source File: yolov3.py    From keras-onnx with MIT License
def call(self, inputs, **kwargs):
        boxes = inputs[0]
        box_scores = inputs[1]
        box_scores_transpose = tf.transpose(box_scores, perm=[1, 0])
        boxes_number = tf.shape(boxes)[0]
        box_range = tf.range(boxes_number)

        mask = box_scores >= self.score_threshold
        max_boxes_tensor = K.constant(self.max_boxes, dtype='int32')
        classes_ = []
        batch_indexs_ = []
        nms_indexes_ = []
        class_box_range_ = []
        for c in range(self.num_classes):
            class_boxes = tf.boolean_mask(boxes, mask[:, c])
            class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
            class_box_range = tf.boolean_mask(box_range, mask[:, c])
            nms_index = tf.image.non_max_suppression(
                class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=self.iou_threshold)
            class_box_scores = K.gather(class_box_scores, nms_index)
            class_box_range = K.gather(class_box_range, nms_index)
            classes = K.ones_like(class_box_scores, 'int32') * c
            batch_index = K.zeros_like(class_box_scores, 'int32')
            batch_indexs_.append(batch_index)
            classes_.append(classes)
            nms_indexes_.append(nms_index)
            class_box_range_.append(class_box_range)

        classes_ = K.concatenate(classes_, axis=0)
        batch_indexs_ = K.concatenate(batch_indexs_, axis=0)
        class_box_range_ = K.concatenate(class_box_range_, axis=0)

        boxes_1 = tf.expand_dims(boxes, 0)
        classes_1 = tf.expand_dims(classes_, 1)
        batch_indexs_ = tf.expand_dims(batch_indexs_, 1)
        class_box_range_ = tf.expand_dims(class_box_range_, 1)
        box_scores_transpose_1 = tf.expand_dims(box_scores_transpose, 0)
        nms_final_ = K.concatenate([batch_indexs_, classes_1, class_box_range_], axis=1)
        nms_final_1 = tf.expand_dims(nms_final_, 0)
        return [boxes_1, box_scores_transpose_1, nms_final_1] 
Example #7
Source File: resnet.py    From keras-CenterNet with Apache License 2.0
def evaluate_batch_item(batch_item_detections, num_classes, max_objects_per_class=20, max_objects=100,
                        iou_threshold=0.5, score_threshold=0.1):
    batch_item_detections = tf.boolean_mask(batch_item_detections,
                                            tf.greater(batch_item_detections[:, 4], score_threshold))
    detections_per_class = []
    for cls_id in range(num_classes):
        class_detections = tf.boolean_mask(batch_item_detections, tf.equal(batch_item_detections[:, 5], cls_id))
        nms_keep_indices = tf.image.non_max_suppression(class_detections[:, :4],
                                                        class_detections[:, 4],
                                                        max_objects_per_class,
                                                        iou_threshold=iou_threshold)
        class_detections = K.gather(class_detections, nms_keep_indices)
        detections_per_class.append(class_detections)

    batch_item_detections = K.concatenate(detections_per_class, axis=0)

    def filter():
        nonlocal batch_item_detections
        _, indices = tf.nn.top_k(batch_item_detections[:, 4], k=max_objects)
        batch_item_detections_ = tf.gather(batch_item_detections, indices)
        return batch_item_detections_

    def pad():
        nonlocal batch_item_detections
        batch_item_num_detections = tf.shape(batch_item_detections)[0]
        batch_item_num_pad = tf.maximum(max_objects - batch_item_num_detections, 0)
        batch_item_detections_ = tf.pad(tensor=batch_item_detections,
                                        paddings=[
                                            [0, batch_item_num_pad],
                                            [0, 0]],
                                        mode='CONSTANT',
                                        constant_values=0.0)
        return batch_item_detections_

    batch_item_detections = tf.cond(tf.shape(batch_item_detections)[0] >= 100,
                                    filter,
                                    pad)
    return batch_item_detections 
Example #8
Source File: modeling.py    From BERT_with_keras with MIT License
def call(self, inputs, **kwargs):
        if not self.mask:
            if K.dtype(inputs) != 'int32':
                inputs = K.cast(inputs, 'int32')
            out0 = K.gather(self.embeddings, inputs)
            out1 = tf.convert_to_tensor(self.embeddings)
            return [out0, out1]
        else:
            inputs = inputs[0]
            if K.dtype(inputs) != 'int32':
                inputs = K.cast(inputs, 'int32')
            out0 = K.gather(self.embeddings, inputs)
            out1 = tf.convert_to_tensor(self.embeddings)
            return [out0, out1] 
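K.gather on an embedding matrix is the standard embedding lookup: integer token ids index rows of the weight matrix. A minimal sketch of the lookup outside the layer, with a toy vocabulary and hidden size rather than the layer's own weights:

import numpy as np
from keras import backend as K

embeddings = K.variable(np.random.rand(6, 4))        # (vocab_size, hidden_size), toy values
token_ids = K.constant([[1, 3, 3]], dtype='int32')   # (batch, seq_len)

looked_up = K.gather(embeddings, token_ids)          # (batch, seq_len, hidden_size)
print(K.eval(looked_up).shape)                       # (1, 3, 4)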
Example #9
Source File: layers.py    From delft with Apache License 2.0
def batch_gather(reference, indices):
    ref_shape = K.shape(reference)
    batch_size = ref_shape[0]
    n_classes = ref_shape[1]
    flat_indices = K.arange(0, batch_size) * n_classes + K.flatten(indices)
    return K.gather(K.flatten(reference), flat_indices) 
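batch_gather picks one entry per row, reference[i, indices[i]], by flattening the matrix and turning each (row, column) pair into a flat offset, the same trick as in Example #3. A quick usage sketch of the helper defined above, with toy values:

import numpy as np
from keras import backend as K

reference = K.variable(np.array([[10., 11., 12.],
                                 [20., 21., 22.]]))
indices = K.constant([2, 0], dtype='int32')

picked = batch_gather(reference, indices)   # calls the helper defined above
print(K.eval(picked))                       # [12. 20.]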
Example #10
Source File: ChainCRF.py    From emnlp2017-bilstm-cnn-crf with Apache License 2.0
def path_energy0(y, x, U, mask=None):
    '''Path energy without boundary potential handling.'''
    n_classes = K.shape(x)[2]
    y_one_hot = K.one_hot(y, n_classes)

    # Tag path energy
    energy = K.sum(x * y_one_hot, 2)
    energy = K.sum(energy, 1)

    # Transition energy
    y_t = y[:, :-1]
    y_tp1 = y[:, 1:]
    U_flat = K.reshape(U, [-1])
    # Convert 2-dim indices (y_t, y_tp1) of U to 1-dim indices of U_flat:
    flat_indices = y_t * n_classes + y_tp1
    U_y_t_tp1 = K.gather(U_flat, flat_indices)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        y_t_mask = mask[:, :-1]
        y_tp1_mask = mask[:, 1:]
        U_y_t_tp1 *= y_t_mask * y_tp1_mask

    energy += K.sum(U_y_t_tp1, axis=1)

    return energy 
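The gather on U_flat relies on the identity that flattening a square matrix row-major turns the 2-D index (i, j) into i * n_classes + j, so K.gather(U_flat, y_t * n_classes + y_tp1) reads the transition energy U[y_t, y_tp1] for every adjacent tag pair. A NumPy check with a toy transition matrix:

import numpy as np

n_classes = 3
U = np.arange(n_classes * n_classes, dtype=float).reshape(n_classes, n_classes)
y_t, y_tp1 = np.array([0, 2]), np.array([1, 1])

via_flat = U.reshape(-1)[y_t * n_classes + y_tp1]
assert np.array_equal(via_flat, U[y_t, y_tp1])   # [1., 7.] both ways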
Example #11
Source File: ChainCRF.py    From emnlp2017-bilstm-cnn-crf with Apache License 2.0
def batch_gather(reference, indices):
    ref_shape = K.shape(reference)
    batch_size = ref_shape[0]
    n_classes = ref_shape[1]
    flat_indices = K.arange(0, batch_size) * n_classes + K.flatten(indices)
    return K.gather(K.flatten(reference), flat_indices) 
Example #12
Source File: losses.py    From kaggle-carvana-2017 with MIT License
def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """ Implements nline Bootstrapping crossentropy loss, to train only on hard pixels,
        see  https://arxiv.org/abs/1605.06885 Bridging Category-level and Instance-level Semantic Image Segmentation
        The implementation is a bit different as we use binary crossentropy instead of softmax
        SUPPORTS ONLY MINIBATCH WITH 1 ELEMENT!
    # Arguments
        y_true: A tensor with labels.

        y_pred: A tensor with predicted probabilities.

        pixels: number of hard pixels to keep

        threshold: confidence to use, i.e. if threshold is 0.7, y_true=1, prediction=0.65 then we consider that pixel as hard
    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    difference = K.abs(y_true - y_pred)

    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))

    return K.mean(K.binary_crossentropy(y_true, y_pred)) 
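The selection step can be read as: rank pixels by |y_true - y_pred|, keep the k largest, then drop any whose error is still below 1 - threshold. A NumPy sketch of just that selection, with toy values (the K.tf alias used above only works in older Keras releases, where K.tf exposed the bundled TensorFlow module):

import numpy as np

y_true = np.array([1., 1., 0., 1.])
y_pred = np.array([0.9, 0.2, 0.7, 0.4])
pixels, threshold = 2, 0.5

difference = np.abs(y_true - y_pred)               # [0.1, 0.8, 0.7, 0.6]
top_idx = np.argsort(difference)[::-1][:pixels]    # indices of the k hardest pixels
keep = difference[top_idx] > (1 - threshold)       # still harder than 1 - threshold?
hard_true, hard_pred = y_true[top_idx][keep], y_pred[top_idx][keep]
print(hard_true, hard_pred)                        # [1. 0.] [0.2 0.7]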
Example #13
Source File: attn_utils.py    From Music-Transcription-with-Semantic-Segmentation with GNU General Public License v3.0
def gather_blocks_2d(x, indices):
    x_shape = K.shape(x)
    x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])])
    # [length, batch, heads, dim]
    x_t = K.permute_dimensions(x, [2, 0, 1, 3])
    x_new = K.gather(x_t, indices)
    # returns [batch, heads, num_blocks, block_length**2, dim]
    return K.permute_dimensions(x_new, [2, 3, 0, 1, 4]) 
Example #14
Source File: interaction_linear.py    From NNCF with MIT License
def call(self, x, mask=None):
        uid, vid = x[0], x[1]
        # regression = self.b_u[uid] + self.b_v[vid] + self.b_g
        regression = K.gather(self.b_u, uid) + K.gather(self.b_v, vid) + self.b_g
        regression = K.reshape(regression, (-1, 1))
        return regression 
Example #15
Source File: msra_resnet.py    From keras-CenterNet with Apache License 2.0
def evaluate_batch_item(batch_item_detections, num_classes, max_objects_per_class=20, max_objects=100,
                        iou_threshold=0.5, score_threshold=0.1):
    batch_item_detections = tf.boolean_mask(batch_item_detections,
                                            tf.greater(batch_item_detections[:, 4], score_threshold))
    detections_per_class = []
    for cls_id in range(num_classes):
        # (num_keep_this_class_boxes, 4): boxes of the current class whose score is greater than score_threshold
        class_detections = tf.boolean_mask(batch_item_detections, tf.equal(batch_item_detections[:, 5], cls_id))
        nms_keep_indices = tf.image.non_max_suppression(class_detections[:, :4],
                                                        class_detections[:, 4],
                                                        max_objects_per_class,
                                                        iou_threshold=iou_threshold)
        class_detections = K.gather(class_detections, nms_keep_indices)
        detections_per_class.append(class_detections)

    batch_item_detections = K.concatenate(detections_per_class, axis=0)

    def filter():
        nonlocal batch_item_detections
        _, indices = tf.nn.top_k(batch_item_detections[:, 4], k=max_objects)
        batch_item_detections_ = tf.gather(batch_item_detections, indices)
        return batch_item_detections_

    def pad():
        nonlocal batch_item_detections
        batch_item_num_detections = tf.shape(batch_item_detections)[0]
        batch_item_num_pad = tf.maximum(max_objects - batch_item_num_detections, 0)
        batch_item_detections_ = tf.pad(tensor=batch_item_detections,
                                        paddings=[
                                            [0, batch_item_num_pad],
                                            [0, 0]],
                                        mode='CONSTANT',
                                        constant_values=0.0)
        return batch_item_detections_

    batch_item_detections = tf.cond(tf.shape(batch_item_detections)[0] >= 100,
                                    filter,
                                    pad)
    return batch_item_detections 
Example #16
Source File: msra_resnet.py    From keras-CenterNet with Apache License 2.0
def decode(hm, wh, reg, max_objects=100, nms=True, num_classes=20, score_threshold=0.1):
    scores, indices, class_ids, xs, ys = topk(hm, max_objects=max_objects)
    b = tf.shape(hm)[0]
    # (b, h * w, 2)
    reg = tf.reshape(reg, (b, -1, tf.shape(reg)[-1]))
    # (b, h * w, 2)
    wh = tf.reshape(wh, (b, -1, tf.shape(wh)[-1]))
    # (b, k, 2)
    topk_reg = tf.gather(reg, indices, batch_dims=1)
    # (b, k, 2)
    topk_wh = tf.cast(tf.gather(wh, indices, batch_dims=1), tf.float32)
    topk_cx = tf.cast(tf.expand_dims(xs, axis=-1), tf.float32) + topk_reg[..., 0:1]
    topk_cy = tf.cast(tf.expand_dims(ys, axis=-1), tf.float32) + topk_reg[..., 1:2]
    scores = tf.expand_dims(scores, axis=-1)
    class_ids = tf.cast(tf.expand_dims(class_ids, axis=-1), tf.float32)
    topk_x1 = topk_cx - topk_wh[..., 0:1] / 2
    topk_x2 = topk_cx + topk_wh[..., 0:1] / 2
    topk_y1 = topk_cy - topk_wh[..., 1:2] / 2
    topk_y2 = topk_cy + topk_wh[..., 1:2] / 2
    # (b, k, 6)
    detections = tf.concat([topk_x1, topk_y1, topk_x2, topk_y2, scores, class_ids], axis=-1)
    if nms:
        detections = tf.map_fn(lambda x: evaluate_batch_item(x[0],
                                                             num_classes=num_classes,
                                                             score_threshold=score_threshold),
                               elems=[detections],
                               dtype=tf.float32)
    return detections 
Example #17
Source File: keras_yolo.py    From YOLO-Pi with Apache License 2.0
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes 
Example #18
Source File: Autonomous+driving+application+-+Car+detection+-+v1.py    From Coursera-Ng-Convolutional-Neural-Networks with MIT License
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    """
    Applies Non-max suppression (NMS) to set of boxes
    
    Arguments:
    scores -- tensor of shape (None,), output of yolo_filter_boxes()
    boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
    classes -- tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes -- integer, maximum number of predicted boxes you'd like
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering
    
    Returns:
    scores -- tensor of shape (, None), predicted score for each box
    boxes -- tensor of shape (4, None), predicted box coordinates
    classes -- tensor of shape (, None), predicted class for each box
    
    Note: The "None" dimension of the output tensors has obviously to be less than max_boxes. Note also that this
    function will transpose the shapes of scores, boxes, classes. This is made for convenience.
    """
    
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')     # tensor to be used in tf.image.non_max_suppression()
    K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor
    
    # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep
    ### START CODE HERE ### (≈ 1 line)
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold)
    ### END CODE HERE ###
    
    # Use K.gather() to select only nms_indices from scores, boxes and classes
    ### START CODE HERE ### (≈ 3 lines)
    scores = tf.gather(scores, nms_indices)
    boxes = tf.gather(boxes, nms_indices)
    classes = tf.gather(classes, nms_indices)
    ### END CODE HERE ###
    
    return scores, boxes, classes


# In[29]: 
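The core of the function is the NMS-then-gather pattern: tf.image.non_max_suppression returns the indices of the boxes to keep, and K.gather / tf.gather pulls out the matching rows of boxes, scores and classes. A standalone toy sketch, assuming TensorFlow 2.x eager execution (so no K.get_session() variable initialisation is needed):

import tensorflow as tf

boxes = tf.constant([[0., 0., 10., 10.],
                     [1., 1., 10., 10.],      # overlaps heavily with the first box
                     [20., 20., 30., 30.]])
scores = tf.constant([0.9, 0.8, 0.7])

nms_indices = tf.image.non_max_suppression(boxes, scores, max_output_size=10, iou_threshold=0.5)
kept_boxes = tf.gather(boxes, nms_indices)    # only the surviving boxes
kept_scores = tf.gather(scores, nms_indices)  # and their scores
print(nms_indices.numpy())                    # [0 2]: the overlapping second box is suppressed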
Example #19
Source File: utils.py    From Look-Into-Person with MIT License
def cross_entropy(y_true, y_pred):
    y_true = K.reshape(y_true, (-1, num_classes))
    y_pred = K.reshape(y_pred, (-1, num_classes))

    idx_max = K.argmax(y_true, axis=1)
    weights = K.gather(prior_factor, idx_max)
    weights = K.reshape(weights, (-1, 1))

    # multiply y_true by weights
    y_true = y_true * weights

    cross_ent = K.categorical_crossentropy(y_pred, y_true)
    cross_ent = K.mean(cross_ent, axis=-1)

    return cross_ent 
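Here K.gather acts as a per-pixel lookup table: the argmax class of each flattened pixel indexes into a vector of precomputed class-rebalancing weights (prior_factor, loaded elsewhere in the project). A sketch of the lookup alone, with a made-up weight vector standing in for prior_factor:

import numpy as np
from keras import backend as K

prior_factor = K.variable(np.array([0.5, 2.0, 1.0]))   # hypothetical per-class weights
y_true = K.variable(np.array([[0., 1., 0.],            # one-hot targets for 2 pixels
                              [0., 0., 1.]]))

idx_max = K.argmax(y_true, axis=1)                     # class index per pixel -> [1, 2]
weights = K.gather(prior_factor, idx_max)              # per-pixel weights -> [2. 1.]
print(K.eval(weights))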
Example #20
Source File: layers.py    From delft with Apache License 2.0
def path_energy0(y, x, U, mask=None):
    """Path energy without boundary potential handling."""
    n_classes = K.shape(x)[2]
    y_one_hot = K.one_hot(y, n_classes)

    # Tag path energy
    energy = K.sum(x * y_one_hot, 2)
    energy = K.sum(energy, 1)

    # Transition energy
    y_t = y[:, :-1]
    y_tp1 = y[:, 1:]
    U_flat = K.reshape(U, [-1])
    # Convert 2-dim indices (y_t, y_tp1) of U to 1-dim indices of U_flat:
    flat_indices = y_t * n_classes + y_tp1
    U_y_t_tp1 = K.gather(U_flat, flat_indices)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        y_t_mask = mask[:, :-1]
        y_tp1_mask = mask[:, 1:]
        U_y_t_tp1 *= y_t_mask * y_tp1_mask

    energy += K.sum(U_y_t_tp1, axis=1)

    return energy 
Example #21
Source File: losses.py    From keras-gp with MIT License
def gen_gp_loss(gp):
    """Generate an internal objective, `dlik_dh * H`, for a given GP layer.
    """
    def loss(_, H):
        dlik_dh_times_H = H * K.gather(gp.dlik_dh, gp.batch_ids[:gp.batch_sz])
        return K.sum(dlik_dh_times_H, axis=1, keepdims=True)
    return loss


# Aliases 
Example #22
Source File: keras_models.py    From fancyimpute with Apache License 2.0
def call(self, inputs):
        if K.dtype(inputs) != 'int32':
            inputs = K.cast(inputs, 'int32')
        # get the embeddings
        i = inputs[:, 0]  # by convention
        j = inputs[:, 1]
        i_embedding = K.gather(self.i_embedding, i)
        j_embedding = K.gather(self.j_embedding, j)
        # <i_embed, j_embed> + i_bias + j_bias + constant
        out = K.batch_dot(i_embedding, j_embedding, axes=[1, 1])
        if self.use_bias:
            i_bias = K.gather(self.i_bias, i)
            j_bias = K.gather(self.j_bias, j)
            out += (i_bias + j_bias + self.constant)
        return out 
Example #23
Source File: keras_models.py    From ME-Net with MIT License
def call(self, inputs):
        if K.dtype(inputs) != 'int32':
            inputs = K.cast(inputs, 'int32')
        # get the embeddings
        i = inputs[:, 0]  # by convention
        j = inputs[:, 1]
        i_embedding = K.gather(self.i_embedding, i)
        j_embedding = K.gather(self.j_embedding, j)
        # <i_embed, j_embed> + i_bias + j_bias + constant
        out = K.batch_dot(i_embedding, j_embedding, axes=[1, 1])
        if self.use_bias:
            i_bias = K.gather(self.i_bias, i)
            j_bias = K.gather(self.j_bias, j)
            out += (i_bias + j_bias + self.constant)
        return out 
Example #24
Source File: ChainCRF.py    From naacl18-multitask_argument_mining with Apache License 2.0
def path_energy0(y, x, U, mask=None):
    '''Path energy without boundary potential handling.'''
    n_classes = K.shape(x)[2]
    y_one_hot = K.one_hot(y, n_classes)

    # Tag path energy
    energy = K.sum(x * y_one_hot, 2)
    energy = K.sum(energy, 1)

    # Transition energy
    y_t = y[:, :-1]
    y_tp1 = y[:, 1:]
    U_flat = K.reshape(U, [-1])
    # Convert 2-dim indices (y_t, y_tp1) of U to 1-dim indices of U_flat:
    flat_indices = y_t * n_classes + y_tp1
    U_y_t_tp1 = K.gather(U_flat, flat_indices)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        y_t_mask = mask[:, :-1]
        y_tp1_mask = mask[:, 1:]
        U_y_t_tp1 *= y_t_mask * y_tp1_mask

    energy += K.sum(U_y_t_tp1, axis=1)

    return energy