Python numpy.maximum() Examples

The following are 30 code examples of numpy.maximum(). Each example notes the original project and source file it was taken from. You may also want to check out all available functions/classes of the numpy module, or try the search function.
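Before the examples, a minimal sketch (assuming numpy is imported as np) of what numpy.maximum() computes: the element-wise maximum of its inputs, with scalars broadcast.

import numpy as np

a = np.array([1.0, -2.0, 3.0])
b = np.array([0.5,  4.0, 3.0])
print(np.maximum(a, b))    # [1. 4. 3.]  element-wise maximum of two arrays
print(np.maximum(a, 0.0))  # [1. 0. 3.]  scalar is broadcast; a common clamp/ReLU idiom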
Example #1
Source File: gradcam.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        if ReluOp.guided_backprop:
            # Get output and gradients of output
            y = out_data[0]
            dy = out_grad[0]
            # Zero out the negatives in the gradients of the output
            dy_positives = nd.maximum(dy, nd.zeros_like(dy))
            # What output values were greater than 0?
            y_ones = y.__gt__(0)
            # Mask out the values for which at least one of dy or y is negative
            dx = dy_positives * y_ones
            self.assign(in_grad[0], req[0], dx)
        else:
            # Regular backward for ReLU
            x = in_data[0]
            x_gt_zero = x.__gt__(0)
            dx = out_grad[0] * x_gt_zero
            self.assign(in_grad[0], req[0], dx) 
Example #2
Source File: map_utils.py    From DOTA_models with Apache License 2.0
def resize_maps(map, map_scales, resize_method):
  scaled_maps = []
  for i, sc in enumerate(map_scales):
    if resize_method == 'antialiasing':
      # Resize using open cv so that we can compute the size.
      # Use PIL resize to use anti aliasing feature.
      map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
      w = map_.shape[1]; h = map_.shape[0]

      map_img = PIL.Image.fromarray((map*255).astype(np.uint8))
      map__img = map_img.resize((w,h), PIL.Image.ANTIALIAS)
      map_ = np.asarray(map__img).astype(np.float32)
      map_ = map_/255.
      map_ = np.minimum(map_, 1.0)
      map_ = np.maximum(map_, 0.0)
    elif resize_method == 'linear_noantialiasing':
      map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
    else:
      logging.error('Unknown resizing method')
    scaled_maps.append(map_)
  return scaled_maps 
Example #3
Source File: test_utils.py    From DOTA_models with Apache License 2.0
def create_random_boxes(num_boxes, max_height, max_width):
  """Creates random bounding boxes of specific maximum height and width.

  Args:
    num_boxes: number of boxes.
    max_height: maximum height of boxes.
    max_width: maximum width of boxes.

  Returns:
    boxes: numpy array of shape [num_boxes, 4]. Each row is in form
        [y_min, x_min, y_max, x_max].
  """

  y_1 = np.random.uniform(size=(1, num_boxes)) * max_height
  y_2 = np.random.uniform(size=(1, num_boxes)) * max_height
  x_1 = np.random.uniform(size=(1, num_boxes)) * max_width
  x_2 = np.random.uniform(size=(1, num_boxes)) * max_width

  boxes = np.zeros(shape=(num_boxes, 4))
  boxes[:, 0] = np.minimum(y_1, y_2)
  boxes[:, 1] = np.minimum(x_1, x_2)
  boxes[:, 2] = np.maximum(y_1, y_2)
  boxes[:, 3] = np.maximum(x_1, x_2)

  return boxes.astype(np.float32) 
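A quick usage sketch for the function above (the box count and bounds are illustrative): the paired np.minimum/np.maximum calls guarantee each box is well-formed no matter which random draw was larger.

import numpy as np

boxes = create_random_boxes(num_boxes=5, max_height=100, max_width=200)
assert np.all(boxes[:, 0] <= boxes[:, 2])  # y_min <= y_max by construction
assert np.all(boxes[:, 1] <= boxes[:, 3])  # x_min <= x_max by construction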
Example #4
Source File: moving_mnist.py    From DDPAE-video-prediction with MIT License
def generate_moving_mnist(self, num_digits=2):
    '''
    Get random trajectories for the digits and generate a video.
    '''
    data = np.zeros((self.n_frames_total, self.image_size_, self.image_size_), dtype=np.float32)
    for n in range(num_digits):
      # Trajectory
      start_y, start_x = self.get_random_trajectory(self.n_frames_total)
      ind = random.randint(0, self.mnist.shape[0] - 1)
      digit_image = self.mnist[ind]
      for i in range(self.n_frames_total):
        top    = start_y[i]
        left   = start_x[i]
        bottom = top + self.digit_size_
        right  = left + self.digit_size_
        # Draw digit
        data[i, top:bottom, left:right] = np.maximum(data[i, top:bottom, left:right], digit_image)

    data = data[..., np.newaxis]
    return data 
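The np.maximum call above composites each digit onto the frame so that overlapping digits blend instead of erasing each other. A standalone sketch of that overlay pattern (toy sizes, not the real MNIST shapes):

import numpy as np

frame = np.zeros((4, 4), dtype=np.float32)
digit = np.full((2, 2), 0.8, dtype=np.float32)
# Keep the brighter of the existing pixel and the digit pixel at each location.
frame[1:3, 1:3] = np.maximum(frame[1:3, 1:3], digit)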
Example #5
Source File: np_box_ops.py    From DOTA_models with Apache License 2.0
def intersection(boxes1, boxes2):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxes1: a numpy array with shape [N, 4] holding N boxes
    boxes2: a numpy array with shape [M, 4] holding M boxes

  Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas
  """
  [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
  [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)

  all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
  all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
  intersect_heights = np.maximum(
      np.zeros(all_pairs_max_ymin.shape),
      all_pairs_min_ymax - all_pairs_max_ymin)
  all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
  all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
  intersect_widths = np.maximum(
      np.zeros(all_pairs_max_xmin.shape),
      all_pairs_min_xmax - all_pairs_max_xmin)
  return intersect_heights * intersect_widths 
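A small usage sketch for intersection() above (box coordinates chosen only for illustration):

import numpy as np

boxes1 = np.array([[0., 0., 2., 2.]])           # one box, [y_min, x_min, y_max, x_max]
boxes2 = np.array([[1., 1., 3., 3.],            # overlaps boxes1 in a 1x1 region
                   [5., 5., 6., 6.]])           # no overlap
print(intersection(boxes1, boxes2))             # expected: [[1. 0.]]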
Example #6
Source File: face_attack.py    From Adversarial-Face-Attack with GNU General Public License v3.0
def detect(self, img):
        """
        img: rgb 3 channel
        """
        minsize = 20  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
        factor = 0.709  # scale factor

        bounding_boxes, _ = FaceDet.detect_face(
                img, minsize, self.pnet, self.rnet, self.onet, threshold, factor)
        area = (bounding_boxes[:, 2] - bounding_boxes[:, 0]) * (bounding_boxes[:, 3] - bounding_boxes[:, 1])
        face_idx = area.argmax()
        bbox = bounding_boxes[face_idx][:4]  # xy,xy

        margin = 32
        x0 = np.maximum(bbox[0] - margin // 2, 0)
        y0 = np.maximum(bbox[1] - margin // 2, 0)
        x1 = np.minimum(bbox[2] + margin // 2, img.shape[1])
        y1 = np.minimum(bbox[3] + margin // 2, img.shape[0])
        x0, y0, x1, y1 = bbox = [int(k + 0.5) for k in [x0, y0, x1, y1]]
        cropped = img[y0:y1, x0:x1, :]
        scaled = cv2.resize(cropped, (160, 160), interpolation=cv2.INTER_LINEAR)
        return scaled, bbox 
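Here np.maximum/np.minimum clamp the margin-expanded box to the image boundary. A minimal sketch of that clamping step (the bbox, margin, and image size below are made up for illustration):

import numpy as np

bbox = np.array([-5., 10., 90., 130.])     # x0, y0, x1, y1
h, w = 100, 120                            # image height and width
margin = 32
x0 = np.maximum(bbox[0] - margin // 2, 0)  # left/top edges never go below 0
y0 = np.maximum(bbox[1] - margin // 2, 0)
x1 = np.minimum(bbox[2] + margin // 2, w)  # right/bottom edges never exceed the image
y1 = np.minimum(bbox[3] + margin // 2, h)
print(x0, y0, x1, y1)                      # 0.0 0.0 106.0 100.0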
Example #7
Source File: structures.py    From mmdetection with Apache License 2.0
def crop(self, bbox):
        """See :func:`BaseInstanceMasks.crop`."""
        assert isinstance(bbox, np.ndarray)
        assert bbox.ndim == 1

        # clip the boundary
        bbox = bbox.copy()
        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
        x1, y1, x2, y2 = bbox
        w = np.maximum(x2 - x1, 1)
        h = np.maximum(y2 - y1, 1)

        if len(self.masks) == 0:
            cropped_masks = np.empty((0, h, w), dtype=np.uint8)
        else:
            cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
        return BitmapMasks(cropped_masks, h, w) 
Example #8
Source File: test_quantization.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_quantize_float32_to_int8():
    shape = rand_shape_nd(4)
    data = rand_ndarray(shape, 'default', dtype='float32')
    min_range = mx.nd.min(data)
    max_range = mx.nd.max(data)
    qdata, min_val, max_val = mx.nd.contrib.quantize(data, min_range, max_range, out_type='int8')
    data_np = data.asnumpy()
    min_range = min_range.asscalar()
    max_range = max_range.asscalar()
    real_range = np.maximum(np.abs(min_range), np.abs(max_range))
    quantized_range = 127.0
    scale = quantized_range / real_range
    assert qdata.dtype == np.int8
    assert min_val.dtype == np.float32
    assert max_val.dtype == np.float32
    assert same(min_val.asscalar(), -real_range)
    assert same(max_val.asscalar(), real_range)
    qdata_np = (np.sign(data_np) * np.minimum(np.abs(data_np) * scale + 0.5, quantized_range)).astype(np.int8)
    assert_almost_equal(qdata.asnumpy(), qdata_np, atol = 1) 
Example #9
Source File: attacks_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
    """
    TensorFlow implementation for applying perturbations to input features based
    on saliency maps
    :param i: index of first selected feature
    :param j: index of second selected feature
    :param X: a matrix containing our input features for our sample
    :param increase: boolean; true if we are increasing pixels, false otherwise
    :param theta: delta for each feature adjustment
    :param clip_min: minimum value for a feature in our sample
    :param clip_max: maximum value for a feature in our sample
    :return: a perturbed input feature matrix for a target class
    """

    # perturb our input sample
    if increase:
        X[0, i] = np.minimum(clip_max, X[0, i] + theta)
        X[0, j] = np.minimum(clip_max, X[0, j] + theta)
    else:
        X[0, i] = np.maximum(clip_min, X[0, i] - theta)
        X[0, j] = np.maximum(clip_min, X[0, j] - theta)

    return X 
Example #10
Source File: pascal_voc.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def voc_ap(rec, prec, use_07_metric=False):
        if use_07_metric:
            ap = 0.
            for t in np.arange(0., 1.1, 0.1):
                if np.sum(rec >= t) == 0:
                    p = 0
                else:
                    p = np.max(prec[rec >= t])
                ap += p / 11.
        else:
            # append sentinel values at both ends
            mrec = np.concatenate(([0.], rec, [1.]))
            mpre = np.concatenate(([0.], prec, [0.]))

            # compute precision integration ladder
            for i in range(mpre.size - 1, 0, -1):
                mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

            # look for recall value changes
            i = np.where(mrec[1:] != mrec[:-1])[0]

            # sum (\delta recall) * prec
            ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
        return ap 
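The backward loop above is a right-to-left running maximum over the precision values. A vectorized sketch of the same step (not part of the original code) uses np.maximum.accumulate:

import numpy as np

mpre = np.array([0., .5, .9, .4, .7, 0.])
envelope = np.maximum.accumulate(mpre[::-1])[::-1]  # same result as the backward loop
print(envelope)  # [0.9 0.9 0.9 0.7 0.7 0. ]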
Example #11
Source File: enjoy_TF_InvertedPendulumBulletEnv_v0_2017may.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def relu(x):
    return np.maximum(x, 0) 
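A quick check of the one-liner above: negatives are clamped to zero and non-negative values pass through unchanged.

import numpy as np

x = np.array([-1.5, 0.0, 2.0])
print(relu(x))  # [0. 0. 2.]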
Example #12
Source File: test.py    From cvpr2018-hnd with MIT License
def count_test(p, counters, preds, labels, T, hierarchical_measure=False):

    label_hnd = T['label_hnd']
    
    if hierarchical_measure:
        HP_mat = T['HP_mat']
        HF_mat = T['HF_mat']
        dist_mat = T['dist_mat']
    
    for l in np.unique(labels.cpu().numpy()):
        preds_l = preds[(labels == int(l)).cpu().numpy().astype(bool)]
        acc = np.zeros_like(preds_l, dtype=bool)
        if hierarchical_measure:
            HE = MAX_DIST*np.ones_like(preds_l, dtype=int)
            HP, HR, HF = np.zeros_like(preds_l), np.zeros_like(preds_l), np.zeros_like(preds_l)
        for c in label_hnd[l]:
            acc |= (preds_l == c)
            if hierarchical_measure:
                HE = np.minimum(HE, dist_mat[preds_l, c])
                HP = np.maximum(HP, HP_mat[preds_l, c])
                HR = np.maximum(HR, HP_mat[c, preds_l])
                HF = np.maximum(HF, HF_mat[preds_l, c])
        
        if p == 0: counters['data'][l] += preds_l.shape[0]
        counters['acc'][p,l] += acc.sum()
        if hierarchical_measure:
            counters['HE'][p,l] += HE.sum()
            counters['HP'][p,l] += HP.sum()
            counters['HR'][p,l] += HR.sum()
            counters['HF'][p,l] += HF.sum() 
Example #13
Source File: utils.py    From DOTA_models with Apache License 2.0
def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline."""
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    ## index for dets
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]

    return keep 
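A usage sketch for py_cpu_nms() above, with made-up detections: two heavily overlapping boxes and one far away.

import numpy as np

dets = np.array([[10., 10., 50., 50., 0.9],     # [x1, y1, x2, y2, score]
                 [12., 12., 52., 52., 0.8],     # near-duplicate of the first box
                 [100., 100., 140., 140., 0.7]])
print(py_cpu_nms(dets, thresh=0.5))             # expected: [0, 2]; the duplicate is suppressed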
Example #14
Source File: enjoy_TF_HalfCheetahBulletEnv_v0_2017may.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def relu(x):
    return np.maximum(x, 0) 
Example #15
Source File: enjoy_TF_Walker2DBulletEnv_v0_2017may.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def relu(x):
    return np.maximum(x, 0) 
Example #16
Source File: enjoy_TF_InvertedDoublePendulumBulletEnv_v0_2017may.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def relu(x):
    return np.maximum(x, 0) 
Example #17
Source File: common.py    From numpynet with BSD 3-Clause "New" or "Revised" License
def _relu(x, deriv=False):
        """
        Rectified linear unit activation function
        """
        if deriv:
            return 1.0 * (x >= 0)
        return np.maximum(x, 0) 
Example #18
Source File: utils.py    From GST-Tacotron with MIT License
def griffin_lim(spectrogram):
    '''Applies the Griffin-Lim algorithm to recover a waveform from a magnitude spectrogram.
    '''
    X_best = copy.deepcopy(spectrogram)
    for i in range(hp.n_iter):
        X_t = invert_spectrogram(X_best)
        est = librosa.stft(X_t, hp.n_fft, hp.hop_length, win_length=hp.win_length)
        phase = est / np.maximum(1e-8, np.abs(est))
        X_best = spectrogram * phase
    X_t = invert_spectrogram(X_best)
    y = np.real(X_t)

    return y 
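The np.maximum(1e-8, ...) above floors the magnitude before dividing, so zero bins do not produce 0/0. A tiny standalone sketch of that normalization:

import numpy as np

est = np.array([3 + 4j, 0 + 0j, 1 + 0j])
phase = est / np.maximum(1e-8, np.abs(est))  # unit-magnitude phase; the floor avoids division by zero
print(np.abs(phase))                         # [1. 0. 1.]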
Example #19
Source File: kitti_common.py    From kitti-object-eval-python with MIT License
def intersection(boxes1, boxes2, add1=False):
    """Compute pairwise intersection areas between boxes.

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes
        boxes2: a numpy array with shape [M, 4] holding M boxes
        add1: if True, add 1.0 to the intersection widths and heights
            (pixel-inclusive box convention)

    Returns:
        a numpy array with shape [N, M] representing pairwise intersection areas
    """
    [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
    [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)

    all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
    all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
    if add1:
        all_pairs_min_ymax += 1.0
    intersect_heights = np.maximum(
        np.zeros(all_pairs_max_ymin.shape),
        all_pairs_min_ymax - all_pairs_max_ymin)

    all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
    all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
    if add1:
        all_pairs_min_xmax += 1.0
    intersect_widths = np.maximum(
        np.zeros(all_pairs_max_xmin.shape),
        all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths 
Example #20
Source File: test_executor.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_bind():
    def check_bind(disable_bulk_exec):
        if disable_bulk_exec:
            prev_bulk_inf_val = mx.test_utils.set_env_var("MXNET_EXEC_BULK_EXEC_INFERENCE", "0", "1")
            prev_bulk_train_val = mx.test_utils.set_env_var("MXNET_EXEC_BULK_EXEC_TRAIN", "0", "1")

        nrepeat = 10
        maxdim = 4
        for repeat in range(nrepeat):
            for dim in range(1, maxdim):
                check_bind_with_uniform(lambda x, y: x + y,
                                        lambda g, x, y: (g, g),
                                        dim)
                check_bind_with_uniform(lambda x, y: x - y,
                                        lambda g, x, y: (g, -g),
                                        dim)
                check_bind_with_uniform(lambda x, y: x * y,
                                        lambda g, x, y: (y * g, x * g),
                                        dim)
                check_bind_with_uniform(lambda x, y: x / y,
                                        lambda g, x, y: (g / y, -x * g/ (y**2)),
                                        dim)

                check_bind_with_uniform(lambda x, y: np.maximum(x, y),
                                        lambda g, x, y: (g * (x>=y), g * (y>x)),
                                        dim,
                                        sf=mx.symbol.maximum)
                check_bind_with_uniform(lambda x, y: np.minimum(x, y),
                                        lambda g, x, y: (g * (x<=y), g * (y<x)),
                                        dim,
                                        sf=mx.symbol.minimum)
        if disable_bulk_exec:
           mx.test_utils.set_env_var("MXNET_EXEC_BULK_EXEC_INFERENCE", prev_bulk_inf_val)
           mx.test_utils.set_env_var("MXNET_EXEC_BULK_EXEC_TRAIN", prev_bulk_train_val)

    check_bind(True)
    check_bind(False)


# @roywei: Removing fixed seed as flakiness in this test is fixed
# tracked at https://github.com/apache/incubator-mxnet/issues/11686 
Example #21
Source File: gym_problems.py    From fine-lm with MIT License
def collect_statistics_and_generate_debug_image(self, index,
                                                  observation,
                                                  reward, done, action):
    stat = self.statistics

    # TODO(piotrmilos): possibly make the same behaviour as
    # in the BasicStatistics
    stat.sum_of_rewards += reward
    stat.episode_sim_reward += reward

    ob = np.ndarray.astype(observation, np.int)
    err = np.ndarray.astype(
        np.maximum(np.abs(stat.real_ob - ob, dtype=np.int) - 10, 0), np.uint8)
    debug_im = np.concatenate([observation, stat.real_ob, err], axis=1)

    assert (self._internal_memory_size == self.num_testing_steps and
            self._internal_memory_force_beginning_resets), (
                "The collect memory should be set in force_beginning_resets "
                "mode for the code below to work properly.")

    if (index+1) % self._internal_memory_size == 0:

      if stat.episode_sim_reward == stat.episode_real_reward:
        stat.successful_episode_reward_predictions += 1
        stat.episode_sim_reward = 0.0
        stat.episode_real_reward = 0.0

      stat.number_of_dones += 1
      self._reset_real_env()
    else:
      stat.real_ob, real_reward, _, _ = stat.real_env.step(action)
      stat.episode_real_reward += real_reward

    return debug_im 
Example #22
Source File: test_image.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def _generate_objects():
    num = np.random.randint(1, 10)
    xy = np.random.rand(num, 2)
    wh = np.random.rand(num, 2) / 2
    left = (xy[:, 0] - wh[:, 0])[:, np.newaxis]
    right = (xy[:, 0] + wh[:, 0])[:, np.newaxis]
    top = (xy[:, 1] - wh[:, 1])[:, np.newaxis]
    bot = (xy[:, 1] + wh[:, 1])[:, np.newaxis]
    boxes = np.maximum(0., np.minimum(1., np.hstack((left, top, right, bot))))
    cid = np.random.randint(0, 20, size=num)
    label = np.hstack((cid[:, np.newaxis], boxes)).ravel().tolist()
    return [2, 5] + label 
Example #23
Source File: detection.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def _intersect(self, label, xmin, ymin, xmax, ymax):
        """Calculate intersect areas, normalized."""
        left = np.maximum(label[:, 0], xmin)
        right = np.minimum(label[:, 2], xmax)
        top = np.maximum(label[:, 1], ymin)
        bot = np.minimum(label[:, 3], ymax)
        invalid = np.where(np.logical_or(left >= right, top >= bot))[0]
        out = label.copy()
        out[:, 0] = left
        out[:, 1] = top
        out[:, 2] = right
        out[:, 3] = bot
        out[invalid, :] = 0
        return out 
Example #24
Source File: detection.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def _calculate_areas(self, label):
        """Calculate areas for multiple labels"""
        heights = np.maximum(0, label[:, 3] - label[:, 1])
        widths = np.maximum(0, label[:, 2] - label[:, 0])
        return heights * widths 
Example #25
Source File: voc_eval.py    From cascade-rcnn_Pytorch with MIT License
def voc_ap(rec, prec, use_07_metric=False):
  """ ap = voc_ap(rec, prec, [use_07_metric])
  Compute VOC AP given precision and recall.
  If use_07_metric is true, uses the
  VOC 07 11 point method (default:False).
  """
  if use_07_metric:
    # 11 point metric
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
      if np.sum(rec >= t) == 0:
        p = 0
      else:
        p = np.max(prec[rec >= t])
      ap = ap + p / 11.
  else:
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
      mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
  return ap 
Example #26
Source File: bbox.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def nms(dets, thresh):
    """
    Greedily select boxes with high confidence, keeping only boxes whose overlap
    with the current maximum is <= thresh and ruling out boxes with higher overlap.
    :param dets: [[x1, y1, x2, y2 score]]
    :param thresh: retain overlap < thresh
    :return: indexes to keep
    """
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]

    return keep 
Example #27
Source File: eval_voc.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def voc_ap(rec, prec, use_07_metric=False):
    """
    average precision calculations
    [precision integrated to recall]
    :param rec: recall
    :param prec: precision
    :param use_07_metric: 2007 metric is 11-recall-point based AP
    :return: average precision
    """
    if use_07_metric:
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap += p / 11.
    else:
        # append sentinel values at both ends
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute precision integration ladder
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # look for recall value changes
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # sum (\delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap 
Example #28
Source File: eval_metric.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def _average_precision(self, rec, prec):
        """
        calculate average precision

        Params:
        ----------
        rec : numpy.array
            cumulated recall
        prec : numpy.array
            cumulated precision
        Returns:
        ----------
        ap as float
        """
        # append sentinel values at both ends
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute precision integration ladder
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # look for recall value changes
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # sum (\delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
        return ap 
Example #29
Source File: nn.py    From Kaggler with MIT License
def sigm(x):
    """Return the value of the sigmoid function at x.

    Args:
        x (np.array of float or float)

    Returns:
        value(s) of the sigmoid function for x.
    """

    # Avoid numerical overflow by capping the input to the exponential
    # function - doesn't affect the return value.
    return 1 / (1 + np.exp(-np.maximum(x, -20))) 
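The np.maximum(x, -20) cap keeps np.exp from overflowing for very negative inputs while leaving ordinary values effectively untouched. A quick check (values are illustrative):

import numpy as np

x = np.array([-1000.0, 0.0, 10.0])
print(sigm(x))  # approx [2.06e-09 5.00e-01 9.99e-01]; no overflow warning for the large negative input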
Example #30
Source File: utils.py    From pruning_yolov3 with GNU General Public License v3.0
def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap