Python numpy.ndarray() Examples

The following code examples show how to use numpy.ndarray(). They are taken from open-source Python projects.

You may also check out all available functions and classes of the numpy module, or try the search function.
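For orientation, here is a minimal sketch of calling the numpy.ndarray constructor directly. Note that the constructor returns an uninitialized array; np.zeros, np.empty or np.array are usually preferred, and most of the examples below use np.ndarray either as the type in an isinstance check or as a preallocated buffer.

import numpy as np

# Low-level constructor: allocates an uninitialized 2 x 3 float32 array.
a = np.ndarray((2, 3), dtype=np.float32)
a[:] = 0                                  # contents are arbitrary until filled

b = np.zeros((2, 3), dtype=np.float32)    # more idiomatic equivalents
c = np.empty((2, 3), dtype=np.float32)

print(a.shape, a.dtype)                   # (2, 3) float32
print(isinstance(b, np.ndarray))          # True -- the usual type check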

Example 1
Project: vergeml   Author: mme   File: cache.py    License: MIT License
def _serialize_data(self, data):

        # Default to raw bytes
        type_ = _BYTES

        if isinstance(data, np.ndarray):
            # When the data is a numpy array, use the more compact native
            # numpy format.
            buf = io.BytesIO()
            np.save(buf, data)
            data = buf.getvalue()
            type_ = _NUMPY

        elif not isinstance(data, (bytearray, bytes)):
            # Everything else except byte data is serialized in pickle format.
            data = pickle.dumps(data)
            type_ = _PICKLE

        if self.compress:
            # Optional compression
            data = lz4.frame.compress(data)

        return type_, data 
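A hypothetical deserialization counterpart (not shown in the vergeml source above; the tag constants below are placeholders standing in for the ones defined in cache.py) would simply dispatch on the stored type tag:

import io
import pickle
import numpy as np

_BYTES, _NUMPY, _PICKLE = 0, 1, 2   # placeholder tags, assumed to match cache.py

def _deserialize_data(type_, data, compressed=False):
    # Sketch of the inverse of _serialize_data above.
    if compressed:
        import lz4.frame
        data = lz4.frame.decompress(data)
    if type_ == _NUMPY:
        return np.load(io.BytesIO(data), allow_pickle=False)
    if type_ == _PICKLE:
        return pickle.loads(data)
    return data   # raw bytes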
Example 2
Project: DDPAE-video-prediction   Author: jthsieh   File: video_transforms.py    License: MIT License
def __call__(self, video):
    """
    Args:
        video (numpy.ndarray): Video to be scaled.
    Returns:
        numpy.ndarray: Rescaled video.
    """
    if isinstance(self.size, int):
      w, h = video.shape[-2], video.shape[-3]
      if (w <= h and w == self.size) or (h <= w and h == self.size):
        return video
      if w < h:
        ow = self.size
        oh = int(self.size*h/w)
        return resize(video, (ow, oh), self.interpolation)
      else:
        oh = self.size
        ow = int(self.size*w/h)
        return resize(video, (ow, oh), self.interpolation)
    else:
      return resize(video, self.size, self.interpolation) 
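To make the shorter-side logic above concrete: with size=64 and frames of height 128 and width 96, the width maps to 64 and the height scales proportionally. The same arithmetic in isolation:

# Shorter-side resize arithmetic from __call__ above, in isolation.
size = 64
w, h = 96, 128                       # frame width / height
if w < h:
    ow, oh = size, int(size * h / w)
else:
    oh, ow = size, int(size * w / h)
print(ow, oh)                        # 64 85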
Example 3
Project: DDPAE-video-prediction   Author: jthsieh   File: video_transforms.py    License: MIT License
def __call__(self, video):
    """
    Args:
        video (np.ndarray): Video to be cropped.
    Returns:
        np.ndarray: Cropped video.
    """
    if self.padding > 0:
      pad = Pad(self.padding, 0)
      video = pad(video)

    w, h = video.shape[-2], video.shape[-3]
    th, tw = self.size
    if w == tw and h == th:
      return video

    x1 = random.randint(0, w-tw)
    y1 = random.randint(0, h-th)
    return video[..., y1:y1+th, x1:x1+tw, :] 
Example 4
Project: kaldi-python-io   Author: funcwj   File: _io_kernel.py    License: Apache License 2.0
def read_common_mat(fd):
    """ 
        Read common matrix (for class Matrix in the kaldi setup)
        see matrix/kaldi-matrix.cc::
            void Matrix<Real>::Read(std::istream & is, bool binary, bool add)
        Return a numpy ndarray object
    """
    mat_type = read_token(fd)
    print_info(f'\tType of the common matrix: {mat_type}')
    if mat_type not in ["FM", "DM"]:
        raise RuntimeError(f"Unknown matrix type in kaldi: {mat_type}")
    float_size = 4 if mat_type == 'FM' else 8
    float_type = np.float32 if mat_type == 'FM' else np.float64
    num_rows = read_int32(fd)
    num_cols = read_int32(fd)
    print_info(f'\tSize of the common matrix: {num_rows} x {num_cols}')
    mat_data = fd.read(float_size * num_cols * num_rows)
    mat = np.fromstring(mat_data, dtype=float_type)
    return mat.reshape(num_rows, num_cols) 
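As an aside, np.fromstring on binary data is deprecated in recent NumPy releases; the same bytes-to-matrix step can be written with np.frombuffer (a standalone sketch, not part of kaldi-python-io):

import numpy as np

raw = np.arange(6, dtype=np.float32).tobytes()       # stand-in for fd.read(...)
mat = np.frombuffer(raw, dtype=np.float32).reshape(2, 3)
print(mat)   # note: frombuffer returns a read-only view of the buffer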
Example 5
Project: kaldi-python-io   Author: funcwj   File: _io_kernel.py    License: Apache License 2.0
def read_compress_mat(fd):
    """ 
        Reference to function Read in CompressMatrix
        Return a numpy ndarray object
    """
    cps_type = read_token(fd)
    print_info(f'\tFollowing matrix type: {cps_type}')
    head = struct.unpack('ffii', fd.read(16))
    print_info(f'\tCompress matrix header: {head}')
    # 8: sizeof PerColHeader
    # head: {min_value, range, num_rows, num_cols}
    num_rows, num_cols = head[2], head[3]
    if cps_type == 'CM':
        remain_size = num_cols * (8 + num_rows)
    elif cps_type == 'CM2':
        remain_size = 2 * num_rows * num_cols
    elif cps_type == 'CM3':
        remain_size = num_rows * num_cols
    else:
        throw_on_error(False, f'Unknown matrix compressing type: {cps_type}')
    # now uncompress it
    compress_data = fd.read(remain_size)
    mat = uncompress(compress_data, cps_type, head)
    return mat 
Example 6
Project: dustmaps   Author: gregreen   File: json_serializers.py    License: GNU General Public License v2.0
def serialize_ndarray_b64(o):
    """
    Serializes a :obj:`numpy.ndarray` in a format where the datatype and shape are
    human-readable, but the array data itself is base64-encoded.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    if o.flags['C_CONTIGUOUS']:
        o_data = o.data
    else:
        o_data = np.ascontiguousarray(o).data
    data_b64 = base64.b64encode(o_data)
    return dict(
        _type='np.ndarray',
        data=data_b64.decode('utf-8'),
        dtype=o.dtype,
        shape=o.shape) 
Example 7
Project: dustmaps   Author: gregreen   File: json_serializers.py    License: GNU General Public License v2.0
def serialize_ndarray_npy(o):
    """
    Serializes a :obj:`numpy.ndarray` using numpy's built-in :obj:`save` function.
    This produces totally unreadable (and very un-JSON-like) results (in "npy"
    format), but it's basically guaranteed to work in 100% of cases.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    with io.BytesIO() as f:
        np.save(f, o)
        f.seek(0)
        serialized = json.dumps(f.read().decode('latin-1'))
    return dict(
        _type='np.ndarray',
        npy=serialized) 
Example 8
Project: dustmaps   Author: gregreen   File: json_serializers.py    License: GNU General Public License v2.0
def deserialize_ndarray_npy(d):
    """
    Deserializes a JSONified :obj:`numpy.ndarray` that was created using numpy's
    :obj:`save` function.

    Args:
        d (:obj:`dict`): A dictionary representation of an :obj:`ndarray` object, created
            using :obj:`numpy.save`.

    Returns:
        An :obj:`ndarray` object.
    """
    with io.BytesIO() as f:
        f.write(json.loads(d['npy']).encode('latin-1'))
        f.seek(0)
        return np.load(f) 
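Assuming the two helpers above (and their io/json/numpy imports) are in scope, a round trip looks roughly like this:

import numpy as np

arr = np.arange(12, dtype=np.float64).reshape(3, 4)
d = serialize_ndarray_npy(arr)           # {'_type': 'np.ndarray', 'npy': '...'}
restored = deserialize_ndarray_npy(d)
assert np.array_equal(arr, restored)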
Example 9
Project: mmdetection   Author: open-mmlab   File: coco.py    License: Apache License 2.0
def xyxy2xywh(self, bbox):
        """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
        evaluation.

        Args:
            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
                ``xyxy`` order.

        Returns:
            list[float]: The converted bounding boxes, in ``xywh`` order.
        """

        _bbox = bbox.tolist()
        return [
            _bbox[0],
            _bbox[1],
            _bbox[2] - _bbox[0],
            _bbox[3] - _bbox[1],
        ] 
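For instance, the xyxy box [10, 20, 30, 60] becomes [10, 20, 20, 40] in xywh form; the same arithmetic in isolation:

import numpy as np

bbox = np.array([10., 20., 30., 60.])    # x1, y1, x2, y2
x1, y1, x2, y2 = bbox.tolist()
print([x1, y1, x2 - x1, y2 - y1])        # [10.0, 20.0, 20.0, 40.0]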
Example 10
Project: mmdetection   Author: open-mmlab   File: formating.py    License: Apache License 2.0
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
            be converted.
    """

    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        raise TypeError(f'type {type(data)} cannot be converted to tensor.') 
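Assuming torch and mmcv are installed and to_tensor is imported, the supported input types behave as follows (a usage sketch, not taken from the mmdetection docs):

import numpy as np
import torch

to_tensor(np.ones((2, 2)))     # torch.Tensor via from_numpy (shares memory)
to_tensor([1, 2, 3])           # tensor([1, 2, 3])
to_tensor(5)                   # tensor([5])      (LongTensor)
to_tensor(0.5)                 # tensor([0.5000]) (FloatTensor)
to_tensor(torch.zeros(3))      # returned unchanged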
Example 11
Project: mmdetection   Author: open-mmlab   File: recall.py    License: Apache License 2.0
def plot_iou_recall(recalls, iou_thrs):
    """Plot IoU-Recalls curve.

    Args:
        recalls(ndarray or list): shape (k,)
        iou_thrs(ndarray or list): same shape as `recalls`
    """
    if isinstance(iou_thrs, np.ndarray):
        _iou_thrs = iou_thrs.tolist()
    else:
        _iou_thrs = iou_thrs
    if isinstance(recalls, np.ndarray):
        _recalls = recalls.tolist()
    else:
        _recalls = recalls

    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.plot(_iou_thrs + [1.0], _recalls + [0.])
    plt.xlabel('IoU')
    plt.ylabel('Recall')
    plt.axis([iou_thrs.min(), 1, 0, 1])
    f.show() 
Example 12
Project: mmdetection   Author: open-mmlab   File: mean_ap.py    License: Apache License 2.0
def get_cls_results(det_results, annotations, class_id):
    """Get det results and gt information of a certain class.

    Args:
        det_results (list[list]): Same as `eval_map()`.
        annotations (list[dict]): Same as `eval_map()`.
        class_id (int): ID of a specific class.

    Returns:
        tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
    """
    cls_dets = [img_res[class_id] for img_res in det_results]
    cls_gts = []
    cls_gts_ignore = []
    for ann in annotations:
        gt_inds = ann['labels'] == class_id
        cls_gts.append(ann['bboxes'][gt_inds, :])

        if ann.get('labels_ignore', None) is not None:
            ignore_inds = ann['labels_ignore'] == class_id
            cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
        else:
            cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))

    return cls_dets, cls_gts, cls_gts_ignore 
Example 13
Project: mmdetection   Author: open-mmlab   File: misc.py    License: Apache License 2.0
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert tensor to images.

    Args:
        tensor (torch.Tensor): Tensor that contains multiple images
        mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0).
        std (tuple[float], optional): Standard deviation of images.
            Defaults to (1, 1, 1).
        to_rgb (bool, optional): Whether convert the images to RGB format.
            Defaults to True.

    Returns:
        list[np.ndarray]: A list that contains multiple images.
    """
    num_imgs = tensor.size(0)
    mean = np.array(mean, dtype=np.float32)
    std = np.array(std, dtype=np.float32)
    imgs = []
    for img_id in range(num_imgs):
        img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
        img = mmcv.imdenormalize(
            img, mean, std, to_bgr=to_rgb).astype(np.uint8)
        imgs.append(np.ascontiguousarray(img))
    return imgs 
Example 14
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def crop_and_resize(self,
                        bboxes,
                        out_shape,
                        inds,
                        device,
                        interpolation='bilinear'):
        """Crop and resize masks by the given bboxes.

        This function is mainly used in mask targets computation.
        It first aligns masks to bboxes by assigned_inds, then crops the masks
        by the assigned bboxes and resizes them to the size of (mask_h, mask_w).

        Args:
            bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
            out_shape (tuple[int]): Target (h, w) of resized mask
            inds (ndarray): Indexes to assign masks to each bbox
            device (str): Device of bboxes
            interpolation (str): See `mmcv.imresize`

        Return:
            BaseInstanceMasks: the cropped and resized masks.
        """
        pass 
Example 15
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def __init__(self, masks, height, width):
        self.height = height
        self.width = width
        if len(masks) == 0:
            self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
        else:
            assert isinstance(masks, (list, np.ndarray))
            if isinstance(masks, list):
                assert isinstance(masks[0], np.ndarray)
                assert masks[0].ndim == 2  # (H, W)
            else:
                assert masks.ndim == 3  # (N, H, W)

            self.masks = np.stack(masks).reshape(-1, height, width)
            assert self.masks.shape[1] == self.height
            assert self.masks.shape[2] == self.width 
Example 16
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def crop(self, bbox):
        """See :func:`BaseInstanceMasks.crop`."""
        assert isinstance(bbox, np.ndarray)
        assert bbox.ndim == 1

        # clip the boundary
        bbox = bbox.copy()
        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
        x1, y1, x2, y2 = bbox
        w = np.maximum(x2 - x1, 1)
        h = np.maximum(y2 - y1, 1)

        if len(self.masks) == 0:
            cropped_masks = np.empty((0, h, w), dtype=np.uint8)
        else:
            cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
        return BitmapMasks(cropped_masks, h, w) 
Example 17
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def __getitem__(self, index):
        """Index the polygon masks.

        Args:
            index (ndarray | List): The indices.

        Returns:
            :obj:`PolygonMasks`: The indexed polygon masks.
        """
        if isinstance(index, np.ndarray):
            index = index.tolist()
        if isinstance(index, list):
            masks = [self.masks[i] for i in index]
        else:
            try:
                masks = self.masks[index]
            except Exception:
                raise ValueError(
                    f'Unsupported input of type {type(index)} for indexing!')
        if isinstance(masks[0], np.ndarray):
            masks = [masks]  # ensure a list of three levels
        return PolygonMasks(masks, self.height, self.width) 
Example 18
Project: mlimages   Author: icoxfog417   File: chainer_alex.py    License: MIT License
def predict(limit):
    _limit = limit if limit > 0 else 5

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, mean_image_file=MEAN_IMAGE_FILE, image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)

    i = 0
    for arr, im in td.generate():
        x = np.ndarray((1,) + arr.shape, arr.dtype)
        x[0] = arr
        x = chainer.Variable(np.asarray(x), volatile="on")
        y = model.predict(x)
        p = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[p], label_def[im.label]))
        im.image.show()
        i += 1
        if i >= _limit:
            break 
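The batch-of-one construction with np.ndarray above can also be written with np.expand_dims; a minimal side-by-side sketch (the array shape here is purely illustrative):

import numpy as np

arr = np.random.rand(3, 227, 227).astype(np.float32)

x1 = np.ndarray((1,) + arr.shape, arr.dtype)   # uninitialized buffer
x1[0] = arr

x2 = np.expand_dims(arr, axis=0)               # same shape, no copy (a view)
assert x1.shape == x2.shape == (1, 3, 227, 227)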
Example 19
Project: Sound-Recognition-Tutorial   Author: JasonZhang156   File: feature_extraction.py    License: Apache License 2.0
def extract_mfcc(y, sr, size=3):
    """
    extract MFCC feature
    :param y: np.ndarray [shape=(n,)], the real-valued input signal (audio time series)
    :param sr: sample rate of 'y'
    :param size: the length (in seconds) of the random crop from the original audio, defaults to 3 seconds
    :return: MFCC feature
    """
    # normalization
    y = y.astype(np.float32)
    normalization_factor = 1 / np.max(np.abs(y))
    y = y * normalization_factor

    # random crop
    start = random.randint(0, len(y) - size * sr)
    y = y[start: start + size * sr]

    # extract log mel spectrogram #####
    melspectrogram = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=2048, hop_length=1024)
    mfcc = librosa.feature.mfcc(S=librosa.power_to_db(melspectrogram), n_mfcc=20)
    mfcc_delta = librosa.feature.delta(mfcc)
    mfcc_delta_delta = librosa.feature.delta(mfcc_delta)
    mfcc_comb = np.concatenate([mfcc, mfcc_delta, mfcc_delta_delta], axis=0)

    return mfcc_comb 
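Assuming librosa is installed and extract_mfcc is importable (along with its numpy/librosa/random imports), the helper can be exercised on synthetic audio; the output stacks 20 MFCCs plus their delta and delta-delta into 60 rows:

import numpy as np

sr = 22050
y = np.random.randn(sr * 5).astype(np.float32)   # 5 seconds of noise
feat = extract_mfcc(y, sr, size=3)
print(feat.shape)                                # (60, n_frames)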
Example 20
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: SampleIO.py    License: BSD 2-Clause "Simplified" License
def extract_sample(img, image_mean=None, resize=-1):
    """Extract image content from image string or from file
    TAKE:
    input - either file content as string or numpy array
    image_mean - numpy array of image mean or a values of size (1,3)
    resize - to resize image, set resize > 0; otherwise, don't resize
    """
    try:
        # if input is a file name, then read image; otherwise decode_imgstr
        if type(img) is np.ndarray:
            img_data = img
        else:
            img_data = decode_imgstr(img)
        if type(resize) in [tuple, list]:
            # resize in two dimensions
            img_data = scipy.misc.imresize(img_data, (resize[0], resize[1]))
        elif resize > 0:
            img_data = scipy.misc.imresize(img_data, (resize, resize))
        img_data = img_data.astype(np.float32, copy=False)
        img_data = img_data[:, :, ::-1]
        # change channel for caffe:
        img_data = img_data.transpose(2, 0, 1)  # to CxHxW
        # substract_mean
        if image_mean is not None:
            img_data = substract_mean(img_data, image_mean)
        return img_data
    except:
        print(sys.exc_info()[0], sys.exc_info()[1])
        return 
Example 21
Project: vergeml   Author: mme   File: env.py    License: MIT License
def _convert(self, vals):
        res = {}
        for k, v in vals.items():
            if isinstance(v, (np.int, np.int8, np.int16, np.int32, np.int64)):
                v = int(v)
            elif isinstance(v, (np.float, np.float16, np.float32, np.float64)):
                v = float(v)
            elif isinstance(v, Labels):
                v = list(v)
            elif isinstance(v, np.ndarray):
                v = v.tolist()
            elif isinstance(v, dict):
                v = self._convert(v)
            res[k] = v
        return res 
Example 22
Project: cat-bbs   Author: aleju   File: bbs.py    License: MIT License
def fix_by_image_dimensions(self, height, width=None):
        if isinstance(height, (tuple, list)):
            assert width is None
            height, width = height[0], height[1]
        elif isinstance(height, (np.ndarray, np.generic)):
            assert width is None
            height, width = height.shape[0], height.shape[1]
        else:
            assert width is not None
            assert isinstance(height, int)
            assert isinstance(width, int)

        self.x1 = int(np.clip(self.x1, 0, width-1))
        self.x2 = int(np.clip(self.x2, 0, width-1))
        self.y1 = int(np.clip(self.y1, 0, height-1))
        self.y2 = int(np.clip(self.y2, 0, height-1))

        if self.x1 > self.x2:
            self.x1, self.x2 = self.x2, self.x1
        if self.y1 > self.y2:
            self.y1, self.y2 = self.y2, self.y1

        if self.x1 == self.x2:
            if self.x1 > 0:
                self.x1 = self.x1 - 1
            else:
                self.x2 = self.x2 + 1

        if self.y1 == self.y2:
            if self.y1 > 0:
                self.y1 = self.y1 - 1
            else:
                self.y2 = self.y2 + 1

        #self.width = self.x2 - self.x1
        #self.height = self.y2 - self.y1 
Example 23
Project: aospy   Author: spencerahill   File: io.py    License: Apache License 2.0
def time_label(intvl, return_val=True):
    """Create time interval label for aospy data I/O."""
    # Monthly labels are 2 digit integers: '01' for jan, '02' for feb, etc.
    if type(intvl) in [list, tuple, np.ndarray] and len(intvl) == 1:
        label = '{:02}'.format(intvl[0])
        value = np.array(intvl)
    elif type(intvl) == int and intvl in range(1, 13):
        label = '{:02}'.format(intvl)
        value = np.array([intvl])
    # Seasonal and annual time labels are short strings.
    else:
        labels = {'jfm': (1, 2, 3),
                  'fma': (2, 3, 4),
                  'mam': (3, 4, 5),
                  'amj': (4, 5, 6),
                  'mjj': (5, 6, 7),
                  'jja': (6,  7,  8),
                  'jas': (7, 8, 9),
                  'aso': (8, 9, 10),
                  'son': (9, 10, 11),
                  'ond': (10, 11, 12),
                  'ndj': (11, 12, 1),
                  'djf': (1, 2, 12),
                  'jjas': (6, 7, 8, 9),
                  'djfm': (12, 1, 2, 3),
                  'ann': range(1, 13)}
        for lbl, vals in labels.items():
            if intvl == lbl or set(intvl) == set(vals):
                label = lbl
                value = np.array(vals)
                break
    if return_val:
        return label, value
    else:
        return label 
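Assuming time_label is importable, a few representative calls (a sketch):

time_label(1)       # ('01', array([1]))
time_label([7])     # ('07', array([7]))
time_label('jja')   # ('jja', array([6, 7, 8]))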
Example 24
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: bbox.py    License: MIT License
def bbox_overlaps(boxes, query_boxes):
    """
    Parameters
    ----------
    boxes: (N, 4) ndarray or tensor or variable
    query_boxes: (K, 4) ndarray or tensor or variable
    Returns
    -------
    overlaps: (N, K) overlap between boxes and query_boxes
    """
    if isinstance(boxes, np.ndarray):
        boxes = torch.from_numpy(boxes)
        query_boxes = torch.from_numpy(query_boxes)
        out_fn = lambda x: x.numpy() # if the input is an ndarray, convert the overlaps back to an ndarray on return
    else:
        out_fn = lambda x: x

    box_areas = (boxes[:, 2] - boxes[:, 0] + 1) * \
            (boxes[:, 3] - boxes[:, 1] + 1)
    query_areas = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * \
            (query_boxes[:, 3] - query_boxes[:, 1] + 1)

    iw = (torch.min(boxes[:, 2:3], query_boxes[:, 2:3].t()) - torch.max(boxes[:, 0:1], query_boxes[:, 0:1].t()) + 1).clamp(min=0)
    ih = (torch.min(boxes[:, 3:4], query_boxes[:, 3:4].t()) - torch.max(boxes[:, 1:2], query_boxes[:, 1:2].t()) + 1).clamp(min=0)
    ua = box_areas.view(-1, 1) + query_areas.view(1, -1) - iw * ih
    overlaps = iw * ih / ua
    return out_fn(overlaps) 
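A quick sanity check with two 10x10 boxes that overlap on a 5x5 corner (a sketch that assumes the function above is importable along with numpy and torch):

import numpy as np

boxes = np.array([[0., 0., 9., 9.]])       # area 100 under the +1 convention
query = np.array([[5., 5., 14., 14.]])     # also area 100
print(bbox_overlaps(boxes, query))         # approx. [[0.1429]] = 25 / (100 + 100 - 25)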
Example 25
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: config.py    License: MIT License
def _merge_a_into_b(a, b):
  """Merge config dictionary a into config dictionary b, clobbering the
  options in b whenever they are also specified in a.
  """
  if type(a) is not edict:
    return

  for k, v in a.items():
    # a must specify keys that are in b
    if k not in b:
      raise KeyError('{} is not a valid config key'.format(k))

    # the types must match, too
    old_type = type(b[k])
    if old_type is not type(v):
      if isinstance(b[k], np.ndarray):
        v = np.array(v, dtype=b[k].dtype)
      else:
        raise ValueError(('Type mismatch ({} vs. {}) '
                          'for config key: {}').format(type(b[k]),
                                                       type(v), k))

    # recursively merge dicts
    if type(v) is edict:
      try:
        _merge_a_into_b(a[k], b[k])
      except:
        print(('Error under config key: {}'.format(k)))
        raise
    else:
      b[k] = v 
Example 26
Project: DDPAE-video-prediction   Author: jthsieh   File: video_transforms.py    License: MIT License
def __call__(self, arr):
    if isinstance(arr, np.ndarray):
      video = torch.from_numpy(np.rollaxis(arr, axis=-1, start=-3))

      if self.scale:
        return video.float().div(255)
      else:
        return video.float()
    else:
      raise NotImplementedError 
Example 27
Project: DDPAE-video-prediction   Author: jthsieh   File: video_transforms.py    License: MIT License
def __call__(self, video):
    """
    Args:
        video (np.ndarray): Video to be padded.
    Returns:
        np.ndarray: Padded video.
    """
    pad_width = ((0, 0), (self.padding, self.padding), (self.padding, self.padding), (0, 0))
    return np.pad(video, pad_width=pad_width, mode='constant', constant_values=self.fill) 
Example 28
Project: DDPAE-video-prediction   Author: jthsieh   File: misc.py    License: MIT License
def to_numpy(array):
  """
  :param array: Variable, GPU tensor, or CPU tensor
  :return: numpy
  """
  if isinstance(array, np.ndarray):
    return array
  if isinstance(array, torch.autograd.Variable):
    array = array.data
  if array.is_cuda:
    array = array.cpu()

  return array.numpy() 
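Typical usage of the helper above, assuming torch is available:

import numpy as np
import torch

print(type(to_numpy(np.zeros(3))))      # <class 'numpy.ndarray'>
print(type(to_numpy(torch.zeros(3))))   # <class 'numpy.ndarray'>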
Example 29
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: flow.py    License: MIT License
def return_predict(self, im):
    assert isinstance(im, np.ndarray), \
        'Image is not a np.ndarray'
    h, w, _ = im.shape
    im = self.framework.resize_input(im)
    this_inp = np.expand_dims(im, 0)
    feed_dict = {self.inp : this_inp}

    out = self.sess.run(self.out, feed_dict)[0]
    boxes = self.framework.findboxes(out)
    threshold = self.FLAGS.threshold
    boxesInfo = list()
    for box in boxes:
        tmpBox = self.framework.process_box(box, h, w, threshold)
        if tmpBox is None:
            continue
        boxesInfo.append({
            "label": tmpBox[4],
            "confidence": tmpBox[6],
            "topleft": {
                "x": tmpBox[0],
                "y": tmpBox[2]},
            "bottomright": {
                "x": tmpBox[1],
                "y": tmpBox[3]}
        })
    return boxesInfo 
Example 30
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: predict.py    License: MIT License
def preprocess(self, im, allobj = None):
	"""
	Takes an image and returns it as a numpy tensor that is ready
	to be fed into tfnet. If there is an accompanying annotation (allobj),
	meaning this preprocessing is serving the train process, then the
	image will be transformed with random noise to augment the training data,
	using scale, translation, flipping and recolor. The accompanying
	parsed annotation (allobj) will also be modified accordingly.
	"""
	if type(im) is not np.ndarray:
		im = cv2.imread(im)

	if allobj is not None: # in training mode
		result = imcv2_affine_trans(im)
		im, dims, trans_param = result
		scale, offs, flip = trans_param
		for obj in allobj:
			_fix(obj, dims, scale, offs)
			if not flip: continue
			obj_1_ =  obj[1]
			obj[1] = dims[0] - obj[3]
			obj[3] = dims[0] - obj_1_
		im = imcv2_recolor(im)

	im = self.resize_input(im)
	if allobj is None: return im
	return im#, np.array(im) # for unit testing 
Example 31
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: baseop.py    License: MIT License
def _shape(tensor): # work for both tf.Tensor & np.ndarray
    if type(tensor) in [tf.Variable, tf.Tensor]: 
        return tensor.get_shape()
    else: return tensor.shape 
Example 32
Project: kaldi-python-io   Author: funcwj   File: _io_kernel.py    License: Apache License 2.0
def write_float_mat_vec(fd, mat_or_vec):
    """
    Write float matrix or vector
    """
    if isinstance(mat_or_vec, np.ndarray):
        if mat_or_vec.ndim == 2:
            write_common_mat(fd, mat_or_vec)
        else:
            write_float_vec(fd, mat_or_vec)
    else:
        raise TypeError(f"Unsupport type: {type(mat_or_vec)}") 
Example 33
Project: dustmaps   Author: gregreen   File: bayestar.py    License: GNU General Public License v2.0
def _interpret_percentile(self, mode, pct):
        if mode == 'percentile':
            if pct is None:
                raise ValueError(
                    '"percentile" mode requires an additional keyword '
                    'argument: "pct"')
            if (type(pct) in (list,tuple)) or isinstance(pct, np.ndarray):
                try:
                    pct = np.array(pct, dtype='f8')
                except ValueError as err:
                    raise ValueError(
                        'Invalid "pct" specification. Must be number or '
                        'list/array of numbers.')
                if np.any((pct < 0) | (pct > 100)):
                    raise ValueError('"pct" must be between 0 and 100.')
                scalar_pct = False
            else:
                try:
                    pct = float(pct)
                except ValueError as err:
                    raise ValueError(
                        'Invalid "pct" specification. Must be number or '
                        'list/array of numbers.')
                if (pct < 0) or (pct > 100):
                    raise ValueError('"pct" must be between 0 and 100.')
                scalar_pct = True

            return pct, scalar_pct
        else:
            return None, None 
Example 34
Project: dustmaps   Author: gregreen   File: json_serializers.py    License: GNU General Public License v2.0
def serialize_ndarray_readable(o):
    """
    Serializes a :obj:`numpy.ndarray` in a human-readable format.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    return dict(
        _type='np.ndarray',
        dtype=o.dtype,
        value=hint_tuples(o.tolist())) 
Example 35
Project: dustmaps   Author: gregreen   File: json_serializers.py    License: GNU General Public License v2.0
def object_hook(self, d):
        if isinstance(d, dict):
            if ('_type' in d):
                if d['_type'] == 'astropy.coordinates.SkyCoord':
                    return deserialize_skycoord(d)
                elif d['_type'] == 'astropy.units.Quantity':
                    return deserialize_quantity(d)
                elif d['_type'] == 'np.ndarray':
                    return deserialize_ndarray(d)
                elif d['_type'] == 'np.dtype':
                    return deserialize_dtype(d)
                elif d['_type'] == 'tuple':
                    return deserialize_tuple(d)
        return d 
Example 36
Project: mmdetection   Author: open-mmlab   File: nms_wrapper.py    License: Apache License 2.0
def nms_match(dets, thresh):
    """Matched dets into different groups by NMS.

    NMS match is similar to NMS, but when a bbox is suppressed, nms match will
    record the index of the suppressed bbox and form a group with the index of
    the kept bbox. Within each group, indices are sorted in score order.

    Arguments:
        dets (torch.Tensor | np.ndarray): Det bboxes with scores, shape (N, 5).
        thresh (float): IoU threshold for NMS.

    Returns:
        List[Tensor | ndarray]: The outer list corresponds to different matched
            groups, and the inner Tensor/ndarray contains the indices for a
            group in score order.
    """
    if dets.shape[0] == 0:
        matched = []
    else:
        assert dets.shape[-1] == 5, 'inputs dets.shape should be (N, 5), ' \
                                    f'but get {dets.shape}'
        if isinstance(dets, torch.Tensor):
            dets_t = dets.detach().cpu()
        else:
            dets_t = torch.from_numpy(dets)
        matched = nms_ext.nms_match(dets_t, thresh)

    if isinstance(dets, torch.Tensor):
        return [dets.new_tensor(m, dtype=torch.long) for m in matched]
    else:
        return [np.array(m, dtype=np.int) for m in matched] 
Example 37
Project: mmdetection   Author: open-mmlab   File: coco.py    License: Apache License 2.0
def results2json(self, results, outfile_prefix):
        """Dump the detection results to a COCO style json file.

        There are 3 types of results: proposals, bbox predictions, mask
        predictions, and they have different data types. This method will
        automatically recognize the type, and dump them to json files.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files. If the
                prefix is "somepath/xxx", the json files will be named
                "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
                "somepath/xxx.proposal.json".

        Returns:
            dict[str: str]: Possible keys are "bbox", "segm", "proposal", and
                values are corresponding filenames.
        """
        result_files = dict()
        if isinstance(results[0], list):
            json_results = self._det2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            mmcv.dump(json_results, result_files['bbox'])
        elif isinstance(results[0], tuple):
            json_results = self._segm2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            result_files['segm'] = f'{outfile_prefix}.segm.json'
            mmcv.dump(json_results[0], result_files['bbox'])
            mmcv.dump(json_results[1], result_files['segm'])
        elif isinstance(results[0], np.ndarray):
            json_results = self._proposal2json(results)
            result_files['proposal'] = f'{outfile_prefix}.proposal.json'
            mmcv.dump(json_results, result_files['proposal'])
        else:
            raise TypeError('invalid type of results')
        return result_files 
Example 38
Project: mmdetection   Author: open-mmlab   File: transforms.py    License: Apache License 2.0
def bbox_flip(self, bboxes, img_shape, direction):
        """Flip bboxes horizontally.

        Args:
            bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)
            img_shape (tuple[int]): Image shape (height, width)
            direction (str): Flip direction. Options are 'horizontal',
                'vertical'.

        Returns:
            numpy.ndarray: Flipped bounding boxes.
        """

        assert bboxes.shape[-1] % 4 == 0
        flipped = bboxes.copy()
        if direction == 'horizontal':
            w = img_shape[1]
            flipped[..., 0::4] = w - bboxes[..., 2::4]
            flipped[..., 2::4] = w - bboxes[..., 0::4]
        elif direction == 'vertical':
            h = img_shape[0]
            flipped[..., 1::4] = h - bboxes[..., 3::4]
            flipped[..., 3::4] = h - bboxes[..., 1::4]
        else:
            raise ValueError(f"Invalid flipping direction '{direction}'")
        return flipped 
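For example, flipping the box [10, 20, 50, 80] horizontally in an image of width 200 maps x1/x2 to width - x2 and width - x1; the same step in isolation:

import numpy as np

bboxes = np.array([[10., 20., 50., 80.]])
w = 200                                   # img_shape[1]
flipped = bboxes.copy()
flipped[..., 0::4] = w - bboxes[..., 2::4]
flipped[..., 2::4] = w - bboxes[..., 0::4]
print(flipped)                            # [[150.  20. 190.  80.]]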
Example 39
Project: mmdetection   Author: open-mmlab   File: recall.py    License: Apache License 2.0
def print_recall_summary(recalls,
                         proposal_nums,
                         iou_thrs,
                         row_idxs=None,
                         col_idxs=None,
                         logger=None):
    """Print recalls in a table.

    Args:
        recalls (ndarray): calculated from `bbox_recalls`
        proposal_nums (ndarray or list): top N proposals
        iou_thrs (ndarray or list): iou thresholds
        row_idxs (ndarray): which rows (proposal nums) to print
        col_idxs (ndarray): which cols (iou thresholds) to print
        logger (logging.Logger | str | None): The way to print the recall
            summary. See `mmdet.utils.print_log()` for details. Default: None.
    """
    proposal_nums = np.array(proposal_nums, dtype=np.int32)
    iou_thrs = np.array(iou_thrs)
    if row_idxs is None:
        row_idxs = np.arange(proposal_nums.size)
    if col_idxs is None:
        col_idxs = np.arange(iou_thrs.size)
    row_header = [''] + iou_thrs[col_idxs].tolist()
    table_data = [row_header]
    for i, num in enumerate(proposal_nums[row_idxs]):
        row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()]
        row.insert(0, num)
        table_data.append(row)
    table = AsciiTable(table_data)
    print_log('\n' + table.table, logger=logger) 
Example 40
Project: mmdetection   Author: open-mmlab   File: iou_balanced_neg_sampler.py    License: Apache License 2.0
def sample_via_interval(self, max_overlaps, full_set, num_expected):
        """Sample according to the iou interval.

        Args:
            max_overlaps (torch.Tensor): IoU between bounding boxes and ground
                truth boxes.
            full_set (set(int)): A full set of indices of boxes.
            num_expected (int): Number of expected samples.

        Returns:
            np.ndarray: Indices of samples.
        """
        max_iou = max_overlaps.max()
        iou_interval = (max_iou - self.floor_thr) / self.num_bins
        per_num_expected = int(num_expected / self.num_bins)

        sampled_inds = []
        for i in range(self.num_bins):
            start_iou = self.floor_thr + i * iou_interval
            end_iou = self.floor_thr + (i + 1) * iou_interval
            tmp_set = set(
                np.where(
                    np.logical_and(max_overlaps >= start_iou,
                                   max_overlaps < end_iou))[0])
            tmp_inds = list(tmp_set & full_set)
            if len(tmp_inds) > per_num_expected:
                tmp_sampled_set = self.random_choice(tmp_inds,
                                                     per_num_expected)
            else:
                tmp_sampled_set = np.array(tmp_inds, dtype=np.int)
            sampled_inds.append(tmp_sampled_set)

        sampled_inds = np.concatenate(sampled_inds)
        if len(sampled_inds) < num_expected:
            num_extra = num_expected - len(sampled_inds)
            extra_inds = np.array(list(full_set - set(sampled_inds)))
            if len(extra_inds) > num_extra:
                extra_inds = self.random_choice(extra_inds, num_extra)
            sampled_inds = np.concatenate([sampled_inds, extra_inds])

        return sampled_inds 
Example 41
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def crop(self, bbox):
        """Crop each mask by the given bbox.

        Args:
            bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).

        Return:
            BaseInstanceMasks: The cropped masks.
        """
        pass 
Example 42
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def areas(self):
        """ndarray: areas of each instance."""
        pass 
Example 43
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def to_ndarray(self):
        """Convert masks to the format of ndarray.

        Return:
            ndarray: Converted masks in the format of ndarray.
        """
        pass 
Example 44
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def crop_and_resize(self,
                        bboxes,
                        out_shape,
                        inds,
                        device='cpu',
                        interpolation='bilinear'):
        """See :func:`BaseInstanceMasks.crop_and_resize`."""
        if len(self.masks) == 0:
            empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
            return BitmapMasks(empty_masks, *out_shape)

        # convert bboxes to tensor
        if isinstance(bboxes, np.ndarray):
            bboxes = torch.from_numpy(bboxes).to(device=device)
        if isinstance(inds, np.ndarray):
            inds = torch.from_numpy(inds).to(device=device)

        num_bbox = bboxes.shape[0]
        fake_inds = torch.arange(
            num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
        rois = torch.cat([fake_inds, bboxes], dim=1)  # Nx5
        rois = rois.to(device=device)
        if num_bbox > 0:
            gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(
                0, inds).to(dtype=rois.dtype)
            targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
                                1.0, 0, True).squeeze(1)
            resized_masks = (targets >= 0.5).cpu().numpy()
        else:
            resized_masks = []
        return BitmapMasks(resized_masks, *out_shape) 
Example 45
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def __init__(self, masks, height, width):
        assert isinstance(masks, list)
        if len(masks) > 0:
            assert isinstance(masks[0], list)
            assert isinstance(masks[0][0], np.ndarray)

        self.height = height
        self.width = width
        self.masks = masks 
Example 46
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def crop(self, bbox):
        """see :func:`BaseInstanceMasks.crop`"""
        assert isinstance(bbox, np.ndarray)
        assert bbox.ndim == 1

        # clip the boundary
        bbox = bbox.copy()
        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
        x1, y1, x2, y2 = bbox
        w = np.maximum(x2 - x1, 1)
        h = np.maximum(y2 - y1, 1)

        if len(self.masks) == 0:
            cropped_masks = PolygonMasks([], h, w)
        else:
            cropped_masks = []
            for poly_per_obj in self.masks:
                cropped_poly_per_obj = []
                for p in poly_per_obj:
                    # pycocotools will clip the boundary
                    p = p.copy()
                    p[0::2] -= bbox[0]
                    p[1::2] -= bbox[1]
                    cropped_poly_per_obj.append(p)
                cropped_masks.append(cropped_poly_per_obj)
            cropped_masks = PolygonMasks(cropped_masks, h, w)
        return cropped_masks 
Example 47
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def _polygon_area(self, x, y):
        """Compute the area of a component of a polygon.

        Using the shoelace formula:
        https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates

        Args:
            x (ndarray): x coordinates of the component
            y (ndarray): y coordinates of the component

        Return:
            float: the area of the component
        """  # noqa: 501
        return 0.5 * np.abs(
            np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) 
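For a unit square, the shoelace formula above gives an area of 1.0:

import numpy as np

x = np.array([0., 1., 1., 0.])   # vertices in order
y = np.array([0., 0., 1., 1.])
area = 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
print(area)   # 1.0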
Example 48
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def to_ndarray(self):
        """Convert masks to the format of ndarray."""
        if len(self.masks) == 0:
            return np.empty((0, self.height, self.width), dtype=np.uint8)
        bitmap_masks = []
        for poly_per_obj in self.masks:
            bitmap_masks.append(
                polygon_to_bitmap(poly_per_obj, self.height, self.width))
        return np.stack(bitmap_masks) 
Example 49
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def polygon_to_bitmap(polygons, height, width):
    """Convert masks from the form of polygons to bitmaps.

    Args:
        polygons (list[ndarray]): masks in polygon representation
        height (int): mask height
        width (int): mask width

    Return:
        ndarray: the converted masks in bitmap representation
    """
    rles = maskUtils.frPyObjects(polygons, height, width)
    rle = maskUtils.merge(rles)
    bitmap_mask = maskUtils.decode(rle).astype(np.bool)
    return bitmap_mask 
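Assuming pycocotools (imported as maskUtils in the module above) is installed and polygon_to_bitmap is in scope, a small usage sketch:

import numpy as np

# One instance made of a single triangle, given as a flat [x0, y0, x1, y1, ...] array.
triangle = [np.array([2., 2., 20., 2., 2., 20.])]
mask = polygon_to_bitmap(triangle, height=28, width=28)
print(mask.shape, mask.dtype, mask.sum())   # (28, 28) bool <number of foreground pixels>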
Example 50
Project: mmdetection   Author: open-mmlab   File: test_fp16.py    License: Apache License 2.0
def test_cast_tensor_type():
    inputs = torch.FloatTensor([5.])
    src_type = torch.float32
    dst_type = torch.int32
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, torch.Tensor)
    assert outputs.dtype == dst_type

    inputs = 'tensor'
    src_type = str
    dst_type = str
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, str)

    inputs = np.array([5.])
    src_type = np.ndarray
    dst_type = np.ndarray
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, np.ndarray)

    inputs = dict(
        tensor_a=torch.FloatTensor([1.]), tensor_b=torch.FloatTensor([2.]))
    src_type = torch.float32
    dst_type = torch.int32
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, dict)
    assert outputs['tensor_a'].dtype == dst_type
    assert outputs['tensor_b'].dtype == dst_type

    inputs = [torch.FloatTensor([1.]), torch.FloatTensor([2.])]
    src_type = torch.float32
    dst_type = torch.int32
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, list)
    assert outputs[0].dtype == dst_type
    assert outputs[1].dtype == dst_type

    inputs = 5
    outputs = cast_tensor_type(inputs, None, None)
    assert isinstance(outputs, int) 
Example 51
Project: mmdetection   Author: open-mmlab   File: test_masks.py    License: Apache License 2.0
def dummy_raw_bitmap_masks(size):
    """
    Args:
        size (tuple): expected shape of dummy masks, (H, W) or (N, H, W)

    Return:
        ndarray: dummy mask
    """
    return np.random.randint(0, 2, size, dtype=np.uint8) 
Example 52
Project: mmdetection   Author: open-mmlab   File: test_masks.py    License: Apache License 2.0
def test_bitmap_mask_init():
    # init with empty ndarray masks
    raw_masks = np.empty((0, 28, 28), dtype=np.uint8)
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    assert len(bitmap_masks) == 0
    assert bitmap_masks.height == 28
    assert bitmap_masks.width == 28

    # init with empty list masks
    raw_masks = []
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    assert len(bitmap_masks) == 0
    assert bitmap_masks.height == 28
    assert bitmap_masks.width == 28

    # init with ndarray masks contain 3 instances
    raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    assert len(bitmap_masks) == 3
    assert bitmap_masks.height == 28
    assert bitmap_masks.width == 28

    # init with list masks contain 3 instances
    raw_masks = [dummy_raw_bitmap_masks((28, 28)) for _ in range(3)]
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    assert len(bitmap_masks) == 3
    assert bitmap_masks.height == 28
    assert bitmap_masks.width == 28

    # init with raw masks of unsupported type
    with pytest.raises(AssertionError):
        raw_masks = [[dummy_raw_bitmap_masks((28, 28))]]
        BitmapMasks(raw_masks, 28, 28) 
Example 53
Project: mmdetection   Author: open-mmlab   File: test_masks.py    License: Apache License 2.0
def test_bitmap_mask_to_ndarray():
    # empty bitmap masks to ndarray
    raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    ndarray_masks = bitmap_masks.to_ndarray()
    assert isinstance(ndarray_masks, np.ndarray)
    assert ndarray_masks.shape == (0, 28, 28)

    # bitmap masks contain 3 instances to ndarray
    raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    ndarray_masks = bitmap_masks.to_ndarray()
    assert isinstance(ndarray_masks, np.ndarray)
    assert ndarray_masks.shape == (3, 28, 28)
    assert (ndarray_masks == raw_masks).all() 
Example 54
Project: mmdetection   Author: open-mmlab   File: test_masks.py    License: Apache License 2.0
def test_polygon_mask_init():
    # init with empty masks
    raw_masks = []
    polygon_masks = PolygonMasks(raw_masks, 28, 28)
    assert len(polygon_masks) == 0
    assert polygon_masks.height == 28
    assert polygon_masks.width == 28

    # init with masks contain 3 instances
    raw_masks = dummy_raw_polygon_masks((3, 28, 28))
    polygon_masks = PolygonMasks(raw_masks, 28, 28)
    assert isinstance(polygon_masks.masks, list)
    assert isinstance(polygon_masks.masks[0], list)
    assert isinstance(polygon_masks.masks[0][0], np.ndarray)
    assert len(polygon_masks) == 3
    assert polygon_masks.height == 28
    assert polygon_masks.width == 28
    assert polygon_masks.to_ndarray().shape == (3, 28, 28)

    # init with raw masks of unsupported type
    with pytest.raises(AssertionError):
        raw_masks = [[[]]]
        PolygonMasks(raw_masks, 28, 28)

        raw_masks = [dummy_raw_polygon_masks((3, 28, 28))]
        PolygonMasks(raw_masks, 28, 28) 
Example 55
Project: mmdetection   Author: open-mmlab   File: test_masks.py    License: Apache License 2.0
def test_polygon_mask_to_ndarray():
    # empty polygon masks to ndarray
    raw_masks = dummy_raw_polygon_masks((0, 28, 28))
    polygon_masks = PolygonMasks(raw_masks, 28, 28)
    ndarray_masks = polygon_masks.to_ndarray()
    assert isinstance(ndarray_masks, np.ndarray)
    assert ndarray_masks.shape == (0, 28, 28)

    # polygon masks contain 3 instances to ndarray
    raw_masks = dummy_raw_polygon_masks((3, 28, 28))
    polygon_masks = PolygonMasks(raw_masks, 28, 28)
    ndarray_masks = polygon_masks.to_ndarray()
    assert isinstance(ndarray_masks, np.ndarray)
    assert ndarray_masks.shape == (3, 28, 28) 
Example 56
Project: mmdetection   Author: open-mmlab   File: test_masks.py    License: Apache License 2.0
def test_polygon_mask_index():
    raw_masks = dummy_raw_polygon_masks((3, 28, 28))
    polygon_masks = PolygonMasks(raw_masks, 28, 28)
    # index by integer
    polygon_masks[0]
    # index by list
    polygon_masks[[0, 1]]
    # index by ndarray
    polygon_masks[np.asarray([0, 1])]
    with pytest.raises(ValueError):
        # invalid index
        polygon_masks[torch.Tensor([1, 2])] 
Example 57
Project: neural-fingerprinting   Author: StephanZheng   File: attacks.py    License: BSD 3-Clause "New" or "Revised" License
def get_or_guess_labels(self, x, kwargs):
        """
        Get the label to use in generating an adversarial example for x.
        The kwargs are fed directly from the kwargs of the attack.
        If 'y' is in kwargs, then assume it's an untargeted attack and
        use that as the label.
        If 'y_target' is in kwargs and is not None, then assume it's a
        targeted attack and use that as the label.
        Otherwise, use the model's prediction as the label and perform an
        untargeted attack.
        """
        import tensorflow as tf

        if 'y' in kwargs and 'y_target' in kwargs:
            raise ValueError("Can not set both 'y' and 'y_target'.")
        elif 'y' in kwargs:
            labels = kwargs['y']
        elif 'y_target' in kwargs and kwargs['y_target'] is not None:
            labels = kwargs['y_target']
        else:
            preds = self.model.get_probs(x)
            preds_max = reduce_max(preds, 1, keepdims=True)
            original_predictions = tf.to_float(tf.equal(preds, preds_max))
            labels = tf.stop_gradient(original_predictions)
        if isinstance(labels, np.ndarray):
            nb_classes = labels.shape[1]
        else:
            nb_classes = labels.get_shape().as_list()[1]
        return labels, nb_classes 
Example 58
Project: mlimages   Author: icoxfog417   File: training.py    License: MIT License
def make_mean_image(self, mean_image_file=""):
        m_file = mean_image_file if mean_image_file else os.path.join(self.label_file.path, "./mean_image.png")
        l_file = FileAPI.add_ext_name(self.label_file.path, "_used_in_mean")
        _, ext = os.path.splitext(os.path.basename(m_file))
        im_iterator = self.label_file._fetch_raw()

        sum_image = None
        count = 0

        with open(l_file, mode="w", encoding="utf-8") as f:
            for im, line in im_iterator:
                try:
                    converted = self.convert(im)
                    arr = converted.to_array(np)
                    if sum_image is None:
                        sum_image = np.ndarray(arr.shape)
                        sum_image[:] = arr
                    else:
                        sum_image += arr
                    count += 1
                    f.write(line)
                except:
                    pass

        mean = sum_image / count
        if ext.lower() == ".npy":
            pickle.dump(mean, open(m_file, "wb"), -1)
        else:
            mean_image = LabeledImage.from_array(mean)
            mean_image.image.save(m_file)
        self.mean_image_file = m_file
        self.label_file.path = l_file 
Example 59
Project: mlimages   Author: icoxfog417   File: training.py    License: MIT License
def generate_batches(self, size):
        mean = self.__load_mean()

        async def to_array(im):
            im.load()
            converted = self.convert(im)
            arr = self.__to_array(converted, mean)
            im.image = None  # don't use image any more, so release reference
            return arr, im.label

        batch = []
        loop = asyncio.get_event_loop()

        for im in self.label_file.fetch(load_image=False):
            batch.append(to_array(im))

            if len(batch) == size:
                tasks = asyncio.wait(batch)
                done, pending = loop.run_until_complete(tasks)
                results = []
                for d in done:
                    try:
                        results.append(d.result())
                    except:
                        pass

                x_sample, y_sample = results[0]
                x_batch = np.ndarray((size,) + x_sample.shape, x_sample.dtype)
                y_batch = np.ndarray((size,), np.int32)

                for j, r in enumerate(results):
                    x_batch[j], y_batch[j] = r

                yield x_batch, y_batch
                i = 0
                batch.clear()
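The batching pattern used above (preallocate x_batch/y_batch with np.ndarray, then fill row by row) in isolation, with an illustrative sample shape:

import numpy as np

size = 4
sample = np.random.rand(3, 32, 32).astype(np.float32)

x_batch = np.ndarray((size,) + sample.shape, sample.dtype)   # uninitialized buffers
y_batch = np.ndarray((size,), np.int32)
for j in range(size):
    x_batch[j] = sample
    y_batch[j] = j

print(x_batch.shape, y_batch)   # (4, 3, 32, 32) [0 1 2 3]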