Python numpy.ndarray() Examples

The following are code examples showing how to use numpy.ndarray(). They are taken from open source Python projects.
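
Note that numpy.ndarray is both the array type tested with isinstance in many of the examples below and a low-level constructor: calling np.ndarray(shape, dtype) allocates an array without initializing its contents, whereas np.array(...) builds an array from existing data. A minimal orientation sketch (not taken from any of the projects below):

import numpy as np

# np.ndarray(shape, dtype) allocates an array without initializing its values,
# so the buffer must be filled in before it is read:
buf = np.ndarray((2, 3), dtype=np.uint8)
buf[:] = 0

# np.array(...) builds an array from existing data:
a = np.array([1, 2, 3])

# isinstance(..., np.ndarray) is the usual type check seen in the examples below:
assert isinstance(a, np.ndarray)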

Example 1
Project: prediction-constrained-topic-models   Author: dtak   File: util_array.py    MIT License
def as1D(x):
    """ Convert input into to 1D numpy array.

    Returns
    -------
    x : 1D array

    Examples
    -------
    >>> as1D(5)
    array([5])
    >>> as1D([1,2,3])
    array([1, 2, 3])
    >>> as1D([[3,4,5,6]])
    array([3, 4, 5, 6])
    """
    if not isinstance(x, np.ndarray):
        x = np.asarray_chkfinite(x)
    if x.ndim < 1:
        x = np.asarray_chkfinite([x])
    elif x.ndim > 1:
        x = np.squeeze(x)
    return x 
Example 2
Project: prediction-constrained-topic-models   Author: dtak   File: util_array.py    MIT License
def as2D(x):
    """ Convert input into to 2D numpy array.


    Returns
    -------
    x : 2D array

    Examples
    -------
    >>> as2D(5)
    array([[5]])
    >>> as2D([1,2,3])
    array([[1, 2, 3]])
    >>> as2D([[3,4,5,6]])
    array([[3, 4, 5, 6]])
    """
    if not isinstance(x, np.ndarray):
        x = np.asarray_chkfinite(x)
    if x.ndim < 1:
        x = np.asarray_chkfinite([x])
    while x.ndim < 2:
        x = x[np.newaxis, :]
    return x 
Example 3
Project: prediction-constrained-topic-models   Author: dtak   File: util_array.py    MIT License
def as3D(x):
    """ Convert input into to 3D numpy array.

    Returns
    -------
    x : 3D array

    Examples
    -------
    >>> as3D(5)
    array([[[5]]])
    >>> as3D([1,2,3])
    array([[[1, 2, 3]]])
    >>> as3D([[3,4,5,6]])
    array([[[3, 4, 5, 6]]])
    """
    if not isinstance(x, np.ndarray):
        x = np.asarray_chkfinite(x)
    if x.ndim < 1:
        x = np.asarray_chkfinite([x])
    while x.ndim < 3:
        x = x[np.newaxis, :]
    return x 
Example 4
Project: mmdetection   Author: open-mmlab   File: inference.py    Apache License 2.0
def inference_detector(model, img):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str or np.ndarray): Either an image file path or a loaded
            image.

    Returns:
        The detection results for the given image.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    # forward the model
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result 
Example 5
Project: mmdetection   Author: open-mmlab   File: inference.py    Apache License 2.0
def show_result_pyplot(img,
                       result,
                       class_names,
                       score_thr=0.3,
                       fig_size=(15, 10)):
    """Visualize the detection results on the image.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str] or tuple[str]): A list of class names.
        score_thr (float): The threshold to visualize the bboxes and masks.
        fig_size (tuple): Figure size of the pyplot figure.
    """
    img = show_result(
        img, result, class_names, score_thr=score_thr, show=False)
    plt.figure(figsize=fig_size)
    plt.imshow(mmcv.bgr2rgb(img))
    plt.show() 
Example 6
Project: mmdetection   Author: open-mmlab   File: transforms.py    Apache License 2.0
def bbox_flip(self, bboxes, img_shape, direction):
        """Flip bboxes horizontally.

        Args:
            bboxes(ndarray): shape (..., 4*k)
            img_shape(tuple): (height, width)
        """
        assert bboxes.shape[-1] % 4 == 0
        flipped = bboxes.copy()
        if direction == 'horizontal':
            w = img_shape[1]
            flipped[..., 0::4] = w - bboxes[..., 2::4] - 1
            flipped[..., 2::4] = w - bboxes[..., 0::4] - 1
        elif direction == 'vertical':
            h = img_shape[0]
            flipped[..., 1::4] = h - bboxes[..., 3::4] - 1
            flipped[..., 3::4] = h - bboxes[..., 1::4] - 1
        else:
            raise ValueError(
                'Invalid flipping direction "{}"'.format(direction))
        return flipped 
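
The flip arithmetic can be checked with a small standalone sketch (hypothetical box and image width, independent of the class this method belongs to):

import numpy as np

bboxes = np.array([[10., 20., 30., 40.]])        # one box as (x1, y1, x2, y2)
w = 100                                          # hypothetical image width
flipped = bboxes.copy()
flipped[..., 0::4] = w - bboxes[..., 2::4] - 1   # new x1 = 100 - 30 - 1 = 69
flipped[..., 2::4] = w - bboxes[..., 0::4] - 1   # new x2 = 100 - 10 - 1 = 89
# flipped -> array([[69., 20., 89., 40.]])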
Example 7
Project: mmdetection   Author: open-mmlab   File: formating.py    Apache License 2.0
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.
    """
    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        raise TypeError('type {} cannot be converted to tensor.'.format(
            type(data))) 
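
For reference, the individual conversions used above behave as follows (a minimal sketch using torch and numpy only, independent of mmcv):

import numpy as np
import torch

arr = np.zeros((2, 3), dtype=np.float32)
t0 = torch.from_numpy(arr)       # shares memory with arr, shape (2, 3)
t1 = torch.tensor([1, 2, 3])     # copies the data from the sequence
t2 = torch.LongTensor([5])       # one-element int64 tensor
t3 = torch.FloatTensor([0.5])    # one-element float32 tensor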
Example 8
Project: mmdetection   Author: open-mmlab   File: recall.py    Apache License 2.0
def plot_num_recall(recalls, proposal_nums):
    """Plot Proposal_num-Recalls curve.

    Args:
        recalls(ndarray or list): shape (k,)
        proposal_nums(ndarray or list): same shape as `recalls`
    """
    if isinstance(proposal_nums, np.ndarray):
        _proposal_nums = proposal_nums.tolist()
    else:
        _proposal_nums = proposal_nums
    if isinstance(recalls, np.ndarray):
        _recalls = recalls.tolist()
    else:
        _recalls = recalls

    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.plot([0] + _proposal_nums, [0] + _recalls)
    plt.xlabel('Proposal num')
    plt.ylabel('Recall')
    plt.axis([0, np.max(proposal_nums), 0, 1])
    f.show() 
Example 9
Project: mmdetection   Author: open-mmlab   File: recall.py    Apache License 2.0
def plot_iou_recall(recalls, iou_thrs):
    """Plot IoU-Recalls curve.

    Args:
        recalls(ndarray or list): shape (k,)
        iou_thrs(ndarray or list): same shape as `recalls`
    """
    if isinstance(iou_thrs, np.ndarray):
        _iou_thrs = iou_thrs.tolist()
    else:
        _iou_thrs = iou_thrs
    if isinstance(recalls, np.ndarray):
        _recalls = recalls.tolist()
    else:
        _recalls = recalls

    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.plot(_iou_thrs + [1.0], _recalls + [0.])
    plt.xlabel('IoU')
    plt.ylabel('Recall')
    plt.axis([np.min(iou_thrs), 1, 0, 1])
    f.show() 
Example 10
Project: mmdetection   Author: open-mmlab   File: coco_utils.py    Apache License 2.0
def results2json(dataset, results, out_file):
    result_files = dict()
    if isinstance(results[0], list):
        json_results = det2json(dataset, results)
        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
        result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
        mmcv.dump(json_results, result_files['bbox'])
    elif isinstance(results[0], tuple):
        json_results = segm2json(dataset, results)
        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
        result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
        result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
        mmcv.dump(json_results[0], result_files['bbox'])
        mmcv.dump(json_results[1], result_files['segm'])
    elif isinstance(results[0], np.ndarray):
        json_results = proposal2json(dataset, results)
        result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
        mmcv.dump(json_results, result_files['proposal'])
    else:
        raise TypeError('invalid type of results')
    return result_files 
Example 11
Project: mmdetection   Author: open-mmlab   File: transforms.py    Apache License 2.0
def bbox2result(bboxes, labels, num_classes):
    """Convert detection results to a list of numpy arrays.

    Args:
        bboxes (Tensor): shape (n, 5)
        labels (Tensor): shape (n, )
        num_classes (int): class number, including background class

    Returns:
        list(ndarray): bbox results of each class
    """
    if bboxes.shape[0] == 0:
        return [
            np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1)
        ]
    else:
        bboxes = bboxes.cpu().numpy()
        labels = labels.cpu().numpy()
        return [bboxes[labels == i, :] for i in range(num_classes - 1)] 
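
A hypothetical call, assuming the function above is in scope; the box values are made up for illustration:

import numpy as np
import torch

bboxes = torch.tensor([[0., 0., 10., 10., 0.9],
                       [5., 5., 15., 15., 0.8]])
labels = torch.tensor([0, 1])
result = bbox2result(bboxes, labels, num_classes=3)
# result is a list with one (k, 5) float32 array per foreground class:
# result[0] -> array([[ 0.,  0., 10., 10.,  0.9]], dtype=float32)
# result[1] -> array([[ 5.,  5., 15., 15.,  0.8]], dtype=float32)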
Example 12
Project: mmdetection   Author: open-mmlab   File: utils.py    Apache License 2.0
def cast_tensor_type(inputs, src_type, dst_type):
    if isinstance(inputs, torch.Tensor):
        return inputs.to(dst_type)
    elif isinstance(inputs, str):
        return inputs
    elif isinstance(inputs, np.ndarray):
        return inputs
    elif isinstance(inputs, abc.Mapping):
        return type(inputs)({
            k: cast_tensor_type(v, src_type, dst_type)
            for k, v in inputs.items()
        })
    elif isinstance(inputs, abc.Iterable):
        return type(inputs)(
            cast_tensor_type(item, src_type, dst_type) for item in inputs)
    else:
        return inputs 
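
A hypothetical call, assuming the function above and its imports (torch, numpy, and abc from collections) are available; tensors are cast while strings and ndarrays pass through untouched:

import numpy as np
import torch

inputs = {'feat': torch.zeros(2, 2), 'name': 'img_0.jpg', 'meta': np.zeros(3)}
outputs = cast_tensor_type(inputs, torch.float32, torch.float16)
# outputs['feat'].dtype -> torch.float16
# outputs['name'] and outputs['meta'] are returned unchanged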
Example 13
Project: GreenGuard   Author: D3-AI   File: pipeline.py    MIT License
def _to_dicts(self, hyperparameters):
        params_tree = defaultdict(dict)
        for (block, hyperparameter), value in hyperparameters.items():
            if isinstance(value, np.integer):
                value = int(value)

            elif isinstance(value, np.floating):
                value = float(value)

            elif isinstance(value, np.ndarray):
                value = value.tolist()

            elif value == 'None':
                value = None

            params_tree[block][hyperparameter] = value

        return params_tree 
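
The conversions above matter mainly when the hyperparameter values are later serialized, for example to JSON: numpy scalar and array types are not JSON serializable as-is. A minimal illustration, independent of GreenGuard:

import json
import numpy as np

value = np.int64(3)
json.dumps(int(value))             # '3'  (json.dumps(value) would raise TypeError)
json.dumps(np.arange(3).tolist())  # '[0, 1, 2]'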
Example 14
Project: Keras-Unet   Author: MLearing   File: data.py    GNU General Public License v2.0
def create_test_data(self):

        # Generate the .npy file for the test set
        i = 0
        print('-' * 30)
        print('Creating test images...')
        print('-' * 30)
        imgs = glob.glob(self.test_path + "/*." + self.img_type)           # deform/train
        print(len(imgs))
        imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
        for imgname in imgs:
            midname = imgname[imgname.rindex("/") + 1:]   # image file name
            img = load_img(self.test_path + "/" + midname, grayscale=True)   # load as grayscale
            img = img_to_array(img)
            imgdatas[i] = img
            if i % 100 == 0:
                print('Done: {0}/{1} images'.format(i, len(imgs)))
            i += 1
        print('loading done', imgdatas.shape)
        np.save(self.npy_path + '/imgs_test.npy', imgdatas)            # save the test images as .npy data
        print('Saving to .npy files done.') 
Example 15
Project: mlimages   Author: icoxfog417   File: chainer_alex.py    MIT License
def predict(limit):
    _limit = limit if limit > 0 else 5

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, mean_image_file=MEAN_IMAGE_FILE, image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)

    i = 0
    for arr, im in td.generate():
        x = np.ndarray((1,) + arr.shape, arr.dtype)
        x[0] = arr
        x = chainer.Variable(np.asarray(x), volatile="on")
        y = model.predict(x)
        p = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[p], label_def[im.label]))
        im.image.show()
        i += 1
        if i >= _limit:
            break 
Example 16
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: SampleIO.py    BSD 2-Clause "Simplified" License
def extract_sample(img, image_mean=None, resize=-1):
    """Extract image content from image string or from file
    TAKE:
    input - either file content as string or numpy array
    image_mean - numpy array of image mean or a values of size (1,3)
    resize - to resize image, set resize > 0; otherwise, don't resize
    """
    try:
        # if input is a file name, then read image; otherwise decode_imgstr
        if type(img) is np.ndarray:
            img_data = img
        else:
            img_data = decode_imgstr(img)
        if type(resize) in [tuple, list]:
            # resize in two dimensions
            img_data = scipy.misc.imresize(img_data, (resize[0], resize[1]))
        elif resize > 0:
            img_data = scipy.misc.imresize(img_data, (resize, resize))
        img_data = img_data.astype(np.float32, copy=False)
        img_data = img_data[:, :, ::-1]
        # change channel for caffe:
        img_data = img_data.transpose(2, 0, 1)  # to CxHxW
        # substract_mean
        if image_mean is not None:
            img_data = substract_mean(img_data, image_mean)
        return img_data
    except:
        print(sys.exc_info()[0], sys.exc_info()[1])
        return 
Example 17
Project: cat-bbs   Author: aleju   File: bbs.py    MIT License
def fix_by_image_dimensions(self, height, width=None):
        if isinstance(height, (tuple, list)):
            assert width is None
            height, width = height[0], height[1]
        elif isinstance(height, (np.ndarray, np.generic)):
            assert width is None
            height, width = height.shape[0], height.shape[1]
        else:
            assert width is not None
            assert isinstance(height, int)
            assert isinstance(width, int)

        self.x1 = int(np.clip(self.x1, 0, width-1))
        self.x2 = int(np.clip(self.x2, 0, width-1))
        self.y1 = int(np.clip(self.y1, 0, height-1))
        self.y2 = int(np.clip(self.y2, 0, height-1))

        if self.x1 > self.x2:
            self.x1, self.x2 = self.x2, self.x1
        if self.y1 > self.y2:
            self.y1, self.y2 = self.y2, self.y1

        if self.x1 == self.x2:
            if self.x1 > 0:
                self.x1 = self.x1 - 1
            else:
                self.x2 = self.x2 + 1

        if self.y1 == self.y2:
            if self.y1 > 0:
                self.y1 = self.y1 - 1
            else:
                self.y2 = self.y2 + 1

        #self.width = self.x2 - self.x1
        #self.height = self.y2 - self.y1 
Example 18
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: spharafilter.py    BSD 3-Clause "New" or "Revised" License
def specification(self, specification):
        if isinstance(specification, (int)):
            if np.abs(specification) > self._triangsamples.vertlist.shape[0]:
                raise ValueError("""The Number of selected basic functions is
                too large.""")
            else:
                if specification == 0:
                    self._specification = \
                        np.ones(self._triangsamples.vertlist.shape[0])
                else:
                    self._specification = \
                        np.zeros(self._triangsamples.vertlist.shape[0])
                    if specification > 0:
                        self._specification[:specification] = 1
                    else:
                        self._specification[specification:] = 1
        elif isinstance(specification, (list, tuple, np.ndarray)):
            specification = np.asarray(specification)
            if specification.shape[0] != self._triangsamples.vertlist.shape[1]:
                raise IndexError("""The length of the specification vector
                does not match the number of spatial sample points. """)
            else:
                self._specification = specification
        else:
            raise TypeError("""The parameter specification has to be
            int or a vecor""") 
Example 19
Project: aospy   Author: spencerahill   File: io.py    Apache License 2.0
def time_label(intvl, return_val=True):
    """Create time interval label for aospy data I/O."""
    # Monthly labels are 2 digit integers: '01' for jan, '02' for feb, etc.
    if type(intvl) in [list, tuple, np.ndarray] and len(intvl) == 1:
        label = '{:02}'.format(intvl[0])
        value = np.array(intvl)
    elif type(intvl) == int and intvl in range(1, 13):
        label = '{:02}'.format(intvl)
        value = np.array([intvl])
    # Seasonal and annual time labels are short strings.
    else:
        labels = {'jfm': (1, 2, 3),
                  'fma': (2, 3, 4),
                  'mam': (3, 4, 5),
                  'amj': (4, 5, 6),
                  'mjj': (5, 6, 7),
                  'jja': (6,  7,  8),
                  'jas': (7, 8, 9),
                  'aso': (8, 9, 10),
                  'son': (9, 10, 11),
                  'ond': (10, 11, 12),
                  'ndj': (11, 12, 1),
                  'djf': (1, 2, 12),
                  'jjas': (6, 7, 8, 9),
                  'djfm': (12, 1, 2, 3),
                  'ann': range(1, 13)}
        for lbl, vals in labels.items():
            if intvl == lbl or set(intvl) == set(vals):
                label = lbl
                value = np.array(vals)
                break
    if return_val:
        return label, value
    else:
        return label 
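
Some hypothetical calls, assuming the function above is in scope:

time_label(1)           # -> ('01', array([1]))
time_label([7])         # -> ('07', array([7]))
time_label('djf')       # -> ('djf', array([ 1,  2, 12]))
time_label((6, 7, 8))   # -> ('jja', array([6, 7, 8]))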
Example 20
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: bbox.py    MIT License
def bbox_overlaps(boxes, query_boxes):
    """
    Parameters
    ----------
    boxes: (N, 4) ndarray or tensor or variable
    query_boxes: (K, 4) ndarray or tensor or variable
    Returns
    -------
    overlaps: (N, K) overlap between boxes and query_boxes
    """
    if isinstance(boxes, np.ndarray):
        boxes = torch.from_numpy(boxes)
        query_boxes = torch.from_numpy(query_boxes)
        out_fn = lambda x: x.numpy() # If input is ndarray, turn the overlaps back to ndarray when return
    else:
        out_fn = lambda x: x

    box_areas = (boxes[:, 2] - boxes[:, 0] + 1) * \
            (boxes[:, 3] - boxes[:, 1] + 1)
    query_areas = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * \
            (query_boxes[:, 3] - query_boxes[:, 1] + 1)

    iw = (torch.min(boxes[:, 2:3], query_boxes[:, 2:3].t()) - torch.max(boxes[:, 0:1], query_boxes[:, 0:1].t()) + 1).clamp(min=0)
    ih = (torch.min(boxes[:, 3:4], query_boxes[:, 3:4].t()) - torch.max(boxes[:, 1:2], query_boxes[:, 1:2].t()) + 1).clamp(min=0)
    ua = box_areas.view(-1, 1) + query_areas.view(1, -1) - iw * ih
    overlaps = iw * ih / ua
    return out_fn(overlaps) 
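
A quick sanity check, assuming the function above and its torch/numpy imports are in scope (the +1 terms mean the boxes are treated as inclusive pixel coordinates):

import numpy as np

boxes = np.array([[0., 0., 9., 9.]])          # 10x10 box (inclusive coords), area 100
query_boxes = np.array([[5., 5., 14., 14.]])  # 10x10 box, area 100
overlaps = bbox_overlaps(boxes, query_boxes)
# intersection is 5x5 = 25, union is 100 + 100 - 25 = 175
# overlaps -> array([[0.14285714]])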
Example 21
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: config.py    MIT License
def _merge_a_into_b(a, b):
  """Merge config dictionary a into config dictionary b, clobbering the
  options in b whenever they are also specified in a.
  """
  if type(a) is not edict:
    return

  for k, v in a.items():
    # a must specify keys that are in b
    if k not in b:
      raise KeyError('{} is not a valid config key'.format(k))

    # the types must match, too
    old_type = type(b[k])
    if old_type is not type(v):
      if isinstance(b[k], np.ndarray):
        v = np.array(v, dtype=b[k].dtype)
      else:
        raise ValueError(('Type mismatch ({} vs. {}) '
                          'for config key: {}').format(type(b[k]),
                                                       type(v), k))

    # recursively merge dicts
    if type(v) is edict:
      try:
        _merge_a_into_b(a[k], b[k])
      except:
        print(('Error under config key: {}'.format(k)))
        raise
    else:
      b[k] = v 
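
A hypothetical merge, assuming the easydict package (the edict used by this config module) is installed and the function above is in scope:

import numpy as np
from easydict import EasyDict as edict

b = edict({'SCALES': np.array([600]), 'RPN': edict({'NMS_THRESH': 0.7})})
a = edict({'SCALES': [800], 'RPN': edict({'NMS_THRESH': 0.5})})
_merge_a_into_b(a, b)
# b.SCALES -> array([800])   (the list is coerced to the existing ndarray dtype)
# b.RPN.NMS_THRESH -> 0.5    (nested edicts are merged recursively)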
Example 22
Project: AutoDL   Author: tanguofu   File: inspect_graph_shape.py    BSD 3-Clause "New" or "Revised" License
def getShapeMap(sess, starts, shape_map):
    run_options = tf.RunOptions()
    run_metadata = tf.RunMetadata()

    for o in sess.graph.get_operations():
        if o.type == 'Placeholder':
            continue

        for out in o.outputs:
            if out.name in shape_map:
                continue

            if o.type == 'Const':
                value = out.eval(session=sess)
                shape_map[out.name] = getValueLabel(value)
                continue

            result = sess.run(out, starts, run_options, run_metadata)
            starts[out.name] = result
            if isinstance(result, np.ndarray):
                shape_map[out.name] = getValueLabel(result)
            elif isinstance(result, np.float32):
                shape_map[out.name] = "scalar:" + repr(result)
            else:
                shape_map[out.name] = repr(out.get_shape()) + "," + repr(type(result))
                print("Not array:" + repr((o.name, o.type, out.name, out.get_shape(), type(result))))

    return shape_map 
Example 23
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: flow.py    MIT License
def return_predict(self, im):
    assert isinstance(im, np.ndarray), \
        'Image is not a np.ndarray'
    h, w, _ = im.shape
    im = self.framework.resize_input(im)
    this_inp = np.expand_dims(im, 0)
    feed_dict = {self.inp : this_inp}

    out = self.sess.run(self.out, feed_dict)[0]
    boxes = self.framework.findboxes(out)
    threshold = self.FLAGS.threshold
    boxesInfo = list()
    for box in boxes:
        tmpBox = self.framework.process_box(box, h, w, threshold)
        if tmpBox is None:
            continue
        boxesInfo.append({
            "label": tmpBox[4],
            "confidence": tmpBox[6],
            "topleft": {
                "x": tmpBox[0],
                "y": tmpBox[2]},
            "bottomright": {
                "x": tmpBox[1],
                "y": tmpBox[3]}
        })
    return boxesInfo 
Example 24
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: predict.py    MIT License
def preprocess(self, im, allobj = None):
	"""
	Takes an image and returns it as a numpy tensor that is ready
	to be fed into tfnet. If there is an accompanying annotation (allobj),
	meaning this preprocessing is serving the training process, then the
	image will be transformed with random noise to augment the training
	data, using scale, translation, flipping and recolor. The accompanying
	parsed annotation (allobj) will also be modified accordingly.
	"""
	if type(im) is not np.ndarray:
		im = cv2.imread(im)

	if allobj is not None: # in training mode
		result = imcv2_affine_trans(im)
		im, dims, trans_param = result
		scale, offs, flip = trans_param
		for obj in allobj:
			_fix(obj, dims, scale, offs)
			if not flip: continue
			obj_1_ =  obj[1]
			obj[1] = dims[0] - obj[3]
			obj[3] = dims[0] - obj_1_
		im = imcv2_recolor(im)

	im = self.resize_input(im)
	if allobj is None: return im
	return im#, np.array(im) # for unit testing 
Example 25
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: baseop.py    MIT License
def _shape(tensor): # works for both tf.Tensor & np.ndarray
    if type(tensor) in [tf.Variable, tf.Tensor]: 
        return tensor.get_shape()
    else: return tensor.shape 
Example 26
Project: ieml   Author: IEMLdev   File: test_dictionary.py    GNU General Public License v3.0
def test_scripts(self):
        self.assertIsInstance(self.d.scripts, np.ndarray)
        self.assertEqual(self.d.scripts.ndim, 1)
        self.assertEqual(self.d.scripts.shape, (len(self.d),))
        for s in self.d.scripts:
            self.assertIsInstance(s, Script) 
Example 27
Project: ieml   Author: IEMLdev   File: test_dictionary.py    GNU General Public License v3.0
def test_one_hot(self):
        for i, s in enumerate(self.d.scripts):
            oh = self.d.one_hot(s)

            self.assertIsInstance(oh, np.ndarray)
            self.assertEqual(oh.ndim, 1)
            self.assertEqual(oh.shape, (len(self.d),))
            self.assertEqual(oh.dtype, int)

            self.assertTrue(all(e == 0 for j, e in enumerate(oh) if j != i))
            # print(oh[i-2:i+2], s)

            self.assertEqual(oh[i], 1) 
Example 28
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def from_axis_angle_translation(cls, axis_angle, translation):
        assert isinstance(axis_angle, np.ndarray) and axis_angle.shape == (4,)
        assert isinstance(translation, np.ndarray) and translation.shape == (3,)
        R = axis_angle_to_rotation(axis_angle[1:], axis_angle[0])
        return RotationTranslationData(rt=(R, translation)) 
Example 29
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def yaw(points):
    # calculate yaw of points given in nx3 format. yaw in [-pi, pi]
    assert isinstance(points, np.ndarray)
    assert points.ndim == 2
    assert points.shape[1] == 3
    return np.arctan2(points[:, 1], points[:, 0]) 
Example 30
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def pitch(points):
    # calculate pitch of points given in nx3 format. pitch in [-pi/2, pi/2]
    assert isinstance(points, np.ndarray)
    assert points.ndim == 2
    assert points.shape[1] == 3
    return np.arctan2(points[:, 2], np.linalg.norm(points[:, :2], axis=1)) 
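
A small check of the two helpers above on hypothetical points, assuming both functions are in scope:

import numpy as np

points = np.array([[1., 1., 0.],
                   [0., 0., 1.]])
yaw(points)    # -> array([0.78539816, 0.        ])   i.e. pi/4 and 0
pitch(points)  # -> array([0.        , 1.57079633])   i.e. 0 and pi/2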
Example 31
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def cross_product_matrix(vector):
    assert isinstance(vector, np.ndarray) and vector.shape == (3,)
    matrix = np.array([[0, -vector[2], vector[1]],
                       [vector[2], 0, -vector[0]],
                       [-vector[1], vector[0], 0]])
    return matrix 
Example 32
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def extract_axis_angle(rot_mat):
    # Convert from rotation matrix to axis angle. This conversion is valid for angles in [0, pi];
    # angles in [-pi, 0] are mapped to [pi, 0] (from pi down to 0) with the negated axis.
    # To handle this issue properly we could use quaternions.
    assert isinstance(rot_mat, np.ndarray) and rot_mat.shape == (3, 3,)
    u = np.array([rot_mat[2, 1] - rot_mat[1, 2],
                  rot_mat[0, 2] - rot_mat[2, 0],
                  rot_mat[1, 0] - rot_mat[0, 1]])
    angle = np.arccos(np.trace(rot_mat[:3, :3]) / 2 - 0.5)
    if np.linalg.norm(u) == 0.:
        return np.array([0., 0., 0., 1.])
    else:
        u = u / np.linalg.norm(u)
        return np.array([angle, u[0], u[1], u[2]]) 
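
Two hypothetical inputs, assuming the function above is in scope:

import numpy as np

rot_z_90 = np.array([[0., -1., 0.],
                     [1.,  0., 0.],
                     [0.,  0., 1.]])   # 90-degree rotation about the z axis
extract_axis_angle(rot_z_90)   # -> [pi/2, 0, 0, 1], i.e. pi/2 about the z axis
extract_axis_angle(np.eye(3))  # identity rotation -> array([0., 0., 0., 1.])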
Example 33
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def from_numpy(cls, numpy_arr):
        # assumes input as (x,y,wx,wy,h,angle)
        assert isinstance(numpy_arr, np.ndarray)
        assert (numpy_arr.ndim == 1 and numpy_arr.size == 6) or (numpy_arr.ndim == 2 and numpy_arr.shape[1] == 6)
        if numpy_arr.ndim == 1:
            return Box(numpy_arr[0], numpy_arr[1], numpy_arr[2], numpy_arr[3], numpy_arr[5], numpy_arr[4])
        else:
            return [Box(numpy_arr[i, 0], numpy_arr[i, 1], numpy_arr[i, 2],
                        numpy_arr[i, 3], numpy_arr[i, 5], numpy_arr[i, 4]) for i in range(numpy_arr.shape[0])] 
Example 34
Project: kuaa   Author: rafaelwerneck   File: hashfile.py    GNU General Public License v3.0
def loadVars(self, *args):
        """
        """
        assert len(args) > 0

        try:
            data = np.load(self.__hashname)
            vartypes = dict(map(tuple, data['vartypes']))
            ret = [np.array(data[arg])
                     if vartypes[arg] == np.ndarray
                     else vartypes[arg](data[arg]) for arg in args]
        except:
            raise Exception

        return ret if len(ret) > 1 else ret[0] 
Example 35
Project: kuaa   Author: rafaelwerneck   File: common.py    GNU General Public License v3.0
def printDataset(labels, feats):
    if isinstance(labels, np.ndarray):
        labels = labels.tolist()
    yellow_err('######################## Dataset information ########################')
    acs = list(set(labels))
    yellow_err('Total number of samples:       {0}'.format(len(labels)))
    yellow_err('Available classes:             {0}'.format(acs))
    yellow_err('Feature vector size:           {0}'.format(len(feats[0])))
    for clss in acs:
        yellow_err('Number of samples of the class {0}: {1}'.format(clss, labels.count(clss)))
    yellow_err('######################################################################') 
Example 36
Project: kuaa   Author: rafaelwerneck   File: common.py    GNU General Public License v3.0
def loadFeatures(file_name):
    """
    Input:
    file_name :: String
    file_name: The name of the file containing the feature vectors.
    Each element must be separated by white space.
    The first element must be an integer n, such that n > 0,
    indicating the label of the features.

    Output:
    labels :: np.array([Int])
    labels: Array of labels.
    feats :: np.array([[Float]])
    feats: Each internal vector is a feature vector.
    """
    assert isinstance(file_name, str)

    import numpy as np

    with open(file_name) as fp:
        data = fp.readlines()

        data = np.array([[float(v) for v in line.split()] for line in data])

        labels = data[:, 0].astype('int')
        feats = data[:, 1:]

    assert isinstance(labels, np.ndarray)
    assert isinstance(feats, np.ndarray)
    assert len(set(map(len, feats))) == 1

    return labels, feats 
Example 37
Project: kuaa   Author: rafaelwerneck   File: common.py    GNU General Public License v3.0
def scaleData(ftr, fte=None):
    """
    np.array(feats_train) -> np.array(feats_test) -> (np.array(feats_train), np.array(feats_test))

    The resulting features will be in [0, 1].
    """
    assert isinstance(ftr, np.ndarray)
    if fte is not None:
        assert isinstance(fte, np.ndarray)

    max_column = ftr.max(axis=0)
    min_column = ftr.min(axis=0)
    dif_column = (max_column - min_column).astype('float')
    dif_column[dif_column == 0] = 1

    ftr2 = (ftr - min_column) / dif_column
    if fte is not None:
        fte2 = (fte - min_column) / dif_column

    assert isinstance(ftr2, np.ndarray)
    if fte is not None:
        assert isinstance(fte2, np.ndarray)

    if fte is not None:
        return ftr2, fte2
    else:
        return ftr2 
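
A hypothetical call, assuming the function above is in scope; the test features are scaled with the training minima and maxima:

import numpy as np

ftr = np.array([[0., 10.],
                [5., 20.]])
fte = np.array([[2.5, 15.]])
ftr2, fte2 = scaleData(ftr, fte)
# ftr2 -> array([[0., 0.],
#                [1., 1.]])
# fte2 -> array([[0.5, 0.5]])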
Example 38
Project: kuaa   Author: rafaelwerneck   File: Classifier.py    GNU General Public License v3.0
def fit(self, vftr, vltr):
        """
        The set of training features must contain no sample of an
        unknown class.

        See doc of 'Classifier' class.
        """
        assert not self._trained

        self._vftr = vftr.tolist() if isinstance(vftr, np.ndarray) else copy.deepcopy(vftr)
        self._vltr = vltr.tolist() if isinstance(vltr, np.ndarray) else copy.deepcopy(vltr)

        assert isinstance(self._vftr, list)
        assert isinstance(self._vltr, list)
        assert len(self._vftr) == len(self._vltr)
        assert len(set(map(len, self._vftr))) == 1

        self._acs = list(set(filter(lambda label: label is not None,
                                    self._vltr)))
        self._unknown_label = min(self._acs) - 999

        assert self._unknown_label not in self._acs
        assert None not in self._vltr

        if self._gridsearch:
            self.__gridsearch__()
        self.simplefit()

        del self._vftr
        del self._vltr

        self._trained = True
        magenta_err((self, 'fitted')) 
Example 39
Project: kuaa   Author: rafaelwerneck   File: Classifier.py    GNU General Public License v3.0
def classify(self, vfte, vlte=None, auxiliar=None):
        """
        The unknown testing labels must be 'None'.

        See doc of 'Classifier' class.
        """
        assert self._trained

        self._vfte = vfte.tolist() if isinstance(vfte, np.ndarray) else copy.deepcopy(vfte)
        if vlte is not None:
            self._vlte = vlte.tolist() if isinstance(vlte, np.ndarray) else copy.deepcopy(vlte)
        else:
            self._vlte = [None] * len(self._vfte)

        assert isinstance(self._vfte, list)
        assert isinstance(self._vlte, list)
        assert len(self._vfte) == len(self._vlte)
        assert len(set(map(len, self._vfte))) == 1

        assert self._unknown_label not in self._vlte
        assert all([label in self._acs
                    for label in self._vlte
                    if label is not None])

        ret = self.simpleclassify(auxiliar)

        del self._vfte
        del self._vlte

        assert self._unknown_label not in ret
        assert all([label in self._acs
                    for label in ret
                    if label is not None])

        return ret 
Example 40
Project: speedrun   Author: inferno-pytorch   File: io_logging.py    Apache License 2.0
def to_array(value):
        assert np is not None, "numpy is required for checking if value is numpy array (surprise!)."
        if torch is not None and torch.is_tensor(value):
            return value.detach().cpu().numpy()
        elif isinstance(value, np.ndarray):
            return value
        else:
            raise ValueError(f"Can't convert {value.__class__.__name__} to np.array.")

Example 41
Project: mmdetection   Author: open-mmlab   File: inference.py    Apache License 2.0
async def async_inference_detector(model, img):
    """Async inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str or np.ndarray): Either an image file path or a loaded
            image.

    Returns:
        Awaitable detection results.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]

    # We don't restore `torch.is_grad_enabled()` value during concurrent
    # inference since execution can overlap
    torch.set_grad_enabled(False)
    result = await model.aforward_test(rescale=True, **data)
    return result


Example 42
Project: mmdetection   Author: open-mmlab   File: nms_wrapper.py    Apache License 2.0
def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):
    """
    Example:
        >>> dets = np.array([[4., 3., 5., 3., 0.9],
        >>>                  [4., 3., 5., 4., 0.9],
        >>>                  [3., 1., 3., 1., 0.5],
        >>>                  [3., 1., 3., 1., 0.5],
        >>>                  [3., 1., 3., 1., 0.4],
        >>>                  [3., 1., 3., 1., 0.0]], dtype=np.float32)
        >>> iou_thr = 0.7
        >>> supressed, inds = soft_nms(dets, iou_thr, sigma=0.5)
        >>> assert len(inds) == len(supressed) == 3
    """
    if isinstance(dets, torch.Tensor):
        is_tensor = True
        dets_np = dets.detach().cpu().numpy()
    elif isinstance(dets, np.ndarray):
        is_tensor = False
        dets_np = dets
    else:
        raise TypeError(
            'dets must be either a Tensor or numpy array, but got {}'.format(
                type(dets)))

    method_codes = {'linear': 1, 'gaussian': 2}
    if method not in method_codes:
        raise ValueError('Invalid method for SoftNMS: {}'.format(method))
    new_dets, inds = soft_nms_cpu(
        dets_np,
        iou_thr,
        method=method_codes[method],
        sigma=sigma,
        min_score=min_score)

    if is_tensor:
        return dets.new_tensor(new_dets), dets.new_tensor(
            inds, dtype=torch.long)
    else:
        return new_dets.astype(np.float32), inds.astype(np.int64) 
Example 43
Project: mmdetection   Author: open-mmlab   File: recall.py    Apache License 2.0
def eval_recalls(gts,
                 proposals,
                 proposal_nums=None,
                 iou_thrs=None,
                 print_summary=True):
    """Calculate recalls.

    Args:
        gts(list or ndarray): a list of arrays of shape (n, 4)
        proposals(list or ndarray): a list of arrays of shape (k, 4) or (k, 5)
        proposal_nums(int or list of int or ndarray): top N proposals
        iou_thrs(float or list or ndarray): IoU thresholds

    Returns:
        ndarray: recalls of different ious and proposal nums
    """

    img_num = len(gts)
    assert img_num == len(proposals)

    proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)

    all_ious = []
    for i in range(img_num):
        if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
            scores = proposals[i][:, 4]
            sort_idx = np.argsort(scores)[::-1]
            img_proposal = proposals[i][sort_idx, :]
        else:
            img_proposal = proposals[i]
        prop_num = min(img_proposal.shape[0], proposal_nums[-1])
        if gts[i] is None or gts[i].shape[0] == 0:
            ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
        else:
            ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4])
        all_ious.append(ious)
    all_ious = np.array(all_ious)
    recalls = _recalls(all_ious, proposal_nums, iou_thrs)
    if print_summary:
        print_recall_summary(recalls, proposal_nums, iou_thrs)
    return recalls 
Example 44
Project: mmdetection   Author: open-mmlab   File: random_sampler.py    Apache License 2.0
def random_choice(gallery, num):
        """Random select some elements from the gallery.

        It seems that Pytorch's implementation is slower than numpy so we use
        numpy to randperm the indices.
        """
        assert len(gallery) >= num
        if isinstance(gallery, list):
            gallery = np.array(gallery)
        cands = np.arange(len(gallery))
        np.random.shuffle(cands)
        rand_inds = cands[:num]
        if not isinstance(gallery, np.ndarray):
            rand_inds = torch.from_numpy(rand_inds).long().to(gallery.device)
        return gallery[rand_inds] 
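
A hypothetical call with a numpy gallery, assuming the function above is in scope:

import numpy as np

pos_inds = np.arange(10, 20)
sampled = random_choice(pos_inds, 4)
# sampled is 4 distinct values drawn from pos_inds, e.g. array([13, 18, 11, 16])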
Example 45
Project: neural-fingerprinting   Author: StephanZheng   File: attacks.py    BSD 3-Clause "New" or "Revised" License
def get_or_guess_labels(self, x, kwargs):
        """
        Get the label to use in generating an adversarial example for x.
        The kwargs are fed directly from the kwargs of the attack.
        If 'y' is in kwargs, then assume it's an untargeted attack and
        use that as the label.
        If 'y_target' is in kwargs and is not none, then assume it's a
        targeted attack and use that as the label.
        Otherwise, use the model's prediction as the label and perform an
        untargeted attack.
        """
        import tensorflow as tf

        if 'y' in kwargs and 'y_target' in kwargs:
            raise ValueError("Can not set both 'y' and 'y_target'.")
        elif 'y' in kwargs:
            labels = kwargs['y']
        elif 'y_target' in kwargs and kwargs['y_target'] is not None:
            labels = kwargs['y_target']
        else:
            preds = self.model.get_probs(x)
            preds_max = reduce_max(preds, 1, keepdims=True)
            original_predictions = tf.to_float(tf.equal(preds, preds_max))
            labels = tf.stop_gradient(original_predictions)
        if isinstance(labels, np.ndarray):
            nb_classes = labels.shape[1]
        else:
            nb_classes = labels.get_shape().as_list()[1]
        return labels, nb_classes 
Example 46
Project: voice-recognition   Author: golabies   File: read_data.py    MIT License
def read_wave(self, name='out_put_0.wav'):
        self.name = name
        _, self.voice = wavfile.read(self.name)
        self.voice: np.ndarray
        self.voice = np.array([self.voice]) 
Example 47
Project: Keras-Unet   Author: MLearing   File: data.py    GNU General Public License v2.0
def create_train_data(self):
        # Generate .npy files from the augmented training set
        i = 0
        print('-' * 30)
        print('creating train image')
        print('-' * 30)
        count = 0
        for indir in os.listdir(self.aug_merge_path):
            path = os.path.join(self.aug_merge_path, indir)
            count += len(os.listdir(path))
        imgdatas = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
        imglabels = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
        for indir in os.listdir(self.aug_merge_path):
            trainPath = os.path.join(self.aug_train_path, indir)
            labelPath = os.path.join(self.aug_label_path, indir)
            print(trainPath, labelPath)
            imgs = glob.glob(trainPath + '/*' + '.tif')
            for imgname in imgs:
                trainmidname = imgname[imgname.rindex('/') + 1:]
                labelimgname = imgname[imgname.rindex('/') + 1:imgname.rindex('_')] + '_label.tif'
                print(trainmidname, labelimgname)
                img = load_img(trainPath + '/' + trainmidname, grayscale=True)
                label = load_img(labelPath + '/' + labelimgname, grayscale=True)
                img = img_to_array(img)
                label = img_to_array(label)
                imgdatas[i] = img
                imglabels[i] = label
                if i % 100 == 0:
                    print('Done: {0}/{1} images'.format(i, len(imgs)))
                i += 1
                print(i)
        print('loading done', imgdatas.shape)
        np.save(self.npy_path + '/augimgs_train.npy', imgdatas)            # save the 30 training images and 30 labels as .npy data
        np.save(self.npy_path + '/augimgs_mask_train.npy', imglabels)
        print('Saving to .npy files done.') 
Example 48
Project: mlimages   Author: icoxfog417   File: training.py    MIT License
def make_mean_image(self, mean_image_file=""):
        m_file = mean_image_file if mean_image_file else os.path.join(self.label_file.path, "./mean_image.png")
        l_file = FileAPI.add_ext_name(self.label_file.path, "_used_in_mean")
        _, ext = os.path.splitext(os.path.basename(m_file))
        im_iterator = self.label_file._fetch_raw()

        sum_image = None
        count = 0

        with open(l_file, mode="w", encoding="utf-8") as f:
            for im, line in im_iterator:
                try:
                    converted = self.convert(im)
                    arr = converted.to_array(np)
                    if sum_image is None:
                        sum_image = np.ndarray(arr.shape)
                        sum_image[:] = arr
                    else:
                        sum_image += arr
                    count += 1
                    f.write(line)
                except:
                    pass

        mean = sum_image / count
        if ext.lower() == ".npy":
            pickle.dump(mean, open(m_file, "wb"), -1)
        else:
            mean_image = LabeledImage.from_array(mean)
            mean_image.image.save(m_file)
        self.mean_image_file = m_file
        self.label_file.path = l_file 
Example 49
Project: mlimages   Author: icoxfog417   File: training.py    MIT License
def generate_batches(self, size):
        mean = self.__load_mean()

        async def to_array(im):
            im.load()
            converted = self.convert(im)
            arr = self.__to_array(converted, mean)
            im.image = None  # don't use image any more, so release reference
            return arr, im.label

        batch = []
        loop = asyncio.get_event_loop()

        for im in self.label_file.fetch(load_image=False):
            batch.append(to_array(im))

            if len(batch) == size:
                tasks = asyncio.wait(batch)
                done, pending = loop.run_until_complete(tasks)
                results = []
                for d in done:
                    try:
                        results.append(d.result())
                    except:
                        pass

                x_sample, y_sample = results[0]
                x_batch = np.ndarray((size,) + x_sample.shape, x_sample.dtype)
                y_batch = np.ndarray((size,), np.int32)

                for j, r in enumerate(results):
                    x_batch[j], y_batch[j] = r

                yield x_batch, y_batch
                i = 0
                batch.clear()