Python numpy.ndarray() Examples
The following are 30 code examples of numpy.ndarray(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.

Example #1
Source File: coco.py From mmdetection with Apache License 2.0 | 7 votes |
def xyxy2xywh(self, bbox):
    """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
    evaluation.

    Args:
        bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
            ``xyxy`` order.

    Returns:
        list[float]: The converted bounding boxes, in ``xywh`` order.
    """
    _bbox = bbox.tolist()
    return [
        _bbox[0],
        _bbox[1],
        _bbox[2] - _bbox[0],
        _bbox[3] - _bbox[1],
    ]
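A quick way to sanity-check the conversion is to call a standalone copy of the method on a single box; `self` is unused, so it is dropped in this illustrative sketch:

import numpy as np

def xyxy2xywh(bbox):
    # standalone copy of the method above, for illustration only
    _bbox = bbox.tolist()
    return [_bbox[0], _bbox[1], _bbox[2] - _bbox[0], _bbox[3] - _bbox[1]]

bbox = np.array([10., 20., 50., 80.])   # x1, y1, x2, y2
print(xyxy2xywh(bbox))                  # [10.0, 20.0, 40.0, 60.0]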
Example #2
Source File: mean_ap.py From mmdetection with Apache License 2.0 | 7 votes |
def get_cls_results(det_results, annotations, class_id):
    """Get det results and gt information of a certain class.

    Args:
        det_results (list[list]): Same as `eval_map()`.
        annotations (list[dict]): Same as `eval_map()`.
        class_id (int): ID of a specific class.

    Returns:
        tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
    """
    cls_dets = [img_res[class_id] for img_res in det_results]
    cls_gts = []
    cls_gts_ignore = []
    for ann in annotations:
        gt_inds = ann['labels'] == class_id
        cls_gts.append(ann['bboxes'][gt_inds, :])

        if ann.get('labels_ignore', None) is not None:
            ignore_inds = ann['labels_ignore'] == class_id
            cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
        else:
            cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))
    return cls_dets, cls_gts, cls_gts_ignore
Example #3
Source File: structures.py From mmdetection with Apache License 2.0 | 7 votes |
def __init__(self, masks, height, width):
    self.height = height
    self.width = width
    if len(masks) == 0:
        self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
    else:
        assert isinstance(masks, (list, np.ndarray))
        if isinstance(masks, list):
            assert isinstance(masks[0], np.ndarray)
            assert masks[0].ndim == 2  # (H, W)
        else:
            assert masks.ndim == 3  # (N, H, W)

        self.masks = np.stack(masks).reshape(-1, height, width)
        assert self.masks.shape[1] == self.height
        assert self.masks.shape[2] == self.width
Example #4
Source File: cache.py From vergeml with MIT License | 6 votes |
def _serialize_data(self, data):

    # Default to raw bytes
    type_ = _BYTES

    if isinstance(data, np.ndarray):
        # When the data is a numpy array, use the more compact native
        # numpy format.
        buf = io.BytesIO()
        np.save(buf, data)
        data = buf.getvalue()
        type_ = _NUMPY

    elif not isinstance(data, (bytearray, bytes)):
        # Everything else except byte data is serialized in pickle format.
        data = pickle.dumps(data)
        type_ = _PICKLE

    if self.compress:
        # Optional compression
        data = lz4.frame.compress(data)

    return type_, data
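The `_NUMPY` branch relies on numpy's binary `.npy` format written into an in-memory buffer. A minimal round-trip sketch, independent of the cache class above, looks like this:

import io
import numpy as np

arr = np.arange(6, dtype=np.float32).reshape(2, 3)

# Serialize: same idea as the _NUMPY branch above
buf = io.BytesIO()
np.save(buf, arr)
payload = buf.getvalue()            # compact .npy bytes

# Deserialize (the reverse step, not part of the snippet above)
restored = np.load(io.BytesIO(payload))
assert np.array_equal(arr, restored)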
Example #5
Source File: video_transforms.py From DDPAE-video-prediction with MIT License | 6 votes |
def __call__(self, video):
    """
    Args:
        video (numpy.ndarray): Video to be scaled.
    Returns:
        numpy.ndarray: Rescaled video.
    """
    if isinstance(self.size, int):
        w, h = video.shape[-2], video.shape[-3]
        if (w <= h and w == self.size) or (h <= w and h == self.size):
            return video
        if w < h:
            ow = self.size
            oh = int(self.size * h / w)
            return resize(video, (ow, oh), self.interpolation)
        else:
            oh = self.size
            ow = int(self.size * w / h)
            return resize(video, (ow, oh), self.interpolation)
    else:
        return resize(video, self.size, self.interpolation)
Example #6
Source File: video_transforms.py From DDPAE-video-prediction with MIT License | 6 votes |
def __call__(self, video):
    """
    Args:
        video (np.ndarray): Video to be cropped.
    Returns:
        np.ndarray: Cropped video.
    """
    if self.padding > 0:
        pad = Pad(self.padding, 0)
        video = pad(video)

    w, h = video.shape[-2], video.shape[-3]
    th, tw = self.size
    if w == tw and h == th:
        return video

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return video[..., y1:y1 + th, x1:x1 + tw, :]
Example #7
Source File: _io_kernel.py From kaldi-python-io with Apache License 2.0 | 6 votes |
def read_common_mat(fd):
    """
    Read common matrix (for class Matrix in kaldi setup), see
        matrix/kaldi-matrix.cc::
            void Matrix<Real>::Read(std::istream & is, bool binary, bool add)
    Return a numpy ndarray object
    """
    mat_type = read_token(fd)
    print_info(f'\tType of the common matrix: {mat_type}')
    if mat_type not in ["FM", "DM"]:
        raise RuntimeError(f"Unknown matrix type in kaldi: {mat_type}")
    float_size = 4 if mat_type == 'FM' else 8
    float_type = np.float32 if mat_type == 'FM' else np.float64
    num_rows = read_int32(fd)
    num_cols = read_int32(fd)
    print_info(f'\tSize of the common matrix: {num_rows} x {num_cols}')
    mat_data = fd.read(float_size * num_cols * num_rows)
    mat = np.fromstring(mat_data, dtype=float_type)
    return mat.reshape(num_rows, num_cols)
Example #8
Source File: _io_kernel.py From kaldi-python-io with Apache License 2.0 | 6 votes |
def read_compress_mat(fd):
    """
    Reference to function Read in CompressMatrix
    Return a numpy ndarray object
    """
    cps_type = read_token(fd)
    print_info(f'\tFollowing matrix type: {cps_type}')
    head = struct.unpack('ffii', fd.read(16))
    print_info(f'\tCompress matrix header: {head}')
    # 8: sizeof PerColHeader
    # head: {min_value, range, num_rows, num_cols}
    num_rows, num_cols = head[2], head[3]
    if cps_type == 'CM':
        remain_size = num_cols * (8 + num_rows)
    elif cps_type == 'CM2':
        remain_size = 2 * num_rows * num_cols
    elif cps_type == 'CM3':
        remain_size = num_rows * num_cols
    else:
        throw_on_error(False, f'Unknown matrix compressing type: {cps_type}')
    # now uncompress it
    compress_data = fd.read(remain_size)
    mat = uncompress(compress_data, cps_type, head)
    return mat
Example #9
Source File: json_serializers.py From dustmaps with GNU General Public License v2.0 | 6 votes |
def serialize_ndarray_b64(o):
    """
    Serializes a :obj:`numpy.ndarray` in a format where the datatype and shape
    are human-readable, but the array data itself is binary64 encoded.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    if o.flags['C_CONTIGUOUS']:
        o_data = o.data
    else:
        o_data = np.ascontiguousarray(o).data
    data_b64 = base64.b64encode(o_data)
    return dict(
        _type='np.ndarray',
        data=data_b64.decode('utf-8'),
        dtype=o.dtype,
        shape=o.shape)
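Because the dictionary carries the dtype, shape, and a base64-encoded copy of the raw buffer, the array can be rebuilt from it. The inverse step below is a hedged sketch of that reconstruction, not necessarily the deserializer that dustmaps itself ships:

import base64
import numpy as np

o = np.arange(4, dtype=np.float64).reshape(2, 2)
d = serialize_ndarray_b64(o)   # assumes the function above is importable

# Rebuild the array from the dictionary fields
raw = base64.b64decode(d['data'])
restored = np.frombuffer(raw, dtype=np.dtype(d['dtype'])).reshape(d['shape'])
assert np.array_equal(o, restored)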
Example #10
Source File: json_serializers.py From dustmaps with GNU General Public License v2.0 | 6 votes |
def serialize_ndarray_npy(o):
    """
    Serializes a :obj:`numpy.ndarray` using numpy's built-in :obj:`save`
    function. This produces totally unreadable (and very un-JSON-like)
    results (in "npy" format), but it's basically guaranteed to work in
    100% of cases.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    with io.BytesIO() as f:
        np.save(f, o)
        f.seek(0)
        serialized = json.dumps(f.read().decode('latin-1'))
    return dict(
        _type='np.ndarray',
        npy=serialized)
Example #11
Source File: json_serializers.py From dustmaps with GNU General Public License v2.0 | 6 votes |
def deserialize_ndarray_npy(d):
    """
    Deserializes a JSONified :obj:`numpy.ndarray` that was created using
    numpy's :obj:`save` function.

    Args:
        d (:obj:`dict`): A dictionary representation of an :obj:`ndarray`
            object, created using :obj:`numpy.save`.

    Returns:
        An :obj:`ndarray` object.
    """
    with io.BytesIO() as f:
        f.write(json.loads(d['npy']).encode('latin-1'))
        f.seek(0)
        return np.load(f)
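Assuming both functions from Examples #10 and #11 are importable, a round trip through JSON is a one-liner on each side:

import json
import numpy as np

arr = np.random.default_rng(0).random((3, 3))

blob = json.dumps(serialize_ndarray_npy(arr))        # plain JSON string
restored = deserialize_ndarray_npy(json.loads(blob))
assert np.allclose(arr, restored)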
Example #12
Source File: recall.py From mmdetection with Apache License 2.0 | 6 votes |
def plot_iou_recall(recalls, iou_thrs):
    """Plot IoU-Recalls curve.

    Args:
        recalls (ndarray or list): shape (k,)
        iou_thrs (ndarray or list): same shape as `recalls`
    """
    if isinstance(iou_thrs, np.ndarray):
        _iou_thrs = iou_thrs.tolist()
    else:
        _iou_thrs = iou_thrs
    if isinstance(recalls, np.ndarray):
        _recalls = recalls.tolist()
    else:
        _recalls = recalls

    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.plot(_iou_thrs + [1.0], _recalls + [0.])
    plt.xlabel('IoU')
    plt.ylabel('Recall')
    plt.axis([iou_thrs.min(), 1, 0, 1])
    f.show()
Example #13
Source File: misc.py From mmdetection with Apache License 2.0 | 6 votes |
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert tensor to images.

    Args:
        tensor (torch.Tensor): Tensor that contains multiple images.
        mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0).
        std (tuple[float], optional): Standard deviation of images.
            Defaults to (1, 1, 1).
        to_rgb (bool, optional): Whether to convert the images to RGB format.
            Defaults to True.

    Returns:
        list[np.ndarray]: A list that contains multiple images.
    """
    num_imgs = tensor.size(0)
    mean = np.array(mean, dtype=np.float32)
    std = np.array(std, dtype=np.float32)
    imgs = []
    for img_id in range(num_imgs):
        img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
        img = mmcv.imdenormalize(
            img, mean, std, to_bgr=to_rgb).astype(np.uint8)
        imgs.append(np.ascontiguousarray(img))
    return imgs
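A hedged usage sketch, assuming `mmcv` is installed and the function above is importable; the mean/std values are the commonly used ImageNet pixel statistics and are chosen here purely for illustration:

import torch

# A fake batch of two normalized 3-channel 8x8 images
tensor = torch.randn(2, 3, 8, 8)
imgs = tensor2imgs(tensor,
                   mean=(123.675, 116.28, 103.53),
                   std=(58.395, 57.12, 57.375))
print(len(imgs), imgs[0].shape, imgs[0].dtype)   # 2 (8, 8, 3) uint8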
Example #14
Source File: structures.py From mmdetection with Apache License 2.0 | 6 votes |
def crop_and_resize(self,
                    bboxes,
                    out_shape,
                    inds,
                    device,
                    interpolation='bilinear'):
    """Crop and resize masks by the given bboxes.

    This function is mainly used in mask targets computation.
    It first aligns masks to bboxes by assigned_inds, then crops each mask
    by the assigned bbox and resizes it to the size of (mask_h, mask_w).

    Args:
        bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
        out_shape (tuple[int]): Target (h, w) of resized mask
        inds (ndarray): Indexes to assign masks to each bbox
        device (str): Device of bboxes
        interpolation (str): See `mmcv.imresize`

    Return:
        BaseInstanceMasks: the cropped and resized masks.
    """
    pass
Example #15
Source File: structures.py From mmdetection with Apache License 2.0 | 6 votes |
def crop(self, bbox):
    """See :func:`BaseInstanceMasks.crop`."""
    assert isinstance(bbox, np.ndarray)
    assert bbox.ndim == 1

    # clip the boundary
    bbox = bbox.copy()
    bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
    bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
    x1, y1, x2, y2 = bbox
    w = np.maximum(x2 - x1, 1)
    h = np.maximum(y2 - y1, 1)

    if len(self.masks) == 0:
        cropped_masks = np.empty((0, h, w), dtype=np.uint8)
    else:
        cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
    return BitmapMasks(cropped_masks, h, w)
Example #16
Source File: structures.py From mmdetection with Apache License 2.0 | 6 votes |
def __getitem__(self, index):
    """Index the polygon masks.

    Args:
        index (ndarray | List): The indices.

    Returns:
        :obj:`PolygonMasks`: The indexed polygon masks.
    """
    if isinstance(index, np.ndarray):
        index = index.tolist()
    if isinstance(index, list):
        masks = [self.masks[i] for i in index]
    else:
        try:
            masks = self.masks[index]
        except Exception:
            raise ValueError(
                f'Unsupported input of type {type(index)} for indexing!')
    if isinstance(masks[0], np.ndarray):
        masks = [masks]  # ensure a list of three levels
    return PolygonMasks(masks, self.height, self.width)
Example #17
Source File: chainer_alex.py From mlimages with MIT License | 6 votes |
def predict(limit):
    _limit = limit if limit > 0 else 5

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT,
                      mean_image_file=MEAN_IMAGE_FILE,
                      image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)

    i = 0
    for arr, im in td.generate():
        x = np.ndarray((1,) + arr.shape, arr.dtype)
        x[0] = arr
        x = chainer.Variable(np.asarray(x), volatile="on")
        y = model.predict(x)
        p = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[p], label_def[im.label]))
        im.image.show()
        i += 1
        if i >= _limit:
            break
Example #18
Source File: feature_extraction.py From Sound-Recognition-Tutorial with Apache License 2.0 | 6 votes |
def extract_mfcc(y, sr, size=3):
    """
    extract MFCC feature
    :param y: np.ndarray [shape=(n,)], the real-valued input signal (audio time series)
    :param sr: sample rate of 'y'
    :param size: the length (seconds) of random crop from original audio, default as 3 seconds
    :return: MFCC feature
    """
    # normalization
    y = y.astype(np.float32)
    normalization_factor = 1 / np.max(np.abs(y))
    y = y * normalization_factor

    # random crop
    start = random.randint(0, len(y) - size * sr)
    y = y[start: start + size * sr]

    # extract log mel spectrogram
    melspectrogram = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=2048, hop_length=1024)
    mfcc = librosa.feature.mfcc(S=librosa.power_to_db(melspectrogram), n_mfcc=20)
    mfcc_delta = librosa.feature.delta(mfcc)
    mfcc_delta_delta = librosa.feature.delta(mfcc_delta)
    mfcc_comb = np.concatenate([mfcc, mfcc_delta, mfcc_delta_delta], axis=0)

    return mfcc_comb
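A usage sketch with a synthetic signal, assuming `librosa` is installed and the function above is importable; the sample rate and duration are arbitrary choices for the example:

import numpy as np

sr = 22050
y = np.random.default_rng(0).standard_normal(5 * sr).astype(np.float32)

features = extract_mfcc(y, sr, size=3)
# 20 MFCCs + 20 deltas + 20 delta-deltas stacked along axis 0
print(features.shape)   # (60, n_frames)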
Example #19
Source File: SampleIO.py From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License | 5 votes |
def extract_sample(img, image_mean=None, resize=-1):
    """Extract image content from image string or from file

    TAKE:
        input - either file content as string or numpy array
        image_mean - numpy array of image mean or a value of size (1, 3)
        resize - to resize image, set resize > 0; otherwise, don't resize
    """
    try:
        # if input is a file name, then read image; otherwise decode_imgstr
        if type(img) is np.ndarray:
            img_data = img
        else:
            img_data = decode_imgstr(img)
        if type(resize) in [tuple, list]:
            # resize in two dimensions
            img_data = scipy.misc.imresize(img_data, (resize[0], resize[1]))
        elif resize > 0:
            img_data = scipy.misc.imresize(img_data, (resize, resize))
        img_data = img_data.astype(np.float32, copy=False)
        img_data = img_data[:, :, ::-1]  # change channel order for caffe
        img_data = img_data.transpose(2, 0, 1)  # to CxHxW
        # substract_mean
        if image_mean is not None:
            img_data = substract_mean(img_data, image_mean)
        return img_data
    except:
        print(sys.exc_info()[0], sys.exc_info()[1])
        return
Example #20
Source File: env.py From vergeml with MIT License | 5 votes |
def _convert(self, vals):
    res = {}
    for k, v in vals.items():
        if isinstance(v, (np.int, np.int8, np.int16, np.int32, np.int64)):
            v = int(v)
        elif isinstance(v, (np.float, np.float16, np.float32, np.float64)):
            v = float(v)
        elif isinstance(v, Labels):
            v = list(v)
        elif isinstance(v, np.ndarray):
            v = v.tolist()
        elif isinstance(v, dict):
            v = self._convert(v)
        res[k] = v
    return res
Example #21
Source File: bbs.py From cat-bbs with MIT License | 5 votes |
def fix_by_image_dimensions(self, height, width=None):
    if isinstance(height, (tuple, list)):
        assert width is None
        height, width = height[0], height[1]
    elif isinstance(height, (np.ndarray, np.generic)):
        assert width is None
        height, width = height.shape[0], height.shape[1]
    else:
        assert width is not None
        assert isinstance(height, int)
        assert isinstance(width, int)

    self.x1 = int(np.clip(self.x1, 0, width - 1))
    self.x2 = int(np.clip(self.x2, 0, width - 1))
    self.y1 = int(np.clip(self.y1, 0, height - 1))
    self.y2 = int(np.clip(self.y2, 0, height - 1))

    if self.x1 > self.x2:
        self.x1, self.x2 = self.x2, self.x1
    if self.y1 > self.y2:
        self.y1, self.y2 = self.y2, self.y1

    if self.x1 == self.x2:
        if self.x1 > 0:
            self.x1 = self.x1 - 1
        else:
            self.x2 = self.x2 + 1
    if self.y1 == self.y2:
        if self.y1 > 0:
            self.y1 = self.y1 - 1
        else:
            self.y2 = self.y2 + 1

    #self.width = self.x2 - self.x1
    #self.height = self.y2 - self.y1
Example #22
Source File: io.py From aospy with Apache License 2.0 | 5 votes |
def time_label(intvl, return_val=True):
    """Create time interval label for aospy data I/O."""
    # Monthly labels are 2 digit integers: '01' for jan, '02' for feb, etc.
    if type(intvl) in [list, tuple, np.ndarray] and len(intvl) == 1:
        label = '{:02}'.format(intvl[0])
        value = np.array(intvl)
    elif type(intvl) == int and intvl in range(1, 13):
        label = '{:02}'.format(intvl)
        value = np.array([intvl])
    # Seasonal and annual time labels are short strings.
    else:
        labels = {'jfm': (1, 2, 3),
                  'fma': (2, 3, 4),
                  'mam': (3, 4, 5),
                  'amj': (4, 5, 6),
                  'mjj': (5, 6, 7),
                  'jja': (6, 7, 8),
                  'jas': (7, 8, 9),
                  'aso': (8, 9, 10),
                  'son': (9, 10, 11),
                  'ond': (10, 11, 12),
                  'ndj': (11, 12, 1),
                  'djf': (1, 2, 12),
                  'jjas': (6, 7, 8, 9),
                  'djfm': (12, 1, 2, 3),
                  'ann': range(1, 13)}
        for lbl, vals in labels.items():
            if intvl == lbl or set(intvl) == set(vals):
                label = lbl
                value = np.array(vals)
                break
    if return_val:
        return label, value
    else:
        return label
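For example (assuming the function above is importable), monthly integers, season names, and month tuples all resolve to short labels:

label, value = time_label(7)
print(label, value)        # 07 [7]

label, value = time_label('djf')
print(label, value)        # djf [ 1  2 12]

label, value = time_label((6, 7, 8))
print(label, value)        # jja [6 7 8]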
Example #23
Source File: bbox.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 5 votes |
def bbox_overlaps(boxes, query_boxes):
    """
    Parameters
    ----------
    boxes: (N, 4) ndarray or tensor or variable
    query_boxes: (K, 4) ndarray or tensor or variable
    Returns
    -------
    overlaps: (N, K) overlap between boxes and query_boxes
    """
    if isinstance(boxes, np.ndarray):
        boxes = torch.from_numpy(boxes)
        query_boxes = torch.from_numpy(query_boxes)
        # If input is ndarray, turn the overlaps back to ndarray when return
        out_fn = lambda x: x.numpy()
    else:
        out_fn = lambda x: x

    box_areas = (boxes[:, 2] - boxes[:, 0] + 1) * \
        (boxes[:, 3] - boxes[:, 1] + 1)
    query_areas = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * \
        (query_boxes[:, 3] - query_boxes[:, 1] + 1)

    iw = (torch.min(boxes[:, 2:3], query_boxes[:, 2:3].t()) -
          torch.max(boxes[:, 0:1], query_boxes[:, 0:1].t()) + 1).clamp(min=0)
    ih = (torch.min(boxes[:, 3:4], query_boxes[:, 3:4].t()) -
          torch.max(boxes[:, 1:2], query_boxes[:, 1:2].t()) + 1).clamp(min=0)
    ua = box_areas.view(-1, 1) + query_areas.view(1, -1) - iw * ih
    overlaps = iw * ih / ua
    return out_fn(overlaps)
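A hedged usage sketch with two query boxes (it assumes PyTorch is available and the function above is importable); the +1 terms mean areas follow the inclusive pixel-coordinate convention:

import numpy as np

boxes = np.array([[0., 0., 9., 9.]], dtype=np.float32)
query = np.array([[0., 0., 9., 9.],
                  [5., 5., 14., 14.]], dtype=np.float32)

print(bbox_overlaps(boxes, query))
# approx. [[1.0, 0.1429]] -- the second IoU is 25 / (100 + 100 - 25)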
Example #24
Source File: config.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 5 votes |
def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.
    """
    if type(a) is not edict:
        return

    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))

        # the types must match, too
        old_type = type(b[k])
        if old_type is not type(v):
            if isinstance(b[k], np.ndarray):
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) '
                                  'for config key: {}').format(type(b[k]),
                                                               type(v), k))

        # recursively merge dicts
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except:
                print(('Error under config key: {}'.format(k)))
                raise
        else:
            b[k] = v
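A sketch of how the merge behaves, assuming `easydict` is installed and the function above is importable; note how list values are coerced to the dtype of the existing ndarray entry:

import numpy as np
from easydict import EasyDict as edict

b = edict({'TRAIN': edict({'SCALES': np.array([600]), 'BATCH_SIZE': 128})})
a = edict({'TRAIN': edict({'SCALES': [400, 500]})})

_merge_a_into_b(a, b)
print(b.TRAIN.SCALES)       # [400 500] (ndarray, dtype preserved)
print(b.TRAIN.BATCH_SIZE)   # 128 (left untouched)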
Example #25
Source File: video_transforms.py From DDPAE-video-prediction with MIT License | 5 votes |
def __call__(self, arr):
    if isinstance(arr, np.ndarray):
        video = torch.from_numpy(np.rollaxis(arr, axis=-1, start=-3))

        if self.scale:
            return video.float().div(255)
        else:
            return video.float()
    else:
        raise NotImplementedError
Example #26
Source File: video_transforms.py From DDPAE-video-prediction with MIT License | 5 votes |
def __call__(self, video):
    """
    Args:
        video (np.ndarray): Video to be padded.
    Returns:
        np.ndarray: Padded video.
    """
    pad_width = ((0, 0), (self.padding, self.padding),
                 (self.padding, self.padding), (0, 0))
    return np.pad(video, pad_width=pad_width, mode='constant',
                  constant_values=self.fill)
Example #27
Source File: misc.py From DDPAE-video-prediction with MIT License | 5 votes |
def to_numpy(array):
    """
    :param array: Variable, GPU tensor, or CPU tensor
    :return: numpy
    """
    if isinstance(array, np.ndarray):
        return array
    if isinstance(array, torch.autograd.Variable):
        array = array.data
    if array.is_cuda:
        array = array.cpu()
    return array.numpy()
Example #28
Source File: flow.py From Traffic_sign_detection_YOLO with MIT License | 5 votes |
def return_predict(self, im):
    assert isinstance(im, np.ndarray), \
        'Image is not a np.ndarray'
    h, w, _ = im.shape
    im = self.framework.resize_input(im)
    this_inp = np.expand_dims(im, 0)
    feed_dict = {self.inp: this_inp}

    out = self.sess.run(self.out, feed_dict)[0]
    boxes = self.framework.findboxes(out)
    threshold = self.FLAGS.threshold
    boxesInfo = list()
    for box in boxes:
        tmpBox = self.framework.process_box(box, h, w, threshold)
        if tmpBox is None:
            continue
        boxesInfo.append({
            "label": tmpBox[4],
            "confidence": tmpBox[6],
            "topleft": {
                "x": tmpBox[0],
                "y": tmpBox[2]},
            "bottomright": {
                "x": tmpBox[1],
                "y": tmpBox[3]}
        })
    return boxesInfo
Example #29
Source File: predict.py From Traffic_sign_detection_YOLO with MIT License | 5 votes |
def preprocess(self, im, allobj=None):
    """
    Takes an image, returns it as a numpy tensor that is ready to be fed
    into tfnet. If there is an accompanying annotation (allobj), meaning
    this preprocessing is serving the train process, then this image will
    be transformed with random noise to augment training data, using
    scale, translation, flipping and recolor. The accompanying parsed
    annotation (allobj) will also be modified accordingly.
    """
    if type(im) is not np.ndarray:
        im = cv2.imread(im)

    if allobj is not None:  # in training mode
        result = imcv2_affine_trans(im)
        im, dims, trans_param = result
        scale, offs, flip = trans_param
        for obj in allobj:
            _fix(obj, dims, scale, offs)
            if not flip:
                continue
            obj_1_ = obj[1]
            obj[1] = dims[0] - obj[3]
            obj[3] = dims[0] - obj_1_
        im = imcv2_recolor(im)

    im = self.resize_input(im)
    if allobj is None:
        return im
    return im  # , np.array(im)  # for unit testing
Example #30
Source File: baseop.py From Traffic_sign_detection_YOLO with MIT License | 5 votes |
def _shape(tensor):  # work for both tf.Tensor & np.ndarray
    if type(tensor) in [tf.Variable, tf.Tensor]:
        return tensor.get_shape()
    else:
        return tensor.shape