Python numpy.ascontiguousarray() Examples

The following are 30 code examples showing how to use numpy.ascontiguousarray(). The examples are extracted from open source projects; the project, author, file, and license are listed above each example so you can trace each snippet back to its original source.

You may also want to check out the other available functions and classes of the numpy module.
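
Before diving into the examples, a quick refresher: numpy.ascontiguousarray(a, dtype=None) returns an array laid out contiguously in C (row-major) order, copying only when necessary. If the input is already C-contiguous and no dtype change is requested, the input itself is returned; non-contiguous views such as transposes or negative-stride slices are copied into a fresh buffer. A minimal sketch of this behavior:

import numpy as np

a = np.arange(12, dtype=np.float32).reshape(3, 4)
print(a.flags['C_CONTIGUOUS'])                 # True
print(np.ascontiguousarray(a) is a)            # True: already contiguous, no copy made

b = a[::-1]                                    # reversed view -> negative stride
print(b.flags['C_CONTIGUOUS'])                 # False
c = np.ascontiguousarray(b)                    # contiguous copy of the view
print(c.flags['C_CONTIGUOUS'])                 # True

d = np.ascontiguousarray(a, dtype=np.float64)  # enforce contiguity and dtype in one call
print(d.dtype, d.flags['C_CONTIGUOUS'])        # float64 True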

Example 1
Project: dustmaps   Author: gregreen   File: json_serializers.py   License: GNU General Public License v2.0
def serialize_ndarray_b64(o):
    """
    Serializes a :obj:`numpy.ndarray` in a format where the datatype and shape are
    human-readable, but the array data itself is base64-encoded.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    if o.flags['C_CONTIGUOUS']:
        o_data = o.data
    else:
        o_data = np.ascontiguousarray(o).data
    data_b64 = base64.b64encode(o_data)
    return dict(
        _type='np.ndarray',
        data=data_b64.decode('utf-8'),
        dtype=o.dtype,
        shape=o.shape) 
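
For completeness, a matching decoder for this format could look like the sketch below. It is an illustration based on the dictionary keys produced above, not code from the dustmaps project; the function name is hypothetical.

import base64
import numpy as np

def deserialize_ndarray_b64(d):
    # Hypothetical counterpart to serialize_ndarray_b64 above: decode the base64
    # payload and reinterpret it using the stored dtype and shape.
    raw = base64.b64decode(d['data'])
    arr = np.frombuffer(raw, dtype=np.dtype(d['dtype']))  # read-only view of the bytes
    return arr.reshape(d['shape']).copy()                 # copy() to get a writable array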
Example 2
Project: mmdetection   Author: open-mmlab   File: misc.py   License: Apache License 2.0
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert tensor to images.

    Args:
        tensor (torch.Tensor): Tensor that contains multiple images
        mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0).
        std (tuple[float], optional): Standard deviation of images.
            Defaults to (1, 1, 1).
        to_rgb (bool, optional): Whether convert the images to RGB format.
            Defaults to True.

    Returns:
        list[np.ndarray]: A list that contains multiple images.
    """
    num_imgs = tensor.size(0)
    mean = np.array(mean, dtype=np.float32)
    std = np.array(std, dtype=np.float32)
    imgs = []
    for img_id in range(num_imgs):
        img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
        img = mmcv.imdenormalize(
            img, mean, std, to_bgr=to_rgb).astype(np.uint8)
        imgs.append(np.ascontiguousarray(img))
    return imgs 
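
The transpose(1, 2, 0) above turns each CHW tensor slice into an HWC image but yields a non-contiguous view; np.ascontiguousarray() then materializes it into a plain buffer, which consumers such as OpenCV's in-place drawing functions require. A standalone sketch of the same pattern, without the mmcv denormalization step:

import numpy as np

chw = np.zeros((3, 64, 48), dtype=np.float32)  # one channel-first image
hwc = chw.transpose(1, 2, 0)                   # HWC view of the same memory
print(hwc.flags['C_CONTIGUOUS'])               # False

img = np.ascontiguousarray(hwc)                # contiguous HWC copy
print(img.flags['C_CONTIGUOUS'], img.shape)    # True (64, 48, 3)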
Example 3
Project: DOTA_models   Author: ringringyi   File: swiftshader_renderer.py   License: Apache License 2.0
def load_default_object(self):
    v = np.array([[0.0, 0.5, 0.0, 1.0, 1.0, 0.0, 1.0],
                  [-0.5, -0.5, 0.0, 1.0, 0.0, 1.0, 1.0],
                  [0.5, -0.5, 0.0, 1.0, 1.0, 1.0, 1.0]], dtype=np.float32)
    v = np.concatenate((v,v+0.1), axis=0)
    v = np.ascontiguousarray(v, dtype=np.float32)

    vbo = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, vbo)
    glBufferData(GL_ARRAY_BUFFER, v.dtype.itemsize*v.size, v, GL_STATIC_DRAW)
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 28, ctypes.c_void_p(0))
    glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 28, ctypes.c_void_p(12))
    glEnableVertexAttribArray(0)
    glEnableVertexAttribArray(1)

    self.num_to_render = 6 
Example 4
Project: DOTA_models   Author: ringringyi   File: swiftshader_renderer.py   License: Apache License 2.0
def _load_mesh_into_gl(self, mesh, material):
    vvt = np.concatenate((mesh.vertices, mesh.texturecoords[0,:,:2]), axis=1)
    vvt = np.ascontiguousarray(vvt[mesh.faces.reshape((-1)),:], dtype=np.float32)
    num = vvt.shape[0]
    vvt = np.reshape(vvt, (-1))

    vbo = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, vbo)
    glBufferData(GL_ARRAY_BUFFER, vvt.dtype.itemsize*vvt.size, vvt, GL_STATIC_DRAW)

    tbo = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, tbo)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, material.shape[1],
                 material.shape[0], 0, GL_RGB, GL_UNSIGNED_BYTE,
                 np.reshape(material, (-1)))
    return num, vbo, tbo 
Example 5
Project: pruning_yolov3   Author: zbyuan   File: datasets.py   License: GNU General Public License v3.0
def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, interp=cv2.INTER_LINEAR)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Normalize RGB
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB
        img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)  # uint8 to fp16/fp32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0

        return self.sources, img, img0, None 
Example 6
Project: Pytorch-Project-Template   Author: moemen95   File: env_utils.py   License: MIT License
def get_screen(self, env):
        screen = env.render(mode='rgb_array').transpose((2, 0, 1))  # transpose into torch order (CHW)
        # Strip off the top and bottom of the screen
        screen = screen[:, 160:320]
        view_width = 320
        cart_location = self.get_cart_location(env)
        if cart_location < view_width // 2:
            slice_range = slice(view_width)
        elif cart_location > (self.screen_width - view_width // 2):
            slice_range = slice(-view_width, None)
        else:
            slice_range = slice(cart_location - view_width // 2,
                                cart_location + view_width // 2)
        # Strip off the edges, so that we have a square image centered on a cart
        screen = screen[:, :, slice_range]
        # Convert to float, rescale, convert to torch tensor
        screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
        screen = torch.from_numpy(screen)
        # Resize, and add a batch dimension (BCHW)
        return resize(screen).unsqueeze(0) 
Example 7
Project: QCElemental   Author: MolSSI   File: serialization.py   License: BSD 3-Clause "New" or "Revised" License
def default(self, obj: Any) -> Any:
        try:
            return pydantic_encoder(obj)
        except TypeError:
            pass

        if isinstance(obj, np.ndarray):
            if obj.shape:
                data = {"_nd_": True, "dtype": obj.dtype.str, "data": np.ascontiguousarray(obj).tobytes().hex()}
                if len(obj.shape) > 1:
                    data["shape"] = obj.shape
                return data

            else:
                # Converts np.array(5) -> 5
                return obj.tolist()

        return json.JSONEncoder.default(self, obj) 
Example 8
Project: audio   Author: pytorch   File: kaldi_io.py   License: BSD 2-Clause "Simplified" License
def _convert_method_output_to_tensor(file_or_fd: Any,
                                     fn: Callable,
                                     convert_contiguous: bool = False) -> Iterable[Tuple[str, Tensor]]:
    r"""Takes a method invokes it. The output is converted to a tensor.

    Args:
        file_or_fd (str/FileDescriptor): File name or file descriptor
        fn (Callable): Function that has the signature (file name/descriptor) and converts it to
            Iterable[Tuple[str, Tensor]].
        convert_contiguous (bool, optional): Determines whether the array should be converted into a
            contiguous layout. (Default: ``False``)

    Returns:
        Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is vec/mat
    """
    for key, np_arr in fn(file_or_fd):
        if convert_contiguous:
            np_arr = np.ascontiguousarray(np_arr)
        yield key, torch.from_numpy(np_arr) 
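
The convert_contiguous flag exists because torch.from_numpy() shares memory with the source array and rejects NumPy arrays with negative strides. A small illustration of the failure mode that np.ascontiguousarray() guards against, assuming a recent PyTorch (the exact error message varies by version):

import numpy as np
import torch

a = np.arange(6, dtype=np.float32)[::-1]       # reversed view -> negative stride
try:
    torch.from_numpy(a)                        # typically raises ValueError about negative strides
except ValueError as err:
    print(err)

t = torch.from_numpy(np.ascontiguousarray(a))  # contiguous copy converts cleanly
print(t)                                       # tensor([5., 4., 3., 2., 1., 0.])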
Example 9
def load_image(img_path, net_input_shape):
    imgBGR = cv2.imread(img_path)
    img = cv2.resize(imgBGR, net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [128.0, 128.0, 128.0] # Caffe image mean
    # mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)/128.0
    imgS = np.transpose(imgSS, (2, 0, 1))  # c,h,w

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return imgBGR, np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous 
Example 10
Project: iAI   Author: aimuch   File: call_engine_to_infer_all.py   License: MIT License
def load_image(img_path, net_input_shape):
    img = cv2.resize(cv2.imread(img_path), net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)
    imgS = np.transpose(imgSS, (2, 0, 1))  # CHW

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous 
Example 11
Project: iAI   Author: aimuch   File: call_engine_to_infer_all_analysis_error_6classes.py   License: MIT License
def load_image(img_path, net_input_shape):
    imgBGR = cv2.imread(img_path)
    img = cv2.resize(imgBGR, net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [128.0, 128.0, 128.0] # Caffe image mean
    # mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)/128.0
    imgS = np.transpose(imgSS, (2, 0, 1))  # c,h,w

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return imgBGR, np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous 
Example 12
Project: iAI   Author: aimuch   File: call_engine_to_infer_one.py   License: MIT License
def load_image(img_path, net_input_shape):
    img = cv2.resize(cv2.imread(img_path), net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)
    imgS = np.transpose(imgSS, (2, 0, 1))  # CHW

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return np.ascontiguousarray(imgS, dtype=np.float32)   # avoid error: ndarray is not contiguous 
Example 13
Project: iAI   Author: aimuch   File: call_engine_to_infer_all_print_predict_on_image.py   License: MIT License
def load_image(img_path, net_input_shape):
    imgBGR = cv2.imread(img_path)
    img = cv2.resize(imgBGR, net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)
    imgS = np.transpose(imgSS, (2, 0, 1))  # c,h,w

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return imgBGR, np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous 
Example 14
Project: pyGSTi   Author: pyGSTio   File: termforwardsim.py   License: Apache License 2.0
def _fill_hprobs_block(self, mxToFill, dest_indices, dest_param_indices1,
                           dest_param_indices2, evalTree, param_slice1, param_slice2,
                           comm=None, memLimit=None):
        if param_slice1 is None or param_slice1.start is None: param_slice1 = slice(0, self.Np)
        if param_slice2 is None or param_slice2.start is None: param_slice2 = slice(0, self.Np)
        if dest_param_indices1 is None: dest_param_indices1 = slice(0, _slct.length(param_slice1))
        if dest_param_indices2 is None: dest_param_indices2 = slice(0, _slct.length(param_slice2))

        if self.mode == "direct":
            raise NotImplementedError("hprobs does not support direct path-integral evaluation yet")
            # hprobs = self.hprs_directly(evalTree, ...)
        else:  # "pruned" or "taylor order"
            # evaluate derivative of polys
            nEls = evalTree.num_final_elements()
            polys = evalTree.merged_compact_polys
            wrtInds1 = _np.ascontiguousarray(_slct.indices(param_slice1), _np.int64)
            wrtInds2 = _np.ascontiguousarray(_slct.indices(param_slice2), _np.int64)
            dpolys = _compact_deriv(polys[0], polys[1], wrtInds1)
            hpolys = _compact_deriv(dpolys[0], dpolys[1], wrtInds2)
            hprobs = _safe_bulk_eval_compact_polys(
                hpolys[0], hpolys[1], self.paramvec, (nEls, len(wrtInds1), len(wrtInds2)))
        _fas(mxToFill, [dest_indices, dest_param_indices1, dest_param_indices2], hprobs) 
Example 15
Project: pyGSTi   Author: pyGSTio   File: spamvec.py   License: Apache License 2.0
def __init__(self, outcomes):
        """
        Initialize a StabilizerEffectVec object.

        Parameters
        ----------
        outcomes : iterable
            A list or other iterable of integer 0 or 1 outcomes specifying
            which POVM effect vector this object represents within the
            full `stabilizerPOVM`
        """
        self._outcomes = _np.ascontiguousarray(_np.array(outcomes, int), _np.int64)
        #Note: dtype='i' => int in Cython, whereas dtype=int/np.int64 => long in Cython
        rep = replib.SBEffectRep(self._outcomes)  # dim == 2**nqubits == 2**len(outcomes)
        SPAMVec.__init__(self, rep, "stabilizer", "effect")

    #def torep(self, typ, outvec=None):
    #    # changes to_statevec/to_dmvec -> todense, and have
    #    # torep create an effect rep object...
    #    return replib.SBEffectRep(_np.ascontiguousarray(self._outcomes, _np.int64)) 
Example 16
Project: Parsing-R-CNN   Author: soeaver   File: boxes.py   License: MIT License
def soft_nms(
    dets, sigma=0.5, overlap_thresh=0.3, score_thresh=0.001, method='linear'
):
    """Apply the soft NMS algorithm from https://arxiv.org/abs/1704.04503."""
    if dets.shape[0] == 0:
        return dets, []

    methods = {'hard': 0, 'linear': 1, 'gaussian': 2}
    assert method in methods, 'Unknown soft_nms method: {}'.format(method)

    dets, keep = cython_nms.soft_nms(
        np.ascontiguousarray(dets, dtype=np.float32),
        np.float32(sigma),
        np.float32(overlap_thresh),
        np.float32(score_thresh),
        np.uint8(methods[method])
    )
    return dets, keep 
Example 17
Project: VTuber_Unity   Author: kwea123   File: utils.py   License: MIT License
def transform_np(point, center, scale, resolution, invert=False):
    _pt = np.ones(3)
    _pt[0] = point[0]
    _pt[1] = point[1]

    h = 200.0 * scale
    t = np.eye(3)
    t[0, 0] = resolution / h
    t[1, 1] = resolution / h
    t[0, 2] = resolution * (-center[0] / h + 0.5)
    t[1, 2] = resolution * (-center[1] / h + 0.5)

    if invert:
        t = np.ascontiguousarray(np.linalg.pinv(t))

    new_point = np.dot(t, _pt)[0:2]

    return new_point.astype(np.int32) 
Example 18
Project: KAIR   Author: cszn   File: utils_image.py   License: MIT License
def augment_img_tensor(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    '''
    img_size = img.size()
    img_np = img.data.cpu().numpy()
    if len(img_size) == 3:
        img_np = np.transpose(img_np, (1, 2, 0))
    elif len(img_size) == 4:
        img_np = np.transpose(img_np, (2, 3, 1, 0))
    img_np = augment_img(img_np, mode=mode)
    img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
    if len(img_size) == 3:
        img_tensor = img_tensor.permute(2, 0, 1)
    elif len(img_size) == 4:
        img_tensor = img_tensor.permute(3, 2, 0, 1)

    return img_tensor.type_as(img) 
Example 19
def pth_nms(dets, thresh):
  """
  dets has to be a tensor
  """
  if not dets.is_cuda:
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.sort(0, descending=True)[1]
    # order = torch.from_numpy(np.ascontiguousarray(scores.numpy().argsort()[::-1])).long()

    keep = torch.LongTensor(dets.size(0))
    num_out = torch.LongTensor(1)
    nms.cpu_nms(keep, num_out, dets, order, areas, thresh)

    return keep[:num_out[0]]
  else:
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.sort(0, descending=True)[1]
    # order = torch.from_numpy(np.ascontiguousarray(scores.cpu().numpy().argsort()[::-1])).long().cuda()

    dets = dets[order].contiguous()

    keep = torch.LongTensor(dets.size(0))
    num_out = torch.LongTensor(1)
    # keep = torch.cuda.LongTensor(dets.size(0))
    # num_out = torch.cuda.LongTensor(1)
    nms.gpu_nms(keep, num_out, dets, thresh)

    return order[keep[:num_out[0]].cuda()].contiguous()
    # return order[keep[:num_out[0]]].contiguous() 
Example 20
Project: mmdetection   Author: open-mmlab   File: formating.py   License: Apache License 2.0
def __call__(self, results):
        """Call function to transform and format common fields in results.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data that is formatted with
                default bundle.
        """

        if 'img' in results:
            img = results['img']
            # add default meta keys
            results = self._add_default_meta_keys(results)
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
            if key not in results:
                continue
            results[key] = DC(to_tensor(results[key]))
        if 'gt_masks' in results:
            results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
        if 'gt_semantic_seg' in results:
            results['gt_semantic_seg'] = DC(
                to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
        return results 
Example 21
Project: pruning_yolov3   Author: zbyuan   File: datasets.py   License: GNU General Public License v3.0
def __next__(self):
        if self.count == self.nF:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nF:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: ' % (self.count, self.nF, path), end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB
        img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)  # uint8 to fp16/fp32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0

        # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1])  # save letterbox image
        return path, img, img0, self.cap 
Example 22
Project: pruning_yolov3   Author: zbyuan   File: datasets.py   License: GNU General Public License v3.0
def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB
        img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)  # uint8 to fp16/fp32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0

        return img_path, img, img0, None 
Example 23
Project: QCElemental   Author: MolSSI   File: serialization.py   License: BSD 3-Clause "New" or "Revised" License
def msgpackext_encode(obj: Any) -> Any:
    """
    Encodes an object using pydantic and NumPy array serialization techniques suitable for msgpack.

    Parameters
    ----------
    obj : Any
        Any object that can be serialized with pydantic and NumPy encoding techniques.

    Returns
    -------
    Any
        A msgpack compatible form of the object.
    """

    # First try pydantic base objects
    try:
        return pydantic_encoder(obj)
    except TypeError:
        pass

    if isinstance(obj, np.ndarray):
        if obj.shape:
            data = {b"_nd_": True, b"dtype": obj.dtype.str, b"data": np.ascontiguousarray(obj).tobytes()}
            if len(obj.shape) > 1:
                data[b"shape"] = obj.shape
            return data

        else:
            # Converts np.array(5) -> 5
            return obj.tolist()

    return obj 
Example 24
Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: nms_wrapper.py   License: MIT License
def soft_nms(dets, sigma=0.5, Nt=0.3, threshold=0.001, method=1):
    keep = cpu_soft_nms(np.ascontiguousarray(dets, dtype=np.float32),
                        np.float32(sigma), np.float32(Nt),
                        np.float32(threshold),
                        np.uint8(method))
    keep = np.array(keep)
    return keep 
Example 25
Project: PSMNet   Author: JiaRenChang   File: SecenFlowLoader.py   License: MIT License
def __getitem__(self, index):
        left  = self.left[index]
        right = self.right[index]
        disp_L= self.disp_L[index]


        left_img = self.loader(left)
        right_img = self.loader(right)
        dataL, scaleL = self.dploader(disp_L)
        dataL = np.ascontiguousarray(dataL,dtype=np.float32)

        if self.training:  
            w, h = left_img.size
            th, tw = 256, 512

            x1 = random.randint(0, w - tw)
            y1 = random.randint(0, h - th)

            left_img = left_img.crop((x1, y1, x1 + tw, y1 + th))
            right_img = right_img.crop((x1, y1, x1 + tw, y1 + th))

            dataL = dataL[y1:y1 + th, x1:x1 + tw]

            processed = preprocess.get_transform(augment=False)  
            left_img   = processed(left_img)
            right_img  = processed(right_img)

            return left_img, right_img, dataL
        else:
            processed = preprocess.get_transform(augment=False)  
            left_img       = processed(left_img)
            right_img      = processed(right_img) 
            return left_img, right_img, dataL 
Example 26
Project: DeepLung   Author: uci-cbcl   File: data.py   License: GNU General Public License v3.0
def augment(sample, target, bboxes, coord, ifflip = True, ifrotate=True, ifswap = True):
    #                     angle1 = np.random.rand()*180
    if ifrotate:
        validrot = False
        counter = 0
        while not validrot:
            newtarget = np.copy(target)
            angle1 = np.random.rand()*180
            size = np.array(sample.shape[2:4]).astype('float')
            rotmat = np.array([[np.cos(angle1/180*np.pi),-np.sin(angle1/180*np.pi)],[np.sin(angle1/180*np.pi),np.cos(angle1/180*np.pi)]])
            newtarget[1:3] = np.dot(rotmat,target[1:3]-size/2)+size/2
            if np.all(newtarget[:3]>target[3]) and np.all(newtarget[:3]< np.array(sample.shape[1:4])-newtarget[3]):
                validrot = True
                target = newtarget
                sample = rotate(sample,angle1,axes=(2,3),reshape=False)
                coord = rotate(coord,angle1,axes=(2,3),reshape=False)
                for box in bboxes:
                    box[1:3] = np.dot(rotmat,box[1:3]-size/2)+size/2
            else:
                counter += 1
                if counter ==3:
                    break
    if ifswap:
        if sample.shape[1]==sample.shape[2] and sample.shape[1]==sample.shape[3]:
            axisorder = np.random.permutation(3)
            sample = np.transpose(sample,np.concatenate([[0],axisorder+1]))
            coord = np.transpose(coord,np.concatenate([[0],axisorder+1]))
            target[:3] = target[:3][axisorder]
            bboxes[:,:3] = bboxes[:,:3][:,axisorder]
            
    if ifflip:
#         flipid = np.array([np.random.randint(2),np.random.randint(2),np.random.randint(2)])*2-1
        flipid = np.array([1,np.random.randint(2),np.random.randint(2)])*2-1
        sample = np.ascontiguousarray(sample[:,::flipid[0],::flipid[1],::flipid[2]])
        coord = np.ascontiguousarray(coord[:,::flipid[0],::flipid[1],::flipid[2]])
        for ax in range(3):
            if flipid[ax]==-1:
                target[ax] = np.array(sample.shape[ax+1])-target[ax]
                bboxes[:,ax]= np.array(sample.shape[ax+1])-bboxes[:,ax]
    return sample, target, bboxes, coord 
Example 27
Project: DeepLung   Author: uci-cbcl   File: prepare.py   License: GNU General Public License v3.0
def process_mask(mask):
    convex_mask = np.copy(mask)
    for i_layer in range(convex_mask.shape[0]):
        mask1  = np.ascontiguousarray(mask[i_layer])
        if np.sum(mask1)>0:
            mask2 = convex_hull_image(mask1)
            if np.sum(mask2)>1.5*np.sum(mask1):
                mask2 = mask1
        else:
            mask2 = mask1
        convex_mask[i_layer] = mask2
    struct = generate_binary_structure(3,1)  
    dilatedMask = binary_dilation(convex_mask,structure=struct,iterations=10) 
    return dilatedMask 
Example 28
Project: pymoo   Author: msu-coinlab   File: misc.py   License: Apache License 2.0
def unique_rows(a):
    a = np.ascontiguousarray(a)
    unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1]))
    return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1])) 
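
np.ascontiguousarray() is needed here because reinterpreting each row as a single structured element (the .view(...) trick) requires every row to occupy one contiguous block of memory. A short usage sketch, assuming the unique_rows definition above; note that on NumPy 1.13 and later, np.unique(a, axis=0) gives the same result without the view trick:

import numpy as np

a = np.array([[1, 2], [3, 4], [1, 2]])
print(unique_rows(a))
# [[1 2]
#  [3 4]]

print(np.unique(a, axis=0))  # NumPy >= 1.13: same result, no structured view needed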
Example 29
Project: Voice_Converter_CycleGAN   Author: leimao   File: preprocess.py   License: MIT License
def world_decode_spectral_envelop(coded_sp, fs):

    fftlen = pyworld.get_cheaptrick_fft_size(fs)
    #coded_sp = coded_sp.astype(np.float32)
    #coded_sp = np.ascontiguousarray(coded_sp)
    decoded_sp = pyworld.decode_spectral_envelope(coded_sp, fs, fftlen)

    return decoded_sp 
Example 30
Project: pyscf   Author: pyscf   File: shci.py   License: Apache License 2.0
def make_rdm12(self, state, norb, nelec, link_index=None, **kwargs):
        nelectrons = 0
        if isinstance(nelec, (int, numpy.integer)):
            nelectrons = nelec
        else:
            nelectrons = nelec[0] + nelec[1]

        # The 2RDMs written by "SHCIrdm::saveRDM" in DICE
        # are written as E2[i1,j2,k1,l2]
        # and stored here as E2[i1,k1,j2,l2] (for PySCF purposes)
        # This is NOT done with SQA in mind.
        twopdm = numpy.zeros((norb, norb, norb, norb))
        file2pdm = "spatialRDM.%d.%d.txt" % (state, state)
        # file2pdm = file2pdm.encode()  # .encode for python3 compatibility
        r2RDM(twopdm, norb, os.path.join(self.scratchDirectory, file2pdm).encode())

        # Symmetry addon
        if (self.groupname == "Dooh" or self.groupname == "Coov") and self.useExtraSymm:
            nRows, rowIndex, rowCoeffs = DinfhtoD2h(self, norb, nelec)
            twopdmcopy = 1.0 * twopdm
            twopdm = 0.0 * twopdm
            transformRDMDinfh(
                norb,
                numpy.ascontiguousarray(nRows, numpy.int32),
                numpy.ascontiguousarray(rowIndex, numpy.int32),
                numpy.ascontiguousarray(rowCoeffs, numpy.float64),
                numpy.ascontiguousarray(twopdmcopy, numpy.float64),
                numpy.ascontiguousarray(twopdm, numpy.float64),
            )
            twopdmcopy = None

        # (This is coherent with previous statement about indexes)
        onepdm = numpy.einsum("ikjj->ki", twopdm)
        onepdm /= nelectrons - 1
        return onepdm, twopdm