Python cv2.resize() Examples

The following are 30 code examples of cv2.resize(), collected from open-source projects. The source file, project, and license are noted above each example.
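As a quick orientation before the examples, here is a minimal, self-contained sketch of the call signature (the synthetic image and sizes below are illustrative only, not taken from any of the projects):

import cv2
import numpy as np

# A synthetic 480x640 BGR image; numpy arrays are indexed (height, width, channels).
img = np.zeros((480, 640, 3), dtype=np.uint8)

# dsize is given as (width, height) -- the reverse of the numpy shape order.
half = cv2.resize(img, (320, 240), interpolation=cv2.INTER_AREA)

# Alternatively, pass dsize=None and scale by the factors fx and fy.
double = cv2.resize(img, None, fx=2.0, fy=2.0, interpolation=cv2.INTER_LINEAR)

print(half.shape, double.shape)  # (240, 320, 3) (960, 1280, 3)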
Example #1
Source File: video_transforms.py    From DDPAE-video-prediction with MIT License
def resize(video, size, interpolation):
  if interpolation == 'bilinear':
    inter = cv2.INTER_LINEAR
  elif interpolation == 'nearest':
    inter = cv2.INTER_NEAREST
  else:
    raise NotImplementedError

  shape = video.shape[:-3]
  video = video.reshape((-1, *video.shape[-3:]))
  resized_video = np.zeros((video.shape[0], size[1], size[0], video.shape[-1]))
  for i in range(video.shape[0]):
    img = cv2.resize(video[i], size, interpolation=inter)  # the third positional arg of cv2.resize is dst, so pass interpolation by keyword
    if len(img.shape) == 2:
      img = img[:, :, np.newaxis]
    resized_video[i] = img
  return resized_video.reshape((*shape, size[1], size[0], video.shape[-1])) 
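A hypothetical call of the function above, assuming a video batch shaped (frames, height, width, channels):

import numpy as np

video = np.random.rand(16, 64, 64, 3)      # 16 frames of 64x64 RGB
out = resize(video, (32, 32), 'bilinear')  # size is (width, height)
print(out.shape)                           # (16, 32, 32, 3)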
Example #2
Source File: preprocessor.py    From signature-recognition with MIT License
def prepare(input):
    # preprocessing the image input
    clean = cv2.fastNlMeansDenoising(input)
    ret, thresh = cv2.threshold(clean, 127, 1, cv2.THRESH_BINARY_INV)
    img = crop(thresh)

    # 40x10 image as a flattened array
    flatten_img = cv2.resize(img, (40, 10), interpolation=cv2.INTER_AREA).flatten()

    # resize to 400x100
    resized = cv2.resize(img, (400, 100), interpolation=cv2.INTER_AREA)
    columns = np.sum(resized, axis=0)  # sum of all columns
    lines = np.sum(resized, axis=1)  # sum of all lines

    h, w = img.shape
    aspect = w / h

    return [*flatten_img, *columns, *lines, aspect] 
Example #3
Source File: saliency.py    From OpenCV-Computer-Vision-Projects-with-Python with MIT License
def __init__(self, img, use_numpy_fft=True, gauss_kernel=(5, 5)):
        """Constructor

            This method initializes the saliency algorithm.

            :param img: an RGB input image
            :param use_numpy_fft: flag whether to use NumPy's FFT (True) or
                                  OpenCV's FFT (False)
            :param gauss_kernel: Kernel size for Gaussian blur
        """
        self.use_numpy_fft = use_numpy_fft
        self.gauss_kernel = gauss_kernel
        self.frame_orig = img

        # downsample image for processing
        self.small_shape = (64, 64)
        self.frame_small = cv2.resize(img, self.small_shape[1::-1])

        # whether we need to do the math (True) or it has already
        # been done (False)
        self.need_saliency_map = True 
Example #4
Source File: face_attack.py    From Adversarial-Face-Attack with GNU General Public License v3.0
def detect(self, img):
        """
        img: rgb 3 channel
        """
        minsize = 20  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # thresholds for the three cascade stages
        factor = 0.709  # scale factor

        bounding_boxes, _ = FaceDet.detect_face(
                img, minsize, self.pnet, self.rnet, self.onet, threshold, factor)
        area = (bounding_boxes[:, 2] - bounding_boxes[:, 0]) * (bounding_boxes[:, 3] - bounding_boxes[:, 1])
        face_idx = area.argmax()
        bbox = bounding_boxes[face_idx][:4]  # xy,xy

        margin = 32
        x0 = np.maximum(bbox[0] - margin // 2, 0)
        y0 = np.maximum(bbox[1] - margin // 2, 0)
        x1 = np.minimum(bbox[2] + margin // 2, img.shape[1])
        y1 = np.minimum(bbox[3] + margin // 2, img.shape[0])
        x0, y0, x1, y1 = bbox = [int(k + 0.5) for k in [x0, y0, x1, y1]]
        cropped = img[y0:y1, x0:x1, :]
        scaled = cv2.resize(cropped, (160, 160), interpolation=cv2.INTER_LINEAR)
        return scaled, bbox 
Example #5
Source File: utils.py    From DeepLab_v3 with MIT License
def multiscale_single_test(image, input_scales, predictor):
    '''
    Predict image semantic segmentation labeling using multi-scale inputs.
    Inputs:
    image: numpy array, [height, width, channel], channel = 3.
    input_scales: list of scale factors, e.g., [0.5, 1.0, 1.5].
    predictor: prediction function which takes one scaled image as input and outputs its semantic segmentation labeling.
    Returns:
    Averaged predicted logits of multi-scale inputs
    '''
    image_height_raw = image.shape[0]
    image_width_raw = image.shape[1]
    multiscale_outputs = []
    for input_scale in input_scales:
        image_height_scaled = round(image_height_raw * input_scale)
        image_width_scaled = round(image_width_raw * input_scale)
        image_scaled = cv2.resize(image, (image_width_scaled, image_height_scaled), interpolation=cv2.INTER_LINEAR)
        output = predictor(inputs=[image_scaled], target_height=image_height_raw, target_width=image_width_raw)[0]
        multiscale_outputs.append(output)

    output_mean = np.mean(multiscale_outputs, axis=0)

    return output_mean 
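A hypothetical invocation with a stub predictor (the class count of 21 is illustrative, e.g. PASCAL VOC):

import numpy as np

def predictor(inputs, target_height, target_width):
    # Stub: a real predictor would run the network on inputs[0].
    return [np.zeros((target_height, target_width, 21))]

image = np.zeros((120, 160, 3), dtype=np.uint8)
logits = multiscale_single_test(image, [0.5, 1.0, 1.5], predictor)
print(logits.shape)  # (120, 160, 21)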
Example #6
Source File: ScreenGrab.py    From BiblioPixelAnimations with MIT License
def step(self, amt=1):
        image = self._capFrame()

        if self.crop:
            image = image[self._cropY + self.yoff:self._ih - self._cropY + self.yoff,
                          self._cropX + self.xoff:self._iw - self._cropX + self.xoff]
        else:
            t, b, l, r = self._pad
            image = cv2.copyMakeBorder(
                image, t, b, l, r, cv2.BORDER_CONSTANT, value=[0, 0, 0])

        resized = cv2.resize(image, (self.width, self.height),
                             interpolation=cv2.INTER_LINEAR)
        if self.mirror:
            resized = cv2.flip(resized, 1)

        for y in range(self.height):
            for x in range(self.width):
                self.layout.set(x, y, tuple(resized[y, x][0:3])) 
Example #7
Source File: video_transforms.py    From DDPAE-video-prediction with MIT License
def __call__(self, video):
    for attempt in range(10):
      area = video.shape[-3]*video.shape[-2]
      target_area = random.uniform(0.08, 1.0)*area
      aspect_ratio = random.uniform(3./4, 4./3)

      w = int(round(math.sqrt(target_area*aspect_ratio)))
      h = int(round(math.sqrt(target_area/aspect_ratio)))

      if random.random() < 0.5:
        w, h = h, w

      if w <= video.shape[-2] and h <= video.shape[-3]:
        x1 = random.randint(0, video.shape[-2]-w)
        y1 = random.randint(0, video.shape[-3]-h)

        video = video[..., y1:y1+h, x1:x1+w, :]

        return resize(video, (self.size, self.size), self.interpolation)

    # Fallback
    scale = Scale(self.size, interpolation=self.interpolation)
    crop = CenterCrop(self.size)
    return crop(scale(video)) 
Example #8
Source File: utils.py    From DeepLab_v3 with MIT License
def multiscale_single_validate(image, label, input_scales, validator):

    image_height_raw = image.shape[0]
    image_width_raw = image.shape[1]
    multiscale_outputs = []
    multiscale_losses = []
    for input_scale in input_scales:
        image_height_scaled = round(image_height_raw * input_scale)
        image_width_scaled = round(image_width_raw * input_scale)
        image_scaled = cv2.resize(image, (image_width_scaled, image_height_scaled), interpolation=cv2.INTER_LINEAR)
        output, loss = validator(inputs=[image_scaled], target_height=image_height_raw, target_width=image_width_raw, labels=[label])
        multiscale_outputs.append(output[0])
        multiscale_losses.append(loss)

    output_mean = np.mean(multiscale_outputs, axis=0)
    loss_mean = np.mean(multiscale_losses)

    return output_mean, loss_mean 
Example #9
Source File: video_transforms.py    From DDPAE-video-prediction with MIT License
def __call__(self, video):
    """
    Args:
        video (numpy.ndarray): Video to be scaled.
    Returns:
        numpy.ndarray: Rescaled video.
    """
    if isinstance(self.size, int):
      w, h = video.shape[-2], video.shape[-3]
      if (w <= h and w == self.size) or (h <= w and h == self.size):
        return video
      if w < h:
        ow = self.size
        oh = int(self.size*h/w)
        return resize(video, (ow, oh), self.interpolation)
      else:
        oh = self.size
        ow = int(self.size*w/h)
        return resize(video, (ow, oh), self.interpolation)
    else:
      return resize(video, self.size, self.interpolation) 
Example #10
Source File: datasets.py    From pytorch-segmentation-toolbox with MIT License
def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        image = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image = (image - image.min()) / (image.max() - image.min())
        
        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT, 
                value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))
        return image, np.array(size), name 
Example #11
Source File: demo.py    From ICDAR-2019-SROIE with MIT License
def resize_image(img):
    img_size = img.shape
    im_size_min = np.min(img_size[0:2])
    im_size_max = np.max(img_size[0:2])

    im_scale = float(600) / float(im_size_min)
    if np.round(im_scale * im_size_max) > 1200:
        im_scale = float(1200) / float(im_size_max)
    new_h = int(img_size[0] * im_scale)
    new_w = int(img_size[1] * im_scale)

    new_h = new_h if new_h % 16 == 0 else (new_h // 16 + 1) * 16  # round up to a multiple of 16
    new_w = new_w if new_w % 16 == 0 else (new_w // 16 + 1) * 16

    re_im = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    return re_im, (new_h / img_size[0], new_w / img_size[1]) 
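A hypothetical call showing the rounding behavior (with the % fix above, each output side is a multiple of 16):

import numpy as np

img = np.zeros((500, 375, 3), dtype=np.uint8)
re_im, (rh, rw) = resize_image(img)
print(re_im.shape)  # (800, 608, 3): the short side scaled to 600, then rounded up to a multiple of 16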
Example #12
Source File: ocr_predict.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def forward_ocr(self, img_):
        img_ = cv2.resize(img_, (80, 30))
        img_ = img_.transpose(1, 0)
        print(img_.shape)
        img_ = img_.reshape((1, 80, 30))
        print(img_.shape)
        # img_ = img_.reshape((80 * 30))
        img_ = np.multiply(img_, 1 / 255.0)
        self.predictor.forward(data=img_, **self.init_state_dict)
        prob = self.predictor.get_output(0)
        label_list = []
        for p in prob:
            print(np.argsort(p))
            max_index = np.argsort(p)[::-1][0]
            label_list.append(max_index)
        return self.__get_string(label_list) 
Example #13
Source File: train_featurizer.py    From HardRLWithYoutube with MIT License
def generate_dataset(videos_path, framerate, width, height):
    """Converts videos from specified path to ndarrays of shape [numberOfVideos, -1, width, height, 1]

    Args:
        videos_path: Inside the 'videos/' directory, the name of the subdirectory for videos.
        framerate: The desired framerate of the dataset.
        width: The width we will resize the videos to.
        height: The height we will resize the videos to.

    Returns:
        The dataset with the new size and framerate, and converted to monochromatic.

    """
    dataset = []
    video_index = 0
    for playlist in os.listdir('videos/' + videos_path):
        for video_name in os.listdir('videos/{}/{}'.format(videos_path, playlist)):
            dataset.append([])
            print('Video: {}'.format(video_name))
            video = cv2.VideoCapture('videos/{}/{}/{}'.format(videos_path, playlist, video_name))
            while video.isOpened():
                success, frame = video.read()
                if success:
                    frame = preprocess_image(frame, width, height)
                    dataset[video_index].append(frame)

                    frame_index = video.get(cv2.CAP_PROP_POS_FRAMES)
                    video_framerate = video.get(cv2.CAP_PROP_FPS)
                    video.set(cv2.CAP_PROP_POS_FRAMES, frame_index + video_framerate // framerate)
                    last_frame_index = video.get(cv2.CAP_PROP_FRAME_COUNT)
                    if frame_index >= last_frame_index:
                        # Video is over
                        break

                else:
                    break
            video.release()  # release the capture handle
            dataset[video_index] = np.reshape(dataset[video_index], (-1, width, height, 1))
            video_index += 1
    return dataset 
Example #14
Source File: image.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def resize(im, short, max_size):
    """
    only resize input image to target size and return scale
    :param im: BGR image input by opencv
    :param short: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :return: resized image (NDArray) and scale (float)
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(short) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale 
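A hypothetical call: the short side is scaled to 600 px unless that would push the long side past max_size:

import numpy as np

im = np.zeros((480, 640, 3), dtype=np.uint8)
resized, scale = resize(im, short=600, max_size=1000)
print(resized.shape, scale)  # (600, 800, 3) 1.25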
Example #15
Source File: data.py    From kuzushiji-recognition with MIT License
def __getitem__(self, index, to_tensor=True):
    fn = self.image_fns[index]
    img = cv2.cvtColor(cv2.imread(fn, 1), cv2.COLOR_BGR2RGB)

    img, pad_top, pad_left = KuzushijiDataset.pad_to_ratio(img, ratio=1.5)
    h, w = img.shape[:2]
    # print(h / w, pad_left, pad_top)
    assert img.ndim == 3
    scaled_imgs = []
    for scale in self.scales:
      h_scale = int(scale * self.height)
      w_scale = int(scale * self.width)
      simg = cv2.resize(img, (w_scale, h_scale))

      if to_tensor:
        assert simg.ndim == 3, simg.ndim
        simg = simg.transpose((2, 0, 1))
        simg = th.from_numpy(simg.copy())

      scaled_imgs.append(simg)

    return scaled_imgs + [fn] 
Example #16
Source File: utils.py    From progressive_growing_of_GANs with MIT License
def grid_batch_images(self, images):
        n, h, w, c = images.shape
        a = int(math.floor(np.sqrt(n)))
        # images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)
        images = images.astype(np.uint8)
        images_in_square = np.reshape(images[:a * a], (a, a, h, w, c))
        new_img = np.zeros((h * a, w * a, c), dtype=np.uint8)
        for col_i, col_images in enumerate(images_in_square):
            for row_i, image in enumerate(col_images):
                new_img[col_i * h: (1 + col_i) * h, row_i * w: (1 + row_i) * w] = image
        resolution = self.cfg.resolution
        if self.cfg.resolution != h:
            scale = resolution / h
            new_img = cv2.resize(new_img, None, fx=scale, fy=scale,
                                 interpolation=cv2.INTER_NEAREST)
        return new_img 
Example #17
Source File: functional.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def resized_crop(img, i, j, h, w, size, interpolation='BILINEAR'):
    """Crop the given CV Image and resize it to desired size.

    Args:
        img (CV Image): Image to be cropped.
        i (int): Row coordinate of the upper left corner of the crop box.
        j (int): Column coordinate of the upper left corner of the crop box.
        h (int): Height of the cropped image.
        w (int): Width of the cropped image.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (str, optional): Desired interpolation. Default is
            ``BILINEAR``.
    Returns:
        CV Image: Cropped and resized image.
    """
    assert _is_numpy_image(img), 'img should be CV Image'
    img = crop(img, i, j, h, w)
    img = resize(img, size, interpolation)
    return img 
Example #18
Source File: blob.py    From cascade-rcnn_Pytorch with MIT License
def prep_im_for_blob(im, pixel_means, pixel_stds, target_size, max_size):
    """Mean subtract and scale an image for use in a blob."""
    
    im = im.astype(np.float32, copy=False)
    im /= 255.0
    im -= pixel_means
    im /= pixel_stds
    # im = im[:, :, ::-1]
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    # if np.round(im_scale * im_size_max) > max_size:
    #     im_scale = float(max_size) / float(im_size_max)
    # im = imresize(im, im_scale)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)

    return im, im_scale 
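A hypothetical call; the mean/std values below are the common ImageNet constants, not necessarily what this project uses:

import numpy as np

im = np.zeros((480, 640, 3), dtype=np.uint8)
means = np.array([0.485, 0.456, 0.406])
stds = np.array([0.229, 0.224, 0.225])
blob_im, im_scale = prep_im_for_blob(im, means, stds, target_size=600, max_size=1000)
print(blob_im.shape, im_scale)  # (600, 800, 3) 1.25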
Example #19
Source File: img_utils.py    From tools_python with Apache License 2.0
def one_pic_to_video(image_path, output_video_path, fps, time):
    """
    一张图片合成视频
    one_pic_to_video('./../source/1.jpeg', './../source/output.mp4', 25, 10)
    :param path: 图片文件路径
    :param output_video_path:合成视频的路径
    :param fps:帧率
    :param time:时长
    :return:
    """

    image_clip = ImageClip(image_path)
    img_width, img_height = image_clip.w, image_clip.h

    # total number of frames
    frame_num = int(fps * time)

    img_size = (int(img_width), int(img_height))

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

    video = cv2.VideoWriter(output_video_path, fourcc, fps, img_size)

    for index in range(frame_num):
        frame = cv2.imread(image_path)
        # resize directly to the target size
        frame_suitable = cv2.resize(frame, (img_size[0], img_size[1]), interpolation=cv2.INTER_CUBIC)

        # write the frame into the video (repeated frame_num times)
        video.write(frame_suitable)

    # release resources
    video.release()

    return VideoFileClip(output_video_path) 
Example #20
Source File: datasets.py    From pruning_yolov3 with GNU General Public License v3.0
def load_image(self, index):
    # loads 1 image from dataset
    img = self.imgs[index]
    if img is None:
        img_path = self.img_files[index]
        img = cv2.imread(img_path)  # BGR
        assert img is not None, 'Image Not Found ' + img_path
        r = self.img_size / max(img.shape)  # size ratio
        if self.augment and r < 1:  # if training (NOT testing), downsize to inference shape
            h, w, _ = img.shape
            img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_LINEAR)  # _LINEAR fastest

    # Augment colorspace
    if self.augment:
        augment_hsv(img, hgain=self.hyp['hsv_h'], sgain=self.hyp['hsv_s'], vgain=self.hyp['hsv_v'])

    return img 
Example #21
Source File: map_utils.py    From DOTA_models with Apache License 2.0
def resize_maps(map, map_scales, resize_method):
  scaled_maps = []
  for i, sc in enumerate(map_scales):
    if resize_method == 'antialiasing':
      # Resize with OpenCV first so we can compute the target size,
      # then use PIL's resize for its anti-aliasing filter.
      map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
      w = map_.shape[1]; h = map_.shape[0]

      map_img = PIL.Image.fromarray((map*255).astype(np.uint8))
      map__img = map_img.resize((w, h), PIL.Image.ANTIALIAS)  # renamed to PIL.Image.LANCZOS in newer Pillow
      map_ = np.asarray(map__img).astype(np.float32)
      map_ = map_/255.
      map_ = np.minimum(map_, 1.0)
      map_ = np.maximum(map_, 0.0)
    elif resize_method == 'linear_noantialiasing':
      map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
    else:
      raise ValueError('Unknown resizing method: {}'.format(resize_method))
    scaled_maps.append(map_)
  return scaled_maps 
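A hypothetical call using the no-antialiasing path (the PIL path relies on PIL.Image.ANTIALIAS, which newer Pillow releases renamed to LANCZOS):

import numpy as np

occupancy = np.random.rand(64, 64).astype(np.float32)
maps = resize_maps(occupancy, [0.5, 1.0], 'linear_noantialiasing')
print([m.shape for m in maps])  # [(32, 32), (64, 64)]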
Example #22
Source File: test_image.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_augmenters(self):
        # ColorNormalizeAug
        mean = np.random.rand(3) * 255
        std = np.random.rand(3) + 1
        width = np.random.randint(100, 500)
        height = np.random.randint(100, 500)
        src = np.random.rand(height, width, 3) * 255.
        # We test numpy and mxnet NDArray inputs
        color_norm_aug = mx.image.ColorNormalizeAug(mean=mx.nd.array(mean), std=std)
        out_image = color_norm_aug(mx.nd.array(src))
        assert_almost_equal(out_image.asnumpy(), (src - mean) / std, atol=1e-3)

        # only test if all augmenters will work
        # TODO(Joshua Zhang): verify the augmenter outputs
        im_list = [[0, x] for x in TestImage.IMAGES]
        test_iter = mx.image.ImageIter(2, (3, 224, 224), label_width=1, imglist=im_list,
            resize=640, rand_crop=True, rand_resize=True, rand_mirror=True, mean=True,
            std=np.array([1.1, 1.03, 1.05]), brightness=0.1, contrast=0.1, saturation=0.1,
            hue=0.1, pca_noise=0.1, rand_gray=0.2, inter_method=10, path_root='', shuffle=True)
        for batch in test_iter:
            pass 
Example #23
Source File: utils.py    From AdaptiveWingLoss with Apache License 2.0
def cv_crop(image, landmarks, center, scale, resolution=256, center_shift=0):
    new_image = cv2.copyMakeBorder(image, center_shift,
                                   center_shift,
                                   center_shift,
                                   center_shift,
                                   cv2.BORDER_CONSTANT, value=[0,0,0])
    new_landmarks = landmarks.copy()
    if center_shift != 0:
        center[0] += center_shift
        center[1] += center_shift
        new_landmarks = new_landmarks + center_shift
    length = 200 * scale
    top = int(center[1] - length // 2)
    bottom = int(center[1] + length // 2)
    left = int(center[0] - length // 2)
    right = int(center[0] + length // 2)
    y_pad = abs(min(top, new_image.shape[0] - bottom, 0))
    x_pad = abs(min(left, new_image.shape[1] - right, 0))
    top, bottom, left, right = top + y_pad, bottom + y_pad, left + x_pad, right + x_pad
    new_image = cv2.copyMakeBorder(new_image, y_pad,
                                   y_pad,
                                   x_pad,
                                   x_pad,
                                   cv2.BORDER_CONSTANT, value=[0,0,0])
    new_image = new_image[top:bottom, left:right]
    new_image = cv2.resize(new_image, dsize=(int(resolution), int(resolution)),
                           interpolation=cv2.INTER_LINEAR)
    new_landmarks[:, 0] = (new_landmarks[:, 0] + x_pad - left) * resolution / length
    new_landmarks[:, 1] = (new_landmarks[:, 1] + y_pad - top) * resolution / length
    return new_image, new_landmarks 
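A hypothetical call on a synthetic image with a single landmark at the center:

import numpy as np

image = np.zeros((256, 256, 3), dtype=np.uint8)
landmarks = np.array([[128.0, 128.0]])
center = np.array([128.0, 128.0])
crop_img, crop_lms = cv_crop(image, landmarks, center, scale=1.0)
print(crop_img.shape, crop_lms)  # (256, 256, 3) [[128. 128.]]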
Example #24
Source File: BlurDetection.py    From python-- with GNU General Public License v3.0
def preImgOps(self, imgName):
        """
        图像的预处理操作
        :param imgName: 图像的而明朝
        :return: 灰度化和resize之后的图片对象
        """
        strPath = self.strDir + imgName

        img = cv2.imread(strPath)  # read the image
        cv2.moveWindow("", 1000, 100)
        # cv2.imshow("original image", img)
        # pre-processing
        reImg = cv2.resize(img, (800, 900), interpolation=cv2.INTER_CUBIC)
        img2gray = cv2.cvtColor(reImg, cv2.COLOR_BGR2GRAY)  # convert to a single-channel grayscale image
        return img2gray, reImg 
Example #25
Source File: train_deeplab2D.py    From pytorch-mri-segmentation-3D with MIT License
def scale_gt(img_temp,scale):
    # cv2.resize expects dsize as (width, height)
    new_dims = (int(img_temp.shape[1] * scale), int(img_temp.shape[0] * scale))
    return cv2.resize(img_temp, new_dims, interpolation=cv2.INTER_NEAREST).astype(float)
   
#Example chunk = ['scans/132/slices/FLAIR_28.nii.gz'] 
Example #26
Source File: train_deeplab2D.py    From pytorch-mri-segmentation-3D with MIT License
def scale_im(img_temp,scale):
    # cv2.resize expects dsize as (width, height)
    new_dims = (int(img_temp.shape[1] * scale), int(img_temp.shape[0] * scale))
    return cv2.resize(img_temp, new_dims).astype(float)
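A hypothetical call on a non-square slice, which is where the (width, height) fix above matters:

import numpy as np

slice_ = np.random.rand(100, 200)   # (height, width)
print(scale_im(slice_, 0.5).shape)  # (50, 100)
print(scale_gt(slice_, 0.5).shape)  # (50, 100)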
Example #27
Source File: datasets.py    From pytorch-segmentation-toolbox with MIT License
def generate_scale_label(self, image, label):
        f_scale = 0.7 + random.randint(0, 14) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label 
Example #28
Source File: saliency.py    From OpenCV-Computer-Vision-Projects-with-Python with MIT License
def get_saliency_map(self):
        """Returns a saliency map

            This method generates a saliency map for the image that was
            passed to the class constructor.

            :returns: grayscale saliency map
        """
        if self.need_saliency_map:
            # haven't calculated saliency map for this image yet
            num_channels = 1
            if len(self.frame_orig.shape) == 2:
                # single channel
                sal = self._get_channel_sal_magn(self.frame_small)
            else:
                # multiple channels: consider each channel independently
                sal = np.zeros_like(self.frame_small).astype(np.float32)
                for c in range(self.frame_small.shape[2]):  # xrange in the original Python 2 code
                    small = self.frame_small[:, :, c]
                    sal[:, :, c] = self._get_channel_sal_magn(small)

                # overall saliency: channel mean
                sal = np.mean(sal, 2)

            # postprocess: blur, square, and normalize
            if self.gauss_kernel is not None:
                sal = cv2.GaussianBlur(sal, self.gauss_kernel, sigmaX=8,
                                       sigmaY=0)
            sal = sal**2
            sal = np.float32(sal)/np.max(sal)

            # scale up
            sal = cv2.resize(sal, self.frame_orig.shape[1::-1])

            # store a copy so we do the work only once per frame
            self.saliencyMap = sal
            self.need_saliency_map = False

        return self.saliencyMap 
Example #29
Source File: functional.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def resize(img, size, interpolation='BILINEAR'):
    r"""Resize the input CV Image to the given size.

    Args:
        img (CV Image): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number, maintaining
            the aspect ratio, i.e., if height > width, then the image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
        interpolation (str, optional): Desired interpolation. Default is
            ``BILINEAR``

    Returns:
        CV Image: Resized image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))
    if not (isinstance(size, int) or
            (isinstance(size, Iterable) and len(size) == 2)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))

    interpolation = INTER_MODE[interpolation]
    if isinstance(size, int):
        h, w, _ = img.shape  # numpy images are (height, width, channels)
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)
    else:
        oh, ow = map(int, size)
    return cv2.resize(img, (ow, oh), interpolation=interpolation) 
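A hypothetical call of the int-size branch (assuming the module's INTER_MODE table and _is_numpy_image helper are in scope); the smaller edge becomes 256:

import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)
out = resize(img, 256)
print(out.shape)  # (256, 341, 3)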
Example #30
Source File: data.py    From insightocr with MIT License
def __iter__(self):
        init_state_names = [x[0] for x in self.init_states]
        data = []
        label = []
        cnt = 0
        for m_line in self.dataset_lines:
            img_lst = m_line.strip().split(' ')
            img_path = os.path.join(self.data_root, img_lst[0])

            cnt += 1
            img = Image.open(img_path).resize(self.data_shape, Image.BILINEAR).convert('L')
            img = np.array(img).reshape((1, self.data_shape[1], self.data_shape[0]))
            data.append(img)

            ret = np.zeros(self.num_label, int)
            for idx in range(1, len(img_lst)):
                ret[idx - 1] = int(img_lst[idx])

            label.append(ret)
            if cnt % self.batch_size == 0:
                data_all = [mx.nd.array(data)] + self.init_state_arrays
                label_all = [mx.nd.array(label)]
                data_names = ['data'] + init_state_names
                label_names = ['label']
                data = []
                label = []
                yield SimpleBatch(data_names, data_all, label_names, label_all)