Python PIL.Image.BICUBIC Examples

The following are 29 code examples of PIL.Image.BICUBIC, the bicubic resampling filter constant accepted by methods such as Image.resize() and Image.transform(). You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the module PIL.Image.
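Before the project examples, here is a minimal, self-contained sketch of the constant in use; the file names are placeholders. In recent Pillow releases the same filter is also exposed as Image.Resampling.BICUBIC, with Image.BICUBIC kept as an alias.

from PIL import Image

img = Image.open('input.png')            # placeholder input path
half = (img.width // 2, img.height // 2)
small = img.resize(half, Image.BICUBIC)  # resample with bicubic interpolation
small.save('output.png')                 # placeholder output path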
Example #1
Source File: perspective.py    From maze-cv with MIT License
def transform(startpoints, endpoints, im):
	'''Perform a perspective transformation on an image where startpoints are moved to endpoints, and the image is stretched accordingly.'''
	width, height = im.size
	coeffs = find_coeffs(endpoints, startpoints)

	im = im.transform((width, height), Image.PERSPECTIVE, coeffs, Image.BICUBIC)
	return im 
Example #2
Source File: mask_generators.py    From vaeac with MIT License
def regenerate_cache(self):
        """
        Resamples the big matrix and resets the counter of the total
        number of elements in the returned masks.
        """
        low_size = int(self.resolution * self.max_size)
        low_pattern = self.rng.uniform(0, 1, size=(low_size, low_size)) * 255
        low_pattern = torch.from_numpy(low_pattern.astype('float32'))
        pattern = transforms.Compose([
                        transforms.ToPILImage(),
                        transforms.Resize(self.max_size, Image.BICUBIC),
                        transforms.ToTensor(),
        ])(low_pattern[None])[0]
        pattern = torch.lt(pattern, self.density).byte()
        self.pattern = pattern.byte()
        self.points_used = 0 
Example #3
Source File: Dataloader.py    From Text_Segmentation_Image_Inpainting with GNU General Public License v3.0
def process_images(self, clean, mask):
        i, j, h, w = RandomResizedCrop.get_params(clean, scale=(0.5, 2.0), ratio=(3. / 4., 4. / 3.))
        clean_img = resized_crop(clean, i, j, h, w, size=self.img_size, interpolation=Image.BICUBIC)
        mask = resized_crop(mask, i, j, h, w, self.img_size, interpolation=Image.BICUBIC)

        # get mask before further image augment
        # mask = self.get_mask(raw_img, clean_img)

        if self.add_random_masks:
            mask = random_masks(mask.copy(), size=self.img_size[0], offset=10)
        # brightness_difference is a module-level constant defined in the original file
        mask = np.where(np.array(mask) > brightness_difference * 255, np.uint8(255), np.uint8(0))
        mask = cv2.dilate(mask, np.ones((10, 10), np.uint8), iterations=1)

        mask = np.expand_dims(mask, -1)
        mask_t = to_tensor(mask)
        # mask_t = (mask_t > brightness_difference).float()

        # mask_t, _ = torch.max(mask_t, dim=0, keepdim=True)
        binary_mask = (1 - mask_t)  # valid positions are 1; holes are 0
        binary_mask = binary_mask.expand(3, -1, -1)
        clean_img = self.transformer(clean_img)
        corrupted_img = clean_img * binary_mask
        return corrupted_img, binary_mask, clean_img 
Example #4
Source File: super_resolution.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def resolve(ctx):
    from PIL import Image
    if isinstance(ctx, list):
        ctx = [ctx[0]]
    net.load_parameters('superres.params', ctx=ctx)
    img = Image.open(opt.resolve_img).convert('YCbCr')
    y, cb, cr = img.split()
    data = mx.nd.expand_dims(mx.nd.expand_dims(mx.nd.array(y), axis=0), axis=0)
    out_img_y = mx.nd.reshape(net(data), shape=(-3, -2)).asnumpy()
    out_img_y = out_img_y.clip(0, 255)
    out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')

    out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
    out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')

    out_img.save('resolved.png') 
Example #5
Source File: utils.py    From Tenma with MIT License
def optimize_image(image_path, output_quality, base_width):
   ''' Optimizes image and returns a filepath string '''

   img = Image.open(image_path)

   # Check that it's a supported format
   format = str(img.format)
   if format == 'PNG' or format == 'JPEG':
      if base_width < img.size[0]:
         wpercent = (base_width/float(img.size[0]))
         hsize = int((float(img.size[1])*float(wpercent)))
         img = img.resize((base_width,hsize), Image.BICUBIC)
      # The 'quality' option is ignored for PNG files
      img.save(image_path, quality=output_quality, optimize=True)

   return image_path


#============================================================================== 
Example #6
Source File: openwpm.py    From PrivacyScore with GNU General Public License v3.0
def pixelize_screenshot(screenshot, screenshot_pixelized, target_width=390, pixelsize=3):
    """
    Thumbnail a screenshot to `target_width` and pixelize it.

    :param screenshot: Screenshot to be thumbnailed and pixelized
    :param screenshot_pixelized: File to which the result should be written
    :param target_width: Width of the final thumbnail
    :param pixelsize: Size of the final pixels
    :return: None
    """
    if target_width % pixelsize != 0:
        raise ValueError("pixelsize must divide target_width")

    img = Image.open(screenshot)
    width, height = img.size
    if height > width:
        img = img.crop((0, 0, width, width))
        height = width
    undersampling_width = target_width // pixelsize
    ratio = width / height
    new_height = int(undersampling_width / ratio)
    img = img.resize((undersampling_width, new_height), Image.BICUBIC)
    img = img.resize((target_width, new_height * pixelsize), Image.NEAREST)
    img.save(screenshot_pixelized, format='png') 
Example #7
Source File: test.py    From Depth-Map-Prediction with GNU General Public License v3.0
def main():
    # location of depth module, config and parameters
    module_fn = 'models/depth.py'
    config_fn = 'models/depth.conf'  # network configuration
    params_dir = 'weights/depth'  # network parameters

    # load depth network
    machine = net.create_machine(module_fn, config_fn, params_dir)

    # demo image
    rgb = Image.open('demo_nyud_rgb.jpg')
    rgb = rgb.resize((320, 240), Image.BICUBIC)

    # build depth inference function and run
    rgb_imgs = np.asarray(rgb).reshape((1, 240, 320, 3))
    pred_depths = machine.infer_depth(rgb_imgs)

    # save prediction
    (m, M) = (pred_depths.min(), pred_depths.max())
    depth_img_np = (pred_depths[0] - m) / (M - m)
    depth_img = Image.fromarray((255*depth_img_np).astype(np.uint8))
    depth_img.save('demo_nyud_depth_prediction.png') 
Example #8
Source File: util.py    From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License
def save_image(image_numpy, image_path, aspect_ratio=1.0):
    """Save a numpy image to the disk

    Parameters:
        image_numpy (numpy array) -- input numpy array
        image_path (str)          -- the path of the image
        aspect_ratio (float)      -- stretch factor for the saved image (default 1.0)
    """

    image_pil = Image.fromarray(image_numpy)
    h, w, _ = image_numpy.shape

    if aspect_ratio > 1.0:
        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    if aspect_ratio < 1.0:
        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    image_pil.save(image_path) 
Example #9
Source File: Dataloader.py    From Text_Segmentation_Image_Inpainting with GNU General Public License v3.0
def resize_pad_tensor(self, pil_img):
        origin = to_tensor(pil_img).unsqueeze(0)
        fix_len = self.resize
        long = max(pil_img.size)
        ratio = fix_len / long
        new_size = tuple(map(lambda x: int(x * ratio) // 8 * 8, pil_img.size))
        img = pil_img.resize(new_size, Image.BICUBIC)
        # img = pil_img
        img = self.transformer(img).unsqueeze(0)

        _, _, h, w = img.size()
        if fix_len > w:
            boarder_pad = (0, fix_len - w, 0, 0)
        else:
            boarder_pad = (0, 0, 0, fix_len - h)

        img = pad(img, boarder_pad, value=0)
        mask_resizer = self.resize_mask(boarder_pad, pil_img.size)
        return img, origin, mask_resizer 
Example #10
Source File: data_processing.py    From iAI with MIT License
def _load_and_resize(self, input_image_path):
        """Load an image from the specified path and resize it to the input resolution.
        Return the input image before resizing as a PIL Image (required for visualization),
        and the resized image as a NumPy float array.

        Keyword arguments:
        input_image_path -- string path of the image to be loaded
        """

        image_raw = Image.open(input_image_path)
        # Expecting yolo_input_resolution in (height, width) format, adjusting to PIL
        # convention (width, height) in PIL:
        new_resolution = (
            self.yolo_input_resolution[1],
            self.yolo_input_resolution[0])
        image_resized = image_raw.resize(
            new_resolution, resample=Image.BICUBIC)
        image_resized = np.array(image_resized, dtype=np.float32, order='C')
        return image_raw, image_resized 
Example #11
Source File: data_utils.py    From conditional-motion-propagation with MIT License
def image_flow_resize(img1, img2, flow, short_size=None, long_size=None):
    assert (short_size is None) ^ (long_size is None)
    w, h = img1.width, img1.height
    if short_size is not None:
        if w < h:
            neww = short_size
            newh = int(short_size / float(w) * h)
        else:
            neww = int(short_size / float(h) * w)
            newh = short_size
    else:
        if w < h:
            neww = int(long_size / float(h) * w)
            newh = long_size
        else:
            neww = long_size
            newh = int(long_size / float(w) * h)
    img1 = img1.resize((neww, newh), Image.BICUBIC)
    img2 = img2.resize((neww, newh), Image.BICUBIC)
    ratio = float(newh) / h
    flow = cv2.resize(flow.copy(), (neww, newh), interpolation=cv2.INTER_LINEAR) * ratio
    return img1, img2, flow, ratio 
Example #12
Source File: base_dataset.py    From Recycle-GAN with MIT License
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list) 
Example #13
Source File: data_utils.py    From conditional-motion-propagation with MIT License
def image_resize(img, short_size=None, long_size=None):
    assert (short_size is None) ^ (long_size is None)
    w, h = img.width, img.height
    if short_size is not None:
        if w < h:
            neww = short_size
            newh = int(short_size / float(w) * h)
        else:
            neww = int(short_size / float(h) * w)
            newh = short_size
    else:
        if w < h:
            neww = int(long_size / float(h) * w)
            newh = long_size
        else:
            neww = long_size
            newh = int(long_size / float(w) * h)
    img = img.resize((neww, newh), Image.BICUBIC)
    return img, [w, h] 
Example #14
Source File: dataset.py    From STARnet with MIT License
def load_img(filepath, scale):
    list=os.listdir(filepath)
    list.sort()
    
    rate = 1
    #for vimeo90k-setuplet (multiple temporal scale)
    #if random.random() < 0.5:
    #    rate = 2
    
    index = randrange(0, len(list)-(2*rate))
    
    target = [modcrop(Image.open(filepath+'/'+list[i]).convert('RGB'), scale) for i in range(index, index+3*rate, rate)]
    
    h, w = target[0].size  # note: PIL's size is (width, height); the names are swapped but used consistently below
    h_in,w_in = int(h//scale), int(w//scale)
    
    target_l = target[1].resize((h_in,w_in), Image.BICUBIC)
    input = [target[j].resize((h_in,w_in), Image.BICUBIC) for j in [0,2]]
    
    return input, target, target_l, list 
Example #15
Source File: model_factory.py    From DMS with MIT License
def get_transforms_eval(model_name, img_size=224, crop_pct=None):
    crop_pct = crop_pct or DEFAULT_CROP_PCT
    if 'dpn' in model_name:
        if crop_pct is None:  # note: unreachable, since crop_pct was defaulted above
            # Use default 87.5% crop for model's native img_size
            # but use 100% crop for larger than native as it
            # improves test time results across all models.
            if img_size == 224:
                scale_size = int(math.floor(img_size / DEFAULT_CROP_PCT))
            else:
                scale_size = img_size
        else:
            scale_size = int(math.floor(img_size / crop_pct))
        normalize = transforms.Normalize(
            mean=[124 / 255, 117 / 255, 104 / 255],
            std=[1 / (.0167 * 255)] * 3)
    elif 'inception' in model_name:
        scale_size = int(math.floor(img_size / crop_pct))
        normalize = LeNormalize()
    else:
        scale_size = int(math.floor(img_size / crop_pct))
        normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])

    return transforms.Compose([
        transforms.Scale(scale_size, Image.BICUBIC),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        normalize]) 
Example #16
Source File: utils.py    From yolo3-keras with MIT License
def letterbox_image(image, size):
    iw, ih = image.size
    w, h = size
    scale = min(w/iw, h/ih)
    nw = int(iw*scale)
    nh = int(ih*scale)

    image = image.resize((nw,nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128,128,128))
    new_image.paste(image, ((w-nw)//2, (h-nh)//2))
    return new_image 
Example #17
Source File: image.py    From perceptron-benchmark with Apache License 2.0
def draw_letterbox(image, prediction, original_shape=(416, 416), class_names=[], bounds=(0, 1)):
    """Draw on letterboxes on image."""
    assert len(image.shape) == 3, 'Input must be a 3-dimensional numpy.ndarray'
    if bounds != (0, 1):
        import copy
        image = copy.deepcopy(image).astype(np.float32) / bounds[-1]
    if image.shape[0] == 3:
        image = np.transpose(image, [1, 2, 0])
    ih, iw = original_shape
    h, w, _ = image.shape

    scale = min(w / iw, h / ih)
    nw = int(ih * scale)
    nh = int(iw * scale)
    pad = ((w - nw) // 2, (h - nh) // 2)
    image = image[(h - nh) // 2: (h - nh) // 2 + nh,
                  (w - nw) // 2: (w - nw) // 2 + nw, :]
    image = (image * 255).astype('uint8')

    image_pil = Image.fromarray(image.astype('uint8'))
    image_pil = image_pil.resize((iw, ih), Image.BICUBIC)
    new_image = np.asarray(image_pil, dtype=np.float32)
    new_image /= 255.

    for idx, temp_bbox in enumerate(prediction['boxes']):
        top, left, bottom, right = temp_bbox
        top -= pad[1]
        left -= pad[0]
        bbox_re_np = np.array([top, left, bottom, right]) / scale
        bbox_rescale = bbox_re_np.astype('int').tolist()
        prediction['boxes'][idx] = bbox_rescale

    draw = draw_boxes(
        new_image, prediction['boxes'],
        prediction['classes'], prediction['scores'],
        class_names)
    return draw 
Example #18
Source File: image.py    From perceptron-benchmark with Apache License 2.0
def letterbox_image(
        shape=(416, 416), data_format='channels_last', fname='example.png'):
    """Returns a letterbox image of target fname.

    Parameters
    ----------
    shape : list of integers
        The shape of the returned image (h, w).
    data_format : str
        "channels_first" or "channls_last".

    Returns
    -------
    image : array_like
        The example image.
    shape : tuple of integers
        The target (h, w) shape.

    """
    assert len(shape) == 2
    assert data_format in ['channels_first', 'channels_last']
    path = os.path.join(os.path.dirname(__file__), 'images/%s' % fname)
    image = Image.open(path)
    iw, ih = image.size
    h, w = shape
    scale = min(w / iw, h / ih)
    nw = int(iw * scale)
    nh = int(ih * scale)

    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', shape, (128, 128, 128))
    new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))

    image = np.asarray(new_image, dtype=np.float32)
    image /= 255.
    image = image[:, :, :3]
    assert image.shape == shape + (3,)
    if data_format == 'channels_first':
        image = np.transpose(image, (2, 0, 1))
    return image, (h, w) 
Example #19
Source File: aligned_dataset.py    From iSketchNFill with GNU General Public License v3.0
def __getitem__(self, index):
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGB')
        AB = AB.resize((self.opt.loadSize * 2, self.opt.loadSize), Image.BICUBIC)
        AB = self.transform(AB)

        w_total = AB.size(2)
        w = int(w_total / 2)
        h = AB.size(1)
        w_offset = random.randint(0, max(0, w - self.opt.fineSize - 1))
        h_offset = random.randint(0, max(0, h - self.opt.fineSize - 1))

        A = AB[:, h_offset:h_offset + self.opt.fineSize,
               w_offset:w_offset + self.opt.fineSize]
        B = AB[:, h_offset:h_offset + self.opt.fineSize,
               w + w_offset:w + w_offset + self.opt.fineSize]

        if self.opt.which_direction == 'BtoA':
            input_nc = self.opt.output_nc
            output_nc = self.opt.input_nc
        else:
            input_nc = self.opt.input_nc
            output_nc = self.opt.output_nc

        if (not self.opt.no_flip) and random.random() < 0.5:
            idx = [i for i in range(A.size(2) - 1, -1, -1)]
            idx = torch.LongTensor(idx)
            A = A.index_select(2, idx)
            B = B.index_select(2, idx)

        if input_nc == 1:  # RGB to gray
            tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
            A = tmp.unsqueeze(0)

        if output_nc == 1:  # RGB to gray
            tmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114
            B = tmp.unsqueeze(0)
    
        return {'A': A, 'B': B,
                'A_paths': AB_path, 'B_paths': AB_path} 
Example #20
Source File: utils.py    From YOLO-3D-Box with MIT License
def letterbox_image(image, size):
    '''resize image with unchanged aspect ratio using padding'''
    image_w, image_h = image.size
    w, h = size
    new_w = int(image_w * min(w*1.0/image_w, h*1.0/image_h))
    new_h = int(image_h * min(w*1.0/image_w, h*1.0/image_h))
    resized_image = image.resize((new_w,new_h), Image.BICUBIC)

    boxed_image = Image.new('RGB', size, (128,128,128))
    boxed_image.paste(resized_image, ((w-new_w)//2,(h-new_h)//2))
    return boxed_image 
Example #21
Source File: tools.py    From mvfnet with MIT License
def crop_image(image, res=224):
    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, flip_input=False)
    pts = fa.get_landmarks(np.array(image))
    if pts is None or len(pts) < 1:
        raise ValueError("No face detected!")
    pts = np.array(pts[0]).astype(np.int32)

    h = image.size[1]
    w = image.size[0]
    # x-width-pts[0,:], y-height-pts[1,:]
    x_max = np.max(pts[:68, 0])
    x_min = np.min(pts[:68, 0])
    y_max = np.max(pts[:68, 1])
    y_min = np.min(pts[:68, 1])
    bbox = [y_min, x_min, y_max, x_max]
    # c (cy, cx)
    c = [bbox[2] - (bbox[2] - bbox[0]) / 2, bbox[3] - (bbox[3] - bbox[1]) / 2.0]
    c[0] = c[0] - (bbox[2] - bbox[0]) * 0.12
    s = (max(bbox[2] - bbox[0], bbox[3] - bbox[1]) * 1.5).astype(np.int32)
    old_bb = np.array([c[0] - s / 2, c[1] - s / 2, c[0] + s / 2, c[1] + s / 2]).astype(np.int32)
    crop_img = Image.new('RGB', (s, s))
    #crop_img = torch.zeros(image.shape[0], s, s, dtype=torch.float32)

    shift_x = 0 - old_bb[1]
    shift_y = 0 - old_bb[0]
    old_bb = np.array([max(0, old_bb[0]), max(0, old_bb[1]),
              min(h, old_bb[2]), min(w, old_bb[3])]).astype(np.int32)
    hb = old_bb[2] - old_bb[0]
    wb = old_bb[3] - old_bb[1]
    new_bb = np.array([max(0, shift_y), max(0, shift_x), max(0, shift_y) + hb, max(0, shift_x) + wb]).astype(np.int32)
    cache = image.crop((old_bb[1], old_bb[0], old_bb[3], old_bb[2]))
    crop_img.paste(cache, (new_bb[1], new_bb[0], new_bb[3], new_bb[2]))
    crop_img = crop_img.resize((res, res), Image.BICUBIC)
    return crop_img 
Example #22
Source File: transforms.py    From PyTorch-Encoding with MIT License
def __init__(self, imgsize):
        self.imgsize = imgsize
        self.resize_method = Resize((imgsize, imgsize), interpolation=Image.BICUBIC) 
Example #23
Source File: transforms.py    From PyTorch-Encoding with MIT License
def __init__(self, imgsize, min_covered=0.1, aspect_ratio_range=(3./4, 4./3),
                 area_range=(0.1, 1.0), max_attempts=10):
        assert 0.0 < min_covered
        assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]
        assert 0 < area_range[0] <= area_range[1]
        assert 1 <= max_attempts

        self.imgsize = imgsize
        self.min_covered = min_covered
        self.aspect_ratio_range = aspect_ratio_range
        self.area_range = area_range
        self.max_attempts = max_attempts
        self._fallback = ECenterCrop(imgsize)
        self.resize_method = Resize((imgsize, imgsize), interpolation=Image.BICUBIC) 
Example #24
Source File: pascal_voc_loader.py    From sunets with MIT License
def r_rotate(self, img, lbl):
        angle = random.uniform(-10, 10)

        lbl = np.array(lbl, dtype=np.int32) - self.ignore_index
        lbl = Image.fromarray(lbl)
        img = tuple([ImageMath.eval("int(a)-b", a=j, b=self.filler[i]) for i, j in enumerate(img.split())])

        lbl = lbl.rotate(angle, resample=Image.NEAREST)
        img = tuple([k.rotate(angle, resample=Image.BICUBIC) for k in img])

        lbl = ImageMath.eval("int(a)+b", a=lbl, b=self.ignore_index)
        img = Image.merge(mode='RGB', bands=tuple(
            [ImageMath.eval("convert(int(a)+b,'L')", a=j, b=self.filler[i]) for i, j in enumerate(img)]))
        return (img, lbl) 
Example #25
Source File: coco_loader.py    From sunets with MIT License
def r_rotate(self, img, lbl):
        angle = random.uniform(-10, 10)

        lbl = np.array(lbl, dtype=np.int32) - self.ignore_index
        lbl = Image.fromarray(lbl)
        img = tuple([ImageMath.eval("int(a)-b", a=j, b=self.filler[i]) for i, j in enumerate(img.split())])

        lbl = lbl.rotate(angle, resample=Image.NEAREST)
        img = tuple([k.rotate(angle, resample=Image.BICUBIC) for k in img])

        lbl = ImageMath.eval("int(a)+b", a=lbl, b=self.ignore_index)
        img = Image.merge(mode='RGB', bands=tuple(
            [ImageMath.eval("convert(int(a)+b,'L')", a=j, b=self.filler[i]) for i, j in enumerate(img)]))
        return (img, lbl) 
Example #26
Source File: train_horovod.py    From gluon-cv with Apache License 2.0
def get_val_data(rec_val, batch_size, data_nthreads, input_size, crop_ratio):
    def val_batch_fn(batch, ctx):
        data = batch[0].as_in_context(ctx)
        label = batch[1].as_in_context(ctx)
        return data, label

    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    crop_ratio = crop_ratio if crop_ratio > 0 else 0.875
    resize = int(math.ceil(input_size/crop_ratio))

    from gluoncv.utils.transforms import EfficientNetCenterCrop
    from autogluon.utils import pil_transforms

    if input_size >= 320:
        transform_test = transforms.Compose([
            pil_transforms.ToPIL(),
            EfficientNetCenterCrop(input_size),
            pil_transforms.Resize((input_size, input_size), interpolation=Image.BICUBIC),
            pil_transforms.ToNDArray(),
            transforms.ToTensor(),
            normalize
        ])
    else:
        transform_test = transforms.Compose([
            transforms.Resize(resize, keep_ratio=True),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            normalize
        ])

    val_set = mx.gluon.data.vision.ImageRecordDataset(rec_val).transform_first(transform_test)

    val_sampler = SplitSampler(len(val_set), num_parts=num_workers, part_index=rank)
    val_data = gluon.data.DataLoader(val_set, batch_size=batch_size,
                                     num_workers=data_nthreads,
                                     sampler=val_sampler)

    return val_data, val_batch_fn

# Horovod: pin GPU to local rank 
Example #27
Source File: __init__.py    From image_process with GNU Affero General Public License v3.0
def rotate(i, degrees):
    if i.mode == "P":
        i = i.convert("RGBA")
    elif i.mode == "1":
        i = i.convert("L")

    # rotate does not support the LANCZOS filter (Pillow 2.7.0).
    return i.rotate(int(degrees), Image.BICUBIC, True) 
Example #28
Source File: transforms.py    From gen-efficientnet-pytorch with Apache License 2.0
def _pil_interp(method):
    if method == 'bicubic':
        return Image.BICUBIC
    elif method == 'lanczos':
        return Image.LANCZOS
    elif method == 'hamming':
        return Image.HAMMING
    else:
        # default bilinear, do we want to allow nearest?
        return Image.BILINEAR 
Example #29
Source File: simple_crop.py    From Tenma with MIT License
def smartcrop(value, arg):
	cache_url = ''
	cache_paths = ('','')

	if value:

		# Split width and height
		crop_size = arg.split('x')
		crop_width = int(crop_size[0])
		crop_height = int(crop_size[1])

		cache_paths = _create_cache_paths(value, crop_width, crop_height)

		if os.path.isfile(cache_paths[0]):
			return cache_paths[1]
		else:
			try:
				# Get image
				img = Image.open(value)

				# Check aspect ratio and resize accordingly
				if crop_width * img.height < crop_height * img.width:
					height_percent = (float(crop_height)/float(img.size[1]))
					width_size = int(float(img.size[0])*float(height_percent))
					img = img.resize((width_size,crop_height), Image.BICUBIC)

				else:
					width_percent = (float(crop_width)/float(img.size[0]))
					height_size = int(float(img.size[1])*float(width_percent))
					img = img.resize((crop_width,height_size), Image.BICUBIC)

				cropped = _crop_from_center(img, crop_width, crop_height)
				cropped.save(cache_paths[0])
			except Exception:
				pass

	return cache_paths[1]