Python PIL.Image.BILINEAR Examples

The following are 29 code examples of PIL.Image.BILINEAR, the bilinear resampling filter constant from the Pillow (PIL) library. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module PIL.Image.
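Before the examples, a note on what is being passed around: Image.BILINEAR is not a function but a resampling-filter constant, supplied as the resample argument of methods such as Image.resize and Image.rotate (newer Pillow also exposes it as Image.Resampling.BILINEAR). A minimal sketch, with placeholder file names:

from PIL import Image

# Resize a placeholder image to 224x224 with bilinear resampling.
img = Image.open('example.jpg')
resized = img.resize((224, 224), resample=Image.BILINEAR)
resized.save('example_224.jpg')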
Example #1
Source File: transform.py    From DeepLab_v3_plus with MIT License
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.size == mask.size
        w, h = img.size

        # if the longer side already matches the target size, keep the sample as-is
        if (w >= h and w == self.size[1]) or (h >= w and h == self.size[0]):
            return {'image': img,
                    'label': mask}
        # otherwise resize both image and mask to the target size (e.g. 512 x 512)
        oh, ow = self.size
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)

        return {'image': img,
                'label': mask} 
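A pattern worth noting in the example above, and in most of the snippets below: the image is resized with Image.BILINEAR while the label mask uses Image.NEAREST, because bilinearly interpolating integer class IDs would blend neighboring labels into meaningless intermediate values. A minimal stand-alone sketch of the pairing, with placeholder paths:

from PIL import Image

# Resize an image/mask pair for segmentation: bilinear for pixel
# intensities, nearest-neighbor for integer class labels.
img = Image.open('img.png').convert('RGB')
mask = Image.open('mask.png')
img = img.resize((512, 512), Image.BILINEAR)
mask = mask.resize((512, 512), Image.NEAREST)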
Example #2
Source File: Voc_Dataset.py    From Deeplab-v3plus with MIT License
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
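To make the arithmetic above concrete, here is a worked sketch for a 640x480 input with a crop size of 512 (synthetic images stand in for real data):

from PIL import Image

img = Image.new('RGB', (640, 480))
mask = Image.new('L', (640, 480))
# w > h, so the height becomes the short side: oh = 512,
# ow = int(640 * 512 / 480) = 682.
img = img.resize((682, 512), Image.BILINEAR)
mask = mask.resize((682, 512), Image.NEAREST)
# Center crop: x1 = int(round((682 - 512) / 2.)) = 85, y1 = 0.
img = img.crop((85, 0, 85 + 512, 0 + 512))
mask = mask.crop((85, 0, 85 + 512, 0 + 512))
assert img.size == (512, 512)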
Example #3
Source File: inference.py    From iAI with MIT License
def _load_img(self, image_path):
        image = Image.open(image_path)
        model_input_width = model_utils.ModelData.get_input_width()
        model_input_height = model_utils.ModelData.get_input_height()
        # Note: The bilinear interpolation used by Pillow is slightly
        # different from the one used by TensorFlow, so if the network
        # receives an image that is not 300x300, the network output may
        # differ from TensorFlow's.
        image_resized = image.resize(
            size=(model_input_width, model_input_height),
            resample=Image.BILINEAR
        )
        img_np = self._load_image_into_numpy_array(image_resized)
        # HWC -> CHW
        img_np = img_np.transpose((2, 0, 1))
        # Normalize to [-1.0, 1.0] interval (expected by model)
        img_np = (2.0 / 255.0) * img_np - 1.0
        img_np = img_np.ravel()
        return img_np


# This class is similar to TRTInference, but it manages Tensorflow
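The method above bundles a common TensorRT/TensorFlow preprocessing recipe. A self-contained sketch of the same steps with NumPy; the 300x300 input size comes from the comment, and the file name is a placeholder:

import numpy as np
from PIL import Image

image = Image.open('input.jpg').convert('RGB')
image = image.resize((300, 300), resample=Image.BILINEAR)
arr = np.asarray(image, dtype=np.float32)  # (300, 300, 3), HWC layout
arr = arr.transpose((2, 0, 1))             # (3, 300, 300), CHW layout
arr = (2.0 / 255.0) * arr - 1.0            # map [0, 255] to [-1.0, 1.0]
flat = arr.ravel()                         # flatten for the inference buffer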
Example #4
Source File: base_seg.py    From LEDNet with MIT License
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example #5
Source File: custom_transforms.py    From overhaul-distillation with MIT License
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        w, h = img.size
        if w > h:
            oh = self.crop_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = self.crop_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - self.crop_size) / 2.))
        y1 = int(round((h - self.crop_size) / 2.))
        img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
        mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))

        return {'image': img,
                'label': mask} 
Example #6
Source File: transform.py    From pytorch-semantic-segmentation with MIT License
def __call__(self, input, target):
        # resize, augment or center-crop, and tensorize both image and label
        if self.reshape_size is not None:
            input = input.resize(self.reshape_size, Image.BILINEAR)
            target = target.resize(self.reshape_size, Image.NEAREST)

        if self.augment:
            input, target = RandomCrop(self.crop_size)(input, target)  # random crop image and label in the same area
            input, target = self.flip(input, target)                   # random flip for both cropped image and label
            input, target = self.rotate(input, target)
        else:
            input, target = CenterCrop(self.crop_size)(input, target)  # center crop for the validation data

        input = ToTensor()(input)
        input = Normalize([.485, .456, .406], [.229, .224, .225])(input)  # assign: Normalize is not in-place in recent torchvision
          
        target = torch.from_numpy(np.array(target)).long().unsqueeze(0)

        return input, target 
Example #7
Source File: random_resized_crop.py    From argus-freesound with MIT License
def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):
    """Crop the given PIL Image and resize it to desired size.
    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
    Args:
        img (PIL Image): Image to be cropped.
        i (int): i in (i,j) i.e coordinates of the upper left corner
        j (int): j in (i,j) i.e coordinates of the upper left corner
        h (int): Height of the cropped image.
        w (int): Width of the cropped image.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``.
    Returns:
        PIL Image: Cropped image.
    """
    img = crop(img, i, j, h, w)
    img = resize(img, size, interpolation)
    return img 
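A hedged usage sketch of the helper above; crop and resize are the torchvision-style functional helpers the snippet relies on, and the image path is a placeholder:

from PIL import Image

# Take a 100x100 window whose top-left corner is at row 10, column 20,
# then resize the patch to 224x224 with bilinear interpolation.
img = Image.open('photo.jpg')
patch = resized_crop(img, i=10, j=20, h=100, w=100, size=(224, 224))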
Example #8
Source File: cyclegta5.py    From cycada_release with BSD 2-Clause "Simplified" License
def __getitem__(self, index):
        id = self.ids[index]
        filename = '{:05d}.png'.format(id)
        img_path = os.path.join(self.root, 'images', filename)
        label_path = os.path.join(self.root, 'labels', filename)
        img = Image.open(img_path).convert('RGB')
        target = Image.open(label_path)
        img = img.resize(target.size, resample=Image.BILINEAR)
        if self.transform is not None:
            img = self.transform(img)
        if self.remap_labels:
            target = np.asarray(target)
            target = remap_labels_to_train_ids(target)
            #target = self.label2train(target)
            target = Image.fromarray(target, 'L')
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target 
Example #9
Source File: classify_nsfw.py    From open_nsfw with BSD 2-Clause "Simplified" License
def resize_image(data, sz=(256, 256)):
    """
    Resize image. Please use this resize logic for best results instead of the 
    caffe, since it was used to generate training dataset 
    :param str data:
        The image data
    :param sz tuple:
        The resized image dimensions
    :returns bytearray:
        A byte array with the resized image
    """
    img_data = str(data)
    im = Image.open(StringIO(img_data))
    if im.mode != "RGB":
        im = im.convert('RGB')
    imr = im.resize(sz, resample=Image.BILINEAR)
    fh_im = StringIO()
    imr.save(fh_im, format='JPEG')
    fh_im.seek(0)
    return bytearray(fh_im.read()) 
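The snippet above is Python 2 code (str(data) and StringIO). A sketch of a Python 3 equivalent, assuming data holds raw image bytes:

import io
from PIL import Image

def resize_image_py3(data, sz=(256, 256)):
    # Same logic with io.BytesIO instead of StringIO.
    im = Image.open(io.BytesIO(data))
    if im.mode != 'RGB':
        im = im.convert('RGB')
    imr = im.resize(sz, resample=Image.BILINEAR)
    buf = io.BytesIO()
    imr.save(buf, format='JPEG')
    return bytearray(buf.getvalue())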
Example #10
Source File: seg_data_base.py    From SegmenTron with Apache License 2.0
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = min(outsize)
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize[1]) / 2.))
        y1 = int(round((h - outsize[0]) / 2.))
        img = img.crop((x1, y1, x1 + outsize[1], y1 + outsize[0]))
        mask = mask.crop((x1, y1, x1 + outsize[1], y1 + outsize[0]))

        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example #11
Source File: nyu_transform.py    From Visualizing-CNNs-for-monocular-depth-estimation with MIT License
def changeScale(self, img, size, interpolation=Image.BILINEAR):

        if not _is_pil_image(img):
            raise TypeError(
                'img should be PIL Image. Got {}'.format(type(img)))
        if not (isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)):  # collections.Iterable was removed in Python 3.10
            raise TypeError('Got inappropriate size arg: {}'.format(size))

        if isinstance(size, int):
            w, h = img.size
            if (w <= h and w == size) or (h <= w and h == size):
                return img
            if w < h:
                ow = size
                oh = int(size * h / w)
                return img.resize((ow, oh), interpolation)
            else:
                oh = size
                ow = int(size * w / h)
                return img.resize((ow, oh), interpolation)
        else:
            return img.resize(size[::-1], interpolation) 
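Note the two call conventions above: an int scales the short side while preserving aspect ratio, whereas a pair is interpreted as (height, width) and reversed with size[::-1] before reaching PIL, which expects (width, height). A small sketch of the resulting arithmetic:

from PIL import Image

img = Image.new('RGB', (640, 480))
# int-style: the short side becomes 320, so ow = int(320 * 640 / 480) = 426.
scaled = img.resize((426, 320), Image.BILINEAR)
# pair-style: (h, w) = (200, 100) is reversed to PIL's (w, h) = (100, 200).
out = img.resize((200, 100)[::-1], Image.BILINEAR)
assert out.size == (100, 200)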
Example #12
Source File: key.py    From kle_render with MIT License
def open_base_img(full_profile, res, base_color, color):
    # get base image according to profile and perceptual gray of key color
    base_num = str([0xE0, 0xB0, 0x80, 0x50, 0x20].index(base_color) + 1)

    # open image and convert to Lab
    with Image.open('images/{0}_{1}{2}.png'.format(*full_profile, base_num)) as img:
        key_img = img.resize(tuple(int(s * res / 200) for s in img.size), resample=Image.BILINEAR).convert('RGBA')  # resize expects a 2-tuple, not a generator
    if full_profile[1] in ('ISO', 'BIGENTER'): alpha = key_img.split()[-1]
    l, a, b = ImageCms.applyTransform(key_img, rgb2lab_transform).split()

    # convert key color to Lab
    # a and b should be scaled by 128/100, but desaturation looks more natural
    rgb_color = color_objects.sRGBColor(*ImageColor.getrgb(color), is_upscaled=True)
    lab_color = color_conversions.convert_color(rgb_color, color_objects.LabColor)
    l1, a1, b1 = lab_color.get_value_tuple()
    l1, a1, b1 = int(l1 * 256 / 100), int(a1 + 128), int(b1 + 128)

    # change Lab of base image to match that of key color
    l = ImageMath.eval('convert(l + l1 - l_avg, "L")', l=l, l1=l1, l_avg=base_color)
    a = ImageMath.eval('convert(a + a1 - a, "L")', a=a, a1=a1)
    b = ImageMath.eval('convert(b + b1 - b, "L")', b=b, b1=b1)

    key_img = ImageCms.applyTransform(Image.merge('LAB', (l, a, b)), lab2rgb_transform).convert('RGBA')
    if full_profile[1] in ('ISO', 'BIGENTER'): key_img.putalpha(alpha)
    return key_img 
Example #13
Source File: pipeline.py    From cloudml-samples with Apache License 2.0
def make_request_json(self, uri, output_json):
    """Produces a JSON request suitable to send to CloudML Prediction API.

    Args:
      uri: The input image URI.
      output_json: File handle of the output json where request will be written.
    """
    def _open_file_read_binary(uri):
      try:
        return file_io.FileIO(uri, mode='rb')
      except errors.InvalidArgumentError:
        return file_io.FileIO(uri, mode='r')

    with open(output_json, 'w') as outf:
      with _open_file_read_binary(uri) as f:
        image_bytes = f.read()
        image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        image = image.resize((299, 299), Image.BILINEAR)
        resized_image = io.BytesIO()
        image.save(resized_image, format='JPEG')
        encoded_image = base64.b64encode(resized_image.getvalue()).decode('utf-8')  # decode so json.dumps also works on Python 3
        row = json.dumps({'key': uri, 'image_bytes': {'b64': encoded_image}})
        outf.write(row)
        outf.write('\n') 
Example #14
Source File: augmentations.py    From PLARD with MIT License
def __call__(self, img, mask):
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)

        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        if w == tw and h == th:
            return img, mask
        if w < tw or h < th:
            return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST)

        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th)) 
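For completeness, a worked sketch of the random-crop branch above, on a synthetic 640x480 image with a 256x256 target:

import random
from PIL import Image

img = Image.new('RGB', (640, 480))
tw, th = 256, 256
# The source exceeds the crop size in both dimensions, so pick a
# random top-left corner and cut out a tw x th window.
x1 = random.randint(0, 640 - tw)
y1 = random.randint(0, 480 - th)
patch = img.crop((x1, y1, x1 + tw, y1 + th))
assert patch.size == (tw, th)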
Example #15
Source File: make_imagenet_p_inception.py    From robustness with Apache License 2.0
def resize(img, size, interpolation=PILImage.BILINEAR):
    if isinstance(size, int):
        w, h = img.size
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
            return img.resize((ow, oh), interpolation)
        else:
            oh = size
            ow = int(size * w / h)
            return img.resize((ow, oh), interpolation)
    else:
        return img.resize(size[::-1], interpolation) 
Example #16
Source File: PILTransform.py    From ext_portrait_segmentation with MIT License
def __call__(self, rgb_img, label_img):
        rgb_img = rgb_img.resize(self.size, Image.BILINEAR)
        label_img = label_img.resize(self.size, Image.NEAREST)
        return rgb_img, label_img 
Example #17
Source File: make_imagenet_c_inception.py    From robustness with Apache License 2.0
def resized_center_crop(img, scale=0.875, size=(299, 299), interpolation=Image.BILINEAR):
    img = center_crop(img, scale)
    img = resize(img, size, interpolation)

    return img 
Example #18
Source File: make_imagenet_c_inception.py    From robustness with Apache License 2.0
def resize(img, size, interpolation=Image.BILINEAR):
    if isinstance(size, int):
        w, h = img.size
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
            return img.resize((ow, oh), interpolation)
        else:
            oh = size
            ow = int(size * w / h)
            return img.resize((ow, oh), interpolation)
    else:
        return img.resize(size[::-1], interpolation) 
Example #19
Source File: make_imagenet_p_inception.py    From robustness with Apache License 2.0
def resized_center_crop(img, scale=0.875, size=(299, 299), interpolation=PILImage.BILINEAR):
    img = center_crop(img, scale)
    img = resize(img, size, interpolation)

    return img 
Example #20
Source File: augmentations.py    From PLARD with MIT License
def __call__(self, img, mask):
        rotate_degree = random.random() * 2 * self.degree - self.degree
        return img.rotate(rotate_degree, Image.BILINEAR), mask.rotate(rotate_degree, Image.NEAREST) 
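Image.BILINEAR also serves as the resample filter for Image.rotate, as above. A stand-alone sketch of the same paired rotation; the 10-degree bound is an arbitrary choice:

import random
from PIL import Image

degree = 10
angle = random.random() * 2 * degree - degree  # uniform in [-10, 10)
img = Image.new('RGB', (64, 64)).rotate(angle, Image.BILINEAR)
mask = Image.new('L', (64, 64)).rotate(angle, Image.NEAREST)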
Example #21
Source File: augmentations.py    From PLARD with MIT License
def __call__(self, img, mask):
        assert img.size == mask.size
        return img.resize(self.size, Image.BILINEAR), mask.resize(self.size, Image.NEAREST) 
Example #22
Source File: spatial_transforms.py    From PyTorchConv3D with Apache License 2.0
def __init__(self, size, interpolation=Image.BILINEAR):
        assert isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and
                                         len(size) == 2)  # collections.Iterable was removed in Python 3.10
        self.size = size
        self.interpolation = interpolation 
Example #23
Source File: transform.py    From pytorch-semantic-segmentation with MIT License
def __call__(self, input, target):
        # resize both image and label
        input = input.resize(self.size, Image.BILINEAR)
        target = target.resize(self.size, Image.NEAREST)

        target = torch.from_numpy(np.array(target)).long().unsqueeze(0)
        input_tensor = ToTensor()(input)
        input_tensor = Normalize([.485, .456, .406], [.229, .224, .225])(input_tensor)  # assign: Normalize is not in-place in recent torchvision
        return input_tensor, target, input 
Example #24
Source File: general.py    From mxbox with BSD 3-Clause "New" or "Revised" License
def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation 
Example #25
Source File: general.py    From mxbox with BSD 3-Clause "New" or "Revised" License
def __init__(self, size, interpolation=Image.BILINEAR):
        assert isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)  # collections.Iterable was removed in Python 3.10
        self.size = size
        self.interpolation = interpolation 
Example #26
Source File: spatial_transforms.py    From PyTorchConv3D with Apache License 2.0
def __init__(self,
                 scales,
                 size,
                 interpolation=Image.BILINEAR,
                 crop_positions=['c', 'tl', 'tr', 'bl', 'br']):
        self.scales = scales
        self.size = size
        self.interpolation = interpolation

        self.crop_positions = crop_positions 
Example #27
Source File: spatial_transforms.py    From PyTorchConv3D with Apache License 2.0
def __init__(self, scales, size, interpolation=Image.BILINEAR):
        self.scales = scales
        self.size = size
        self.interpolation = interpolation 
Example #28
Source File: transform.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def __call__(self, img, msk, cat, iscrowd):
        # Random flip
        if self.random_flip:
            img, msk = self._random_flip(img, msk)

        # Adjust scale, possibly at random
        if self.random_scale is not None:
            target_size = self._random_target_size()
        else:
            target_size = self.shortest_size
        scale = self._adjusted_scale(img.size[0], img.size[1], target_size)

        out_size = tuple(int(dim * scale) for dim in img.size)
        img = img.resize(out_size, resample=Image.BILINEAR)
        msk = [m.resize(out_size, resample=Image.NEAREST) for m in msk]

        # Wrap in np.array
        cat = np.array(cat, dtype=np.int32)
        iscrowd = np.array(iscrowd, dtype=np.uint8)

        # Image transformations
        img = tfn.to_tensor(img)
        img = self._normalize_image(img)

        # Label transformations
        msk = np.stack([np.array(m, dtype=np.int32, copy=False) for m in msk], axis=0)
        msk, cat, iscrowd = self._compact_labels(msk, cat, iscrowd)

        # Convert labels to torch and extract bounding boxes
        msk = torch.from_numpy(msk.astype(np.int64))  # np.long was removed in NumPy 1.24
        cat = torch.from_numpy(cat.astype(np.int64))
        iscrowd = torch.from_numpy(iscrowd)
        bbx = extract_boxes(msk, cat.numel())

        return dict(img=img, msk=msk, cat=cat, iscrowd=iscrowd, bbx=bbx) 
Example #29
Source File: transform.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def __call__(self, img):
        # Adjust scale
        scale = self._adjusted_scale(img.size[0], img.size[1])

        out_size = tuple(int(dim * scale) for dim in img.size)
        img = img.resize(out_size, resample=Image.BILINEAR)

        # Image transformations
        img = tfn.to_tensor(img)
        img = self._normalize_image(img)

        return img
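A minimal sketch of the adjusted-scale resize above, assuming _adjusted_scale simply targets a 512-pixel shortest side (the real seamseg helper computes the scale internally):

from PIL import Image

shortest_size = 512  # assumed target; seamseg derives this from its config
img = Image.new('RGB', (1024, 768))
scale = shortest_size / min(img.size)
out_size = tuple(int(round(dim * scale)) for dim in img.size)  # (683, 512)
img = img.resize(out_size, resample=Image.BILINEAR)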