Python PIL.Image.BILINEAR Examples

The following are 30 code examples showing how to use PIL.Image.BILINEAR, extracted from open source projects. The originating project, author, file, and license are noted above each example where available.

You may also want to check out the other functions and classes available in the PIL.Image module.
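Before the project examples, here is a minimal self-contained sketch of the constant in use (the file paths are placeholders; on Pillow 9.1 and later the same filter is also exposed as Image.Resampling.BILINEAR):

from PIL import Image

im = Image.open('input.png')  # placeholder input path
# Bilinear resampling: smooth linear interpolation, a common default for photos
resized = im.resize((256, 256), Image.BILINEAR)
# The same constant selects the filter for rotations and other geometric transforms
rotated = im.rotate(15, resample=Image.BILINEAR)
resized.save('resized.png')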

Example 1
Project: DeepLab_v3_plus   Author: songdejia   File: transform.py   License: MIT License
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.size == mask.size
        w, h = img.size

        # if one side is 512
        if (w >= h and w == self.size[1]) or (h >= w and h == self.size[0]):
            return {'image': img,
                    'label': mask}
        # if neither side is equal to 512, resize to 512 x 512
        oh, ow = self.size
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)

        return {'image': img,
                'label': mask} 
Example 2
Project: argus-freesound   Author: lRomul   File: random_resized_crop.py   License: MIT License
def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):
    """Crop the given PIL Image and resize it to desired size.
    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
    Args:
        img (PIL Image): Image to be cropped.
        i (int): i in (i, j), i.e. the row coordinate of the upper-left corner.
        j (int): j in (i, j), i.e. the column coordinate of the upper-left corner.
        h (int): Height of the cropped image.
        w (int): Width of the cropped image.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``.
    Returns:
        PIL Image: Cropped image.
    """
    img = crop(img, i, j, h, w)
    img = resize(img, size, interpolation)
    return img 
Example 3
Project: overhaul-distillation   Author: clovaai   File: custom_transforms.py   License: MIT License
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        w, h = img.size
        if w > h:
            oh = self.crop_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = self.crop_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - self.crop_size) / 2.))
        y1 = int(round((h - self.crop_size) / 2.))
        img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
        mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))

        return {'image': img,
                'label': mask} 
Example 4
Project: iAI   Author: aimuch   File: inference.py   License: MIT License
def _load_img(self, image_path):
        image = Image.open(image_path)
        model_input_width = model_utils.ModelData.get_input_width()
        model_input_height = model_utils.ModelData.get_input_height()
        # Note: the bilinear interpolation used by Pillow is slightly
        # different from the one used by TensorFlow, so if the network
        # receives an image that is not 300x300, the network output may
        # differ from the one produced by TensorFlow
        image_resized = image.resize(
            size=(model_input_width, model_input_height),
            resample=Image.BILINEAR
        )
        img_np = self._load_image_into_numpy_array(image_resized)
        # HWC -> CHW
        img_np = img_np.transpose((2, 0, 1))
        # Normalize to [-1.0, 1.0] interval (expected by model)
        img_np = (2.0 / 255.0) * img_np - 1.0
        img_np = img_np.ravel()
        return img_np


# This class is similar to TRTInference, but it manages TensorFlow 
Example 5
Project: iAI   Author: aimuch   File: inference.py   License: MIT License
def _load_img(self, image_path):
        image = Image.open(image_path)
        model_input_width = model_utils.ModelData.get_input_width()
        model_input_height = model_utils.ModelData.get_input_height()
        # Note: the bilinear interpolation used by Pillow is slightly
        # different from the one used by TensorFlow, so if the network
        # receives an image that is not 300x300, the network output may
        # differ from the one produced by TensorFlow
        image_resized = image.resize(
            size=(model_input_width, model_input_height),
            resample=Image.BILINEAR
        )
        img_np = self._load_image_into_numpy_array(image_resized)
        # HWC -> CHW
        img_np = img_np.transpose((2, 0, 1))
        # Normalize to [-1.0, 1.0] interval (expected by model)
        img_np = (2.0 / 255.0) * img_np - 1.0
        img_np = img_np.ravel()
        return img_np


# This class is similar to TRTInference, but it manages TensorFlow 
Example 6
def changeScale(self, img, size, interpolation=Image.BILINEAR):

        if not _is_pil_image(img):
            raise TypeError(
                'img should be PIL Image. Got {}'.format(type(img)))
        if not (isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)):
            raise TypeError('Got inappropriate size arg: {}'.format(size))

        if isinstance(size, int):
            w, h = img.size
            if (w <= h and w == size) or (h <= w and h == size):
                return img
            if w < h:
                ow = size
                oh = int(size * h / w)
                return img.resize((ow, oh), interpolation)
            else:
                oh = size
                ow = int(size * w / h)
                return img.resize((ow, oh), interpolation)
        else:
            return img.resize(size[::-1], interpolation) 
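Note that this snippet (like the similar constructors in Examples 23 and 29) relies on collections.Iterable, which was removed in Python 3.10. A small compatibility sketch of the same size check, using collections.abc and a hypothetical helper name:

import collections.abc

def _is_valid_size(size):
    # Same check as above, but importing Iterable from collections.abc
    return isinstance(size, int) or (
        isinstance(size, collections.abc.Iterable) and len(size) == 2
    )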
Example 7
Project: PLARD   Author: zhechen   File: augmentations.py   License: MIT License
def __call__(self, img, mask):
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)

        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        if w == tw and h == th:
            return img, mask
        if w < tw or h < th:
            return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST)

        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th)) 
Example 8
Project: open_nsfw   Author: yahoo   File: classify_nsfw.py   License: BSD 2-Clause "Simplified" License
def resize_image(data, sz=(256, 256)):
    """
    Resize image. Please use this resize logic for best results instead of the
    Caffe one, since it was used to generate the training dataset
    :param str data:
        The image data
    :param tuple sz:
        The resized image dimensions
    :returns bytearray:
        A byte array with the resized image
    """
    img_data = str(data)
    im = Image.open(StringIO(img_data))
    if im.mode != "RGB":
        im = im.convert('RGB')
    imr = im.resize(sz, resample=Image.BILINEAR)
    fh_im = StringIO()
    imr.save(fh_im, format='JPEG')
    fh_im.seek(0)
    return bytearray(fh_im.read()) 
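The snippet above is Python 2 code (str(data) and StringIO); a rough Python 3 sketch of the same resize logic, using io.BytesIO instead, might look like this:

import io
from PIL import Image

def resize_image_py3(data, sz=(256, 256)):
    # data: raw image bytes; returns JPEG-encoded bytes of the bilinear-resized image
    im = Image.open(io.BytesIO(data))
    if im.mode != 'RGB':
        im = im.convert('RGB')
    imr = im.resize(sz, resample=Image.BILINEAR)
    out = io.BytesIO()
    imr.save(out, format='JPEG')
    return bytearray(out.getvalue())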
Example 9
Project: cycada_release   Author: jhoffman   File: cyclegta5.py   License: BSD 2-Clause "Simplified" License
def __getitem__(self, index):
        id = self.ids[index]
        filename = '{:05d}.png'.format(id)
        img_path = os.path.join(self.root, 'images', filename)
        label_path = os.path.join(self.root, 'labels', filename)
        img = Image.open(img_path).convert('RGB')
        target = Image.open(label_path)
        img = img.resize(target.size, resample=Image.BILINEAR)
        if self.transform is not None:
            img = self.transform(img)
        if self.remap_labels:
            target = np.asarray(target)
            target = remap_labels_to_train_ids(target)
            #target = self.label2train(target)
            target = Image.fromarray(target, 'L')
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target 
Example 10
Project: pytorch-semantic-segmentation   Author: mapleneverfade   File: transform.py   License: MIT License
def __call__(self, input, target):
        # do something to both images and labels
        if self.reshape_size is not None:
            input = input.resize(self.reshape_size,Image.BILINEAR)
            target = target.resize(self.reshape_size,Image.NEAREST)
 
        if self.augment :
            input, target = RandomCrop(self.crop_size)(input, target)  # RandomCrop for image and label in the same area
            input, target = self.flip(input, target)                    # RandomFlip for both cropped image and label
            input, target = self.rotate(input,target)
        else:
            input, target =  CenterCrop(self.crop_size)(input, target) # CenterCrop for the validation data
            
        input = ToTensor()(input)  
        Normalize([.485, .456, .406], [.229, .224, .225])(input) #normalize with the params of imagenet
          
        target = torch.from_numpy(np.array(target)).long().unsqueeze(0)

        return input, target 
Example 11
Project: LEDNet   Author: AceCoooool   File: base_seg.py   License: MIT License
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example 12
Project: SegmenTron   Author: LikeLy-Journey   File: seg_data_base.py   License: Apache License 2.0
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = min(outsize)
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize[1]) / 2.))
        y1 = int(round((h - outsize[0]) / 2.))
        img = img.crop((x1, y1, x1 + outsize[1], y1 + outsize[0]))
        mask = mask.crop((x1, y1, x1 + outsize[1], y1 + outsize[0]))

        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example 13
Project: kle_render   Author: CQCumbers   File: key.py   License: MIT License
def open_base_img(full_profile, res, base_color, color):
    # get base image according to profile and perceptual gray of key color
    base_num = str([0xE0, 0xB0, 0x80, 0x50, 0x20].index(base_color) + 1)

    # open image and convert to Lab
    with Image.open('images/{0}_{1}{2}.png'.format(*full_profile, base_num)) as img:
        key_img = img.resize((int(s * res / 200) for s in img.size), resample=Image.BILINEAR).convert('RGBA')
    if full_profile[1] in ('ISO', 'BIGENTER'): alpha = key_img.split()[-1]
    l, a, b = ImageCms.applyTransform(key_img, rgb2lab_transform).split()

    # convert key color to Lab
    # a and b should be scaled by 128/100, but desaturation looks more natural
    rgb_color = color_objects.sRGBColor(*ImageColor.getrgb(color), is_upscaled=True)
    lab_color = color_conversions.convert_color(rgb_color, color_objects.LabColor)
    l1, a1, b1 = lab_color.get_value_tuple()
    l1, a1, b1 = int(l1 * 256 / 100), int(a1 + 128), int(b1 + 128)

    # change Lab of base image to match that of key color
    l = ImageMath.eval('convert(l + l1 - l_avg, "L")', l=l, l1=l1, l_avg=base_color)
    a = ImageMath.eval('convert(a + a1 - a, "L")', a=a, a1=a1)
    b = ImageMath.eval('convert(b + b1 - b, "L")', b=b, b1=b1)

    key_img = ImageCms.applyTransform(Image.merge('LAB', (l, a, b)), lab2rgb_transform).convert('RGBA')
    if full_profile[1] in ('ISO', 'BIGENTER'): key_img.putalpha(alpha)
    return key_img 
Example 14
Project: cloudml-samples   Author: GoogleCloudPlatform   File: pipeline.py   License: Apache License 2.0
def make_request_json(self, uri, output_json):
    """Produces a JSON request suitable to send to CloudML Prediction API.

    Args:
      uri: The input image URI.
      output_json: File handle of the output json where request will be written.
    """
    def _open_file_read_binary(uri):
      try:
        return file_io.FileIO(uri, mode='rb')
      except errors.InvalidArgumentError:
        return file_io.FileIO(uri, mode='r')

    with open(output_json, 'w') as outf:
      with _open_file_read_binary(uri) as f:
        image_bytes = f.read()
        image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        image = image.resize((299, 299), Image.BILINEAR)
        resized_image = io.BytesIO()
        image.save(resized_image, format='JPEG')
        encoded_image = base64.b64encode(resized_image.getvalue())
        row = json.dumps({'key': uri, 'image_bytes': {'b64': encoded_image}})
        outf.write(row)
        outf.write('\n') 
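One caveat: under Python 3, base64.b64encode returns bytes, which json.dumps cannot serialize directly. A hedged sketch of the request-building step with an explicit decode (the function name is illustrative):

import base64
import json

def build_request_row(key, image_bytes):
    # Decode the base64 payload to str so json.dumps accepts it under Python 3
    b64 = base64.b64encode(image_bytes).decode('ascii')
    return json.dumps({'key': key, 'image_bytes': {'b64': b64}})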
Example 15
Project: Deeplab-v3plus   Author: hualin95   File: Voc_Dataset.py   License: MIT License
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example 16
Project: unicorn-hat-hd   Author: pimoroni   File: camera.py   License: MIT License
def write(self, buf):
        img = Image.frombytes('RGB', (64, 64), buf)
        img = img.resize((16, 16), Image.BILINEAR)

        for x in range(16):
            for y in range(16):
                r, g, b = img.getpixel((x, y))
                self.hat.set_pixel(x, y, r, g, b)

        self.hat.show() 
Example 17
Project: DeepLab_v3_plus   Author: songdejia   File: transform.py   License: MIT License
def __call__(self, sample):
        img, mask = sample['image'], sample['label']

        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)

        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size # target size
        if w == tw and h == th:
            return {'image': img,
                    'label': mask}

        if w < tw or h < th:
            img = img.resize((tw, th), Image.BILINEAR)
            mask = mask.resize((tw, th), Image.NEAREST)
            return {'image': img,
                    'label': mask}

        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        img = img.crop((x1, y1, x1 + tw, y1 + th))
        mask = mask.crop((x1, y1, x1 + tw, y1 + th))

        return {'image': img,
                'label': mask} 
Example 18
Project: DeepLab_v3_plus   Author: songdejia   File: transform.py   License: MIT License
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']

        assert img.size == mask.size

        img = img.resize(self.size, Image.BILINEAR)
        mask = mask.resize(self.size, Image.NEAREST)

        return {'image': img,
                'label': mask} 
Example 19
Project: DeepLab_v3_plus   Author: songdejia   File: transform.py   License: MIT License
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.size == mask.size
        for attempt in range(10):
            area = img.size[0] * img.size[1]
            target_area = random.uniform(0.45, 1.0) * area
            aspect_ratio = random.uniform(0.5, 2)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if random.random() < 0.5:
                w, h = h, w

            if w <= img.size[0] and h <= img.size[1]:
                x1 = random.randint(0, img.size[0] - w)
                y1 = random.randint(0, img.size[1] - h)

                img = img.crop((x1, y1, x1 + w, y1 + h))
                mask = mask.crop((x1, y1, x1 + w, y1 + h))
                assert (img.size == (w, h))

                img = img.resize((self.size, self.size), Image.BILINEAR)
                mask = mask.resize((self.size, self.size), Image.NEAREST)

                return {'image': img,
                        'label': mask}

        # Fallback
        scale = Scale(self.size)
        crop = CenterCrop(self.size)
        sample = crop(scale(sample))
        return sample 
Example 20
Project: DeepLab_v3_plus   Author: songdejia   File: transform.py   License: MIT License
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        rotate_degree = random.random() * 2 * self.degree - self.degree
        img = img.rotate(rotate_degree, Image.BILINEAR)
        mask = mask.rotate(rotate_degree, Image.NEAREST)

        return {'image': img,
                'label': mask} 
Example 21
Project: DeepLab_v3_plus   Author: songdejia   File: transform.py   License: MIT License
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.size == mask.size

        scale = random.uniform(self.limit[0], self.limit[1])
        w = int(scale * img.size[0])
        h = int(scale * img.size[1])

        img, mask = img.resize((w, h), Image.BILINEAR), mask.resize((w, h), Image.NEAREST)

        return {'image': img, 'label': mask} 
Example 22
def convert_mat_to_images(args):
    '''convert the caltech101 mat file to images
    Examples
    --------
    python convert_data.py --dataset /home/ubuntu/datasets/caltech101/data/caltech101_silhouettes_28.mat --save_path /home/ubuntu/datasets/caltech101/data/ --invert --height 32 --width 32
    '''
    dataset = scipy.io.loadmat("{}/{}".format(args.save_path, args.dataset))

    # image pixel data
    X = dataset['X']

    # image class labels (not used in this project)
    Y = dataset['Y']

    total_image = X.shape[0]

    h=args.height
    w=args.width

    for i in range(total_image):
        img = X[i]
        img = np.reshape(img, (28, 28))
        if args.invert:
            img = (1-img)*255
        else:
            img = img*255
        img = Image.fromarray(img, 'L')
        img = img.rotate(-90)
        img = img.resize([h, w], Image.BILINEAR)
        img.save(args.save_path + '/img' + str(i) + '.png') 
Example 23
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: utils.py   License: Apache License 2.0
def __init__(self, size, interpolation=Image.BILINEAR):
        assert isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation 
Example 24
Project: Pytorch-Project-Template   Author: moemen95   File: voc_utils.py   License: MIT License
def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = tuple(reversed(size))  # size: (h, w)
        self.interpolation = interpolation 
Example 25
Project: tsn-pytorch   Author: yjxiong   File: transforms.py   License: BSD 2-Clause "Simplified" License
def __init__(self, size, interpolation=Image.BILINEAR):
        self.worker = torchvision.transforms.Scale(size, interpolation) 
Example 26
Project: tsn-pytorch   Author: yjxiong   File: transforms.py   License: BSD 2-Clause "Simplified" License
def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation 
Example 27
Project: insightocr   Author: deepinsight   File: data.py   License: MIT License
def __iter__(self):
        init_state_names = [x[0] for x in self.init_states]
        data = []
        label = []
        cnt = 0
        for m_line in self.dataset_lines:
            img_lst = m_line.strip().split(' ')
            img_path = os.path.join(self.data_root, img_lst[0])

            cnt += 1
            img = Image.open(img_path).resize(self.data_shape, Image.BILINEAR).convert('L')
            img = np.array(img).reshape((1, self.data_shape[1], self.data_shape[0]))
            data.append(img)

            ret = np.zeros(self.num_label, int)
            for idx in range(1, len(img_lst)):
                ret[idx - 1] = int(img_lst[idx])

            label.append(ret)
            if cnt % self.batch_size == 0:
                data_all = [mx.nd.array(data)] + self.init_state_arrays
                label_all = [mx.nd.array(label)]
                data_names = ['data'] + init_state_names
                label_names = ['label']
                data = []
                label = []
                yield SimpleBatch(data_names, data_all, label_names, label_all)
                continue 
Example 28
Project: DeepLung   Author: uci-cbcl   File: transforms.py   License: GNU General Public License v3.0
def __init__(self, size, interpolation=Image.BILINEAR):
        # assert isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 3)
        self.size = size
        self.interpolation = interpolation 
Example 29
Project: DeepLung   Author: uci-cbcl   File: transforms.py   License: GNU General Public License v3.0
def __init__(self, size, interpolation=Image.BILINEAR):
        assert isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 3)
        self.size = size
        self.interpolation = interpolation 
Example 30
Project: DeepLung   Author: uci-cbcl   File: transforms.py   License: GNU General Public License v3.0
def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation