Python PIL.Image.CUBIC Examples

The following are 9 code examples of PIL.Image.CUBIC. You may also want to check out all available functions and classes of the module PIL.Image.
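In Pillow, Image.CUBIC is simply an alias for Image.BICUBIC; the alias was deprecated in Pillow 9.1 and removed in Pillow 10 in favour of Image.Resampling.BICUBIC. Below is a minimal, version-tolerant sketch of how the constant is typically used; it is not taken from any of the projects listed further down.

from PIL import Image

# Pick whichever spelling this Pillow build provides.
try:
    CUBIC = Image.Resampling.BICUBIC  # Pillow >= 9.1
except AttributeError:
    CUBIC = Image.CUBIC               # older Pillow releases

img = Image.new("RGB", (64, 64))
resized = img.resize((32, 32), resample=CUBIC)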
Example #1
Source File: resize.py    From open_model_zoo with Apache License 2.0
def __init__(self, interpolation):
        if Image is None:
            raise ImportError(
                'pillow backend for resize operation requires the Pillow library. Please install it before usage.'
            )
        self._supported_interpolations = {
            'NEAREST': Image.NEAREST,
            'NONE': Image.NONE,
            'BILINEAR': Image.BILINEAR,
            'LINEAR': Image.LINEAR,
            'BICUBIC': Image.BICUBIC,
            'CUBIC': Image.CUBIC,
            'ANTIALIAS': Image.ANTIALIAS,
        }
        try:
            optional_interpolations = {
                'BOX': Image.BOX,
                'LANCZOS': Image.LANCZOS,
                'HAMMING': Image.HAMMING,
            }
            self._supported_interpolations.update(optional_interpolations)
        except AttributeError:
            pass
        super().__init__(interpolation) 
Example #2
Source File: resize.py    From open_model_zoo with Apache License 2.0
def supported_interpolations(cls):
        if Image is None:
            return {}
        intrp = {
            'NEAREST': Image.NEAREST,
            'NONE': Image.NONE,
            'BILINEAR': Image.BILINEAR,
            'LINEAR': Image.LINEAR,
            'BICUBIC': Image.BICUBIC,
            'CUBIC': Image.CUBIC,
            'ANTIALIAS': Image.ANTIALIAS
        }
        try:
            optional_interpolations = {
                'BOX': Image.BOX,
                'LANCZOS': Image.LANCZOS,
                'HAMMING': Image.HAMMING,
            }
            intrp.update(optional_interpolations)
        except AttributeError:
            pass
        return intrp 
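Both snippets above build the same name-to-constant map; the BOX, LANCZOS and HAMMING entries only exist in newer Pillow releases, which is why they are added inside try/except AttributeError. A shorter sketch of the same idea follows; the helper name is mine and is not part of open_model_zoo.

from PIL import Image

def available_interpolations():
    # Collect whichever resampling constants this Pillow build exposes;
    # names missing from older (or newer) releases are simply skipped.
    names = ('NEAREST', 'NONE', 'BILINEAR', 'LINEAR', 'BICUBIC', 'CUBIC',
             'ANTIALIAS', 'BOX', 'LANCZOS', 'HAMMING')
    return {name: getattr(Image, name) for name in names if hasattr(Image, name)}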
Example #3
Source File: torchutils.py    From SSENet-pytorch with MIT License
def __getitem__(self, idx):

        name = self.img_name_list[idx]

        img = Image.open(os.path.join(self.img_dir, name + '.jpg')).convert("RGB")
        mask = Image.open(os.path.join(self.label_dir, name + '.png'))

        if self.rescale is not None:
            s = self.rescale[0] + random.random() * (self.rescale[1] - self.rescale[0])
            adj_size = (round(img.size[0]*s/8)*8, round(img.size[1]*s/8)*8)
            img = img.resize(adj_size, resample=Image.CUBIC)
            mask = mask.resize(adj_size, resample=Image.NEAREST)

        if self.img_transform is not None:
            img = self.img_transform(img)
        if self.mask_transform is not None:
            mask = self.mask_transform(mask)

        if self.cropsize is not None:
            img, mask = imutils.random_crop([img, mask], self.cropsize, (0, 255))

        mask = imutils.RescaleNearest(0.125)(mask)

        if self.flip is True and bool(random.getrandbits(1)):
            img = np.flip(img, 1).copy()
            mask = np.flip(mask, 1).copy()

        img = np.transpose(img, (2, 0, 1))

        return name, img, mask 
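The rescale branch above draws a random factor from self.rescale and snaps both dimensions to a multiple of 8 (convenient for networks whose feature maps have stride 8); the image is resampled with CUBIC while the mask uses NEAREST so label values are never blended. A standalone sketch of just that step is shown below; the helper name and default range are mine.

import random
from PIL import Image

def random_rescale(img, mask, lo=0.5, hi=1.5):
    # Random scale factor; round each side to the nearest multiple of 8.
    s = lo + random.random() * (hi - lo)
    size = (round(img.size[0] * s / 8) * 8, round(img.size[1] * s / 8) * 8)
    return (img.resize(size, resample=Image.CUBIC),
            mask.resize(size, resample=Image.NEAREST))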
Example #4
Source File: data_transforms.py    From drn with BSD 3-Clause "New" or "Revised" License
def __call__(self, image, label):
        ratio = random.uniform(self.scale[0], self.scale[1])
        w, h = image.size
        tw = int(ratio * w)
        th = int(ratio * h)
        if ratio == 1:
            return image, label
        elif ratio < 1:
            interpolation = Image.ANTIALIAS
        else:
            interpolation = Image.CUBIC
        return image.resize((tw, th), interpolation), \
               label.resize((tw, th), Image.NEAREST) 
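This transform shrinks with Image.ANTIALIAS (an alias for the Lanczos filter, the better choice for downscaling) and enlarges with Image.CUBIC, while the label is always resized with NEAREST so class indices stay exact. A self-contained sketch of the same decision follows; the function name and default range are mine, not the drn API.

import random
from PIL import Image

def random_scale(image, label, scale_range=(0.5, 2.0)):
    ratio = random.uniform(*scale_range)
    if ratio == 1:
        return image, label
    # ANTIALIAS (Lanczos) when shrinking, CUBIC (bicubic) when enlarging.
    interpolation = Image.ANTIALIAS if ratio < 1 else Image.CUBIC
    w, h = image.size
    size = (int(ratio * w), int(ratio * h))
    return image.resize(size, interpolation), label.resize(size, Image.NEAREST)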
Example #5
Source File: torchutils.py    From psa with MIT License
def __getitem__(self, idx):

        name = self.img_name_list[idx]

        img = Image.open(os.path.join(self.img_dir, name + '.jpg')).convert("RGB")
        mask = Image.open(os.path.join(self.label_dir, name + '.png'))

        if self.rescale is not None:
            s = self.rescale[0] + random.random() * (self.rescale[1] - self.rescale[0])
            adj_size = (round(img.size[0]*s/8)*8, round(img.size[1]*s/8)*8)
            img = img.resize(adj_size, resample=Image.CUBIC)
            mask = mask.resize(adj_size, resample=Image.NEAREST)

        if self.img_transform is not None:
            img = self.img_transform(img)
        if self.mask_transform is not None:
            mask = self.mask_transform(mask)

        if self.cropsize is not None:
            img, mask = imutils.random_crop([img, mask], self.cropsize, (0, 255))

        mask = imutils.RescaleNearest(0.125)(mask)

        if self.flip is True and bool(random.getrandbits(1)):
            img = np.flip(img, 1).copy()
            mask = np.flip(mask, 1).copy()

        img = np.transpose(img, (2, 0, 1))

        return name, img, mask 
Example #6
Source File: representations.py    From SharpNet with GNU General Public License v3.0
def scale(self, ratio):
        w, h = self.shape()
        tw = int(ratio * w)
        th = int(ratio * h)

        if ratio < 1:
            interpolation = Image.ANTIALIAS
        else:
            interpolation = Image.CUBIC

        self.data = (self.data).resize((tw, th), interpolation) 
Example #7
Source File: data_transforms.py    From dla with BSD 3-Clause "New" or "Revised" License
def __call__(self, image, label):
        ratio = random.uniform(self.scale[0], self.scale[1])
        w, h = image.size
        tw = int(ratio * w)
        th = int(ratio * h)
        if ratio == 1:
            return image, label
        elif ratio < 1:
            interpolation = Image.ANTIALIAS
        else:
            interpolation = Image.CUBIC
        return image.resize((tw, th), interpolation), \
            label.resize((tw, th), Image.NEAREST) 
Example #8
Source File: reinforcement_learning.py    From HandsOnDeepLearningWithPytorch with MIT License
def get_screen():
    screen = env.render(mode='rgb_array').transpose((2, 0, 1))  # transpose into torch order (CHW)
    screen = screen[:, 160:320]  # Strip off the top and bottom of the screen

    # Get cart location
    world_width = env.x_threshold * 2
    scale = screen_width / world_width
    cart_location = int(env.state[0] * scale + screen_width / 2.0)  # MIDDLE OF CART

    # Decide how much to strip
    view_width = 320
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)

    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]

    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    resize = T.Compose([T.ToPILImage(),
                        T.Resize(40, interpolation=Image.CUBIC),
                        T.ToTensor()])

    return resize(screen).unsqueeze(0).to(device)  # Resize, and add a batch dimension (BCHW) 
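Here the bicubic constant is passed straight into torchvision's T.Resize. Newer torchvision releases (roughly 0.9 onward) expect a transforms.InterpolationMode value rather than a PIL constant, so an updated version of the resize pipeline might look like the sketch below; the dummy input is mine.

import torch
import torchvision.transforms as T
from torchvision.transforms import InterpolationMode

# Same pipeline with the current torchvision API: tensor -> PIL image ->
# bicubic resize so the shorter side is 40 (aspect ratio preserved) -> tensor.
resize = T.Compose([T.ToPILImage(),
                    T.Resize(40, interpolation=InterpolationMode.BICUBIC),
                    T.ToTensor()])

screen = torch.rand(3, 160, 320)        # dummy CHW float image in [0, 1]
batch = resize(screen).unsqueeze(0)     # add a batch dimension (BCHW)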
Example #9
Source File: test_color_lut.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def test_wrong_args(self):
        im = Image.new('RGB', (10, 10), 0)

        with self.assertRaisesRegex(ValueError, "filter"):
            im.im.color_lut_3d('RGB',
                               Image.CUBIC,
                               *self.generate_identity_table(3, 3))

        with self.assertRaisesRegex(ValueError, "image mode"):
            im.im.color_lut_3d('wrong',
                               Image.LINEAR,
                               *self.generate_identity_table(3, 3))

        with self.assertRaisesRegex(ValueError, "table_channels"):
            im.im.color_lut_3d('RGB',
                               Image.LINEAR,
                               *self.generate_identity_table(5, 3))

        with self.assertRaisesRegex(ValueError, "table_channels"):
            im.im.color_lut_3d('RGB',
                               Image.LINEAR,
                               *self.generate_identity_table(1, 3))

        with self.assertRaisesRegex(ValueError, "table_channels"):
            im.im.color_lut_3d('RGB',
                               Image.LINEAR,
                               *self.generate_identity_table(2, 3))

        with self.assertRaisesRegex(ValueError, "Table size"):
            im.im.color_lut_3d('RGB',
                               Image.LINEAR,
                               *self.generate_identity_table(3, (1, 3, 3)))

        with self.assertRaisesRegex(ValueError, "Table size"):
            im.im.color_lut_3d('RGB',
                               Image.LINEAR,
                               *self.generate_identity_table(3, (66, 3, 3)))

        with self.assertRaisesRegex(ValueError, r"size1D \* size2D \* size3D"):
            im.im.color_lut_3d('RGB',
                               Image.LINEAR,
                               3, 2, 2, 2, [0, 0, 0] * 7)

        with self.assertRaisesRegex(ValueError, r"size1D \* size2D \* size3D"):
            im.im.color_lut_3d('RGB',
                               Image.LINEAR,
                               3, 2, 2, 2, [0, 0, 0] * 9)

        with self.assertRaises(TypeError):
            im.im.color_lut_3d('RGB',
                               Image.LINEAR,
                               3, 2, 2, 2, [0, 0, "0"] * 8)

        with self.assertRaises(TypeError):
            im.im.color_lut_3d('RGB',
                               Image.LINEAR,
                               3, 2, 2, 2, 16)
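The private im.im.color_lut_3d call accepts only linear interpolation, which is why passing Image.CUBIC is expected to raise ValueError in the first assertion. The public entry point for the same machinery is ImageFilter.Color3DLUT; a minimal sketch of applying an identity LUT through that API (not part of the test above) might look like this:

from PIL import Image, ImageFilter

# Build a 3x3x3 identity lookup table and apply it with (tri)linear
# interpolation; bicubic interpolation is not supported for 3D LUTs.
identity_lut = ImageFilter.Color3DLUT.generate(3, lambda r, g, b: (r, g, b))
im = Image.new("RGB", (10, 10), 0)
out = im.filter(identity_lut)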