Python PIL.Image.EXTENT Examples

The following are 8 code examples that use PIL.Image.EXTENT, collected from open-source projects. The original project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the module PIL.Image.
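Image.EXTENT maps a source rectangle (x0, y0, x1, y1) onto the whole output image, so a single transform() call behaves like a combined crop and resize. Below is a minimal sketch, not one of the examples from the projects; the input file name is hypothetical.

from PIL import Image

im = Image.open("input.png")   # hypothetical input file
w, h = im.size

# Blow the top-left quarter of the image up to the full original size.
zoomed = im.transform((w, h), Image.EXTENT, (0, 0, w // 2, h // 2), Image.BILINEAR)
zoomed.save("zoomed.png")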
Example #1
Source File: transformations.py    From deep-prior with GNU General Public License v3.0
def transformImg(self, img, t):
        # scale by t[3]: map the full source extent onto a scaled output size
        imgT = img.transform((int(img.size[0]*t[3]), int(img.size[1]*t[3])), Image.EXTENT,
                             (0, 0, img.size[0], img.size[1]), Image.BILINEAR)
        # rotate by t[0] (given in radians) and optionally mirror
        imgT = imgT.rotate(numpy.rad2deg(t[0]), Image.BILINEAR, expand=1)
        if t[4] == 1.:
            imgT = imgT.transpose(Image.FLIP_LEFT_RIGHT)

        # crop only valid part
        if self.crop:
            imgT = imgT.crop(self.getInscribedRectangle(t[0], (img.size[0]*t[3], img.size[1]*t[3])))

        # crop from translation
        imgT = imgT.resize((int(self.imgSize[0]*1.1), int(self.imgSize[1]*1.1)), Image.BILINEAR)
        xstart = int((imgT.size[0] // 2 - t[1]) - self.imgSize[0] // 2)
        ystart = int((imgT.size[1] // 2 - t[2]) - self.imgSize[1] // 2)
        assert xstart >= 0 and ystart >= 0
        return imgT.crop((xstart, ystart, xstart+self.imgSize[0], ystart+self.imgSize[1])) 
Example #2
Source File: image.py    From python-escpos with MIT License
def to_column_format(self, high_density_vertical=True):
        """
        Extract slices of an image as equal-sized blobs of column-format data.

        :param high_density_vertical: True for 24-dot-high (high-density) slices, False for 8-dot slices
        """
        im = self._im.transpose(Image.ROTATE_270).transpose(Image.FLIP_LEFT_RIGHT)
        line_height = 24 if high_density_vertical else 8
        width_pixels, height_pixels = im.size
        top = 0
        left = 0
        while left < width_pixels:
            box = (left, top, left + line_height, top + height_pixels)
            im_slice = im.transform((line_height, height_pixels), Image.EXTENT, box)
            im_bytes = im_slice.tobytes()
            yield im_bytes
            left += line_height 
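A hedged usage sketch for the generator above, assuming python-escpos exposes the EscposImage wrapper at escpos.image; the input file name is hypothetical.

from escpos.image import EscposImage

img = EscposImage("logo.png")   # hypothetical input file
for blob in img.to_column_format(high_density_vertical=True):
    # each blob is one 24-dot column slice of the image as raw bytes
    print(len(blob))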
Example #3
Source File: test_image_transform.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def test_extent(self):
        im = hopper('RGB')
        (w, h) = im.size
        transformed = im.transform(im.size, Image.EXTENT,
                                   (0, 0,
                                    w//2, h//2),  # ul -> lr
                                   Image.BILINEAR)

        scaled = im.resize((w*2, h*2), Image.BILINEAR).crop((0, 0, w, h))

        # undone -- precision?
        self.assert_image_similar(transformed, scaled, 23) 
Example #4
Source File: test_image_transform.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def test_fill(self):
        for mode, pixel in [
            ['RGB', (255, 0, 0)],
            ['RGBA', (255, 0, 0, 255)],
            ['LA', (76, 0)]
        ]:
            im = hopper(mode)
            (w, h) = im.size
            transformed = im.transform(im.size, Image.EXTENT,
                                       (0, 0,
                                        w*2, h*2),
                                       Image.BILINEAR,
                                       fillcolor='red')

            self.assertEqual(transformed.getpixel((w-1, h-1)), pixel) 
Example #5
Source File: test_image_transform.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def test_alpha_premult_transform(self):

        def op(im, sz):
            (w, h) = im.size
            return im.transform(sz, Image.EXTENT,
                                (0, 0,
                                 w, h),
                                Image.BILINEAR)

        self._test_alpha_premult(op) 
Example #6
Source File: transform.py    From detectron2 with Apache License 2.0
def apply_image(self, img, interp=None):
        h, w = self.output_size
        ret = Image.fromarray(img).transform(
            size=(w, h),
            method=Image.EXTENT,
            data=self.src_rect,
            resample=interp if interp else self.interp,
            fill=self.fill,
        )
        return np.asarray(ret) 
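A hedged usage sketch for this method, assuming ExtentTransform is importable from detectron2.data.transforms and that its constructor accepts the src_rect, output_size, interp, and fill attributes used above.

import numpy as np
from detectron2.data.transforms import ExtentTransform   # assumed import path

# Crop the 100x100 top-left region and resize it to 64x64 in one EXTENT step.
img = np.zeros((200, 200, 3), dtype=np.uint8)   # dummy HxWxC uint8 image
t = ExtentTransform(src_rect=(0, 0, 100, 100), output_size=(64, 64))
out = t.apply_image(img)
print(out.shape)   # expected: (64, 64, 3)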
Example #7
Source File: Image.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def __transformer(self, box, image, method, data,
                      resample=NEAREST, fill=1):
        w = box[2] - box[0]
        h = box[3] - box[1]

        if method == AFFINE:
            data = data[0:6]

        elif method == EXTENT:
            # convert extent to an affine transform
            x0, y0, x1, y1 = data
            xs = float(x1 - x0) / w
            ys = float(y1 - y0) / h
            method = AFFINE
            data = (xs, 0, x0, 0, ys, y0)

        elif method == PERSPECTIVE:
            data = data[0:8]

        elif method == QUAD:
            # quadrilateral warp.  data specifies the four corners
            # given as NW, SW, SE, and NE.
            nw = data[0:2]
            sw = data[2:4]
            se = data[4:6]
            ne = data[6:8]
            x0, y0 = nw
            As = 1.0 / w
            At = 1.0 / h
            data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At,
                    (se[0]-sw[0]-ne[0]+x0)*As*At,
                    y0, (ne[1]-y0)*As, (sw[1]-y0)*At,
                    (se[1]-sw[1]-ne[1]+y0)*As*At)

        else:
            raise ValueError("unknown transformation method")

        if resample not in (NEAREST, BILINEAR, BICUBIC):
            raise ValueError("unknown resampling filter")

        image.load()

        self.load()

        if image.mode in ("1", "P"):
            resample = NEAREST

        self.im.transform2(box, image.im, method, data, resample, fill) 
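The EXTENT branch above reduces the extent to an affine transform whose coefficients (a, b, c, d, e, f) map each output pixel (x, y) to the source position (a*x + b*y + c, d*x + e*y + f). A minimal sketch of that equivalence, using Pillow's built-in radial_gradient() as a test image; recent Pillow releases may add a half-pixel offset to the EXTENT translation, which would show up as a small difference here.

from PIL import Image, ImageChops

im = Image.radial_gradient("L")   # built-in 256x256 grayscale test image
w, h = 128, 128
x0, y0, x1, y1 = 0, 0, 64, 64

via_extent = im.transform((w, h), Image.EXTENT, (x0, y0, x1, y1), Image.NEAREST)
xs, ys = (x1 - x0) / w, (y1 - y0) / h
via_affine = im.transform((w, h), Image.AFFINE, (xs, 0, x0, 0, ys, y0), Image.NEAREST)

# None means the two results are pixel-identical.
print(ImageChops.difference(via_extent, via_affine).getbbox())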
Example #8
Source File: test_mode_i16.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def test_basic(self):
        # PIL 1.1 has limited support for 16-bit image data.  Check that
        # create/copy/transform and save work as expected.

        def basic(mode):

            imIn = self.original.convert(mode)
            self.verify(imIn)

            w, h = imIn.size

            imOut = imIn.copy()
            self.verify(imOut)  # copy

            imOut = imIn.transform((w, h), Image.EXTENT, (0, 0, w, h))
            self.verify(imOut)  # transform

            filename = self.tempfile("temp.im")
            imIn.save(filename)

            imOut = Image.open(filename)

            self.verify(imIn)
            self.verify(imOut)

            imOut = imIn.crop((0, 0, w, h))
            self.verify(imOut)

            imOut = Image.new(mode, (w, h), None)
            imOut.paste(imIn.crop((0, 0, w//2, h)), (0, 0))
            imOut.paste(imIn.crop((w//2, 0, w, h)), (w//2, 0))

            self.verify(imIn)
            self.verify(imOut)

            imIn = Image.new(mode, (1, 1), 1)
            self.assertEqual(imIn.getpixel((0, 0)), 1)

            imIn.putpixel((0, 0), 2)
            self.assertEqual(imIn.getpixel((0, 0)), 2)

            if mode == "L":
                maximum = 255
            else:
                maximum = 32767

            imIn = Image.new(mode, (1, 1), 256)
            self.assertEqual(imIn.getpixel((0, 0)), min(256, maximum))

            imIn.putpixel((0, 0), 512)
            self.assertEqual(imIn.getpixel((0, 0)), min(512, maximum))

        basic("L")

        basic("I;16")
        basic("I;16B")
        basic("I;16L")

        basic("I")