Python PIL.Image.merge() Examples

The following are 30 code examples of PIL.Image.merge(). Image.merge(mode, bands) builds a new multi-band image from a sequence of single-band images, and it most often appears together with Image.split() to recombine channels after per-band processing. The originating project, source file, and license for each snippet are noted above it. You may also want to check out the other available functions and classes of the PIL.Image module.
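Before the project snippets, here is a minimal sketch of the typical split/merge round trip. The input path 'hopper.jpg' and the channel swap are illustrative assumptions, not code from any of the examples below.

from PIL import Image

# Minimal sketch (assumed input path): split an RGB image into its bands,
# swap the red and blue channels, and merge the bands back into a new image.
img = Image.open('hopper.jpg').convert('RGB')
r, g, b = img.split()                    # three single-band 'L' images
swapped = Image.merge('RGB', (b, g, r))  # band order determines the output channels
swapped.save('swapped.jpg')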
Example #1
Source File: test_format_hsv.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def wedge(self):
        w = Image._wedge()
        w90 = w.rotate(90)

        (px, h) = w.size

        r = Image.new('L', (px*3, h))
        g = r.copy()
        b = r.copy()

        r.paste(w, (0, 0))
        r.paste(w90, (px, 0))

        g.paste(w90, (0, 0))
        g.paste(w,  (2*px, 0))

        b.paste(w, (px, 0))
        b.paste(w90, (2*px, 0))

        img = Image.merge('RGB', (r, g, b))

        return img 
Example #2
Source File: utils.py    From YOLO with MIT License
def distort_image(im, hue, sat, val):
	im = im.convert('HSV')
	cs = list(im.split())
	cs[1] = cs[1].point(lambda i: i * sat)
	cs[2] = cs[2].point(lambda i: i * val)
	def change_hue(x):
		x += hue*255
		if x > 255:
			x -= 255
		if x < 0:
			x += 255
		return x
	cs[0] = cs[0].point(change_hue)
	im = Image.merge(im.mode, tuple(cs))
	im = im.convert('RGB')
	return im
Example #3
Source File: utils.py    From emotion_classification with MIT License
def distort_image(im, hue, sat, val):
    im = im.convert('HSV')
    cs = list(im.split())
    cs[1] = cs[1].point(lambda i: i * sat)
    cs[2] = cs[2].point(lambda i: i * val)
    
    def change_hue(x):
        x += hue*255
        if x > 255:
            x -= 255
        if x < 0:
            x += 255
        return x
    cs[0] = cs[0].point(change_hue)
    im = Image.merge(im.mode, tuple(cs))

    im = im.convert('RGB')
    return im 
Example #4
Source File: utils.py    From emotion_classification with MIT License
def distort_image(im, hue, sat, val):
    im = im.convert('HSV')
    cs = list(im.split())
    cs[1] = cs[1].point(lambda i: i * sat)
    cs[2] = cs[2].point(lambda i: i * val)
    
    def change_hue(x):
        x += hue*255
        if x > 255:
            x -= 255
        if x < 0:
            x += 255
        return x
    cs[0] = cs[0].point(change_hue)
    im = Image.merge(im.mode, tuple(cs))

    im = im.convert('RGB')
    return im 
Example #5
Source File: image.py    From pytorch-0.4-yolov3 with MIT License
def distort_image(im, hue, sat, val):
    im = im.convert('HSV')
    cs = list(im.split())
    cs[1] = cs[1].point(lambda i: i * sat)
    cs[2] = cs[2].point(lambda i: i * val)
    
    def change_hue(x):
        x += hue*255
        if x > 255:
            x -= 255
        if x < 0:
            x += 255
        return x
    cs[0] = cs[0].point(change_hue)
    im = Image.merge(im.mode, tuple(cs))

    im = im.convert('RGB')
    #constrain_image(im)
    return im 
Example #6
Source File: key.py    From kle_render with MIT License
def open_base_img(full_profile, res, base_color, color):
    # get base image according to profile and perceptual gray of key color
    base_num = str([0xE0, 0xB0, 0x80, 0x50, 0x20].index(base_color) + 1)

    # open image and convert to Lab
    with Image.open('images/{0}_{1}{2}.png'.format(*full_profile, base_num)) as img:
        key_img = img.resize((int(s * res / 200) for s in img.size), resample=Image.BILINEAR).convert('RGBA')
    if full_profile[1] in ('ISO', 'BIGENTER'): alpha = key_img.split()[-1]
    l, a, b = ImageCms.applyTransform(key_img, rgb2lab_transform).split()

    # convert key color to Lab
    # a and b should be scaled by 128/100, but desaturation looks more natural
    rgb_color = color_objects.sRGBColor(*ImageColor.getrgb(color), is_upscaled=True)
    lab_color = color_conversions.convert_color(rgb_color, color_objects.LabColor)
    l1, a1, b1 = lab_color.get_value_tuple()
    l1, a1, b1 = int(l1 * 256 / 100), int(a1 + 128), int(b1 + 128)

    # change Lab of base image to match that of key color
    l = ImageMath.eval('convert(l + l1 - l_avg, "L")', l=l, l1=l1, l_avg=base_color)
    a = ImageMath.eval('convert(a + a1 - a, "L")', a=a, a1=a1)
    b = ImageMath.eval('convert(b + b1 - b, "L")', b=b, b1=b1)

    key_img = ImageCms.applyTransform(Image.merge('LAB', (l, a, b)), lab2rgb_transform).convert('RGBA')
    if full_profile[1] in ('ISO', 'BIGENTER'): key_img.putalpha(alpha)
    return key_img 
Example #7
Source File: sensor.py    From Hands-On-Intelligent-Agents-with-OpenAI-Gym with MIT License
def save_to_disk(self, filename):
        """Save this image to disk (requires PIL installed)."""
        filename = _append_extension(filename, '.png')

        try:
            from PIL import Image as PImage
        except ImportError:
            raise RuntimeError(
                'cannot import PIL, make sure pillow package is installed')

        image = PImage.frombytes(
            mode='RGBA',
            size=(self.width, self.height),
            data=self.raw_data,
            decoder_name='raw')
        color = image.split()
        image = PImage.merge("RGB", color[2::-1])

        folder = os.path.dirname(filename)
        if not os.path.isdir(folder):
            os.makedirs(folder)
        image.save(filename) 
Example #8
Source File: sensor.py    From Hands-On-Intelligent-Agents-with-OpenAI-Gym with MIT License
def save_to_disk(self, filename):
        """Save this image to disk (requires PIL installed)."""
        filename = _append_extension(filename, '.png')

        try:
            from PIL import Image as PImage
        except ImportError:
            raise RuntimeError(
                'cannot import PIL, make sure pillow package is installed')

        image = PImage.frombytes(
            mode='RGBA',
            size=(self.width, self.height),
            data=self.raw_data,
            decoder_name='raw')
        color = image.split()
        image = PImage.merge("RGB", color[2::-1])

        folder = os.path.dirname(filename)
        if not os.path.isdir(folder):
            os.makedirs(folder)
        image.save(filename) 
Example #9
Source File: loader.py    From 3D-ResNets-PyTorch with MIT License
def __call__(self, video_path, frame_indices):
        with h5py.File(video_path, 'r') as f:

            flow_data = []
            for flow in self.flows:
                flow_data.append(f[f'video_{flow}'])

            video = []
            for i in frame_indices:
                if i < len(flow_data[0]):
                    frame = [
                        Image.open(io.BytesIO(video_data[i]))
                        for video_data in flow_data
                    ]
                    frame.append(frame[-1])  # add dummy data into third channel
                    video.append(Image.merge('RGB', frame))

        return video 
Example #10
Source File: super_resolution.py    From training_results_v0.6 with Apache License 2.0
def resolve(ctx):
    from PIL import Image
    if isinstance(ctx, list):
        ctx = [ctx[0]]
    net.load_parameters('superres.params', ctx=ctx)
    img = Image.open(opt.resolve_img).convert('YCbCr')
    y, cb, cr = img.split()
    data = mx.nd.expand_dims(mx.nd.expand_dims(mx.nd.array(y), axis=0), axis=0)
    out_img_y = mx.nd.reshape(net(data), shape=(-3, -2)).asnumpy()
    out_img_y = out_img_y.clip(0, 255)
    out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')

    out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
    out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')

    out_img.save('resolved.png') 
Example #11
Source File: inference_with_net.py    From TableTrainNet with MIT License
def from_png_to_bmp(png_path, output_path=BMP_IMAGE_TEST_TO_PATH):
    """
    Convert a png_path image into a bmp 3-channel one and return the path to the converted image
    :param png_path: path of the image
    :param output_path: path in which we save the image
    :return: the file path
    """
    # convert a .png image file to a .bmp image file using PIL
    file_name = os.path.splitext(png_path)[0] \
        .split("/")[-1]
    file_in = png_path
    img = Image.open(file_in)

    file_out = os.path.join(output_path, str(file_name), str(file_name) + '.bmp')
    len(img.split())  # test
    if len(img.split()) == 4:
        # prevent IOError: cannot write mode RGBA as BMP
        r, g, b, a = img.split()
        img = Image.merge("RGB", (r, g, b))
        img.save(file_out)
    else:
        img.save(file_out)
    return file_out 
Example #12
Source File: test_image_filter.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def test_consistency_5x5(self):
        source = Image.open("Tests/images/hopper.bmp")
        reference = Image.open("Tests/images/hopper_emboss_more.bmp")
        kernel = ImageFilter.Kernel((5, 5),  # noqa: E127
                                    (-1, -1, -1, -1,  0,
                                     -1, -1, -1,  0,  1,
                                     -1, -1,  0,  1,  1,
                                     -1,  0,  1,  1,  1,
                                      0,  1,  1,  1,  1), 0.3)
        source = source.split() * 2
        reference = reference.split() * 2

        for mode in ['L', 'LA', 'RGB', 'CMYK']:
            self.assert_image_equal(
                Image.merge(mode, source[:len(mode)]).filter(kernel),
                Image.merge(mode, reference[:len(mode)]),
            ) 
Example #13
Source File: sensor.py    From coiltraine with MIT License
def save_to_disk(self, filename, format='.png'):
        """Save this image to disk (requires PIL installed)."""
        filename = _append_extension(filename, format)

        try:
            from PIL import Image as PImage
        except ImportError:
            raise RuntimeError(
                'cannot import PIL, make sure pillow package is installed')

        image = PImage.frombytes(
            mode='RGBA',
            size=(self.width, self.height),
            data=self.raw_data,
            decoder_name='raw')
        color = image.split()
        image = PImage.merge("RGB", color[2::-1])

        folder = os.path.dirname(filename)
        if not os.path.isdir(folder):
            os.makedirs(folder)
        image.save(filename, quality=100) 
Example #14
Source File: super_resolution.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def resolve(ctx):
    from PIL import Image
    if isinstance(ctx, list):
        ctx = [ctx[0]]
    net.load_parameters('superres.params', ctx=ctx)
    img = Image.open(opt.resolve_img).convert('YCbCr')
    y, cb, cr = img.split()
    data = mx.nd.expand_dims(mx.nd.expand_dims(mx.nd.array(y), axis=0), axis=0)
    out_img_y = mx.nd.reshape(net(data), shape=(-3, -2)).asnumpy()
    out_img_y = out_img_y.clip(0, 255)
    out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')

    out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
    out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')

    out_img.save('resolved.png') 
Example #15
Source File: deepfry.py    From FlameCogs with MIT License
def _fry(img):
		e = ImageEnhance.Sharpness(img)
		img = e.enhance(100)
		e = ImageEnhance.Contrast(img)
		img = e.enhance(100)
		e = ImageEnhance.Brightness(img)
		img = e.enhance(.27)
		r, b, g = img.split()
		e = ImageEnhance.Brightness(r)
		r = e.enhance(4)
		e = ImageEnhance.Brightness(g)
		g = e.enhance(1.75)
		e = ImageEnhance.Brightness(b)
		b = e.enhance(.6)
		img = Image.merge('RGB', (r, g, b))
		e = ImageEnhance.Brightness(img)
		img = e.enhance(1.5)
		temp = BytesIO()
		temp.name = 'deepfried.png'
		img.save(temp)
		temp.seek(0)
		return temp 
Example #16
Source File: test_color_lut.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def test_channels_order(self):
        g = Image.linear_gradient('L')
        im = Image.merge('RGB', [g, g.transpose(Image.ROTATE_90),
                                 g.transpose(Image.ROTATE_180)])

        # Reverse channels by splitting and using table
        self.assert_image_equal(
            Image.merge('RGB', im.split()[::-1]),
            im._new(im.im.color_lut_3d('RGB', Image.LINEAR,
                    3, 2, 2, 2, [
                        0, 0, 0,  0, 0, 1,
                        0, 1, 0,  0, 1, 1,

                        1, 0, 0,  1, 0, 1,
                        1, 1, 0,  1, 1, 1,
                    ]))) 
Example #17
Source File: service.py    From OCRbot with GNU Affero General Public License v3.0
def invert_image(image):
	if image.mode == 'RGBA':
		# Remove alpha channel before inverting image then re-add it
		r, g, b, a = image.split()
		rgb_image = Image.merge('RGB', (r, g, b))
		inverted_image = ImageOps.invert(rgb_image)
		r2, g2, b2 = inverted_image.split()
		final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
		return final_transparent_image
	else:
		inverted_image = ImageOps.invert(image)
		return inverted_image 
Example #18
Source File: steglsb.py    From lsb-steganography with MIT License
def _decode_img(self):
        s = self.steg.split()
        expr = 'convert((s & 2**bits - 1) << (8 - bits), "L")'
        out = [ImageMath.eval(expr, s = s[k], bits = self.bits) for k in range(len(s))] 
        out = Image.merge(self.steg.mode, out)
        self._save_img(out, self.outfile)
        print('[*] Created outfile at {}'.format(self.outfile)) 
Example #19
Source File: transformation.py    From Beyond-Binary-Supervision-CVPR19 with MIT License
def __call__(self, img):
        r, g, b = img.split()
        img = Image.merge("RGB", (b, g, r))
        return img 
Example #20
Source File: mpv.py    From FeelUOwn with GNU General Public License v3.0
def screenshot_raw(self, includes='subtitles'):
        """Mapped mpv screenshot_raw command, see man mpv(1). Returns a pillow Image object."""
        from PIL import Image
        res = self.node_command('screenshot-raw', includes)
        if res['format'] != 'bgr0':
            raise ValueError('Screenshot in unknown format "{}". Currently, only bgr0 is supported.'
                    .format(res['format']))
        img = Image.frombytes('RGBA', (res['w'], res['h']), res['data'])
        b,g,r,a = img.split()
        return Image.merge('RGB', (r,g,b)) 
Example #21
Source File: image_to_pc.py    From fluxclient with GNU Affero General Public License v3.0
def merge(self):
        """
        merge left and right scanned points
        find which side is brighter, use it as base
        use Left side as base
        """
        s_R = sum(int(p[3]) + p[4] + p[5] for p in self.points_R)
        s_L = sum(int(p[3]) + p[4] + p[5] for p in self.points_L)

        if s_R > s_L:
            base = self.points_R
            add_on = self.points_L
            delta = round(60 / (360 / self.steps))
        else:
            base = self.points_L
            add_on = self.points_R
            delta = -round(60 / (360 / self.steps))

        record = {}
        for p in range(len(base)):
            record[(base[p][6], base[p][8])] = p

        self.points_M = base[:]
        logger.debug("merging base %s, add_on %s", len(base), len(add_on))

        for p in add_on:
            t = (p[6] + delta) % 400, p[8]
            if t in record:
                old_p = base[record[t]]
                base[record[t]][3] = p[3] / 2 + old_p[3] / 2
                base[record[t]][4] = p[4] / 2 + old_p[4] / 2
                base[record[t]][5] = p[5] / 2 + old_p[5] / 2
            else:
                self.points_M.append(p)

        logger.warning('merge done: output self.mpoints_M:%s', len(self.points_M)) 
Example #22
Source File: pildriver.py    From mxnet-lambda with Apache License 2.0
def do_merge(self):
        """usage: merge <string:mode> <image:pic1>
                        [<image:pic2> [<image:pic3> [<image:pic4>]]]

        Merge top-of stack images in a way described by the mode.
        """
        mode = self.do_pop()
        bandlist = []
        for band in mode:
            bandlist.append(self.do_pop())
        self.push(Image.merge(mode, bandlist))
Example #23
Source File: deepfry.py    From FlameCogs with MIT License
def _nuke(img):
		w, h = img.size[0], img.size[1]
		dx = ((w+200)//200)*2
		dy = ((h+200)//200)*2
		img = img.resize(((w+1)//dx,(h+1)//dy))
		e = ImageEnhance.Sharpness(img)
		img = e.enhance(100)
		e = ImageEnhance.Contrast(img)
		img = e.enhance(100)
		e = ImageEnhance.Brightness(img)
		img = e.enhance(.27)
		r, b, g = img.split()
		e = ImageEnhance.Brightness(r)
		r = e.enhance(4)
		e = ImageEnhance.Brightness(g)
		g = e.enhance(1.75)
		e = ImageEnhance.Brightness(b)
		b = e.enhance(.6)
		img = Image.merge('RGB', (r, g, b))
		e = ImageEnhance.Brightness(img)
		img = e.enhance(1.5)
		e = ImageEnhance.Sharpness(img)
		img = e.enhance(100)
		img = img.resize((w,h),Image.BILINEAR)
		temp = BytesIO()
		temp.name = 'nuke.jpg'
		img.save(temp, quality=1)
		temp.seek(0)
		return temp 
Example #24
Source File: deepfry.py    From FlameCogs with MIT License
def _videofry(img, duration):
		imgs = []
		frame = 0
		while img:
			i = img.copy()
			i = i.convert('RGB')
			e = ImageEnhance.Sharpness(i)
			i = e.enhance(100)
			e = ImageEnhance.Contrast(i)
			i = e.enhance(100)
			e = ImageEnhance.Brightness(i)
			i = e.enhance(.27)
			r, g, b = i.split()
			e = ImageEnhance.Brightness(r)
			r = e.enhance(4)
			e = ImageEnhance.Brightness(g)
			g = e.enhance(1.75)
			e = ImageEnhance.Brightness(b)
			b = e.enhance(.6)
			e = ImageEnhance.Contrast(b)
			i = Image.merge('RGB', (r, g, b))
			e = ImageEnhance.Brightness(i)
			i = e.enhance(1.5)
			imgs.append(i)
			frame += 1
			try:
				img.seek(frame)
			except EOFError:
				break
		temp = BytesIO()
		temp.name = 'deepfried.gif'
		if duration:
			imgs[0].save(temp, format='GIF', save_all=True, append_images=imgs[1:], loop=0, duration=duration)
		else:
			imgs[0].save(temp, format='GIF', save_all=True, append_images=imgs[1:], loop=0)
		temp.seek(0)
		return temp 
Example #25
Source File: models.py    From pretrained.ml with MIT License
def predict(self, img):
        """ # Arguments
                img: a numpy array

            # Returns
                The url to an image with the segmentation
            """

        with self.graph.as_default():
            img = Image.fromarray(img)
            # RGB -> BGR
            b, g, r = img.split()
            img = Image.merge("RGB", (r, g, b))
            img -= self.IMG_MEAN

            # Predictions.
            raw_output = self.net.layers['fc1_voc12']
            raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img)[0:2, ])
            raw_output_up = tf.argmax(raw_output_up, axis=3)
            self.pred = tf.expand_dims(raw_output_up, dim=3)

            preds = self.sess.run(self.pred, feed_dict={self.image_placeholder: np.expand_dims(img, axis=0)})

        msk = decode_labels(preds, num_classes=self.NUM_CLASSES)
        im = Image.fromarray(msk[0])

        filename = str(uuid.uuid4()) + '.jpg'
        save_dir = './outputs'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        save_path = os.path.join(save_dir, filename)
        im.save(save_path)

        return json.dumps({'output': filename}) 
Example #26
Source File: Screenshot.py    From roc with MIT License
def region_shot(cls,name='seq', seq_id=0, x1=0, y1=0, x2=200, y2=200, color=False, x_val=0):
        sleep(1)
        image = pyautogui.screenshot(region=(x1, y1, x2, y2))
        if not color:
            image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2GRAY)
        else:
            image = cv2.cvtColor(np.array(image), 2)
        cv2.imwrite('sequential/bw/' + str(name) + str(seq_id) + '.png', image)

        # process image
        im = Image.open('sequential/bw/' + str(name) + str(seq_id) + '.png')
        R, G, B = im.convert('RGB').split()
        r = R.load()
        g = G.load()
        b = B.load()
        w, h = im.size

        # Convert non-black pixels to white
        for i in range(w):
            for j in range(h):
                # if(r[i, j] != 0 or g[i, j] != 0 or b[i, j] != 0):
                if (x_val < r[i, j] < 100 or x_val < g[i, j] < 100 or x_val < b[i, j] < 100):
                    r[i, j] = 0
                    # g[i, j] = 0
                    # Just change R channel

        # Merge just the R channel as all channels
        im = Image.merge('RGB', (R, R, R))
        im.save('sequential/bw/' + str(name) + str(seq_id) + '.png') 
Example #27
Source File: __init__.py    From Legofy with MIT License
def apply_color_overlay(image, color):
    '''Small function to apply an effect over an entire image'''
    overlay_red, overlay_green, overlay_blue = color
    channels = image.split()

    r = channels[0].point(lambda color: overlay_effect(color, overlay_red))
    g = channels[1].point(lambda color: overlay_effect(color, overlay_green))
    b = channels[2].point(lambda color: overlay_effect(color, overlay_blue))


    channels[0].paste(r)
    channels[1].paste(g)
    channels[2].paste(b)

    return Image.merge(image.mode, channels) 
Example #28
Source File: captcha.py    From any-captcha with GNU General Public License v2.0
def save(self, fp):
        lens = self._captcha.split()
        path = re.findall("(.+/).+", fp)[0]
        if os.path.exists(path) is False:
            os.makedirs(path)

        if fp.lower().endswith("jpg") and len(lens) == 4:
            r, g, b, a = lens
            tmp = Image.merge("RGB", (r, g, b))
            tmp.save(fp, "JPEG")
        else:
            self._captcha.save(fp) 
Example #29
Source File: transforms.py    From Deep_Metric with Apache License 2.0
def __call__(self, img):
        r, g, b = img.split()
        img = Image.merge("RGB", (b, g, r))
        return img 
Example #30
Source File: darkenIcons.py    From godot-themes with MIT License
def inverse(inpng, outpng):
    image = Image.open(inpng)
    if image.mode == 'RGBA':
        r, g, b, a = image.split()
        rgb_image = Image.merge('RGB', (r, g, b))
        inverted_image = PIL.ImageOps.invert(rgb_image)
        r2, g2, b2 = inverted_image.split()
        final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
        final_transparent_image.save(outpng)
    else:
        inverted_image = PIL.ImageOps.invert(image)
        inverted_image.save(outpng)