Python PIL.Image.Image() Examples
The following are 30 code examples of PIL.Image.Image().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module PIL.Image, or try the search function.
Example #1
Source File: blob_image_test.py From browserscope with Apache License 2.0 | 6 votes |
def expect_crop(self, left_x=None, right_x=None, top_y=None, bottom_y=None):
    """Setup a mox expectation to images_stub._Crop.

    Args:
      left_x: optional float, left crop fraction.
      right_x: optional float, right crop fraction.
      top_y: optional float, top crop fraction.
      bottom_y: optional float, bottom crop fraction.

    Raises:
      self.failureException: if a supplied crop argument is not a float.
    """
    crop_xform = images_service_pb.Transform()
    # (value, proto setter) pairs -- replaces four identical copies of the
    # validate-and-set sequence with one loop.
    setters = [
        (left_x, crop_xform.set_crop_left_x),
        (right_x, crop_xform.set_crop_right_x),
        (top_y, crop_xform.set_crop_top_y),
        (bottom_y, crop_xform.set_crop_bottom_y),
    ]
    for value, setter in setters:
        if value is not None:
            if not isinstance(value, float):
                raise self.failureException('Crop argument must be a float.')
            setter(value)
    self._images_stub._Crop(mox.IsA(Image.Image), crop_xform).AndReturn(
        self._image)
Example #2
Source File: transforms.py From ACAN with MIT License | 6 votes |
def __call__(self, img):
    """Convert a ``numpy.ndarray`` image to a float tensor.

    Args:
        img (numpy.ndarray): H x W (x C) image to convert.

    Returns:
        Tensor: converted image (channels-first for 3-D input).
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be ndarray. Got {}'.format(type(img)))
    if isinstance(img, np.ndarray):
        # handle numpy array
        if img.ndim == 3:
            # HWC -> CHW; copy() gives torch contiguous memory to wrap
            tensor = torch.from_numpy(img.transpose((2, 0, 1)).copy())
        elif img.ndim == 2:
            tensor = torch.from_numpy(img.copy())
        else:
            raise RuntimeError(
                'img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))
        # backward compatibility
        # return tensor.float().div(255)
        return tensor.float()
Example #3
Source File: opencv_functional.py From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License | 6 votes |
def adjust_saturation(img, saturation_factor):
    """Adjust color saturation of an image.

    Args:
        img (numpy ndarray): image to be adjusted.
        saturation_factor (float): 0 gives a black and white image, 1 the
            original image, 2 doubles the saturation.

    Returns:
        numpy ndarray: saturation adjusted image.
    """
    # ~10ms slower than PIL!
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    # Round-trip through PIL's colour enhancer.
    pil_img = ImageEnhance.Color(Image.fromarray(img)).enhance(saturation_factor)
    return np.array(pil_img)
Example #4
Source File: transforms.py From ACAN with MIT License | 6 votes |
def adjust_saturation(img, saturation_factor):
    """Adjust color saturation of a PIL Image.

    Args:
        img (PIL Image): image to be adjusted.
        saturation_factor (float): 0 gives a black and white image, 1 the
            original image, 2 doubles the saturation.

    Returns:
        PIL Image: saturation adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageEnhance.Color(img).enhance(saturation_factor)
Example #5
Source File: transforms.py From ACAN with MIT License | 6 votes |
def adjust_contrast(img, contrast_factor):
    """Adjust contrast of a PIL Image.

    Args:
        img (PIL Image): image to be adjusted.
        contrast_factor (float): any non-negative number; 0 gives a solid
            gray image, 1 the original image, 2 doubles the contrast.

    Returns:
        PIL Image: contrast adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageEnhance.Contrast(img).enhance(contrast_factor)
Example #6
Source File: opencv_functional.py From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License | 6 votes |
def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an image.

    Args:
        img (numpy ndarray): image to be adjusted.
        contrast_factor (float): any non-negative number; 0 gives a solid
            gray image, 1 the original image, 2 doubles the contrast.

    Returns:
        numpy ndarray: contrast adjusted image.
    """
    # much faster to use the LUT construction than anything else I've tried
    # it's because you have to change dtypes multiple times
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    # Scale each intensity about the fixed pivot 74 (approximate mean gray
    # used by the original author), clip to [0, 255], build a uint8 LUT.
    table = np.array(
        [(level - 74) * contrast_factor + 74 for level in range(0, 256)]
    ).clip(0, 255).astype('uint8')
    if img.shape[2] == 1:
        return cv2.LUT(img, table)[:, :, np.newaxis]
    return cv2.LUT(img, table)
Example #7
Source File: opencv_functional.py From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License | 6 votes |
def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an image.

    Args:
        img (numpy ndarray): image to be adjusted.
        brightness_factor (float): any non-negative number; 0 gives a black
            image, 1 the original image, 2 doubles the brightness.

    Returns:
        numpy ndarray: brightness adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    # Scale every intensity, clip to [0, 255], and apply as a uint8 LUT.
    table = np.array(
        [level * brightness_factor for level in range(0, 256)]
    ).clip(0, 255).astype('uint8')
    # same thing but a bit slower
    # cv2.convertScaleAbs(img, alpha=brightness_factor, beta=0)
    if img.shape[2] == 1:
        return cv2.LUT(img, table)[:, :, np.newaxis]
    return cv2.LUT(img, table)
Example #8
Source File: transforms.py From ACAN with MIT License | 6 votes |
def get_params(img, output_size):
    """Get parameters for ``crop`` for a center crop.

    Args:
        img (numpy.ndarray): image to be cropped (H first, W second).
        output_size (tuple): expected (th, tw) output size of the crop.

    Returns:
        tuple: params (i, j, h, w) to be passed to ``crop``.
    """
    full_h, full_w = img.shape[0], img.shape[1]
    th, tw = output_size
    # Centre the window by halving the leftover margin on each axis.
    i = int(round((full_h - th) / 2.))
    j = int(round((full_w - tw) / 2.))
    # # randomized cropping
    # i = np.random.randint(i-3, i+4)
    # j = np.random.randint(j-3, j+4)
    return i, j, th, tw
Example #9
Source File: opencv_functional.py From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License | 6 votes |
def resized_crop(img, i, j, h, w, size, interpolation=cv2.INTER_LINEAR):
    """Crop the given numpy ndarray, then resize the crop to ``size``.

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (numpy ndarray): image to be cropped.
        i: upper pixel coordinate.
        j: left pixel coordinate.
        h: height of the cropped image.
        w: width of the cropped image.
        size (sequence or int): desired output size, same semantics as
            ``scale``.
        interpolation (int, optional): cv2 interpolation flag; default
            ``cv2.INTER_LINEAR``.

    Returns:
        numpy ndarray: the cropped and resized image.
    """
    assert _is_numpy_image(img), 'img should be numpy image'
    cropped = crop(img, i, j, h, w)
    return resize(cropped, size, interpolation=interpolation)
Example #10
Source File: opencv_functional.py From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License | 6 votes |
def to_grayscale(img, num_output_channels=1):
    """Convert image to grayscale version of image.

    Args:
        img (numpy ndarray): image to be converted to grayscale.
        num_output_channels (int): 1 -> single-channel result,
            3 -> three identical channels (r = g = b).

    Returns:
        numpy ndarray: grayscale version of the image.

    Raises:
        TypeError: if ``img`` is not a numpy ndarray.
        ValueError: if ``num_output_channels`` is neither 1 nor 3 (the
            original silently returned the input unchanged in that case).
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))
    if num_output_channels == 1:
        return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
    if num_output_channels == 3:
        # much faster than doing cvtColor to go back to gray
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
        return np.broadcast_to(gray, img.shape)
    raise ValueError('num_output_channels should be either 1 or 3')
Example #11
Source File: transforms.py From ACAN with MIT License | 6 votes |
def get_params(img, output_size, slide):
    """Get parameters for ``crop`` for a sliding-window crop.

    Args:
        img (numpy.ndarray (H x W x C)): image to be cropped.
        output_size (tuple): expected (th, tw) output size of the crop.
        slide (tuple): relative (vertical, horizontal) window position,
            each component in [0, 1].

    Returns:
        tuple: params (i, j, h, w) to be passed to ``crop``.
    """
    full_h, full_w = img.shape[0], img.shape[1]
    th, tw = output_size
    # Place the window proportionally along each axis.
    i = round((full_h - th - 1) * slide[0])
    j = round((full_w - tw - 1) * slide[1])
    # i = np.random.randint(0, h - th)
    # j = np.random.randint(0, w - tw)
    return i, j, th, tw
Example #12
Source File: transforms.py From ACAN with MIT License | 6 votes |
def __call__(self, img):
    """Crop ``img`` to the window stored on this transform.

    Args:
        img (numpy.ndarray): image to be cropped.

    Returns:
        numpy.ndarray: the cropped image.
    """
    top, left, height, width = self.i, self.j, self.h, self.w
    if not _is_numpy_image(img):
        raise TypeError('img should be ndarray. Got {}'.format(type(img)))
    if img.ndim == 3:
        return img[top:top + height, left:left + width, :]
    if img.ndim == 2:
        return img[top:top + height, left:left + width]
    raise RuntimeError(
        'img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))
Example #13
Source File: transforms.py From ACAN with MIT License | 6 votes |
def __call__(self, img):
    """Crop ``img`` using window parameters derived from ``self.slide``.

    Args:
        img (numpy.ndarray (H x W x C)): image to be cropped.

    Returns:
        numpy.ndarray: the cropped image.
    """
    # i: upper coordinate, j: left coordinate,
    # h: crop height, w: crop width.
    i, j, h, w = self.get_params(img, self.size, self.slide)
    if not _is_numpy_image(img):
        raise TypeError('img should be ndarray. Got {}'.format(type(img)))
    if img.ndim == 3:
        return img[i:i + h, j:j + w, :]
    if img.ndim == 2:
        return img[i:i + h, j:j + w]
    raise RuntimeError(
        'img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))
Example #14
Source File: features.py From NGU-scripts with GNU Lesser General Public License v3.0 | 6 votes |
def __cutoff_right(bmp) -> PILImage:
    """Trim uniform background from the right edge of ``bmp``.

    Scans columns left to right (starting at x == 8) and crops at the
    first run of more than 8 consecutive columns whose pixels all match
    the top-left pixel; returns ``bmp`` unchanged when no such run exists.
    """
    background = bmp.getpixel((0, 0))
    width, height = bmp.size
    run_length = 0
    for x in range(8, width):
        column_differs = False
        for y in range(0, height):
            if not Inputs.rgb_equal(background, bmp.getpixel((x, y))):
                column_differs = True
                break
        run_length = 0 if column_differs else run_length + 1
        if run_length > 8:
            return bmp.crop((0, 0, x, height))
    return bmp

# splits the three parts of the resource breakdown (pow, bars, cap)
Example #15
Source File: opencv_functional.py From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License | 6 votes |
def to_tensor(pic):
    """Convert a ``numpy.ndarray`` image to tensor.

    See ``ToTensor`` for more details.  Note: despite the upstream
    docstring, this implementation accepts numpy arrays only.

    Args:
        pic (numpy.ndarray): H x W x C image to be converted.

    Returns:
        Tensor: converted channels-first image; uint8 input is cast
        to float.
    """
    if not _is_numpy_image(pic):
        raise TypeError('pic should be ndarray. Got {}'.format(type(pic)))
    # handle numpy array: HWC -> CHW
    img = torch.from_numpy(pic.transpose((2, 0, 1)))
    # backward compatibility
    if isinstance(img, torch.ByteTensor) or img.dtype == torch.uint8:
        return img.float()
    return img
Example #16
Source File: nyu_transform.py From Visualizing-CNNs-for-monocular-depth-estimation with MIT License | 6 votes |
def __call__(self, sample):
    """Rotate the image and its depth map by the same random angle.

    Args:
        sample (dict): holds 'image' and 'depth' arrays.

    Returns:
        dict: rotated 'image' and 'depth' as PIL Images.
    """
    image, depth = sample['image'], sample['depth']
    applied_angle = random.uniform(-self.angle, self.angle)
    angle1 = applied_angle
    # The scipy.ndimage.interpolation namespace was deprecated and later
    # removed; rotate lives directly on scipy.ndimage.
    # print('before rotating:',image.size)
    image = ndimage.rotate(
        image, angle1, reshape=self.reshape, order=self.order)
    depth = ndimage.rotate(
        depth, angle1, reshape=self.reshape, order=self.order)
    image = Image.fromarray(image)
    depth = Image.fromarray(depth)
    # print('after rotating:',image.shape,depth.shape)
    return {'image': image, 'depth': depth}
Example #17
Source File: MuscimaPlusPlusMaskImageGenerator.py From OMR-Datasets with MIT License | 6 votes |
def __render_masks_of_nodes_for_semantic_segmentation(self, nodes: List[Node],
                                                      destination_directory: str,
                                                      destination_filename: str,
                                                      width: int, height: int):
    """Render class-colored masks of ``nodes`` into one grayscale image.

    Nodes are drawn in reverse order; staff-related classes are skipped.
    Non-zero mask pixels overwrite the canvas at the node's position.
    The result is saved as an "L"-mode image in ``destination_directory``.
    """
    image = numpy.zeros((height, width), dtype=numpy.uint8)
    skipped_classes = ["staffSpace", "staff", "staffLine"]
    for node in reversed(nodes):
        if node.class_name in skipped_classes:
            continue
        try:
            symbol_class = node.class_name
            color_mask = node.mask * self.class_to_color_mapping[symbol_class]
            # Vectorised replacement of the original per-pixel double loop:
            # copy only the non-zero mask pixels into the target window.
            window = image[node.top:node.top + node.height,
                           node.left:node.left + node.width]
            nonzero = color_mask != 0
            window[nonzero] = color_mask[nonzero]
        except Exception:  # narrowed from a bare except; still best-effort per node
            print("Error drawing node {0}".format(node.unique_id))
    image = Image.fromarray(image, mode="L")
    os.makedirs(destination_directory, exist_ok=True)
    image.save(os.path.join(destination_directory, destination_filename))
Example #18
Source File: nyu_transform.py From Visualizing-CNNs-for-monocular-depth-estimation with MIT License | 6 votes |
def changeScale(self, img, size, interpolation=Image.BILINEAR):
    """Resize ``img`` so its shorter side equals ``size``, or to an
    explicit (h, w) pair.

    Args:
        img (PIL Image): image to resize.
        size (int or 2-sequence): an int scales the shorter side keeping
            the aspect ratio; a pair is interpreted as (h, w).
        interpolation: PIL resampling filter; default ``Image.BILINEAR``.

    Returns:
        PIL Image: the resized image.

    Raises:
        TypeError: if ``img`` is not a PIL Image or ``size`` is invalid.
    """
    if not _is_pil_image(img):
        raise TypeError(
            'img should be PIL Image. Got {}'.format(type(img)))
    # collections.Iterable was removed in Python 3.10; use collections.abc.
    if not (isinstance(size, int) or
            (isinstance(size, collections.abc.Iterable) and len(size) == 2)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))
    if isinstance(size, int):
        w, h = img.size
        # Shorter side already matches: nothing to do.
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
            return img.resize((ow, oh), interpolation)
        else:
            oh = size
            ow = int(size * w / h)
            return img.resize((ow, oh), interpolation)
    else:
        # size is (h, w); PIL wants (w, h), hence the reversal.
        return img.resize(size[::-1], interpolation)
Example #19
Source File: image_helper.py From openseg.pytorch with MIT License | 6 votes |
def pil_read_image(image_path, mode='RGB'):
    """Read an image file with PIL and convert it to ``mode``.

    Supported modes: 'RGB', 'BGR' (RGB with channels swapped) and
    'P' (palette).  Any other mode logs an error and exits.
    """
    with open(image_path, 'rb') as f:
        img = Image.open(f)
        if mode == 'RGB':
            return img.convert('RGB')
        if mode == 'BGR':
            rgb = np.array(img.convert('RGB'))
            return Image.fromarray(ImageHelper.rgb2bgr(rgb))
        if mode == 'P':
            return img.convert('P')
        Log.error('Not support mode {}'.format(mode))
        exit(1)
Example #20
Source File: ppm_utils.py From avocado-vt with GNU General Public License v2.0 | 6 votes |
def image_histogram_compare(image_a, image_b, size=(0, 0)):
    """
    Compare the RGB histograms of two images and return a similarity degree.

    :param image_a: Full path of the first image
    :param image_b: Full path of the second image
    :param size: Resize both images to size(width, height) before
                 comparing; size=(0, 0) uses the per-axis maximum of the
                 two images' sizes.
    """
    img_a = Image.open(image_a)
    img_b = Image.open(image_b)
    if not any(size):
        size = tuple(map(max, img_a.size, img_b.size))
    hist_a = img_a.resize(size).convert('RGB').histogram()
    hist_b = img_b.resize(size).convert('RGB').histogram()
    total = 0
    for count_a, count_b in zip(hist_a, hist_b):
        if count_a == count_b:
            total += 1
        else:
            # Partial credit proportional to how close the two bins are.
            total += 1 - float(abs(count_a - count_b)) / max(count_a, count_b)
    return total / len(hist_a)
Example #21
Source File: ppm_utils.py From avocado-vt with GNU General Public License v2.0 | 6 votes |
def image_crop_save(image, new_image, box=None):
    """
    Crop an image and save it to a new image file.

    :param image: Full path of the original image
    :param new_image: Full path of the cropped image
    :param box: A 4-tuple (left, upper, right, lower); defaults to the
                centred half-size window of the original.
    :return: True if crop and save succeed, False otherwise
    """
    img = Image.open(image)
    if not box:
        x, y = img.size
        box = (x / 4, y / 4, x * 3 / 4, y * 3 / 4)
    try:
        img.crop(box).save(new_image)
    except (KeyError, SystemError) as e:
        logging.error("Fail to crop image: %s", e)
        return False
    return True
Example #22
Source File: ppm_utils.py From avocado-vt with GNU General Public License v2.0 | 6 votes |
def image_average_hash(image, img_wd=8, img_ht=8):
    """
    Resize and convert the image, then compute its average hash.

    :param image: an image path or an opened PIL image object
    :param img_wd: hash grid width
    :param img_ht: hash grid height
    :return: integer hash, one bit per grid cell (1 = at/above mean luminance)
    """
    if not isinstance(image, Image.Image):
        image = Image.open(image)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    image = image.resize((img_wd, img_ht), Image.LANCZOS).convert('L')
    avg = reduce(lambda x, y: x + y, image.getdata()) / (img_wd * img_ht)

    def _hta(i):
        # 1 when the pixel is at or above the mean luminance, else 0.
        if i < avg:
            return 0
        else:
            return 1

    return reduce(lambda x, y_z: x | (y_z[1] << y_z[0]),
                  enumerate(map(_hta, image.getdata())), 0)
Example #23
Source File: ppm_utils.py From avocado-vt with GNU General Public License v2.0 | 6 votes |
def get_region_md5sum(width, height, data, x1, y1, dx, dy,
                      cropped_image_filename=None):
    """
    Return the md5sum of a cropped region.

    :param width: Original image width
    :param height: Original image height
    :param data: Image data
    :param x1: Desired x coord of the cropped region
    :param y1: Desired y coord of the cropped region
    :param dx: Desired width of the cropped region
    :param dy: Desired height of the cropped region
    :param cropped_image_filename: if not None, write the resulting cropped
            image to a file with this name
    """
    cw, ch, cdata = image_crop(width, height, data, x1, y1, dx, dy)
    # Write cropped image for debugging
    if cropped_image_filename:
        image_write_to_ppm_file(cropped_image_filename, cw, ch, cdata)
    return image_md5sum(cw, ch, cdata)
Example #24
Source File: functional.py From pytorch-semantic-segmentation with MIT License | 6 votes |
def get_params(img, output_size):
    """Get parameters for ``crop`` for a random crop.

    Args:
        img (PIL Image): image to be cropped.
        output_size (tuple): expected (tw, th) output size of the crop.

    Returns:
        tuple: params (i, j, h, w) to be passed to ``crop``.
    """
    w, h = img.size
    tw, th = output_size
    # The crop covers the whole image: the window is fixed at the origin.
    if w == tw and h == th:
        return 0, 0, h, w
    i = random.randint(0, h - th)
    j = random.randint(0, w - tw)
    return i, j, th, tw
Example #25
Source File: transforms.py From ACAN with MIT License | 6 votes |
def adjust_brightness(img, brightness_factor):
    """Adjust brightness of a PIL Image.

    Args:
        img (PIL Image): image to be adjusted.
        brightness_factor (float): any non-negative number; 0 gives a black
            image, 1 the original image, 2 doubles the brightness.

    Returns:
        PIL Image: brightness adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageEnhance.Brightness(img).enhance(brightness_factor)
Example #26
Source File: opencv_functional.py From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License | 5 votes |
def affine(img, angle, translate, scale, shear,
           interpolation=cv2.INTER_LINEAR, mode=cv2.BORDER_CONSTANT,
           fillcolor=0):
    """Apply an affine transformation keeping the image center invariant.

    Args:
        img (numpy ndarray): image to transform.
        angle (float or int): rotation angle in degrees in [-180, 180],
            clockwise direction.
        translate (list or tuple of 2 ints): post-rotation (horizontal,
            vertical) translation.
        scale (float): overall scale; must be positive.
        shear (float): shear angle in degrees in [-180, 180], clockwise.
        interpolation: cv2 interpolation flag (``cv2.INTER_NEAREST``,
            ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA`` or ``cv2.INTER_CUBIC``);
            default ``cv2.INTER_LINEAR``.
        mode: cv2 border mode (``cv2.BORDER_CONSTANT``, ``BORDER_REPLICATE``,
            ``BORDER_REFLECT`` or ``BORDER_REFLECT_101``) for regions outside
            the image; default ``cv2.BORDER_CONSTANT``.
        fillcolor (int): fill value used with ``cv2.BORDER_CONSTANT``.

    Returns:
        numpy ndarray: the transformed image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
        "Argument translate should be a list or tuple of length 2"
    assert scale > 0.0, "Argument scale should be positive"
    rows, cols = img.shape[0:2]
    single_channel = img.shape[2] == 1
    # cv2 expects the centre in (x, y) order with a half-pixel offset.
    center = (cols * 0.5 + 0.5, rows * 0.5 + 0.5)
    matrix = _get_affine_matrix(center, angle, translate, scale, shear)
    warped = cv2.warpAffine(img, matrix, (cols, rows), interpolation,
                            borderMode=mode, borderValue=fillcolor)
    if single_channel:
        # warpAffine drops the singleton channel axis; restore it.
        return warped[:, :, np.newaxis]
    return warped
Example #27
Source File: utils.py From pytorch-0.4-yolov3 with MIT License | 5 votes |
def drawtext(img, pos, text, bgcolor=(255,255,255), font=None):
    """Paste ``text`` onto ``img`` just above ``pos`` on a filled box.

    Args:
        img (PIL Image): target image (converted to RGB if needed).
        pos: (x, y) anchor; the text box is placed above this point.
        text (str): text to render.
        bgcolor: RGB background color of the text box.
        font: PIL font object; defaults to the built-in bitmap font.
    """
    if font is None:
        font = ImageFont.load_default().font
    (tw, th) = font.getsize(text)
    # Render the text onto its own small box image, then paste that box.
    box_img = Image.new('RGB', (tw + 2, th + 2), bgcolor)
    ImageDraw.Draw(box_img).text((0, 0), text, fill=(0, 0, 0, 255), font=font)
    if img.mode != 'RGB':
        img = img.convert('RGB')
    # Clamp the paste origin so the box stays inside the image.
    sx = max(int(pos[0]), 0)
    sy = max(int(pos[1] - th - 2), 0)
    img.paste(box_img, (sx, sy))
Example #28
Source File: transforms.py From Qualia2.0 with MIT License | 5 votes |
def __init__(self, size, interpolation=Image.BILINEAR):
    """Store the target size and resampling filter for later calls.

    Args:
        size: desired output size.
        interpolation: PIL resampling filter; default ``Image.BILINEAR``.
    """
    self.interpolation = interpolation
    self.size = size
Example #29
Source File: transforms.py From Qualia2.0 with MIT License | 5 votes |
def __call__(self, img):
    '''Randomly flip ``img`` left-right with probability ``self.p``.

    Args:
        img (PIL Image or numpy.ndarray): image to be flipped; 4-D
            arrays are flipped along their last axis.
    '''
    if random.random() >= self.p:
        return img
    if isinstance(img, np.ndarray):
        return img[:, :, :, ::-1]
    if isinstance(img, Image.Image):
        return img.transpose(Image.FLIP_LEFT_RIGHT)
    return img
Example #30
Source File: transforms.py From Qualia2.0 with MIT License | 5 votes |
def __call__(self, img):
    '''Randomly flip ``img`` top-bottom with probability ``self.p``.

    Args:
        img (PIL Image or numpy.ndarray): image to be flipped; 4-D
            arrays are flipped along their third axis.
    '''
    if random.random() >= self.p:
        return img
    if isinstance(img, np.ndarray):
        return img[:, :, ::-1, :]
    if isinstance(img, Image.Image):
        return img.transpose(Image.FLIP_TOP_BOTTOM)
    return img