Python PIL.Image.NEAREST Examples

The following are 30 code examples of PIL.Image.NEAREST(), drawn from open-source projects; the originating project and source file are noted above each example. You may also want to check out the other available functions and classes of the PIL.Image module.
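Before the examples, a minimal sketch of what NEAREST does: it copies the nearest source pixel instead of interpolating, which is why nearly every snippet below uses it for segmentation masks, where interpolation would invent invalid class IDs. (On Pillow 9.1 and later the same constant is also exposed as Image.Resampling.NEAREST; the tiny array below is made up for illustration.)

import numpy as np
from PIL import Image

# a tiny 2x2 "mask" holding integer class IDs
mask = Image.fromarray(np.array([[0, 1], [2, 3]], dtype=np.uint8))

up = mask.resize((4, 4), Image.NEAREST)  # pixels are replicated, class IDs preserved
print(np.unique(np.array(up)))           # -> [0 1 2 3]

blended = mask.resize((4, 4), Image.BILINEAR)  # interpolation blends neighbouring IDs
print(np.unique(np.array(blended)))            # typically contains values that are not valid class IDs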
Example #1
Source File: transform.py    From DeepLab_v3_plus with MIT License
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.size == mask.size
        w, h = img.size

        # if the longer side already matches the target size, skip resizing
        if (w >= h and w == self.size[1]) or (h >= w and h == self.size[0]):
            return {'image': img,
                    'label': mask}
        # otherwise resize to the target size (height, width)
        oh, ow = self.size
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)

        return {'image': img,
                'label': mask} 
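The class name is not shown in this snippet; assuming it is called FixedResize and takes the target (height, width) in its constructor, usage would look roughly like this (class name and file paths are hypothetical):

# hypothetical usage of the transform above
transform = FixedResize(size=(512, 512))   # assumed constructor signature
sample = {'image': Image.open('img.jpg'),
          'label': Image.open('mask.png')}
out = transform(sample)
# out['image'] is resized with BILINEAR, out['label'] with NEAREST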
Example #2
Source File: seg_data_base.py    From SegmenTron with Apache License 2.0
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = min(outsize)
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize[1]) / 2.))
        y1 = int(round((h - outsize[0]) / 2.))
        img = img.crop((x1, y1, x1 + outsize[1], y1 + outsize[0]))
        mask = mask.crop((x1, y1, x1 + outsize[1], y1 + outsize[0]))

        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example #3
Source File: compute_multiview_projection.py    From Pointnet2.ScanNet with MIT License
def resize_crop_image(image, new_image_dims):
    image_dims = [image.shape[1], image.shape[0]]
    if image_dims != new_image_dims:
        # scale so the height matches the target, preserving aspect ratio
        resize_width = int(math.floor(new_image_dims[1] * float(image_dims[0]) / float(image_dims[1])))
        image = transforms.Resize([new_image_dims[1], resize_width], interpolation=Image.NEAREST)(Image.fromarray(image))
        # then center-crop the width to the exact target size
        image = transforms.CenterCrop([new_image_dims[1], new_image_dims[0]])(image)

    return np.array(image)
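One caveat with this pattern: recent torchvision versions deprecate passing PIL constants to transforms.Resize. From torchvision 0.9 onwards the equivalent call uses InterpolationMode (a sketch, with placeholder dimensions):

from torchvision import transforms
from torchvision.transforms import InterpolationMode

# same behaviour as interpolation=Image.NEAREST on newer torchvision
resize = transforms.Resize([480, 640], interpolation=InterpolationMode.NEAREST)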
Example #4
Source File: segbase.py    From gluon-cv with Apache License 2.0
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1+outsize, y1+outsize))
        mask = mask.crop((x1, y1, x1+outsize, y1+outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example #5
Source File: cityscapes_Dataset.py    From Deeplab-v3plus with MIT License
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example #6
Source File: augmentations.py    From PLARD with MIT License
def __call__(self, img, mask):
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)

        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        if w == tw and h == th:
            return img, mask
        if w < tw or h < th:
            return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST)

        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th)) 
Example #7
Source File: PILTransform.py    From ext_portrait_segmentation with MIT License
def __call__(self, rgb_img, label_img=None):

        label1 = label_img
        label2 = label_img
        if self.scale1 != 1:
            w, h = label_img.size
            label1 = label1.resize((w//self.scale1, h//self.scale1), Image.NEAREST)

        if self.scale2 != 1:
            w, h = label_img.size
            label2 = label2.resize((w//self.scale2, h//self.scale2), Image.NEAREST)

        rgb_img = F.to_tensor(rgb_img) # convert to tensor (values between 0 and 1)
        rgb_img = F.normalize(rgb_img, self.mean, self.std) # normalize the tensor
        label1 = torch.LongTensor(np.array(label1).astype(np.int64))
        label2 = torch.LongTensor(np.array(label2).astype(np.int64))


        return rgb_img, label1, label2 
Example #8
Source File: Voc_Dataset.py    From Deeplab-v3plus with MIT License
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example #9
Source File: fisheye.py    From DualFisheye with MIT License
def update_preview(self, psize):
        # Safety check: Ignore calls during construction/destruction.
        if not self.init_done: return
        # Copy latest user settings to the lens object.
        self.lens.fov_deg = self.f.get()
        self.lens.radius_px = self.r.get()
        self.lens.center_px[0] = self.x.get()
        self.lens.center_px[1] = self.y.get()
        # Re-scale the image to match the canvas size.
        # Note: Make a copy first, because thumbnail() operates in-place.
        self.img_sc = self.img.copy()
        self.img_sc.thumbnail(psize, Image.NEAREST)
        self.img_tk = ImageTk.PhotoImage(self.img_sc)
        # Re-scale the x/y/r parameters to match the preview scale.
        pre_scale = float(psize[0]) / float(self.img.size[0])
        x = self.x.get() * pre_scale
        y = self.y.get() * pre_scale
        r = self.r.get() * pre_scale
        # Clear and redraw the canvas.
        self.preview.delete('all')
        self.preview.create_image(0, 0, anchor=tk.NW, image=self.img_tk)
        self.preview.create_oval(x-r, y-r, x+r, y+r,
                                 outline='#C00000', width=3)

Example #10
Source File: sample_onnx.py    From iAI with MIT License
def preprocess_image(image_path, inp_dims):
    ppm_image = Image.open(image_path)
    # target network input size
    new_h = 224
    new_w = 224
    size = (new_w, new_h)
    # resize image
    img = ppm_image.resize(size, Image.NEAREST)
    # convert to numpy array
    img = np.array(img)
    # hwc2chw
    img = img.transpose(2, 0, 1)
    # convert image to 1D array
    img = img.ravel()
    # convert image to float
    img = img.astype(np.float32)
    # normalize image data
    img = normalize_data(img, inp_dims)
    return img 
Example #11
Source File: joint_transforms.py    From pytorch-semantic-segmentation with MIT License
def __call__(self, img, mask):
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)

        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        if w == tw and h == th:
            return img, mask
        if w < tw or h < th:
            return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST)

        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th)) 
Example #12
Source File: custom_transforms.py    From overhaul-distillation with MIT License
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        w, h = img.size
        if w > h:
            oh = self.crop_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = self.crop_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - self.crop_size) / 2.))
        y1 = int(round((h - self.crop_size) / 2.))
        img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
        mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))

        return {'image': img,
                'label': mask} 
Example #13
Source File: data_loader.py    From cycada_release with BSD 2-Clause "Simplified" License
def get_transform2(dataset_name, net_transform, downscale):
    "Returns image and label transform to downscale, crop and prepare for net."
    orig_size = get_orig_size(dataset_name)
    transform = []
    target_transform = []
    if downscale is not None:
        transform.append(transforms.Resize(orig_size // downscale))
        target_transform.append(
                transforms.Resize(orig_size // downscale,
                    interpolation=Image.NEAREST))
    transform.extend([transforms.Resize(orig_size), net_transform]) 
    target_transform.extend([transforms.Resize(orig_size, interpolation=Image.NEAREST),
        to_tensor_raw]) 
    transform = transforms.Compose(transform)
    target_transform = transforms.Compose(target_transform)
    return transform, target_transform 
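A hedged usage sketch of the function above; the dataset name, downscale factor, and net_transform value are placeholder arguments:

# hypothetical call; 'cityscapes' and downscale=2 are made-up arguments
transform, target_transform = get_transform2('cityscapes', net_transform, downscale=2)
img_tensor = transform(img)            # image: Resize (interpolated) -> net_transform
lbl_tensor = target_transform(label)   # label: Resize with NEAREST -> raw integer tensor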
Example #14
Source File: transform.py    From pytorch-semantic-segmentation with MIT License
def __call__(self, input, target):
        # apply the same geometric transforms to both image and label
        if self.reshape_size is not None:
            input = input.resize(self.reshape_size, Image.BILINEAR)
            target = target.resize(self.reshape_size, Image.NEAREST)

        if self.augment:
            input, target = RandomCrop(self.crop_size)(input, target)  # random crop of the same area for image and label
            input, target = self.flip(input, target)                   # random flip for both cropped image and label
            input, target = self.rotate(input, target)
        else:
            input, target = CenterCrop(self.crop_size)(input, target)  # center crop for the validation data

        input = ToTensor()(input)
        input = Normalize([.485, .456, .406], [.229, .224, .225])(input)  # normalize with ImageNet statistics

        target = torch.from_numpy(np.array(target)).long().unsqueeze(0)

        return input, target
Example #15
Source File: base.py    From PyTorch-Encoding with MIT License
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1+outsize, y1+outsize))
        mask = mask.crop((x1, y1, x1+outsize, y1+outsize))
        # final transform
        return img, self._mask_transform(mask) 
Example #16
Source File: openwpm.py    From PrivacyScore with GNU General Public License v3.0
def pixelize_screenshot(screenshot, screenshot_pixelized, target_width=390, pixelsize=3):
    """
    Thumbnail a screenshot to `target_width` and pixelize it.

    :param screenshot: Path of the screenshot to be thumbnailed and pixelized
    :param screenshot_pixelized: File to which the result should be written
    :param target_width: Width of the final thumbnail
    :param pixelsize: Size of the final pixels
    :return: None
    """
    if target_width % pixelsize != 0:
        raise ValueError("pixelsize must divide target_width")

    img = Image.open(screenshot)
    width, height = img.size
    if height > width:
        img = img.crop((0, 0, width, width))
        height = width
    undersampling_width = target_width // pixelsize
    ratio = width / height
    new_height = int(undersampling_width / ratio)
    img = img.resize((undersampling_width, new_height), Image.BICUBIC)
    img = img.resize((target_width, new_height * pixelsize), Image.NEAREST)
    img.save(screenshot_pixelized, format='png') 
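The NEAREST step is what creates the blocky look: the image is first downsampled to target_width // pixelsize with BICUBIC, then each low-resolution sample is blown back up into a crisp pixelsize x pixelsize square by the NEAREST upscale. A hedged usage sketch (file names are placeholders):

# 390 / 3 = 130 coarse samples across, each rendered as a 3x3 block
pixelize_screenshot('screenshot.png', 'screenshot_pixelized.png',
                    target_width=390, pixelsize=3)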
Example #17
Source File: cityscapescoarse.py    From PyTorch-Encoding with MIT License
def _val_sync_transform(self, img, mask):
        """
        synchronized transformation
        """
        outsize = 720
        short = outsize
        w, h = img.size
        if w > h:
            oh = short
            ow = int(1.0 * w * oh / h)
        else:
            ow = short
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1+outsize, y1+outsize))
        mask = mask.crop((x1, y1, x1+outsize, y1+outsize))

        return img, mask 
Example #18
Source File: base_seg.py    From LEDNet with MIT License
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example #19
Source File: base.py    From PyTorch-Encoding with MIT License
def _sync_transform(self, img, mask):
        # random mirror
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        w, h = img.size
        long_size = random.randint(int(self.base_size*0.5), int(self.base_size*2.0))
        if h > w:
            oh = long_size
            ow = int(1.0 * w * long_size / h + 0.5)
            short_size = ow
        else:
            ow = long_size
            oh = int(1.0 * h * long_size / w + 0.5)
            short_size = oh
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        img = img.crop((x1, y1, x1+crop_size, y1+crop_size))
        mask = mask.crop((x1, y1, x1+crop_size, y1+crop_size))
        # final transform
        return img, self._mask_transform(mask) 
Example #20
Source File: texture_utils.py    From wwrando with MIT License
def encode_image(image, image_format, palette_format, mipmap_count=1):
  image = image.convert("RGBA")
  image_width, image_height = image.size
  
  if mipmap_count < 1:
    mipmap_count = 1
  
  encoded_colors, colors_to_color_indexes = generate_new_palettes_from_image(image, image_format, palette_format)
  
  block_width = BLOCK_WIDTHS[image_format]
  block_height = BLOCK_HEIGHTS[image_format]
  block_data_size = BLOCK_DATA_SIZES[image_format]
  
  new_image_data = BytesIO()
  mipmap_image = image
  mipmap_width = image_width
  mipmap_height = image_height
  for i in range(mipmap_count):
    if i != 0:
      mipmap_width //= 2
      mipmap_height //= 2
      mipmap_image = image.resize((mipmap_width, mipmap_height), Image.NEAREST)
    
    mipmap_image_data = encode_mipmap_image(
      mipmap_image, image_format,
      colors_to_color_indexes,
      block_width, block_height,
      mipmap_width, mipmap_height
    )
    
    mipmap_image_data.seek(0)
    new_image_data.write(mipmap_image_data.read())
  
  new_palette_data = encode_palette(encoded_colors, palette_format, image_format)
  
  return (new_image_data, new_palette_data, encoded_colors) 
Example #21
Source File: joint_transforms.py    From pytorch-semantic-segmentation with MIT License
def __call__(self, img, mask):
        assert img.size == mask.size
        for attempt in range(10):
            area = img.size[0] * img.size[1]
            target_area = random.uniform(0.45, 1.0) * area
            aspect_ratio = random.uniform(0.5, 2)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if random.random() < 0.5:
                w, h = h, w

            if w <= img.size[0] and h <= img.size[1]:
                x1 = random.randint(0, img.size[0] - w)
                y1 = random.randint(0, img.size[1] - h)

                img = img.crop((x1, y1, x1 + w, y1 + h))
                mask = mask.crop((x1, y1, x1 + w, y1 + h))
                assert (img.size == (w, h))

                return img.resize((self.size, self.size), Image.BILINEAR), mask.resize((self.size, self.size),
                                                                                       Image.NEAREST)

        # Fallback
        scale = Scale(self.size)
        crop = CenterCrop(self.size)
        return crop(*scale(img, mask)) 
Example #22
Source File: joint_transforms.py    From pytorch-semantic-segmentation with MIT License
def __call__(self, img, mask):
        rotate_degree = random.random() * 2 * self.degree - self.degree
        return img.rotate(rotate_degree, Image.BILINEAR), mask.rotate(rotate_degree, Image.NEAREST) 
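The second positional argument of Image.rotate is the resampling filter, so the mask is rotated with NEAREST while the image uses BILINEAR. A quick hedged check that NEAREST rotation introduces no new label values (the mask path is a placeholder; corners exposed by the rotation are filled with 0):

import numpy as np
from PIL import Image

mask = Image.open('mask.png')  # placeholder path to a single-channel label map
rotated = mask.rotate(7.3, Image.NEAREST)
# NEAREST only copies existing pixels; 0 comes from the background fill
assert set(np.unique(np.array(rotated))) <= set(np.unique(np.array(mask))) | {0}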
Example #23
Source File: joint_transforms.py    From pytorch-semantic-segmentation with MIT License
def __call__(self, img, mask):
        assert img.size == mask.size
        w, h = img.size
        if (w >= h and w == self.size) or (h >= w and h == self.size):
            return img, mask
        if w > h:
            ow = self.size
            oh = int(self.size * h / w)
            return img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST)
        else:
            oh = self.size
            ow = int(self.size * w / h)
            return img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST) 
Example #24
Source File: main.py    From LEDNet with MIT License
def __call__(self, input, target):
        # apply the same resize to both image and label
        input = Resize(self.height, Image.BILINEAR)(input)
        target = Resize(self.height, Image.NEAREST)(target)

        if self.augment:
            # random horizontal flip
            hflip = random.random()
            if hflip < 0.5:
                input = input.transpose(Image.FLIP_LEFT_RIGHT)
                target = target.transpose(Image.FLIP_LEFT_RIGHT)

            # random translation of 0-2 pixels (fill the rest with padding)
            transX = random.randint(-2, 2)
            transY = random.randint(-2, 2)

            input = ImageOps.expand(input, border=(transX, transY, 0, 0), fill=0)
            target = ImageOps.expand(target, border=(transX, transY, 0, 0), fill=255)  # pad label, filling with 255 (relabeled to 19 below)
            input = input.crop((0, 0, input.size[0]-transX, input.size[1]-transY))
            target = target.crop((0, 0, target.size[0]-transX, target.size[1]-transY))

        input = ToTensor()(input)
        if self.enc:
            target = Resize(int(self.height/8), Image.NEAREST)(target)
        target = ToLabel()(target)
        target = Relabel(255, 19)(target)

        return input, target
Example #25
Source File: project_multiview_features.py    From Pointnet2.ScanNet with MIT License
def resize_crop_image(image, new_image_dims):
    image_dims = [image.shape[1], image.shape[0]]
    if image_dims == new_image_dims:
        return image
    resize_width = int(math.floor(new_image_dims[1] * float(image_dims[0]) / float(image_dims[1])))
    image = transforms.Resize([new_image_dims[1], resize_width], interpolation=Image.NEAREST)(Image.fromarray(image))
    image = transforms.CenterCrop([new_image_dims[1], new_image_dims[0]])(image)
    image = np.array(image)
    
    return image 
Example #26
Source File: seg_data_base.py    From SegmenTron with Apache License 2.0
def _sync_transform(self, img, mask):
        # random mirror
        if cfg.AUG.MIRROR and random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = img.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < min(crop_size):
            padh = crop_size[0] - oh if oh < crop_size[0] else 0
            padw = crop_size[1] - ow if ow < crop_size[1] else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=-1)
        # random crop crop_size
        w, h = img.size
        x1 = random.randint(0, w - crop_size[1])
        y1 = random.randint(0, h - crop_size[0])
        img = img.crop((x1, y1, x1 + crop_size[1], y1 + crop_size[0]))
        mask = mask.crop((x1, y1, x1 + crop_size[1], y1 + crop_size[0]))
        # gaussian blur as in PSP
        if cfg.AUG.BLUR_PROB > 0 and random.random() < cfg.AUG.BLUR_PROB:
            radius = cfg.AUG.BLUR_RADIUS if cfg.AUG.BLUR_RADIUS > 0 else random.random()
            img = img.filter(ImageFilter.GaussianBlur(radius=radius))
        # color jitter
        if self.color_jitter:
            img = self.color_jitter(img)
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example #27
Source File: cityscapes_Dataset.py    From Deeplab-v3plus with MIT License
def _train_sync_transform(self, img, mask):
        '''
        :param img: PIL input image
        :param mask: PIL input ground-truth mask
        :return: transformed (img, mask) pair
        '''
        # random mirror
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = img.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            img = img.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example #28
Source File: transforms.py    From torchbench with Apache License 2.0
def __call__(self, image, target):
        size = random.randint(self.min_size, self.max_size)
        image = F.resize(image, size)
        target = F.resize(target, size, interpolation=Image.NEAREST)
        return image, target 
Example #29
Source File: transforms.py    From torchbench with Apache License 2.0
def __call__(self, image, target):
        image = F.resize(image, self.resize_shape)
        target = F.resize(
            target, self.resize_shape, interpolation=Image.NEAREST
        )
        return image, target 
Example #30
Source File: lip.py    From Single-Human-Parsing-LIP with MIT License
def get_a_sample(self, image_path, gt_path, index):
        # get PIL Image
        img = Image.open(image_path[index])  # .resize((512,512),resample=Image.BICUBIC)
        if len(img.getbands()) != 3:
            img = img.convert('RGB')
        gt = Image.open(gt_path[index])  # .resize((30,30),resample=Image.NEAREST)
        if len(gt.getbands()) != 1:
            gt = gt.convert('L')

        if self.transform is not None:
            img = self.transform(img)
        if self.gt_transform is not None:
            gt = self.gt_transform(gt)
        return img, gt