Python PIL.Image.fromarray() Examples

The following are 28 code examples of PIL.Image.fromarray(), drawn from open-source projects. The project and source file for each example are noted above it. You may also want to check out all available functions and classes of the module PIL.Image.
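Before the project examples, here is a minimal, self-contained sketch of the basic round trip (NumPy array in, PIL image out); the file name gradient.png is purely illustrative:

import numpy as np
from PIL import Image

# Build a small RGB gradient as a uint8 array; fromarray() infers mode "RGB"
# from the (H, W, 3) shape and the uint8 dtype.
arr = np.zeros((64, 64, 3), dtype=np.uint8)
arr[:, :, 0] = np.linspace(0, 255, 64, dtype=np.uint8)  # red ramp across columns

img = Image.fromarray(arr)
img.save('gradient.png')

# The round trip back to NumPy is exact for uint8 data.
assert np.array_equal(np.array(img), arr)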
Example #1
Source File: validate_submission_lib.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _prepare_sample_data(self, submission_type):
    """Prepares sample data for the submission.

    Args:
      submission_type: type of the submission.
    """
    # write images
    images = np.random.randint(0, 256,
                               size=[BATCH_SIZE, 299, 299, 3], dtype=np.uint8)
    for i in range(BATCH_SIZE):
      Image.fromarray(images[i, :, :, :]).save(
          os.path.join(self._sample_input_dir, IMAGE_NAME_PATTERN.format(i)))
    # write target class for targeted attacks
    if submission_type == 'targeted_attack':
      target_classes = np.random.randint(1, 1001, size=[BATCH_SIZE])
      target_class_filename = os.path.join(self._sample_input_dir,
                                           'target_class.csv')
      with open(target_class_filename, 'w') as f:
        for i in range(BATCH_SIZE):
          f.write((IMAGE_NAME_PATTERN + ',{1}\n').format(i, target_classes[i])) 
Example #2
Source File: super_resolution.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def resolve(ctx):
    from PIL import Image
    if isinstance(ctx, list):
        ctx = [ctx[0]]
    net.load_parameters('superres.params', ctx=ctx)
    img = Image.open(opt.resolve_img).convert('YCbCr')
    y, cb, cr = img.split()
    data = mx.nd.expand_dims(mx.nd.expand_dims(mx.nd.array(y), axis=0), axis=0)
    out_img_y = mx.nd.reshape(net(data), shape=(-3, -2)).asnumpy()  # -3 merges the batch and channel dims, -2 keeps H and W
    out_img_y = out_img_y.clip(0, 255)
    out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')

    out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
    out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')

    out_img.save('resolved.png') 
Example #3
Source File: test.py    From Depth-Map-Prediction with GNU General Public License v3.0
def main():
    # location of depth module, config and parameters
    module_fn = 'models/depth.py'
    config_fn = 'models/depth.conf'  # network structure
    params_dir = 'weights/depth'  # network parameters

    # load depth network
    machine = net.create_machine(module_fn, config_fn, params_dir)

    # demo image
    rgb = Image.open('demo_nyud_rgb.jpg')
    rgb = rgb.resize((320, 240), Image.BICUBIC)

    # build depth inference function and run
    rgb_imgs = np.asarray(rgb).reshape((1, 240, 320, 3))
    pred_depths = machine.infer_depth(rgb_imgs)

    # save prediction
    (m, M) = (pred_depths.min(), pred_depths.max())
    depth_img_np = (pred_depths[0] - m) / (M - m)
    depth_img = Image.fromarray((255*depth_img_np).astype(np.uint8))
    depth_img.save('demo_nyud_depth_prediction.png') 
Example #4
Source File: gen.py    From insightocr with MIT License
def addNoiseAndGray(surf):
    # https://stackoverflow.com/questions/34673424/how-to-get-numpy-array-of-rgb-colors-from-pygame-surface
    imgdata = pygame.surfarray.array3d(surf)
    imgdata = imgdata.swapaxes(0, 1)
    # print('imgdata shape %s' % imgdata.shape)  # shall be IMG_HEIGHT * IMG_WIDTH
    imgdata2 = noise_generator('s&p', imgdata)

    img2 = Image.fromarray(np.uint8(imgdata2))
    grayscale2 = ImageOps.grayscale(img2)

    array = np.asarray(grayscale2, dtype=np.uint8)
    selem = disk(random.randint(0, 1))
    eroded = erosion(array, selem)
    return eroded 
Example #5
Source File: image_segmentaion.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def main():
    """Module main execution"""
    # Initialization variables - update to change your model and execution context
    model_prefix = "FCN8s_VGG16"
    epoch = 19

    # By default, MXNet will run on the CPU. Change to ctx = mx.gpu() to run on GPU.
    ctx = mx.cpu()

    fcnxs, fcnxs_args, fcnxs_auxs = mx.model.load_checkpoint(model_prefix, epoch)
    fcnxs_args["data"] = mx.nd.array(get_data(args.input), ctx)
    data_shape = fcnxs_args["data"].shape
    label_shape = (1, data_shape[2]*data_shape[3])
    fcnxs_args["softmax_label"] = mx.nd.empty(label_shape, ctx)
    executor = fcnxs.bind(ctx, fcnxs_args, args_grad=None, grad_req="null", aux_states=fcnxs_auxs)
    executor.forward(is_train=False)
    output = executor.outputs[0]
    out_img = np.uint8(np.squeeze(output.asnumpy().argmax(axis=1)))
    out_img = Image.fromarray(out_img)
    out_img.putpalette(get_palette())
    out_img.save(args.output) 
Example #6
Source File: logutil.py    From Depth-Map-Prediction with GNU General Public License v3.0
def save_image(fn, img, **kwargs):
    '''
    Save an image img to filename fn in the current output dir.
    kwargs are the same as for PIL Image.save().
    '''
    if not isinstance(img, np.ndarray):
        img = np.array(img)
    (h, w, c) = img.shape
    if c == 1:
        img = np.concatenate((img,)*3, axis=2)
    if img.dtype.kind == 'f':
        img = (img * 255).astype('uint8')
    elif img.dtype.kind in ('i', 'u'):
        img = img.astype('uint8')
    else:
        raise ValueError('bad dtype: %s' % img.dtype)
    i = Image.fromarray(img)
    with open(fn, 'wb') as f:
        i.save(f, **kwargs)
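A quick usage sketch of the helper above (file names and shapes are illustrative); it exercises both the float branch and the single-channel path:

import numpy as np

rgb_float = np.random.rand(240, 320, 3)   # float image in [0, 1]
save_image('float_img.png', rgb_float)    # scaled to uint8 by the 'f' branch

gray = np.random.randint(0, 256, size=(240, 320, 1), dtype=np.uint8)
save_image('gray_rgb.png', gray)          # single channel replicated to 3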
Example #7
Source File: image.py    From Deep_VoiceChanger with MIT License
def Chainer2PIL(data, rescale=True):
    data = np.array(data)
    if rescale:
        data *= 256
        # data += 128
    if data.dtype != np.uint8:
        data = np.clip(data, 0, 255)
        data = data.astype(np.uint8)
    if data.shape[0] == 1:
        buf = data.astype(np.uint8).reshape((data.shape[1], data.shape[2]))
    else:
        buf = np.zeros((data.shape[1], data.shape[2], data.shape[0]), dtype=np.uint8)
        for i in range(3):
            a = data[i,:,:]
            buf[:,:,i] = a
    img = Image.fromarray(buf)
    return img 
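As an aside, the per-channel copy loop above is equivalent to a single axis swap; a minimal sketch, assuming data is a CHW uint8 array with three channels:

# CHW -> HWC in one call; np.ascontiguousarray makes sure the buffer layout
# is one Image.fromarray() can consume directly.
buf = np.ascontiguousarray(np.transpose(data, (1, 2, 0)))
img = Image.fromarray(buf)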
Example #8
Source File: visualization_utils.py    From object_detector_app with MIT License
def draw_keypoints_on_image_array(image,
                                  keypoints,
                                  color='red',
                                  radius=2,
                                  use_normalized_coordinates=True):
  """Draws keypoints on an image (numpy array).

  Args:
    image: a numpy array with shape [height, width, 3].
    keypoints: a numpy array with shape [num_keypoints, 2].
    color: color to draw the keypoints with. Default is red.
    radius: keypoint radius. Default value is 2.
    use_normalized_coordinates: if True (default), treat keypoint values as
      relative to the image.  Otherwise treat them as absolute.
  """
  image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
  draw_keypoints_on_image(image_pil, keypoints, color, radius,
                          use_normalized_coordinates)
  np.copyto(image, np.array(image_pil)) 
Example #9
Source File: attack_fgsm.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def save_images(images, filenames, output_dir):
  """Saves images to the output directory.

  Args:
    images: array with minibatch of images
    filenames: list of filenames without path.
      If the number of file names in this list is less than the number of
      images in the minibatch, only the first len(filenames) images are saved.
    output_dir: directory where to save images
  """
  for i, filename in enumerate(filenames):
    # Images for inception classifier are normalized to be in [-1, 1] interval,
    # so rescale them back to [0, 1].
    with tf.gfile.Open(os.path.join(output_dir, filename), 'wb') as f:
      img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)
      Image.fromarray(img).save(f, format='PNG') 
Example #10
Source File: hello.py    From PyOptiX with MIT License
def main():
    width = 512
    height = 384

    context = Context()

    context.set_ray_type_count(1)

    context['result_buffer'] = Buffer.empty((height, width, 4), buffer_type='o', dtype=np.float32, drop_last_dim=True)

    ray_gen_program = Program('draw_color.cu', 'draw_solid_color')

    ray_gen_program['draw_color'] = np.array([0.462, 0.725, 0.0], dtype=np.float32)

    entry_point = EntryPoint(ray_gen_program)
    entry_point.launch(size=(width, height))

    result_array = context['result_buffer'].to_array()
    result_array *= 255
    result_image = Image.fromarray(result_array.astype(np.uint8)[:, :, :3])

    ImageWindow(result_image) 
Example #11
Source File: random_resized_crop.py    From argus-freesound with MIT License
def __call__(self, np_image):
        """
        Args:
            np_image (numpy.ndarray): Image to be cropped and resized.
        Returns:
            numpy.ndarray: Randomly cropped and resized image.
        """

        if self.size is None:
            size = np_image.shape
        else:
            size = self.size

        image = Image.fromarray(np_image)
        i, j, h, w = self.get_params(image, self.scale, self.ratio)
        image = resized_crop(image, i, j, h, w, size, self.interpolation)
        np_image = np.array(image)
        return np_image 
Example #12
Source File: utils.py    From DeepLab_v3 with MIT License
def save_annotation(label, filename, add_colormap=True):
    '''
    Saves the given label to image on disk.
    Args:
    label: The numpy array to be saved. The data will be converted to uint8 and saved as a png image.
    filename: The output image filename.
    add_colormap: Whether to add a color map to the label.
    '''
    # Add colormap for visualizing the prediction.

    colored_label = label_to_color_image(label) if add_colormap else label

    image = Image.fromarray(colored_label.astype(dtype=np.uint8))
    image.save(filename) 
Example #13
Source File: cityscapes.py    From overhaul-distillation with MIT License
def __getitem__(self, index):

        img_path = self.files[self.split][index].rstrip()
        lbl_path = os.path.join(self.annotations_base,
                                img_path.split(os.sep)[-2],
                                os.path.basename(img_path)[:-15] + 'gtFine_labelIds.png')

        _img = Image.open(img_path).convert('RGB')
        _tmp = np.array(Image.open(lbl_path), dtype=np.uint8)
        _tmp = self.encode_segmap(_tmp)
        _target = Image.fromarray(_tmp)

        sample = {'image': _img, 'label': _target}

        if self.split == 'train':
            return self.transform_tr(sample)
        elif self.split == 'val':
            return self.transform_val(sample)
        elif self.split == 'test':
            return self.transform_ts(sample) 
Example #14
Source File: video_transforms.py    From DDPAE-video-prediction with MIT License
def __call__(self, video):
    """
    Args:
        video (numpy array): Input video, shape (... x H x W x C), dtype uint8.
    Returns:
        numpy array: Color jittered video.
    """
    transforms = self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
    reshaped_video = video.reshape((-1, *video.shape[-3:]))
    n_channels = video.shape[-1]
    for i in range(reshaped_video.shape[0]):
      img = reshaped_video[i]
      if n_channels == 1:
        img = img.squeeze(axis=2)
      img = Image.fromarray(img)
      for t in transforms:
        img = t(img)
      img = np.array(img)
      if n_channels == 1:
        img = img[..., np.newaxis]
      reshaped_video[i] = img
    video = reshaped_video.reshape(video.shape)
    return video 
Example #15
Source File: visualization.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def draw_bounding_boxes(image, gt_boxes, im_info):
  num_boxes = gt_boxes.shape[0]
  gt_boxes_new = gt_boxes.copy()
  gt_boxes_new[:,:4] = np.round(gt_boxes_new[:,:4].copy() / im_info[2])
  disp_image = Image.fromarray(np.uint8(image[0]))

  for i in range(num_boxes):
    this_class = int(gt_boxes_new[i, 4])
    disp_image = _draw_single_box(disp_image, 
                                gt_boxes_new[i, 0],
                                gt_boxes_new[i, 1],
                                gt_boxes_new[i, 2],
                                gt_boxes_new[i, 3],
                                'N%02d-C%02d' % (i, this_class),
                                FONT,
                                color=STANDARD_COLORS[this_class % NUM_COLORS])

  image[0, :] = np.array(disp_image)
  return image 
Example #16
Source File: digit_data_loader.py    From transferlearning with MIT License
def __getitem__(self, index):
        """
         Args:
             index (int): Index
         Returns:
             tuple: (image, target) where target is index of the target class.
         """

        img, target = self.data[index], self.labels[index]
        # Return a PIL Image so this dataset is consistent with all others.
        if img.shape[0] != 1:
            # CHW -> HWC
            img = Image.fromarray(np.uint8(np.asarray(img.transpose((1, 2, 0)))))
        else:
            # Replicate the single channel to get a 3-channel HWC image.
            im = np.uint8(np.asarray(img))
            im = np.vstack([im, im, im]).transpose((1, 2, 0))
            img = Image.fromarray(im)

        if self.target_transform is not None:
            target = self.target_transform(target)
        if self.transform is not None:
            img = self.transform(img)
        return img, target
Example #17
Source File: images_loader.py    From L3C-PyTorch with GNU General Public License v3.0
def _tensor_to_image(t):
    assert t.shape[0] == 3, t.shape
    return Image.fromarray(t.permute(1, 2, 0).detach().cpu().numpy()) 
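Note that Image.fromarray can only build an image here if the tensor is already uint8; for float tensors, torchvision's to_pil_image handles the rescaling. A minimal sketch, assuming t is a CHW tensor:

from torchvision.transforms.functional import to_pil_image

# Accepts CHW tensors directly and rescales floats in [0, 1] to uint8.
img = to_pil_image(t)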
Example #18
Source File: multiscale_tester.py    From L3C-PyTorch with GNU General Public License v3.0
def _write_img(decoded, png_out_p):
        """
        :param decoded: 1CHW tensor
        :param png_out_p: str
        """
        assert decoded.shape[0] == 1 and decoded.shape[1] == 3, decoded.shape
        img = pe.tensor_to_np(decoded.squeeze(0))  # CHW
        img = img.transpose(1, 2, 0).astype(np.uint8)  # Make HW3
        img = Image.fromarray(img)
        img.save(png_out_p) 
Example #19
Source File: visualization_utils_test.py    From object_detector_app with MIT License
def test_draw_bounding_boxes_on_image(self):
    test_image = self.create_colorful_test_image()
    test_image = Image.fromarray(test_image)
    width_original, height_original = test_image.size
    boxes = np.array([[0.25, 0.75, 0.4, 0.6],
                      [0.1, 0.1, 0.9, 0.9]])

    visualization_utils.draw_bounding_boxes_on_image(test_image, boxes)
    width_final, height_final = test_image.size

    self.assertEqual(width_original, width_final)
    self.assertEqual(height_original, height_final) 
Example #20
Source File: visualization_utils_test.py    From object_detector_app with MIT License
def test_draw_keypoints_on_image(self):
    test_image = self.create_colorful_test_image()
    test_image = Image.fromarray(test_image)
    width_original, height_original = test_image.size
    keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]

    visualization_utils.draw_keypoints_on_image(test_image, keypoints)
    width_final, height_final = test_image.size

    self.assertEqual(width_original, width_final)
    self.assertEqual(height_original, height_final) 
Example #21
Source File: pc_util.py    From pointnet-registration-framework with MIT License
def point_cloud_three_views_demo():
    """ Demo for draw_point_cloud function """
    points = read_ply('../third_party/mesh_sampling/piano.ply')
    im_array = point_cloud_three_views(points)
    img = Image.fromarray(np.uint8(im_array*255.0))
    img.save('piano.jpg') 
Example #22
Source File: vis_submission.py    From kuzushiji-recognition with MIT License
def visualize_predictions(img, labels, font, unicode_map, fontsize=50):
    """
    This function takes an image as a numpy array, together with labels in the string format used in a submission csv, and returns an image annotated with the characters and predictions.
    Copied from:
    https://www.kaggle.com/anokas/kuzushiji-visualisation
    """
    # Convert annotation string to array
    labels_split = labels.split(' ')
    if len(labels_split) < 3:
      return img
    labels = np.array(labels_split).reshape(-1, 3)

    # Read image
    imsource = Image.fromarray(img).convert('RGBA')
    bbox_canvas = Image.new('RGBA', imsource.size)
    char_canvas = Image.new('RGBA', imsource.size)
    bbox_draw = ImageDraw.Draw(bbox_canvas) # Separate canvases for boxes and chars so a box doesn't cut off a character
    char_draw = ImageDraw.Draw(char_canvas)

    for codepoint, x, y in labels:
        x, y = int(x), int(y)
        char = unicode_map[codepoint] # Convert codepoint to actual unicode character

        # Draw bounding box around character, and unicode character next to it
        bbox_draw.rectangle((x-10, y-10, x+10, y+10), fill=(255, 0, 0, 255))
        char_draw.text((x+25, y-fontsize*(3/4)), char, fill=(255, 0, 0, 255), font=font)

    imsource = Image.alpha_composite(Image.alpha_composite(imsource, bbox_canvas), char_canvas)
    imsource = imsource.convert("RGB") # Remove alpha for saving in jpg format.
    return np.asarray(imsource) 
Example #23
Source File: visualization_utils.py    From object_detector_app with MIT License
def encode_image_array_as_png_str(image):
  """Encodes a numpy array into a PNG string.

  Args:
    image: a numpy array with shape [height, width, 3].

  Returns:
    PNG encoded image string.
  """
  image_pil = Image.fromarray(np.uint8(image))
  output = six.BytesIO()  # PNG data is binary
  image_pil.save(output, format='PNG')
  png_string = output.getvalue()
  output.close()
  return png_string 
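On Python 3 the same encoding needs no six at all; a minimal sketch using only the standard library:

import io

import numpy as np
from PIL import Image

def encode_image_array_as_png_bytes(image):
    """Returns the PNG encoding of an [height, width, 3] uint8 array as bytes."""
    with io.BytesIO() as output:
        Image.fromarray(np.uint8(image)).save(output, format='PNG')
        return output.getvalue()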
Example #24
Source File: variational_autoencoder.py    From Recipes with MIT License
def get_image_pair(X, Xpr, channels=1, idx=-1):
    mode = 'RGB' if channels == 3 else 'L'
    shp = X[0][0].shape
    i = np.random.randint(X.shape[0]) if idx == -1 else idx
    orig = Image.fromarray(get_image_array(X, i, shp, channels), mode=mode)
    ret = Image.new(mode, (orig.size[0], orig.size[1] * 2))
    ret.paste(orig, (0, 0))
    new = Image.fromarray(get_image_array(Xpr, i, shp, channels), mode=mode)
    ret.paste(new, (0, orig.size[1]))
    return ret

Example #25
Source File: ingest_stl10.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def transform_and_save(img_arr, output_filename):
    """
    Takes an image and optionally transforms it and then writes it out to output_filename
    """
    img = Image.fromarray(img_arr)
    img.save(output_filename) 
Example #26
Source File: transforms.py    From ACAN with MIT License
def __call__(self, img):
        """
        Args:
            img (numpy.ndarray (H x W x C)): Input image.

        Returns:
            img (numpy.ndarray (H x W x C)): Color jittered image.
        """
        if not _is_numpy_image(img):
            raise TypeError('img should be ndarray. Got {}'.format(type(img)))

        pil = Image.fromarray(img)
        transform = self.get_params(self.brightness, self.contrast,
                                    self.saturation, self.hue)
        return np.array(transform(pil)) 
Example #27
Source File: transforms.py    From ACAN with MIT License
def adjust_gamma(img, gamma, gain=1):
    """Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

        I_out = 255 * gain * ((I_in / 255) ** gamma)

    See https://en.wikipedia.org/wiki/Gamma_correction for more details.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        gamma (float): Non-negative real number. A gamma larger than 1 makes
            the shadows darker, while a gamma smaller than 1 makes dark
            regions lighter.
        gain (float): The constant multiplier.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')

    input_mode = img.mode
    img = img.convert('RGB')

    np_img = np.array(img, dtype=np.float32)
    np_img = 255 * gain * ((np_img / 255) ** gamma)
    np_img = np.uint8(np.clip(np_img, 0, 255))

    img = Image.fromarray(np_img, 'RGB').convert(input_mode)
    return img 
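A quick usage sketch of the function above (the input file name is illustrative):

img = Image.open('input.jpg')
brightened = adjust_gamma(img, gamma=0.5)  # gamma < 1 lifts dark regions
darkened = adjust_gamma(img, gamma=2.2)    # gamma > 1 deepens shadows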
Example #28
Source File: voc2012.py    From Pytorch-Project-Template with MIT License
def __getitem__(self, index):
        if self.mode == 'test':
            img_path, img_name = self.imgs[index]
            img = Image.open(os.path.join(img_path, img_name + '.jpg')).convert('RGB')
            if self.transform is not None:
                img = self.transform(img)
            return img_name, img

        img_path, mask_path = self.imgs[index]
        img = Image.open(img_path).convert('RGB')
        if self.mode == 'train':
            mask = sio.loadmat(mask_path)['GTcls']['Segmentation'][0][0]
            mask = Image.fromarray(mask.astype(np.uint8))
        else:
            mask = Image.open(mask_path)

        if self.joint_transform is not None:
            img, mask = self.joint_transform(img, mask)

        if self.sliding_crop is not None:
            img_slices, mask_slices, slices_info = self.sliding_crop(img, mask)
            if self.transform is not None:
                img_slices = [self.transform(e) for e in img_slices]
            if self.target_transform is not None:
                mask_slices = [self.target_transform(e) for e in mask_slices]
            img, mask = torch.stack(img_slices, 0), torch.stack(mask_slices, 0)
            return img, mask, torch.LongTensor(slices_info)
        else:
            if self.transform is not None:
                img = self.transform(img)
            if self.target_transform is not None:
                mask = self.target_transform(mask)
            return img, mask