Python skimage.io.imread() Examples

The following are 27 code examples of skimage.io.imread(), drawn from open-source projects. You can go to the original project or source file by following the links above each example.
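For orientation, a minimal sketch of the basic call before the project examples; the file path is a placeholder, and as_gray is the spelling used by recent scikit-image versions:

from skimage import io

img = io.imread('photo.jpg')                   # numpy array, typically uint8, shape (h, w, 3) for RGB
gray = io.imread('photo.jpg', as_gray=True)    # float64 in [0, 1], shape (h, w)
print(img.shape, img.dtype)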
Example #1
Source File: pf_dataset.py    From weakalign with MIT License
def get_image(self,img_name_list,idx):
        img_name = os.path.join(self.dataset_path, img_name_list[idx])
        image = io.imread(img_name)
        
        # get image size
        im_size = np.asarray(image.shape)
        
        # convert to torch Variable
        image = np.expand_dims(image.transpose((2,0,1)),0)
        image = torch.Tensor(image.astype(np.float32))
        image_var = Variable(image,requires_grad=False)
        
        # Resize image using bilinear sampling with identity affine tnf
        image = self.affineTnf(image_var).data.squeeze(0)
        
        im_size = torch.Tensor(im_size.astype(np.float32))
        
        return (image, im_size) 
Example #2
Source File: mxnet_predict_example.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def PreprocessImage(path, show_img=False):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # crop the image from the center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
    # resize to 224, 224
    resized_img = transform.resize(crop_img, (224, 224))
    # convert to numpy.ndarray
    sample = np.asarray(resized_img) * 255
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)

    return sample
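To make the center-crop arithmetic concrete: for a 480x640 input, short_edge = 480, so yy = (480 - 480) // 2 = 0 and xx = (640 - 480) // 2 = 80, and the crop keeps rows 0..479 and columns 80..559 before the resize to 224x224.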

Example #3
Source File: Bot.py    From poeai with MIT License
def SplitSave(self, p = 'TSD/Train/Images', wp = 'TSD/Train/Split'):
        '''
        p:  directory containing the images to split
        wp: directory to write the split images to
        '''
        c = 0
        if not os.path.exists(wp):
            os.mkdir(wp)
        pdl = np.random.choice([fni for fni in os.listdir(p) if fni.startswith('di')], 32, replace = False)
        for i, fn in enumerate(pdl):
            print('{:4d}/{:4d}:\t{:s}'.format(i + 1, len(pdl), fn))
            # crop the window border before splitting into sub-images
            A = imread(os.path.join(p, fn))[0:-12, 4:-4, :]
            S = self.ts.DivideIntoSubimages(A).astype(np.uint8)
            for Si in S:
                imsave(os.path.join(wp, '{:03d}.png'.format(c)), Si)
                c += 1 
Example #4
Source File: secure_camera.py    From WannaPark with GNU General Public License v3.0
def get_car_image_plate_number(image_path, image_name):
  
	img = Image(cv2.imread(image_path,0), image_name)
	l_carsR = getCarsFromImage(img.img, carClassifier)
	for carR in l_carsR:
		car = Car(img.img, carR, plateCassifier)
		car.setPlateText(processPlateText(car, net))
		img.addCar(car)
	
	for car in img.cars:
		car.draw()
		if(not car.isPlateEmpty()):
			plate_number = car.plateText
		# imshow(car.carImg)
		x, y, w, h = car.carR.x, car.carR.y, car.carR.w, car.carR.h

	color_image = imread(image_path)
	return color_image[y:y+h, x:x+w], plate_number 
Example #5
Source File: io.py    From torchsupport with MIT License
def imread(path, type='float32'):
  """Reads a given image from file, returning a `Tensor`.

  Args:
    path (str): path to an image file.
    type (str): the desired type of the output tensor, defaults to 'float32'.
  """
  reading = True
  while reading:
    try:
      image = io.imread(path)
      reading = False
    except OSError as e:
      if e.errno == 121:  # errno 121 is EREMOTEIO (remote I/O error)
        print("Attempting to recover from Remote IO Error ...")
        time.sleep(10)
      else:
        print("Unexpected OSError. Aborting ...")
        raise e
  image = np.array(image).astype(type)
  image = np.transpose(image,(2,0,1))
  image = torch.from_numpy(image)
  return image 
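A hypothetical usage sketch of the helper above (the mount path is a placeholder): on a flaky network filesystem the loop retries any read that fails with errno 121 every 10 seconds, then returns a channels-first tensor.

tensor = imread('/mnt/remote/data/img_0001.png')   # float32 Tensor, shape (3, h, w)
batch = tensor.unsqueeze(0)                        # add a batch axis: (1, 3, h, w)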
Example #6
Source File: io.py    From torchsupport with MIT License
def stackread(path, type='float32'):
  """Reads a given image from file, returning a `Tensor`.

  Args:
    path (str): path to an image file.
    type (str): the desired type of the output tensor, defaults to 'float32'.
  """
  reading = True
  while reading:
    try:
      image = io.imread(path)
      reading = False
    except OSError as e:
      if e.errno == 121:  # errno 121 is EREMOTEIO (remote I/O error)
        print("Attempting to recover from Remote IO Error ...")
        time.sleep(10)
      else:
        print("Unexpected OSError. Aborting ...")
        raise e
  image = np.array(image).astype(type)
  image = np.transpose(image, (0, 1, 2))  # identity permutation: stacks already come back as (plane, h, w)
  image = torch.from_numpy(image)
  return image 
Example #7
Source File: demo.py    From RingNet with MIT License
def preprocess_image(img_path):
    img = io.imread(img_path)
    if np.max(img.shape[:2]) != config.img_size:
        print('Resizing so the max image size is %d..' % config.img_size)
        scale = (float(config.img_size) / np.max(img.shape[:2]))
    else:
        scale = 1.0
    center = np.round(np.array(img.shape[:2]) / 2).astype(int)
    # image center in (x,y)
    center = center[::-1]
    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    # Normalize image to [-1, 1]
    crop = 2 * ((crop / 255.) - 0.5)

    return crop, proc_param, img 
Example #8
Source File: coco_visualiser.py    From COCO-Assistant with MIT License
def visualise_single(ann, folder, img_filename):
    if folder not in ['train', 'val', 'test']:
        raise AssertionError('Folder not in ["train", "val", "test"]')
    # Get image id and image filename mapping dict
    id_fn_dict = get_imgid_dict(ann)
    img_path = os.path.join(os.getcwd(), "images", folder, img_filename)
    im = io.imread(img_path)
    annids = ann.getAnnIds(imgIds=id_fn_dict[img_filename], iscrowd=None)
    anns = ann.loadAnns(annids)

    # load and display instance annotations
    plt.figure(figsize=(15, 15))
    plt.imshow(im)
    plt.axis('off')
    plt.title(img_filename)
    ann.showAnns(anns)
    plt.show() 
Example #9
Source File: vfn_eval.py    From view-finding-network with GNU General Public License v3.0
def evaluate_sliding_window(img_filename, crops):
    img = io.imread(img_filename).astype(np.float32)/255
    if img.ndim == 2: # Handle B/W images
        img = np.expand_dims(img, axis=-1)
        img = np.repeat(img, 3, 2)

    img_crops = np.zeros((batch_size, 227, 227, 3))
    for i in range(len(crops)):  # xrange in the original Python 2 source
        crop = crops[i]
        img_crop = transform.resize(img[crop[1]:crop[1]+crop[3],crop[0]:crop[0]+crop[2]], (227, 227))-0.5
        img_crop = np.expand_dims(img_crop, axis=0)
        img_crops[i,:,:,:] = img_crop

    # compute ranking scores (sess.run returns a list; take the first element)
    scores = sess.run([score_func], feed_dict={image_placeholder: img_crops})[0]

    # find the optimal crop among the entries actually filled
    idx = np.argmax(scores[:len(crops)])
    best_window = crops[idx]

    # return the best crop
    return (best_window[0], best_window[1], best_window[2], best_window[3]) 
Example #10
Source File: nstyle.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def PreprocessContentImage(path, long_edge):
    img = io.imread(path)
    logging.info("load the content image, size = %s", img.shape[:2])
    factor = float(long_edge) / max(img.shape[:2])
    new_size = (int(img.shape[0] * factor), int(img.shape[1] * factor))
    resized_img = transform.resize(img, new_size)
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (h, w, 3) to (3, h, w)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # subtract the per-channel ImageNet means (RGB order)
    sample[0, :] -= 123.68
    sample[1, :] -= 116.779
    sample[2, :] -= 103.939
    logging.info("resize the content image to %s", new_size)
    return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2])) 
Example #11
Source File: dataset.py    From pytorch-UNet with MIT License
def __getitem__(self, idx):
        image_filename = self.images_list[idx]
        # read image
        image = io.imread(os.path.join(self.input_path, image_filename))
        # read mask image
        mask = io.imread(os.path.join(self.output_path, image_filename))

        # correct dimensions if needed
        image, mask = correct_dims(image, mask)

        if self.joint_transform:
            image, mask = self.joint_transform(image, mask)

        if self.one_hot_mask:
            assert self.one_hot_mask > 0, 'one_hot_mask must be positive'
            mask = torch.zeros((self.one_hot_mask, mask.shape[1], mask.shape[2])).scatter_(0, mask.long(), 1)

        return image, mask, image_filename 
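The scatter_ call above is the standard one-hot trick; a minimal standalone sketch, with a made-up class count of 3 and a 2x2 mask:

import torch

mask = torch.tensor([[0, 1], [2, 0]])   # class indices, shape (h, w)
one_hot = torch.zeros((3, 2, 2)).scatter_(0, mask.unsqueeze(0).long(), 1)
# one_hot[c, i, j] == 1 exactly where mask[i, j] == c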
Example #12
Source File: image_utils.py    From keras-ctpn with Apache License 2.0
def load_image(image_path):
    """
    加载图像
    :param image_path: 图像路径
    :return: [h,w,3] numpy数组
    """
    image = plt.imread(image_path)
    # 灰度图转为RGB
    if len(image.shape) == 2:
        image = np.expand_dims(image, axis=2)
        image = np.tile(image, (1, 1, 3))
    elif image.shape[-1] == 1:
        image = skimage.color.gray2rgb(image)  # io.imread 报ValueError: Input image expected to be RGB, RGBA or gray
    # 标准化为0~255之间
    if image.dtype == np.float32:
        image *= 255
        image = image.astype(np.uint8)
    # 删除alpha通道
    return image[..., :3] 
Example #13
Source File: ImageNet.py    From Representation-Learning-by-Learning-to-Count with MIT License
def __init__(self, ids, name='default',
                 max_examples=None, is_train=True):
        self._ids = list(ids)
        self.name = name
        self.is_train = is_train

        if max_examples is not None:
            self._ids = self._ids[:max_examples]

        file_path = os.path.join(__IMAGENET_IMG_PATH__, self._ids[0])

        try:
            imread(file_path)
        except Exception:
            raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
        log.info("Reading Done: %s", file_path) 
Example #14
Source File: gan_lstm.py    From Progressive-Generative-Networks with MIT License
def __getitem__(self, idx):
        image_pos = self.lines.iloc[idx, 0]  # .ix is deprecated; use positional .iloc
        image = io.imread(image_pos)
        image = image.astype(float)  # the np.float alias was removed from numpy
        h, w = image.shape[:2]
        # scale so the shorter side becomes 350 pixels
        if h < w:
            factor = h / 350.0
            w = w / factor
            h = 350
        else:
            factor = w / 350.0
            h = h / factor
            w = 350
        image = transform.resize(image, (int(h), int(w), 3))
        image_id = self.lines.iloc[idx, 1]
        sample = {'image': image, 'id': image_id}
        if self.trans is not None:
            sample = self.trans(sample)
        return sample 
Example #15
Source File: 1_1_scene_gen_for_detection_maskrcnn.py    From Pix2Pose with MIT License
def get_random_background(im_height,im_width,backfiles):
    back_fn = backfiles[int(random.random()*(len(backfiles)-1))]
    back_img = cv2.imread(back_dir+"/"+back_fn)
    img_syn = np.zeros( (im_height,im_width,3))                    

    if back_img.ndim != 3:
        back_img = skimage.color.gray2rgb(back_img)
    back_v = min(back_img.shape[0],img_syn.shape[0])
    back_u = min(back_img.shape[1],img_syn.shape[1])
    img_syn[:back_v,:back_u]=back_img[:back_v,:back_u]/255

    if(img_syn.shape[0]>back_img.shape[0]):
        width = min(img_syn.shape[0]-back_v,back_img.shape[0])
        img_syn[back_v:back_v+width,:back_u]=back_img[:width,:back_u]/255
    if(img_syn.shape[1]>back_img.shape[1]):
        height = min(img_syn.shape[1]-back_u,back_img.shape[1])
        img_syn[:back_v,back_u:back_u+height]=back_img[:back_v,:height]/255                                  
    return img_syn 
Example #16
Source File: gan_lstm_two.py    From Progressive-Generative-Networks with MIT License
def __getitem__(self, idx):
        image_pos = self.lines.iloc[idx, 0]  # .ix is deprecated; use positional .iloc
        image = io.imread(image_pos)
        image = image.astype(float)  # the np.float alias was removed from numpy
        h, w = image.shape[:2]
        # scale so the shorter side becomes 350 pixels
        if h < w:
            factor = h / 350.0
            w = w / factor
            h = 350
        else:
            factor = w / 350.0
            h = h / factor
            w = 350
        image = transform.resize(image, (int(h), int(w), 3))
        image_id = self.lines.iloc[idx, 1]
        sample = {'image': image, 'id': image_id}
        if self.trans is not None:
            sample = self.trans(sample)
        return sample 
Example #17
Source File: tracklet_utils_3c.py    From TNT with GNU General Public License v3.0
def hist_feature_extract(feature_size, num_patch, max_length, patch_folder):
    fea_mat = np.zeros((num_patch,feature_size-4+2))
    tracklet_list = os.listdir(patch_folder)
    N_tracklet = len(tracklet_list)
    cnt = 0
    for n in range(N_tracklet):
        tracklet_folder = patch_folder+'/'+tracklet_list[n]
        patch_list = os.listdir(tracklet_folder)

        # get patch list, track_id and fr_id, starts from 1
        prev_cnt = cnt
        for m in range(len(patch_list)):
            # track_id
            fea_mat[cnt,0] = n+1
            # fr_id
            fea_mat[cnt,1] = int(patch_list[m][-8:-4])
            
            patch_list[m] = tracklet_folder+'/'+patch_list[m]
            patch_img = imread(patch_list[m])
            fea_mat[cnt,2:] = track_lib.extract_hist(patch_img)
            cnt = cnt+1
    return fea_mat 
Example #18
Source File: pascal_parts_dataset.py    From weakalign with MIT License
def get_image(self,img_name_list,idx):
        img_name = os.path.join(self.dataset_path, img_name_list[idx])
        image = io.imread(img_name)
        
        # get image size
        im_size = np.asarray(image.shape)
        
        # convert to torch Variable
        image = np.expand_dims(image.transpose((2,0,1)),0)
        image = torch.Tensor(image.astype(np.float32))
        image_var = Variable(image,requires_grad=False)
        
        # Resize image using bilinear sampling with identity affine tnf
        image = self.affineTnf(image_var).data.squeeze(0)
        
        im_size = torch.Tensor(im_size.astype(np.float32))
        
        return (image, im_size) 
Example #19
Source File: gan_lstm_oval.py    From Progressive-Generative-Networks with MIT License
def __getitem__(self, idx):
        image_pos = self.lines.iloc[idx, 0]  # .ix is deprecated; use positional .iloc
        image = io.imread(image_pos)
        image = image.astype(float)  # the np.float alias was removed from numpy
        h, w = image.shape[:2]
        # scale so the shorter side becomes 350 pixels
        if h < w:
            factor = h / 350.0
            w = w / factor
            h = 350
        else:
            factor = w / 350.0
            h = h / factor
            w = 350
        image = transform.resize(image, (int(h), int(w), 3))
        image_id = self.lines.iloc[idx, 1]
        sample = {'image': image, 'id': image_id}
        if self.trans is not None:
            sample = self.trans(sample)
        return sample 
Example #20
Source File: pf_dataset.py    From weakalign with MIT License
def get_image(self,img_name_list,idx):
        img_name = os.path.join(self.dataset_path, img_name_list.iloc[idx])
        image = io.imread(img_name)
        
        # get image size
        im_size = np.asarray(image.shape)
        
        # convert to torch Variable
        image = np.expand_dims(image.transpose((2,0,1)),0)
        image = torch.Tensor(image.astype(np.float32))
        image_var = Variable(image,requires_grad=False)
        
        # Resize image using bilinear sampling with identity affine tnf
        image = self.affineTnf(image_var).data.squeeze(0)
        
        im_size = torch.Tensor(im_size.astype(np.float32))
        
        return (image, im_size) 
Example #21
Source File: bm_comp_perform.py    From BIRL with BSD 3-Clause "New" or "Revised" License
def register_image_pair(idx, path_img_target, path_img_source, path_out):
    """ register two images together

    :param int idx: empty parameter for using the function in parallel
    :param str path_img_target: path to the target image
    :param str path_img_source: path to the source image
    :param str path_out: path for exporting the output
    :return tuple(str,float):
    """
    start = time.time()
    # load and denoise reference image
    img_target = io.imread(path_img_target)
    img_target = denoise_wavelet(img_target, wavelet_levels=7, multichannel=True)
    img_target_gray = rgb2gray(img_target)

    # load and denoise moving image
    img_source = io.imread(path_img_source)
    img_source = denoise_bilateral(img_source, sigma_color=0.05,
                                   sigma_spatial=2, multichannel=True)
    img_source_gray = rgb2gray(img_source)

    # detect ORB features on both images
    detector_target = ORB(n_keypoints=150)
    detector_source = ORB(n_keypoints=150)
    detector_target.detect_and_extract(img_target_gray)
    detector_source.detect_and_extract(img_source_gray)
    matches = match_descriptors(detector_target.descriptors,
                                detector_source.descriptors)
    # robustly estimate affine transform model with RANSAC
    model, _ = ransac((detector_target.keypoints[matches[:, 0]],
                       detector_source.keypoints[matches[:, 1]]),
                      AffineTransform, min_samples=25, max_trials=500,
                      residual_threshold=0.95)

    # warp the target image with the estimated transformation
    img_warped = warp(img_target, model.inverse, output_shape=img_target.shape[:2])
    path_img_warped = os.path.join(path_out, NAME_IMAGE_WARPED % idx)
    io.imsave(path_img_warped, img_warped)
    # summarise experiment
    execution_time = time.time() - start
    return path_img_warped, execution_time 
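A hypothetical invocation of the helper above, assuming NAME_IMAGE_WARPED is a filename template such as 'warped_%d.png' and that the image paths and output directory exist:

path_warped, run_time = register_image_pair(0, 'target.png', 'source.png', '/tmp/out')
print('saved %s in %.1f s' % (path_warped, run_time))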
Example #22
Source File: shanghai.py    From LCFCN with Apache License 2.0
def __getitem__(self, index):        
        name = self.img_names[index]
      
        # LOAD IMG, POINT, and ROI
        image = imread(os.path.join(self.path, "images", name))
        if image.ndim == 2:
            image = image[:,:,None].repeat(3,2)
        pointList = hu.load_mat(os.path.join(self.path,
                                             "ground-truth",
                                             "GT_" + name.replace(".jpg", "") + ".mat"))
        pointList = pointList["image_info"][0][0][0][0][0] 
        
        points = np.zeros(image.shape[:2], "uint8")[:,:,None]
        H, W = image.shape[:2]
        for x, y in pointList:
            points[min(int(y), H-1), min(int(x), W-1)] = 1

        counts = torch.LongTensor(np.array([pointList.shape[0]]))

        collection = list(map(FT.to_pil_image, [image, points]))
        image, points = transformers.apply_transform(self.split, image, points, 
                   transform_name=self.exp_dict['dataset']['transform'])
            
        return {"images":image, 
                "points":points.squeeze(), 
                "counts":counts, 
                'meta':{"index":index}} 
Example #23
Source File: trancos.py    From LCFCN with Apache License 2.0
def __getitem__(self, index):
        name = self.img_names[index]

        # LOAD IMG, POINT, and ROI
        image = imread(os.path.join(self.path, name + ".jpg"))
        points = imread(os.path.join(self.path, name + "dots.png"))[:,:,:1].clip(0,1)
        roi = loadmat(os.path.join(self.path, name + "mask.mat"))["BW"][:,:,np.newaxis]
        
        # LOAD IMG AND POINT
        image = image * roi
        image = hu.shrink2roi(image, roi)
        points = hu.shrink2roi(points, roi).astype("uint8")

        counts = torch.LongTensor(np.array([int(points.sum())]))   
        
        collection = list(map(FT.to_pil_image, [image, points]))
        image, points = transformers.apply_transform(self.split, image, points, 
                   transform_name=self.exp_dict['dataset']['transform'])
            
        return {"images":image, 
                "points":points.squeeze(), 
                "counts":counts, 
                'meta':{"index":index}} 
Example #24
Source File: test_on_image.py    From LCFCN with Apache License 2.0
def apply(image_path, model_name, model_path):
  transformer = ut.ComposeJoint(
                    [
                         [transforms.ToTensor(), None],
                         [transforms.Normalize(*ut.mean_std), None],
                         [None,  ut.ToLong() ]
                    ])  

  # Load best model
  model = model_dict[model_name](n_classes=2).cuda()
  model.load_state_dict(torch.load(model_path))

  # Read Image
  image_raw = imread(image_path)
  collection = list(map(FT.to_pil_image, [image_raw, image_raw]))
  image, _ = transformer(collection)

  batch = {"images":image[None]}
  
  # Make predictions
  pred_blobs = model.predict(batch, method="blobs").squeeze()
  pred_counts = int(model.predict(batch, method="counts").ravel()[0])

  # Save Output
  save_path = image_path + "_blobs_count:{}.png".format(pred_counts)

  imsave(save_path, ut.combine_image_blobs(image_raw, pred_blobs))
  print("| Counts: {}\n| Output saved in: {}".format(pred_counts, save_path)) 
Example #25
Source File: getMetrics_market.py    From Human-Pose-Transfer with MIT License
def load_generated_images(images_folder):
    input_images = []
    target_images = []
    generated_images = []
    print("load image from {}".format(images_folder))

    names = []
    for img_name in os.listdir(images_folder):
        img = imread(os.path.join(images_folder, img_name))
        w = 64  # h, w ,c
        input_images.append(img[:, :w])
        target_images.append(img[:, 2 * w:3 * w])
        generated_images.append(img[:, 4 * w:5 * w])

        assert img_name.endswith('_vis.png') or img_name.endswith(
            '_vis.jpg'), 'unexpected img name: should end with _vis.png or _vis.jpg'

        img_name = img_name[:-8]
        img_name = img_name.split('___')
        assert len(img_name) == 2, 'unexpected img split: length 2 expect!'
        fr = img_name[0]
        to = img_name[1]

        names.append([fr, to])

    return input_images, target_images, generated_images, names 
Example #26
Source File: assemble_data.py    From Deep-Exemplar-based-Colorization with MIT License
def download_image(args_tuple):
    "For use with multiprocessing map. Returns filename on fail."
    try:
        url, filename = args_tuple
        if not os.path.exists(filename):
            urllib.urlretrieve(url, filename)
        with open(filename) as f:
            assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
        test_read_image = io.imread(filename)
        return True
    except KeyboardInterrupt:
        raise Exception()  # multiprocessing doesn't catch keyboard exceptions
    except:
        return False 
Example #27
Source File: 5_Data Loading And Processing.py    From ML_CIA with MIT License
def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir,
                                self.landmarks_frame.iloc[idx, 0])
        image = io.imread(img_name)
        landmarks = self.landmarks_frame.iloc[idx, 1:].to_numpy()  # .as_matrix() was removed from pandas
        landmarks = landmarks.astype('float').reshape(-1, 2)
        sample = {'image': image, 'landmarks': landmarks}

        if self.transform:
            sample = self.transform(sample)

        return sample