Python keras.preprocessing.image.img_to_array() Examples

The following are code examples showing how to use keras.preprocessing.image.img_to_array(). They are taken from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.
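
Before the project examples, here is a minimal sketch of the pattern most of the snippets below share: load an image with load_img(), convert it to a NumPy array with img_to_array(), add a batch dimension, apply the model's preprocess_input(), and call predict(). The file name 'example.jpg' and the choice of VGG16 are illustrative placeholders, not taken from any of the projects below.

import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions

# load and resize the image to the network's expected input size (returns a PIL image)
img = load_img('example.jpg', target_size=(224, 224))
# convert the PIL image to a float32 numpy array of shape (224, 224, 3)
x = img_to_array(img)
# add a batch dimension -> shape (1, 224, 224, 3)
x = np.expand_dims(x, axis=0)
# model-specific preprocessing (mean subtraction / scaling)
x = preprocess_input(x)

model = VGG16(weights='imagenet')
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])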

Example 1
Project: Car-Recognition   Author: foamliu   File: analyze.py    MIT License 9 votes vote down vote up
def predict(img_dir, model):
    img_files = []
    for root, dirs, files in os.walk(img_dir, topdown=False):
        for name in files:
            img_files.append(os.path.join(root, name))
    img_files = sorted(img_files)

    y_pred = []
    y_test = []

    for img_path in tqdm(img_files):
        # print(img_path)
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        preds = model.predict(x[None, :, :, :])
        decoded = decode_predictions(preds, top=1)
        pred_label = decoded[0][0][0]
        # print(pred_label)
        y_pred.append(pred_label)
        tokens = img_path.split(os.sep)  # split the path into components; the parent directory name is the class id
        class_id = int(tokens[-2])
        # print(str(class_id))
        y_test.append(class_id)

    return y_pred, y_test 
Example 2
Project: Keras-Unet   Author: MLearing   File: data.py    GNU General Public License v2.0 7 votes vote down vote up
def create_test_data(self):

        # generate an .npy file for the test set
        i = 0
        print('-' * 30)
        print('Creating test images...')
        print('-' * 30)
        imgs = glob.glob(self.test_path + "/*." + self.img_type)           # deform/train
        print(len(imgs))
        imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
        for imgname in imgs:
            midname = imgname[imgname.rindex("/") + 1:]   # file name of the image
            img = load_img(self.test_path + "/" + midname, grayscale=True)   # load as a grayscale image
            img = img_to_array(img)
            imgdatas[i] = img
            if i % 100 == 0:
                print('Done: {0}/{1} images'.format(i, len(imgs)))
            i += 1
        print('loading done', imgdatas.shape)
        np.save(self.npy_path + '/imgs_test.npy', imgdatas)            # save the test images as .npy data
        print('Saving to .npy files done.') 
Example 3
Project: Image-Caption-Generator   Author: dabasajay   File: test.py    MIT License 6 votes vote down vote up
def extract_features(filename, model, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Loading and resizing image
	image = load_img(filename, target_size=target_size)
	# Convert the image pixels to a numpy array
	image = img_to_array(image)
	# Reshape data for the model
	image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
	# Prepare the image for the CNN model
	image = preprocess_input(image)
	# Pass image into model to get encoded features
	features = model.predict(image, verbose=0)
	return features

# Load the tokenizer 
Example 4
Project: kaggle-carvana-2017   Author: killthekitten   File: ensemble_gpu.py    MIT License 6 votes vote down vote up
def data_loader(q, ):
    for bi in batch_indices:
        start, end = bi
        x_batch = []
        filenames_batch = filenames[start:end]

        for filename in filenames_batch:
            imgs = []
            for d in dirs:
                img = img_to_array(load_img(os.path.join(d, filename), grayscale=True))
                imgs.append(np.squeeze(img))
            x_batch.append(np.array(imgs).transpose((1, 2, 0)))
        q.put((filenames_batch, np.array(x_batch)))

    for gpu in gpus:
        q.put((None, None)) 
Example 5
Project: deploy-ml   Author: deploy-ml   File: training.py    MIT License 6 votes vote down vote up
def calculate(self, input_array=None, happening=True, override=False, image_path=None):
        """
        Calculates probability of outcome
        WARNING [CANNOT BE USED ONCE MODEL IS PICKLED]
        :param input_array: array of inputs (should be same order as training data)
        :param happening: if set False, returns probability of event not happening
        :param override: set to True if you want to override scaling
        :param image_path: string of the path to the image being fed into the model
        :return: float between 0 and 1
        """
        if self.convolutional:
            image = cv2.imread(image_path)
            image = cv2.resize(image, (self.dims_one, self.dims_two))
            image = image.astype("float") / 255.0
            image = img_to_array(image)
            image = np.expand_dims(image, axis=0)
            return self.model.predict(image)[0][0]

        else:
            if self.scaled_inputs and not override:
                input_array = self.scaling_tool.transform(input_array)
            if happening:
                return self.model.predict([input_array])[0][0]
            else:
                # probability of the event not happening
                return 1 - self.model.predict([input_array])[0][0]
Example 6
Project: deploy-ml   Author: deploy-ml   File: base.py    MIT License 6 votes vote down vote up
def calculate(self, input_array=None, happening=True, override=False, image=None):
        """
        Calculates probability of outcome
        :param input_array: array of inputs (should be same order as training data)
        :param happening: if set False, returns probability of event not happening
        :param override: set to True if you want to override scaling
        :param image: image object that has been read
        :return: float between 0 and 1
        """
        if self.convolutional:
            image = cv2.resize(image, (self.dims_one, self.dims_two))
            image = image.astype("float") / 255.0
            image = img_to_array(image)
            image = np.expand_dims(image, axis=0)
            return self.model.predict(image)[0][0]

        else:
            if self.scaled_inputs and not override:
                input_array = self.scaling_tool.transform(input_array)
            if happening:
                return self.model.predict([input_array])[0][0]
            else:
                # probability of the event not happening
                return 1 - self.model.predict([input_array])[0][0]
Example 7
Project: visimil   Author: rene4jazz   File: application.py    MIT License 6 votes vote down vote up
def get_features(url):
    response = requests.get(url)
    img = Image.open(BytesIO(response.content)).convert('RGB')

    target_size = (224, 224)

    if img.size != target_size:
        img = img.resize(target_size, Image.LANCZOS)

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    features = model.predict(x).flatten()
    img.close()

    return features.tolist() 
Example 8
Project: DisplaceNet   Author: GKalliatakis   File: hra_utils.py    MIT License 6 votes vote down vote up
def predict(violation_class, model, img, target_size):
  """Generates output predictions for a single PIL image.

    # Arguments
        violation_class: one of `cl` (HRA dataset with 2 classes - [i]'child_labour' and [ii]'no violation')
            or `dp` (HRA dataset with 2 classes - [i]'displaced_populations' and [ii]'no violation')
        model: keras model
        img: PIL format image
        target_size: (w,h) tuple

    # Returns
        list of predicted labels and their probabilities
  """
  if img.size != target_size:
    img = img.resize(target_size)

  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  x = preprocess_input(x)
  preds = model.predict(x)

  # print ('Raw preds: ',preds )

  return preds, decode_predictions(violation_class = violation_class, preds = preds, top=2)[0] 
Example 9
Project: DisplaceNet   Author: GKalliatakis   File: hra_utils.py    MIT License 6 votes vote down vote up
def predict_v2(violation_class, model, img, target_size):
  """Generates output predictions for a single PIL image.
    # Arguments
        violation_class: one of `cl` (HRA dataset with 2 classes - [i]'child_labour' and [ii]'no violation')
            or `dp` (HRA dataset with 2 classes - [i]'displaced_populations' and [ii]'no violation')
        model: keras model
        img: PIL format image
        target_size: (w,h) tuple
    # Returns
        list of predicted labels and their probabilities
  """
  if img.size != target_size:
    img = img.resize(target_size)

  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  x = preprocess_input(x)
  preds = model.predict(x)

  return decode_predictions(violation_class = violation_class, preds = preds, top=2)[0] 
Example 10
Project: DisplaceNet   Author: GKalliatakis   File: hra_utils.py    MIT License 6 votes vote down vote up
def prepare_input_data(img_path,
                       objects_or_places_flag):
    """Prepares the raw images for the EMOTIC model.

    # Arguments
        img_path: Path to the image file.
        objects_or_places_flag: one of `objects` or `places`, selecting the preprocessing function.

    # Returns
        The processed image
    """

    body_img = image.load_img(img_path, target_size=(224, 224))
    x1 = image.img_to_array(body_img)
    x1 = np.expand_dims(x1, axis=0)

    if objects_or_places_flag == 'objects':
        x1 = imagenet_preprocess_input(x1)

    elif objects_or_places_flag == 'places':
        x1 = places_preprocess_input(x1)


    return x1 
Example 11
Project: kutils   Author: subpic   File: image_utils.py    MIT License 6 votes vote down vote up
def read_image(image_path, image_size=1):
    """
    Read image from disk

    :param image_path: full path to the image
    :param image_size: resize image to specified size
                       can be a 2-tuple of (H, W) or a scalar zoom factor
    :return: np.ndarray
    """
    if type(image_size) == tuple:
        im = load_img(image_path, target_size=image_size) 
        x = img_to_array(im)
    else:
        im = load_img(image_path)
        x = img_to_array(im)            
        if not image_size == 1:
            new_size = map(int, (x.shape[0]*image_size, x.shape[1]*image_size))        
            x = transform.resize(x/255., new_size, mode='reflect')*255.
    return x 
Example 12
Project: kutils   Author: subpic   File: image_utils.py    MIT License 6 votes vote down vote up
def read_image_batch(image_paths, image_size=None):
    """
    Reads image array of np.uint8 and shape (num_images, *image_shape)

    :param image_paths: list of image paths
    :param image_size: if not None, image is resized
    :return: np.ndarray
    """
    images = None
    for i, image_path in enumerate(image_paths):
        im = load_img(image_path)
        if image_size is not None:
            im = im.resize(image_size, Image.LANCZOS)
        x = img_to_array(im)
        if images is None:
            images = np.zeros((len(image_paths),) + x.shape,
                              dtype=np.uint8)
        images[i, ...] = x
    return images 
Example 13
Project: ai-platform   Author: produvia   File: yolo_image.py    MIT License 6 votes vote down vote up
def load_image_pixels(filename, shape):
	# load the image to get its shape
	image = load_img(filename)
	width, height = image.size
	# load the image with the required size
	image = load_img(filename, target_size=shape)
	# convert to numpy array
	image = img_to_array(image)
	# scale pixel values to [0, 1]
	image = image.astype('float32')
	image /= 255.0
	# add a dimension so that we have one sample
	image = expand_dims(image, 0)
	return image, width, height

# get all of the results above a threshold 
Example 14
Project: keras-captcha   Author: 369guang   File: utils.py    MIT License 6 votes vote down vote up
def data_increase(folder_dir):
    datagen = ImageDataGenerator(
        featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True)
    for img_path in os.listdir(folder_dir):
        # print(f"{folder_dir}/{img_path}")
        new_name = img_path.split(".")[0]
        # os.rename(f"{folder_dir}/{img_path}", f"{folder_dir}/{new_name}_1.png")
        img = load_img(f'{folder_dir}/{img_path}')  # this is a PIL image
        x = img_to_array(img)  # convert the PIL image to a numpy array of shape (3, 150, 150)
        x = x.reshape((1,) + x.shape)  # this is a numpy array of shape (1, 3, 150, 150)

        # the code below generates the augmented images
        # all generated images are saved to the save_to_dir directory (TRAIN_DIR here)
        i = 0
        for batch in datagen.flow(x, batch_size=1,
                                  save_to_dir=TRAIN_DIR, save_prefix=new_name, save_format='png'):
            i += 1
            if i > 3:
                break  # otherwise the generator would loop indefinitely
Example 15
Project: KGNet   Author: KevinGong2013   File: main.py    Apache License 2.0 6 votes vote down vote up
def test():

    classes = []

    for subdir in sorted(os.listdir('data/train')):
        if os.path.isdir(os.path.join('data/train', subdir)):
            classes.append(subdir)

    m = genmodel()
    m.load_weights('weights.model')

    image = load_img('data/predict/c.png', target_size=(48, 48)).convert('L')
    
    x = img_to_array(image)
    x = x.reshape((1,) + x.shape)
    
    k = m.predict(x)[0]
    ks = k.argsort()
    
    l = classes
    print(ks[-1])
    print(l[ks[-1]], l[ks[-2]], l[ks[-3]]) 
Example 16
Project: U-net   Author: DuFanXin   File: data_Keras.py    MIT License 6 votes vote down vote up
def create_test_data(self):
		# generate an .npy file for the test set
		i = 0
		print('-' * 30)
		print('Creating test images...')
		print('-' * 30)
		imgs = glob.glob(self.test_path + "/*." + self.img_type)           # ../data_set/train
		print(len(imgs))
		imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
		for imgname in imgs:
			midname = imgname[imgname.rindex("/") + 1:]   # file name of the image
			img = load_img(self.test_path + "/" + midname, grayscale=True)   # load as a grayscale image
			img = img_to_array(img)
			imgdatas[i] = img
			if i % 100 == 0:
				print('Done: {0}/{1} images'.format(i, len(imgs)))
			i += 1
		print('loading done', imgdatas.shape)
		np.save(self.npy_path + '/imgs_test.npy', imgdatas)            # save the test images as .npy data
		# np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
		print('Saving to .npy files done.') 
Example 17
Project: Transfer-Learning   Author: DhavalThkkar   File: predict.py    MIT License 6 votes vote down vote up
def predict(model, img, target_size):
    """Run model prediction on image
    Args:
        model: keras model
        img: PIL format image
        target_size: (w,h) tuple
    Returns:
        list of predicted labels and their probabilities 
    """
    if img.size != target_size:
        img = img.resize(target_size)

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    return preds[0] 
Example 18
Project: visual_redactions   Author: tribhuvanesh   File: seq.py    Apache License 2.0 6 votes vote down vote up
def image_list_to_arr(image_list):
    target_img_size = (250, 250)
    n_items = len(image_list)

    X = np.zeros(shape=(n_items, target_img_size[0], target_img_size[1], 3))

    pbar = Progbar(n_items)

    for idx, (image_id, this_image_path) in enumerate(image_list):
        # ----- Image -> Mat
        resized_img_path = this_image_path.replace('images', 'images_250')
        resized_img_path = osp.join('/BS/orekondy2/work/datasets/VISPR2017', resized_img_path)

        if osp.exists(resized_img_path):
            this_image_path = resized_img_path
        else:
            this_image_path = osp.join(SEG_ROOT, this_image_path)

        img = load_img(this_image_path, target_size=target_img_size)
        img_arr = img_to_array(img)
        X[idx] = img_arr
        pbar.update(idx)

    return X 
Example 19
Project: visual_redactions   Author: tribhuvanesh   File: nn.py    Apache License 2.0 6 votes vote down vote up
def image_list_to_arr(image_list):
    target_img_size = (250, 250)
    n_items = len(image_list)

    X = np.zeros(shape=(n_items, target_img_size[0], target_img_size[1], 3))

    pbar = Progbar(n_items)

    for idx, (image_id, this_image_path) in enumerate(image_list):
        # ----- Image -> Mat
        resized_img_path = this_image_path.replace('images', 'images_250')
        resized_img_path = osp.join('/BS/orekondy2/work/datasets/VISPR2017', resized_img_path)

        if osp.exists(resized_img_path):
            this_image_path = resized_img_path
        else:
            this_image_path = osp.join(SEG_ROOT, this_image_path)

        img = load_img(this_image_path, target_size=target_img_size)
        img_arr = img_to_array(img)
        X[idx] = img_arr
        pbar.update(idx)

    return X 
Example 20
Project: VisualNN   Author: angelhunt   File: eval.py    GNU General Public License v3.0 5 votes vote down vote up
def preprocessing(instancePath):
    image = load_img(instancePath, target_size=(224, 224))
    image = img_to_array(image)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)
    return image 
Example 21
Project: VisualNN   Author: angelhunt   File: eval.py    GNU General Public License v3.0 5 votes vote down vote up
def preprocessing(instancePath):
    image = load_img(instancePath, target_size=(224, 224))
    image = img_to_array(image)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)
    return image 
Example 22
Project: Keras-Unet   Author: MLearing   File: data.py    GNU General Public License v2.0 5 votes vote down vote up
def Augmentation(self):
        # read the 3-channel train and label images, convert them to arrays, put the first channel of
        # the label into channel 2 of the train image, then run data augmentation
        print("Running Augmentation")
        """
        Start augmentation.....
        """
        trains = self.train_imgs
        labels = self.label_imgs
        path_train = self.train_path
        path_label = self.label_path
        path_merge = self.merge_path
        imgtype = self.img_type
        path_aug_merge = self.aug_merge_path
        print(len(trains), len(labels))
        if len(trains) != len(labels) or len(trains) == 0 or len(labels) == 0:
            print("trains can't match labels")
            return 0
        for i in range(len(trains)):
            img_t = load_img(path_train + "/" + str(i) + "." + imgtype)  # read the train image
            img_l = load_img(path_label + "/" + str(i) + "." + imgtype)  # read the label image
            x_t = img_to_array(img_t)                                    # convert to array
            x_l = img_to_array(img_l)
            x_t[:, :, 2] = x_l[:, :, 0]                                  # use the label as the third channel of the train image
            img_tmp = array_to_img(x_t)
            img_tmp.save(path_merge + "/" + str(i) + "." + imgtype)      # save the merged image
            img = x_t
            img = img.reshape((1,) + img.shape)                          # reshape to (1, 512, 512, 3)
            savedir = path_aug_merge + "/" + str(i)                      # directory for the merged, augmented images
            if not os.path.lexists(savedir):
                os.mkdir(savedir)
            self.doAugmentate(img, savedir, str(i))                      # data augmentation
Example 23
Project: Keras-Unet   Author: MLearing   File: data.py    GNU General Public License v2.0 5 votes vote down vote up
def create_train_data(self):
        # generate .npy files from the augmented training set
        i = 0
        print('-' * 30)
        print('creating train image')
        print('-' * 30)
        count = 0
        for indir in os.listdir(self.aug_merge_path):
            path = os.path.join(self.aug_merge_path, indir)
            count += len(os.listdir(path))
        imgdatas = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
        imglabels = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
        for indir in os.listdir(self.aug_merge_path):
            trainPath = os.path.join(self.aug_train_path, indir)
            labelPath = os.path.join(self.aug_label_path, indir)
            print(trainPath, labelPath)
            imgs = glob.glob(trainPath + '/*' + '.tif')
            for imgname in imgs:
                trainmidname = imgname[imgname.rindex('/') + 1:]
                labelimgname = imgname[imgname.rindex('/') + 1:imgname.rindex('_')] + '_label.tif'
                print(trainmidname, labelimgname)
                img = load_img(trainPath + '/' + trainmidname, grayscale=True)
                label = load_img(labelPath + '/' + labelimgname, grayscale=True)
                img = img_to_array(img)
                label = img_to_array(label)
                imgdatas[i] = img
                imglabels[i] = label
                if i % 100 == 0:
                    print('Done: {0}/{1} images'.format(i, len(imgs)))
                i += 1
                print(i)
        print('loading done', imgdatas.shape)
        np.save(self.npy_path + '/augimgs_train.npy', imgdatas)            # save the training images and labels as .npy data
        np.save(self.npy_path + '/augimgs_mask_train.npy', imglabels)
        print('Saving to .npy files done.') 
Example 24
Project: Image-Caption-Generator   Author: dabasajay   File: preprocessing.py    MIT License 5 votes vote down vote up
def extract_features(path, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Get CNN Model from model.py
	model = CNNModel(model_type)
	features = dict()
	# Extract features from each photo
	for name in tqdm(os.listdir(path)):
		# Loading and resizing image
		filename = path + name
		image = load_img(filename, target_size=target_size)
		# Convert the image pixels to a numpy array
		image = img_to_array(image)
		# Reshape data for the model
		image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
		# Prepare the image for the CNN model
		image = preprocess_input(image)
		# Pass image into model to get encoded features
		feature = model.predict(image, verbose=0)
		# Store encoded features for the image
		image_id = name.split('.')[0]
		features[image_id] = feature
	return features 
Example 25
Project: deep-learning-note   Author: wdxtub   File: utils.py    MIT License 5 votes vote down vote up
def preprocess_image(image_path):
    img = image.load_img(image_path)
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img 
Example 26
Project: deep-learning-note   Author: wdxtub   File: 3_nerual_style_transfer.py    MIT License 5 votes vote down vote up
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_height, img_width))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img 
Example 27
Project: nsfw-v2   Author: sajithm   File: server.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def prepare_image(img):
    if img.mode != "RGB":
        img = img.convert("RGB")
    img = img.resize((IMAGE_WIDTH, IMAGE_HEIGHT))
    img_tensor = image.img_to_array(img)
    img_tensor = np.expand_dims(img_tensor, axis=0)
    img_tensor /= 255. 
    return img_tensor 
Example 28
Project: kaggle-carvana-2017   Author: killthekitten   File: predict_multithreaded.py    MIT License 5 votes vote down vote up
def data_loader(q, ):
    for start in tqdm(range(0, len(filenames), batch_size)):
        x_batch = []
        end = min(start + batch_size, len(filenames))
        filenames_batch = filenames[start:end]

        for filename in filenames_batch:
            img = load_img(filename)

            stacked_channels = []
            for i in range(args.stacked_channels):
                channel_path = os.path.join(args.stacked_channels_dir,
                                            str(i),
                                            filename.split('/')[-1].replace('.jpg', '.png'))
                stacked_channel = load_img(channel_path, grayscale=True)
                stacked_channels.append(stacked_channel)
            stacked_img = np.dstack((img, *stacked_channels))

            x_batch.append(img_to_array(stacked_img))


        x_batch = preprocess_input(np.array(x_batch, np.float32), mode=args.preprocessing_function)
        if args.pred_tta:
            x_batch = do_tta(x_batch, args.pred_tta)
        padded_x = np.zeros((batch_size, 1280, 1920, args.stacked_channels + 3))
        padded_x[:, :, 1:-1, :] = x_batch
        q.put((filenames_batch, padded_x))

    for gpu in gpus:
        q.put((None, None)) 
Example 29
Project: kaggle-carvana-2017   Author: killthekitten   File: predict_masks.py    MIT License 5 votes vote down vote up
def predict():
    output_dir = args.pred_mask_dir
    model = make_model((None, None, 3))
    model.load_weights(args.weights)
    batch_size = args.pred_batch_size
    nbr_test_samples = 100064

    filenames = [os.path.join(args.test_data_dir, f) for f in sorted(os.listdir(args.test_data_dir))]

    start_time = clock()
    for i in range(int(nbr_test_samples / batch_size) + 1):
        x = []
        for j in range(batch_size):
            if i * batch_size + j < len(filenames):
                img = load_img(filenames[i * batch_size + j], target_size=(args.img_height, args.img_width))
                x.append(img_to_array(img))
        x = np.array(x)
        x = preprocess_input(x, args.preprocessing_function)
        x = do_tta(x, args.pred_tta)
        batch_x = np.zeros((x.shape[0], 1280, 1920, 3))
        batch_x[:, :, 1:-1, :] = x
        preds = model.predict_on_batch(batch_x)
        preds = undo_tta(preds, args.pred_tta)
        for j in range(batch_size):
            filename = filenames[i * batch_size + j]
            prediction = preds[j][:, 1:-1, :]
            array_to_img(prediction * 255).save(os.path.join(output_dir, filename.split('/')[-1][:-4] + ".png"))
        time_spent = clock() - start_time
        print("predicted batch ", str(i))
        print("Time spent: {:.2f}  seconds".format(time_spent))
        print("Speed: {:.2f}  ms per image".format(time_spent / (batch_size * (i + 1)) * 1000))
        print("Estimated time remaining: {:.2f} hours".format(time_spent / (batch_size * (i + 1)) / 3600 * (nbr_test_samples - (batch_size * (i + 1)))))
Example 30
Project: image-similarity-clustering   Author: zegami   File: extract.py    MIT License 5 votes vote down vote up
def get_feature(metadata):
    print('{}'.format(metadata['id']))
    try:
        img_path = os.path.join(source_dir, 'images', metadata['image'])
        if os.path.isfile(img_path):
            print('is file: {}'.format(img_path))
            try:
                # load image setting the image size to 224 x 224
                img = image.load_img(img_path, target_size=(224, 224))
                # convert image to numpy array
                x = image.img_to_array(img)
                # the image is now in an array of shape (3, 224, 224)
                # but we need to expand it to (1, 3, 224, 224) as Keras is expecting a list of images
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)

                # extract the features
                features = pargs.model.predict(x)[0]
                # convert from Numpy to a list of values
                features_arr = np.char.mod('%f', features)

                return {"id": metadata['id'], "features": ','.join(features_arr)}
            except Exception as ex:
                # skip all exceptions for now
                print(ex)
                pass
    except Exception as ex:
        # skip all exceptions for now
        print(ex)
        pass
    return None 
Example 31
Project: cyclegan-keras-art-attrs   Author: hollygrimm   File: data_generator.py    MIT License 5 votes vote down vote up
def prepare_image(self, image_path, target_size):
        """Loads image from filepath and scales RGB values from -1 to 1

        Args:
            image_path (str): Image Path
            target_size (tuple): scale to x, y dimensions

        Returns:
            image_array
        """
        img = image.load_img(image_path, target_size = target_size)
        x = image.img_to_array(img)
        x = preprocess_input(x, mode='tf')
        return x 
Example 32
Project: deepflying   Author: dslab-deepflying   File: styleTransfer.py    GNU General Public License v3.0 5 votes vote down vote up
def preprocess_image(image_path):
    img = load_img(image_path,target_size=(img_height,img_width))
    img = img_to_array(img)
    img = np.expand_dims(img,axis=0)
    img = vgg19.preprocess_input(img)
    return img 
Example 33
Project: deepflying   Author: dslab-deepflying   File: neural_style_transfer.py    GNU General Public License v3.0 5 votes vote down vote up
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img

# util function to convert a tensor into a valid image 
Example 34
Project: deepflying   Author: dslab-deepflying   File: nst.py    GNU General Public License v3.0 5 votes vote down vote up
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img

# util function to convert a tensor into a valid image 
Example 35
Project: spark-deep-learning   Author: databricks   File: image_utils.py    Apache License 2.0 5 votes vote down vote up
def loadAndPreprocessKerasInceptionV3(raw_uri):
    # this is the canonical way to load and prep images in keras
    uri = raw_uri[5:] if raw_uri.startswith("file:/") else raw_uri
    image = img_to_array(load_img(uri, target_size=InceptionV3Constants.INPUT_SHAPE))
    image = np.expand_dims(image, axis=0)
    return preprocess_input(image) 
Example 36
Project: spark-deep-learning   Author: databricks   File: tf_image_test.py    Apache License 2.0 5 votes vote down vote up
def _loadImageViaKeras(self, raw_uri):
        uri = raw_uri[5:] if raw_uri.startswith("file:/") else raw_uri
        image = img_to_array(load_img(uri))
        image = np.expand_dims(image, axis=0)
        return preprocess_input(image) 
Example 37
Project: spark-deep-learning   Author: databricks   File: test_builder.py    Apache License 2.0 5 votes vote down vote up
def test_keras_consistency(self):
        """ Exported model in Keras should get same result as original """

        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        def keras_load_and_preproc(fpath):
            img = load_img(fpath, target_size=(299, 299))
            img_arr = img_to_array(img)
            img_iv3_input = iv3.preprocess_input(img_arr)
            return np.expand_dims(img_iv3_input, axis=0)

        imgs_iv3_input = np.vstack([keras_load_and_preproc(fp) for fp in img_fpaths])

        model_ref = InceptionV3(weights="imagenet")
        preds_ref = model_ref.predict(imgs_iv3_input)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            model = InceptionV3(weights="imagenet")
            gfn = issn.asGraphFunction(model.inputs, model.outputs)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            feeds, fetches = issn.importGraphFunction(gfn, prefix="InceptionV3")
            preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_iv3_input})

            np.testing.assert_array_almost_equal(preds_tgt, preds_ref, decimal=5) 
Example 38
Project: spark-deep-learning   Author: databricks   File: test_pieces.py    Apache License 2.0 5 votes vote down vote up
def test_spimage_converter_module(self):
        """ spimage converter module must preserve original image """
        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        def exec_gfn_spimg_decode(spimg_dict, img_dtype):
            gfn = gfac.buildSpImageConverter('BGR', img_dtype)
            with IsolatedSession() as issn:
                feeds, fetches = issn.importGraphFunction(gfn, prefix="")
                feed_dict = dict(
                    (tnsr, spimg_dict[tfx.op_name(tnsr, issn.graph)]) for tnsr in feeds)
                img_out = issn.run(fetches[0], feed_dict=feed_dict)
            return img_out

        def check_image_round_trip(img_arr):
            spimg_dict = imageArrayToStruct(img_arr).asDict()
            spimg_dict['data'] = bytes(spimg_dict['data'])
            img_arr_out = exec_gfn_spimg_decode(
                spimg_dict, imageTypeByOrdinal(spimg_dict['mode']).dtype)
            self.assertTrue(np.all(img_arr_out == img_arr))

        for fp in img_fpaths:
            img = load_img(fp)

            img_arr_byte = img_to_array(img).astype(np.uint8)
            check_image_round_trip(img_arr_byte)

            img_arr_float = img_to_array(img).astype(np.float32)
            check_image_round_trip(img_arr_float)

            img_arr_preproc = iv3.preprocess_input(img_to_array(img))
            check_image_round_trip(img_arr_preproc) 
Example 39
Project: spark-deep-learning   Author: databricks   File: test_pieces.py    Apache License 2.0 5 votes vote down vote up
def test_pipeline(self):
        """ Pipeline should provide correct function composition """
        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        xcpt_model = Xception(weights="imagenet")
        stages = [('spimage', gfac.buildSpImageConverter('BGR', 'float32')),
                  ('xception', GraphFunction.fromKeras(xcpt_model))]
        piped_model = GraphFunction.fromList(stages)

        for fpath in img_fpaths:
            target_size = model_sizes['Xception']
            img = load_img(fpath, target_size=target_size)
            img_arr = np.expand_dims(img_to_array(img), axis=0)
            img_input = xcpt.preprocess_input(img_arr)
            preds_ref = xcpt_model.predict(img_input)

            spimg_input_dict = imageArrayToStruct(img_input).asDict()
            spimg_input_dict['data'] = bytes(spimg_input_dict['data'])
            with IsolatedSession() as issn:
                # Need blank import scope name so that spimg fields match the input names
                feeds, fetches = issn.importGraphFunction(piped_model, prefix="")
                feed_dict = dict(
                    (tnsr, spimg_input_dict[tfx.op_name(tnsr, issn.graph)]) for tnsr in feeds)
                preds_tgt = issn.run(fetches[0], feed_dict=feed_dict)
                # Uncomment the line below to see the graph
                # tfx.write_visualization_html(issn.graph,
                # NamedTemporaryFile(prefix="gdef", suffix=".html").name)

            np.testing.assert_array_almost_equal(preds_tgt,
                                                 preds_ref,
                                                 decimal=self.featurizerCompareDigitsExact) 
Example 40
Project: Keras_MedicalImgAI   Author: taoyilee   File: grad_cam.py    MIT License 5 votes vote down vote up
def load_image(path):
    img = image.load_img(path, target_size=(1024, 1024))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x 
Example 41
Project: Visual-Question-Answering   Author: Hynix07   File: question_answer.py    MIT License 5 votes vote down vote up
def extract_image_features(img_path):
	model = models.VGG_16('weights/vgg16_weights_th_dim_ordering_th_kernels.h5')
	img = image.load_img(img_path,target_size=(224,224))
	x = image.img_to_array(img)
	x = np.expand_dims(x,axis=0)
	x = preprocess_input(x)
	last_layer_output = K.function([model.layers[0].input,K.learning_phase()],
		[model.layers[-1].input])
	features = last_layer_output([x,0])[0]
	return features 
Example 42
Project: DisplaceNet   Author: GKalliatakis   File: emotic_utils.py    MIT License 5 votes vote down vote up
def prepare_input_data(body_path,
                       image_path):
    """Prepares the raw images for the EMOTIC model.

    # Arguments
        body_path: Path to body only image file.
        image_path: Path to entire image file.

    # Returns
        The two processed images
    """

    body_img = image.load_img(body_path, target_size=(224, 224))
    x1 = image.img_to_array(body_img)
    x1 = np.expand_dims(x1, axis=0)
    x1 = imagenet_preprocess_input(x1)

    entire_img = image.load_img(image_path, target_size=(224, 224))
    x2 = image.img_to_array(entire_img)
    x2 = np.expand_dims(x2, axis=0)
    x2 = places_preprocess_input(x2)


    return x1, x2


# ----------------------------------------------------------------------------------------------------- #
#                            Obtain ensembling weights for different classifiers
# ----------------------------------------------------------------------------------------------------- # 
Example 43
Project: DisplaceNet   Author: GKalliatakis   File: hra_utils.py    MIT License 5 votes vote down vote up
def duo_ensemble_predict(violation_class,
                         model_a, model_b,
                         img,
                         target_size
                         ):
  """Generates output predictions for a single PIL image for 2 different models,
    and then puts together those predictions by averaging them at inference time.

    # Arguments
        violation_class: one of `cl` (HRA dataset with 2 classes - [i]'child_labour' and [ii]'no violation')
            or `dp` (HRA dataset with 2 classes - [i]'displaced_populations' and [ii]'no violation')
        model_a: 1st model
        model_b: 2nd model
        img: PIL format image
        target_size: (w,h) tuple

    # Returns
        list of predicted labels (which have been pooled accordingly) and their probabilities
  """
  if img.size != target_size:
    img = img.resize(target_size)

  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  x = preprocess_input(x)

  preds_a = model_a.predict(x)
  preds_b = model_b.predict(x)
  final_preds = 0.50 * (preds_a + preds_b)

  return decode_predictions(violation_class = violation_class, preds = final_preds, top=2)[0] 
Example 44
Project: aetros-cli   Author: aetros   File: JobModel.py    MIT License 5 votes vote down vote up
def convert_file_to_input_node(self, file_path, input_node=None):
        if input_node is None:
            input_node = self.get_input_node(0)

        size = (int(input_node['width']), int(input_node['height']))

        if 'http://' in file_path or 'https://' in file_path:
            local_path = tempfile.mktemp()
            print("Download input ...")
            f = open(local_path, 'wb')
            f.write(urllib.urlopen(file_path).read())
            f.close()
        else:
            local_path = file_path

        if input_node['inputType'] == 'list':
            raise Exception("List input not yet available")
        else:
            try:
                image = Image.open(local_path)
            except Exception:
                print(("Could not open %s" % (local_path,)))
                return []

            image = image.resize(size, Image.ANTIALIAS)

            from keras.preprocessing.image import img_to_array

            image = self.convert_image_to_node(image, input_node)

            return image 
Example 45
Project: aetros-cli   Author: aetros   File: JobModel.py    MIT License 5 votes vote down vote up
def convert_image_to_node(self, image, input_node=None):
        from keras.preprocessing.image import img_to_array

        if input_node is None:
            input_node = self.get_input_node(0)

        if input_node['inputType'] == 'image':
            image = image.convert("L")
            image = img_to_array(image)

        elif input_node['inputType'] == 'image_bgr':
            image = image.convert("RGB")
            image = np.asarray(image, dtype='float32')
            image = image[:, :, ::-1].copy()
            image = img_to_array(image)
        else:
            image = image.convert("RGB")
            image = img_to_array(image)

        if 'imageScale' not in input_node:
            input_node['imageScale'] = 255

        if float(input_node['imageScale']) > 0:
            image = image / float(input_node['imageScale'])

        return image 
Example 46
Project: keras-gan-models   Author: chen0040   File: image_loader.py    MIT License 5 votes vote down vote up
def load_and_scale_images(img_dir_path, extension, img_width, img_height):
    images = []
    for f in os.listdir(img_dir_path):
        filepath = os.path.join(img_dir_path, f)
        if os.path.isfile(filepath) and f.endswith(extension):
            image = img_to_array(load_img(filepath, target_size=(img_width, img_height)))
            image = (image.astype(np.float32) / 255) * 2 - 1
            images.append(image)
    return np.array(images) 
Example 47
Project: face_classification   Author: oarriaga   File: inference.py    MIT License 5 votes vote down vote up
def load_image(image_path, grayscale=False, target_size=None):
    pil_image = image.load_img(image_path, grayscale, target_size)
    return image.img_to_array(pil_image) 
Example 48
Project: backdoor   Author: bolunwang   File: mad_outlier_detection.py    MIT License 5 votes vote down vote up
def analyze_pattern_norm_dist():

    mask_flatten = []
    idx_mapping = {}

    for y_label in range(NUM_CLASSES):
        mask_filename = IMG_FILENAME_TEMPLATE % ('mask', y_label)
        if os.path.isfile('%s/%s' % (RESULT_DIR, mask_filename)):
            img = image.load_img(
                '%s/%s' % (RESULT_DIR, mask_filename),
                color_mode='grayscale',
                target_size=INPUT_SHAPE)
            mask = image.img_to_array(img)
            mask /= 255
            mask = mask[:, :, 0]

            mask_flatten.append(mask.flatten())

            idx_mapping[y_label] = len(mask_flatten) - 1

    l1_norm_list = [np.sum(np.abs(m)) for m in mask_flatten]

    print('%d labels found' % len(l1_norm_list))

    outlier_detection(l1_norm_list, idx_mapping)

    pass 
Example 49
Project: dogs-vs-cats-kaggle   Author: yakovenkodenis   File: tiny_cnn.py    MIT License 5 votes vote down vote up
def predict(self, test_data_folder, batch_size=32, verbose=1):
        predictions = []
        for img_path in get_list_of_images(images_folder=test_data_folder):
            image = load_img(join(test_data_folder, img_path))
            x = img_to_array(image)
            x = self.datagen.random_transform(x)
            x = self.datagen.standardize(x)
            prediction = self.model.predict_proba(np.array([x]), verbose=0)
            predictions.append(prediction[0, 0])

        return predictions 
Example 50
Project: caption_generator   Author: anuragmishracse   File: prepare_dataset.py    MIT License 5 votes vote down vote up
def load_image(path):
    img = image.load_img(path, target_size=(224,224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return np.asarray(x) 
Example 51
Project: caption_generator   Author: anuragmishracse   File: caption_generator.py    MIT License 5 votes vote down vote up
def load_image(self, path):
        img = image.load_img(path, target_size=(224,224))
        x = image.img_to_array(img)
        return np.asarray(x) 
Example 52
Project: quickcnn   Author: CG1507   File: retrain.py    MIT License 5 votes vote down vote up
def get_img_matrix(self, filepath):
		img = image.load_img(filepath, target_size=self.target_size)
		x = image.img_to_array(img)
		x = np.expand_dims(x, axis=0)
		x = self.preprocess_fun(x)
		return x 
Example 53
Project: trash_classifier   Author: soham96   File: trash_classifier.py    GNU General Public License v3.0 5 votes vote down vote up
def pp_image(img):
    img = image.load_img('pic.png', target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    return np.asarray(x) 
Example 54
Project: trash_classifier   Author: soham96   File: pi_trash_classifier.py    GNU General Public License v3.0 5 votes vote down vote up
def pp_image():
    img = image.load_img('pic.png', target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    return np.asarray(x) 
Example 55
Project: trash_classifier   Author: soham96   File: mobilenet_training.py    GNU General Public License v3.0 5 votes vote down vote up
def get_images(paths):
    images = []
    for path in paths:
        img = image.load_img(path, target_size=(224, 224))
        x = image.img_to_array(img)
        #x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        images.append(x)

    return np.asarray(images) 
Example 56
Project: Emotion   Author: petercunha   File: inference.py    MIT License 5 votes vote down vote up
def load_image(image_path, grayscale=False, target_size=None):
    pil_image = image.load_img(image_path, grayscale, target_size)
    return image.img_to_array(pil_image) 
Example 57
Project: FaceGAN-Generating-Random-Faces   Author: adityajn105   File: train_gan.py    MIT License 5 votes vote down vote up
def getRealMiniBatch(path='allPhotos',batch_size=32):
  allp = os.listdir(path)
  while True:
    batch = sample(allp,batch_size)
    imgs = []
    for img in batch:
      x = os.path.join(path,img)
      x = img_to_array(load_img( x , target_size=(IMG_WIDTH, IMG_HEIGHT) ))
      imgs.append(x)
    yield (np.array(imgs)/127.5)-1 
Example 58
Project: Sacred_Deep_Learning   Author: AAbercrombie0492   File: image_utilities.py    GNU General Public License v3.0 5 votes vote down vote up
def load_images_from_directory(directory, n_samples):
    '''
    Load n number of images from a directory.
    Returns image arrays, filenames, and a fail log.
    '''
    from os import listdir
    from os.path import isfile, join
    from keras.preprocessing import image
    from tqdm import tqdm
    from keras.applications.resnet50 import preprocess_input

    fail_log = ''
    image_arrays = []
    files = [f for f in listdir(directory) if isfile(join(directory, f))]

    for img_path in tqdm(files[:n_samples]):
        # try:
        full_path = os.path.join(directory, img_path)
        img = image.load_img(full_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        image_arrays.append(x)
        # except:
            # fail_log +='{}\n'.format(img_path)
            # continue

    return image_arrays, files, fail_log 
Example 59
Project: siamese-two-stream   Author: icarofua   File: utils.py    Apache License 2.0 5 votes vote down vote up
def load_img(img, type, vec_size, vec_size2):
  if type is not None:
    if type=='plate':
      iplt0 = image.load_img(img[0], target_size=vec_size)
      iplt1 = image.load_img(img[2], target_size=vec_size)
    else:
      iplt0 = image.load_img(img[1], target_size=vec_size)
      iplt1 = image.load_img(img[3], target_size=vec_size)

    iplt0 = image.img_to_array(iplt0)
    iplt0 = iplt0/255.0
    iplt1 = image.img_to_array(iplt1)
    iplt1 = iplt1/255.0
    d1 = {"i0":iplt0, "i1":iplt1, "l":img[4], "p1":img[0], "p2":img[2]}

  else:
    iplt0 = image.load_img(img[0], target_size=vec_size)
    iplt1 = image.load_img(img[2], target_size=vec_size)
    iplt2 = image.load_img(img[1], target_size=vec_size2)
    iplt3 = image.load_img(img[3], target_size=vec_size2)

    iplt0 = image.img_to_array(iplt0)
    iplt0 = iplt0/255.0
    iplt1 = image.img_to_array(iplt1)
    iplt1 = iplt1/255.0
    iplt2 = image.img_to_array(iplt2)
    iplt2 = iplt2/255.0
    iplt3 = image.img_to_array(iplt3)
    iplt3 = iplt3/255.0

    d1 = {"i0":iplt0,"i1":iplt1,"i2":iplt2,"i3":iplt3,"l":img[4], "p1":img[0], "p2":img[2]}

  return d1

#------------------------------------------------------------------------------ 
Example 60
Project: pdftotree   Author: HazyResearch   File: visual_utils.py    MIT License 5 votes vote down vote up
def predict_heatmap(pdf_path, page_num, model, img_dim=448, img_dir="tmp/img"):
    """
    Return the image corresponding to the page of the pdf
    document saved at pdf_path, together with the model's predicted heatmap.
    If the image is not found in img_dir this function creates it and saves it in img_dir.

    :param pdf_path: path to the pdf document.
    :param page_num: page number to create image from in the pdf file.
    :return: tuple of (page image as uint8 array, predicted heatmap)
    """
    if not os.path.isdir(img_dir):
        print("\nCreating image folder at {}".format(img_dir))
        os.makedirs(img_dir)
    pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
    # TODO: add hashing function to make sure name is unique
    # TODO: add parallelization
    img_path = os.path.join(img_dir, pdf_name + "-{}.png".format(page_num))
    if not os.path.isfile(img_path):
        # create image for a page in the pdf document and save it in img_dir
        save_image(pdf_path, img_path, page_num)
    image = load_img(img_path, grayscale=True, target_size=(img_dim, img_dim))
    image = img_to_array(image, data_format=K.image_data_format())
    image = (
        image.reshape((img_dim, img_dim, 1))
        .repeat(3, axis=2)
        .reshape((1, img_dim, img_dim, 3))
    )
    return (
        image.astype(np.uint8).reshape((img_dim, img_dim, 3)),
        model.predict(image).reshape((img_dim, img_dim)),
    ) 
Example 61
Project: AlgoTeam   Author: Ling-Bao   File: cnn_feature.py    GNU General Public License v3.0 5 votes vote down vote up
def extract_feat(self, img_path):
        """
        Use the VGG16 model to extract features and return a normalized feature vector
        :param img_path: Path of the image to extract features from
        :return: Normalized feature vector
        """
        img = image.load_img(img_path, target_size=(self.input_shape[0], self.input_shape[1]))
        img = image.img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        feat = self.model.predict(img)
        norm_feat = feat[0] / LA.norm(feat[0])

        return norm_feat 
Example 62
Project: AlgoTeam   Author: Ling-Bao   File: cnn_feature.py    GNU General Public License v3.0 5 votes vote down vote up
def extract_feat(self, img_path):
        """
        Use the VGG16 model to extract features and return a normalized feature vector
        :param img_path: Path of the image to extract features from
        :return: Normalized feature vector
        """
        img = image.load_img(img_path, target_size=(self.input_shape[0], self.input_shape[1]))
        img = image.img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        feat = self.model.predict(img)
        norm_feat = feat[0] / LA.norm(feat[0])

        return norm_feat 
Example 63
Project: U-net   Author: DuFanXin   File: data_Keras.py    MIT License 5 votes vote down vote up
def augmentation(self):
		# read the 3-channel train and label images, convert them to arrays, put the first channel of
		# the label into channel 2 of the train image, then run data augmentation
		print("Running Augmentation")

		# Start augmentation.....
		trains = self.train_imgs
		labels = self.label_imgs
		path_train = self.train_path
		path_label = self.label_path
		path_merge = self.merge_path
		imgtype = self.img_type
		path_aug_merge = self.aug_merge_path
		print('%d images \n%d labels' % (len(trains), len(labels)))
		if len(trains) != len(labels) or len(trains) == 0 or len(labels) == 0:
			print("trains can't match labels")
			return 0
		if not os.path.lexists(path_merge):
			os.mkdir(path_merge)
		if not os.path.lexists(path_aug_merge):
			os.mkdir(path_aug_merge)
		for i in range(len(trains)):
			img_t = load_img(path_train + "/" + str(i) + "." + imgtype)  # read the train image
			img_l = load_img(path_label + "/" + str(i) + "." + imgtype)  # read the label image
			x_t = img_to_array(img_t)                                    # convert to array
			x_l = img_to_array(img_l)
			x_t[:, :, 2] = x_l[:, :, 0]                                  # use the label as the third channel of the train image
			img_tmp = array_to_img(x_t)
			img_tmp.save(path_merge + "/" + str(i) + "." + imgtype)      # save the merged image
			img = x_t
			img = img.reshape((1,) + img.shape)                          # reshape to (1, 512, 512, 3)
			savedir = path_aug_merge + "/" + str(i)                      # directory for the merged, augmented images
			if not os.path.lexists(savedir):
				os.mkdir(savedir)
			print("running %d do_augmentate" % i)
			self.do_augmentate(img, savedir, str(i))                      # data augmentation
Example 64
Project: U-net   Author: DuFanXin   File: data_Keras.py    MIT License 5 votes vote down vote up
def create_train_data(self):
		# generate .npy files from the augmented training set
		i = 0
		print('-' * 30)
		print('creating train image')
		print('-' * 30)
		count = 0
		for indir in os.listdir(self.aug_merge_path):
			path = os.path.join(self.aug_merge_path, indir)
			count += len(os.listdir(path))
		imgdatas = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
		imglabels = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
		for indir in os.listdir(self.aug_merge_path):
			trainpath = os.path.join(self.aug_train_path, indir)
			labelpath = os.path.join(self.aug_label_path, indir)
			print(trainpath, labelpath)
			imgs = glob.glob(trainpath + '/*' + '.tif')
			for imgname in imgs:
				trainmidname = imgname[imgname.rindex('/') + 1:]
				labelimgname = imgname[imgname.rindex('/') + 1:imgname.rindex('_')] + '_label.tif'
				print(trainmidname, labelimgname)
				img = load_img(trainpath + '/' + trainmidname, grayscale=True)
				label = load_img(labelpath + '/' + labelimgname, grayscale=True)
				img = img_to_array(img)
				label = img_to_array(label)
				imgdatas[i] = img
				imglabels[i] = label
				if i % 100 == 0:
					print('Done: {0}/{1} images'.format(i, len(imgs)))
				i += 1
				print(i)
		print('loading done', imgdatas.shape)
		np.save(self.npy_path + '/imgs_train.npy', imgdatas)            # save the training images and labels as .npy data
		np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
		print('Saving to .npy files done.') 
Example 65
Project: U-net   Author: DuFanXin   File: data_Keras.py    MIT License 5 votes vote down vote up
def create_small_train_data(self):
		# generate .npy files from the augmented training set
		print('-' * 30)
		print('creating small train image')
		print('-' * 30)
		imgs = glob.glob('../data_set/aug_train/0/*' + '.tif')
		count = len(imgs)
		imgdatas = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
		imglabels = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
		trainpath = '../data_set/aug_train/0'
		labelpath = '../data_set/aug_label/0'
		i = 0
		for imgname in imgs:
			trainmidname = imgname[imgname.rindex('/') + 1:]
			labelimgname = imgname[imgname.rindex('/') + 1:imgname.rindex('_')] + '_label.tif'
			print(trainmidname, labelimgname)
			img = load_img(trainpath + '/' + trainmidname, grayscale=True)
			label = load_img(labelpath + '/' + labelimgname, grayscale=True)
			img = img_to_array(img)
			label = img_to_array(label)
			imgdatas[i] = img
			imglabels[i] = label
			i += 1
			print(i)
		print('loading done', imgdatas.shape)
		np.save(self.npy_path + '/imgs_small_train.npy', imgdatas)  # save the training images and labels as .npy data
		np.save(self.npy_path + '/imgs_mask_small_train.npy', imglabels)
		print('Saving to .npy files done.') 
Example 66
Project: U-net   Author: DuFanXin   File: TestSomeFunction.py    MIT License 5 votes vote down vote up
def create_small_train_data():
	out_rows = 512
	out_cols = 512
	# generate .npy files from the augmented training set
	print('-' * 30)
	print('creating small train image')
	print('-' * 30)
	imgs = glob.glob('../data_set/aug_train/0/*' + '.tif')
	count = len(imgs)
	imgdatas = np.ndarray((count, out_rows, out_cols, 1), dtype=np.uint8)
	imglabels = np.ndarray((count, out_rows, out_cols, 1), dtype=np.uint8)
	train_path = '../data_set/aug_train/0'
	label_path = '../data_set/aug_label/0'
	i = 0
	for imgname in imgs:
		trainmidname = imgname[imgname.rindex('/') + 1:]
		labelimgname = imgname[imgname.rindex('/') + 1:imgname.rindex('_')] + '_label.tif'
		print(imgname, trainmidname, labelimgname)
		img = load_img(train_path + '/' + trainmidname, grayscale=True)
		label = load_img(label_path + '/' + labelimgname, grayscale=True)
		img = img_to_array(img)
		label = img_to_array(label)
		imgdatas[i] = img
		imglabels[i] = label
		i += 1
		print(i)
	print('loading done', imgdatas.shape)
	np.save('../data_set/npydata/imgs_small_train.npy', imgdatas)  # save the training images and labels as .npy data
	np.save('../data_set/npydata/imgs_mask_small_train.npy', imglabels)
	print('Saving to .npy files done.') 
Example 67
Project: Face-and-Emotion-Recognition   Author: vjgpt   File: inference.py    MIT License 5 votes vote down vote up
def load_image(image_path, grayscale=False, target_size=None):
    pil_image = image.load_img(image_path, grayscale, target_size)
    return image.img_to_array(pil_image) 
Example 68
Project: visual_redactions   Author: tribhuvanesh   File: predict_large.py    Apache License 2.0 5 votes vote down vote up
def anno_to_data(anno_dct, attr_id_to_idx, target_img_size=(250, 250)):
    n_items = len(anno_dct)
    n_attr = len(attr_id_to_idx)

    X = np.zeros(shape=(n_items, target_img_size[0], target_img_size[1], 3))
    Y = np.zeros(shape=(n_items, n_attr))
    image_id_list = []

    pbar = Progbar(n_items)

    for idx, (image_id, entry) in enumerate(anno_dct.iteritems()):
        # ----- Labels -> Vec
        this_attr_ids = set()
        for attr_entry in entry['attributes']:
            this_attr_ids.add(attr_entry['attr_id'])
        label_vec = np.zeros(n_attr)
        for attr_id in this_attr_ids:
            this_idx = attr_id_to_idx[attr_id]
            label_vec[this_idx] = 1
        Y[idx] = label_vec

        # ----- Image -> Mat
        this_image_path = entry['image_path']

        resized_img_path = this_image_path.replace('images', 'images_250')
        resized_img_path = osp.join('/BS/orekondy2/work/datasets/VISPR2017', resized_img_path)

        if osp.exists(resized_img_path):
            this_image_path = resized_img_path
        else:
            this_image_path = osp.join(SEG_ROOT, this_image_path)

        img = load_img(this_image_path, target_size=target_img_size)
        img_arr = img_to_array(img)
        X[idx] = img_arr

        image_id_list.append(image_id)

        pbar.update(idx)

    return X, Y, image_id_list 
Example 69
Project: visual_redactions   Author: tribhuvanesh   File: predict_large_lang.py    Apache License 2.0 5 votes vote down vote up
def anno_to_data(anno_dct, attr_id_to_idx, target_img_size=(250, 250)):
    n_items = len(anno_dct)
    n_attr = len(attr_id_to_idx)

    X = np.zeros(shape=(n_items, target_img_size[0], target_img_size[1], 3))
    Y = np.zeros(shape=(n_items, n_attr))
    image_id_list = []

    pbar = Progbar(n_items)

    for idx, (image_id, entry) in enumerate(anno_dct.iteritems()):
        # ----- Labels -> Vec
        this_attr_ids = set()
        for attr_entry in entry['attributes']:
            this_attr_ids.add(attr_entry['attr_id'])
        label_vec = np.zeros(n_attr)
        for attr_id in this_attr_ids:
            this_idx = attr_id_to_idx[attr_id]
            label_vec[this_idx] = 1
        Y[idx] = label_vec

        # ----- Image -> Mat
        this_image_path = entry['image_path']

        resized_img_path = this_image_path.replace('images', 'images_250')
        resized_img_path = osp.join('/BS/orekondy2/work/datasets/VISPR2017', resized_img_path)

        if osp.exists(resized_img_path):
            this_image_path = resized_img_path
        else:
            this_image_path = osp.join(SEG_ROOT, this_image_path)

        img = load_img(this_image_path, target_size=target_img_size)
        img_arr = img_to_array(img)
        X[idx] = img_arr

        image_id_list.append(image_id)

        pbar.update(idx)

    return X, Y, image_id_list 
Example 70
Project: deepflying   Author: dslab-deepflying   File: neural_style_transfer.py    GNU General Public License v3.0 4 votes vote down vote up
def train():
    print('WTF')
    evaluator = Evaluator()

    # run scipy-based optimization (L-BFGS) over the pixels of the generated image
    # so as to minimize the neural style loss
    x = preprocess_image(base_image_path)

    ori = load_img(base_image_path, target_size=(img_nrows, img_ncols))
    ori = img_to_array(ori)
    ori = np.array(ori, np.uint8)

    sty = load_img(style_reference_image_path, target_size=(img_nrows, img_ncols))
    sty = img_to_array(sty)
    sty = np.array(sty, np.uint8)

    # plt.figure(figsize=(5, 6))
    # plt.subplot(2, 3, 1), plt.title('Content')
    # plt.axis('off')
    # plt.imshow(ori)
    # plt.subplot(2, 3, 2), plt.title('Style')
    # plt.axis('off')
    # plt.imshow(sty)
    # plt.show()


    for i in range(iterations):
        start_time = time.time()
        x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                         fprime=evaluator.grads, maxfun=20)
        end_time = time.time()

        if i % SAVE_INTERVAL == 0:
            # save current generated image
            img = deprocess_image(x.copy())
            fname = result_prefix + '_at_iteration_%d.png' % i
            save_img(fname, img)
            # end_time = time.time()

            print('Image saved as', fname)
            # plt.imshow(img)
            # plt.show()

        sys.stdout.write(
            "\r Iteration %d [ loss: %f ] , time : %ds " % (i, min_val, end_time - start_time))
        sys.stdout.flush()

    print(" \n\rTranslation complete ! \n\r")
    plt.figure(figsize=(20, 24))
    plt.subplot(4, 6, 1), plt.title('Origin')
    plt.axis('off')
    plt.imshow(ori)
    plt.subplot(4, 6, 2), plt.title('NST')
    plt.axis('off')
    plt.imshow(img)
    plt.savefig("nst-final")
    # plt.show() 
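preprocess_image() and deprocess_image() are defined elsewhere in the script; in the standard Keras neural style transfer example they look roughly like the sketch below (VGG19 preprocessing and the img_nrows/img_ncols globals are assumptions):

from keras.applications import vgg19
from keras.preprocessing.image import load_img, img_to_array
import numpy as np

def preprocess_image(image_path):
    # Resize, add a batch dimension and apply the VGG19 mean subtraction / RGB->BGR flip.
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    return vgg19.preprocess_input(img)

def deprocess_image(x):
    # Undo the preprocessing so the optimized tensor can be saved as an image again.
    x = x.reshape((img_nrows, img_ncols, 3))
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]                    # BGR -> RGB
    return np.clip(x, 0, 255).astype('uint8')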
Example 71
Project: deepflying   Author: dslab-deepflying   File: nst.py    GNU General Public License v3.0 4 votes vote down vote up
def train():
    evaluator = Evaluator()

    # run scipy-based optimization (L-BFGS) over the pixels of the generated image
    # so as to minimize the neural style loss
    x = preprocess_image(base_image_path)

    mask = load_img(mask_image_path, target_size=(img_nrows, img_ncols))
    mask = img_to_array(mask)[:, :, 0]
    mask = np.array(mask, np.uint8)

    ori = load_img(base_image_path, target_size=(img_nrows, img_ncols))
    ori = img_to_array(ori)
    ori = np.array(ori, np.uint8)

    sty = load_img(style_reference_image_path, target_size=(img_nrows, img_ncols))
    sty = img_to_array(sty)
    sty = np.array(sty, np.uint8)

    for i in range(iterations):
        start_time = time.time()
        x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                         fprime=evaluator.grads, maxfun=20)
        end_time = time.time()

        if i % SAVE_INTERVAL == 0:
            # save current generated image
            img = deprocess_image(x.copy())
            fname = result_prefix + '_at_iteration_%d.png' % i
            save_img(fname, img)
            # end_time = time.time()

            bac_mask = cv2.bitwise_not(mask)
            bac_img = cv2.bitwise_and(ori, ori, mask=bac_mask)
            masked_img = cv2.bitwise_and(img, img, mask=mask)
            masked_img = cv2.add(bac_img, masked_img)
            print('Image saved as', fname)
            plt.imshow(masked_img)
            # plt.show()

        sys.stdout.write(
            "\r Iteration %d [ loss: %f ] , time : %ds " % (i, min_val, end_time - start_time))
        sys.stdout.flush()

    print(" \n\rTranslation complete ! \n\r") 
Example 73
Project: deploy-ml   Author: deploy-ml   File: loading_pictures.py    MIT License 4 votes vote down vote up
def load_picture_data(dims_one=28, dims_two=28, outcome_pointer="positive",
                      positive_file="positive_images", negative_file="negative_images", file_type="png"):

    positive_images = glob.glob("{}/*.{}".format(positive_file, file_type))

    negative_images = glob.glob("{}/*.{}".format(negative_file, file_type))

    data = []
    labels = []

    image_paths = positive_images + negative_images

    random.seed(42)
    random.shuffle(image_paths)

    # loop over the input images
    for imagePath in image_paths:

        # load the image, pre-process it, and store it in the data list
        image = cv2.imread(imagePath)
        image = cv2.resize(image, (dims_one, dims_two))
        image = img_to_array(image)
        data.append(image)

        # extract the class label from the image path and update the
        # labels list
        label = imagePath.split(os.path.sep)[-2]
        label = 1 if label == positive_file else 0
        labels.append(label)

    data = np.array(data, dtype="float") / 255.0
    labels = np.array(labels)

    # partition the data into training and testing splits using 75% of
    # the data for training and the remaining 25% for testing
    # self.X_train, self.X_test, self.y_train, self.y_test
    X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.25, random_state=42)

    # convert the labels from integers to vectors
    y_train = to_categorical(y_train, num_classes=2)
    y_test = to_categorical(y_test, num_classes=2)

    return X_train, X_test, y_train, y_test

    # construct the image generator for data augmentation
    # aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
    #                          height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
    #                          horizontal_flip=True, fill_mode="nearest") 
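A hypothetical invocation of the loader above; the two directories mirror the defaults and must each contain PNG files for the split to work:

X_train, X_test, y_train, y_test = load_picture_data(
    dims_one=28, dims_two=28,
    positive_file="positive_images", negative_file="negative_images",
    file_type="png")
print(X_train.shape, y_train.shape)   # e.g. (n, 28, 28, 3) and (n, 2)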
Example 74
Project: mnist-sample   Author: Paperspace   File: mnist_grpc_client.py    MIT License 4 votes vote down vote up
def do_inference(hostport, work_dir, concurrency, num_tests, model_name, hostname_override, insecure):
    """Tests PredictionService with concurrent requests.
    Args:
        hostport: Host:port address of the PredictionService.
        work_dir: The full path of working directory for test data set.
        concurrency: Maximum number of concurrent requests.
        num_tests: Number of test images to use.
    Returns:
        The classification error rate.
    Raises:
        IOError: An error occurred processing test data set.
    """
    if insecure:
        channel_options=None
        if hostname_override:
            channel_options=(('grpc.default_authority', hostname_override),)
        channel = grpc.insecure_channel(hostport, options=channel_options)
    else:
        channel_options=None
        if hostname_override:
            channel_options=(('grpc.ssl_target_name_override', hostname_override),)
        channel_creds = grpc.ssl_channel_credentials()
        channel = grpc.secure_channel(hostport, channel_creds, options=channel_options)

    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'serving_default'
    
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
    X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
    # For loading images
    # img = image.load_img('./data/mnist_png/testing/0/10.png', target_size=(28,28))
    #x = image.img_to_array(img)
    #request.inputs['images'].CopyFrom(
    #tf.contrib.util.make_tensor_proto(image2, shape=[1,1,image2.size]))
    image_index=randint(0, 9999)
    x= X_train[image_index][0]
    print("Shape is ",x.shape," Label is ", y_train[image_index])
    start = time.time()
    for _ in range(num_tests):
        xt= x.astype(np.float32)
        request.inputs['image'].CopyFrom(tf.contrib.util.make_tensor_proto(xt, shape=[1,1,28, 28]))
        #result_counter.throttle()
        result_future = stub.Predict.future(request, 10.25)  # 10.25 second timeout; increase if above 1000 iterations
        result_future.add_done_callback(_callback)
        end = time.time()
        image_index=randint(0, 9999)
        x= X_train[image_index][0]
    print("Time to Send ", num_tests ," is ",end - start)
        
    time.sleep(6) # increase if above 1000 iterations
    # if things are wrong the callback will not come, so uncomment below to force the result
    # or get to see what is the bug
    #res= result_future.result()
    #response = numpy.array(res.outputs['probabilities'].float_val)
    #prediction = numpy.argmax(response)
    #print("Predicted Result is ", prediction,"Detection Probability= ",response[prediction]) 
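The commented-out lines above hint at feeding a single PNG instead of mnist.load_data(); a minimal sketch of that path (the file path comes from the comment, grayscale loading is an assumption):

from keras.preprocessing import image
import numpy as np

img = image.load_img('./data/mnist_png/testing/0/10.png',
                     target_size=(28, 28), grayscale=True)
x = image.img_to_array(img)                     # shape (28, 28, 1)
x = x.reshape(1, 1, 28, 28).astype(np.float32)  # match the (1, 1, 28, 28) request shape above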
Example 75
Project: spark-deep-learning   Author: databricks   File: keras_sql_udf_test.py    Apache License 2.0 4 votes vote down vote up
def test_composite_udf(self):
        """ Composite Keras Image UDF registration """
        df = get_image_paths_df(self.sql)

        def keras_load_img(fpath):
            from keras.preprocessing.image import load_img, img_to_array
            import numpy as np
            from pyspark.sql import Row
            img = load_img(fpath, target_size=(299, 299))
            return img_to_array(img).astype(np.uint8)

        def pil_load_spimg(fpath):
            from PIL import Image
            import numpy as np
            img_arr = np.array(Image.open(fpath), dtype=np.uint8)
            # PIL is RGB, image schema is BGR => need to flip the channels
            return imageArrayToStruct(_reverseChannels(img_arr))

        def keras_load_spimg(fpath):
            # Keras loads image in RGB order, ImageSchema expects BGR => need to flip
            return imageArrayToStruct(_reverseChannels(keras_load_img(fpath)))

        # Load image with Keras and store it in our image schema
        JVMAPI.registerUDF('keras_load_spimg', keras_load_spimg,
                           ImageSchema.imageSchema['image'].dataType)
        JVMAPI.registerUDF('pil_load_spimg', pil_load_spimg,
                           ImageSchema.imageSchema['image'].dataType)

        # Register an InceptionV3 model
        registerKerasImageUDF("iv3_img_pred",
                              InceptionV3(weights="imagenet"),
                              keras_load_img)

        run_sql = self.session.sql

        # Choice 1: manually chain the functions in SQL
        df1 = run_sql("select iv3_img_pred(keras_load_spimg(fpath)) as preds from _test_image_paths_df")
        preds1 = np.array(df1.select("preds").rdd.collect())

        # Choice 2: build a pipelined UDF and directly use it in SQL
        JVMAPI.registerPipeline("load_img_then_iv3_pred", ["keras_load_spimg", "iv3_img_pred"])
        df2 = run_sql("select load_img_then_iv3_pred(fpath) as preds from _test_image_paths_df")
        preds2 = np.array(df2.select("preds").rdd.collect())

        # Choice 3: create the image tensor input table first and apply the Keras model
        df_images = run_sql("select pil_load_spimg(fpath) as image from _test_image_paths_df")
        df_images.createOrReplaceTempView("_test_images_df")
        df3 = run_sql("select iv3_img_pred(image) as preds from _test_images_df")
        preds3 = np.array(df3.select("preds").rdd.collect())

        self.assertTrue(len(preds1) == len(preds2))
        np.testing.assert_allclose(preds1, preds2)
        np.testing.assert_allclose(preds2, preds3) 
Example 76
Project: DisplaceNet   Author: GKalliatakis   File: save_raw_imgs.py    MIT License 4 votes vote down vote up
def save_img_to_numpy(base_img_dir,
                      base_csv_dir,
                      input_size,
                      mode='train',
                      to_file = 'numpy_annotations/x_train'
                      ):
    """ Saves images loaded from a CSV to numpy array.

        # Arguments
            base_img_dir: the directory where the raw images are stored.
            In our setup, we created train/, val/ and test/ subfolders inside EMOTIC_database/.

            base_csv_dir: the directory where the CSV files are stored.
            input_size: the default input size for the model (ref https://keras.io/applications/).
                All models have an input size of 224x224 except Xception, InceptionV3 and InceptionResNetV2, which use 299x299.
            mode: one of `train` (train set), `val` (validation set)
                or `test` (test set).
            to_file: the name or path of the numpy array where the images will be saved.
        """


    # Load CSV File With Pandas
    csv_name = base_csv_dir + mode + '.csv'
    csv_file = pandas.read_csv(csv_name)

    if mode == 'train':
        nb_samples = 23706
    elif mode == 'val':
        nb_samples = 3332
    elif mode == 'test':
        nb_samples = 7280

    field_number = 0

    # pre-allocating the data array, and then loading the data directly into it
    # ref: https://hjweide.github.io/efficient-image-loading
    data = np.empty((nb_samples, input_size, input_size, 3), dtype=np.uint8)

    for img_name in csv_file.filename:
        progress(field_number, nb_samples)

        img_name = base_img_dir + img_name
        img = image.load_img(img_name, target_size=(input_size, input_size))

        x = image.img_to_array(img) # this is a Numpy array with shape (input_size, input_size, 3)
        x = np.expand_dims(x, axis=0) # this is a Numpy array with shape (1, input_size, input_size, 3)
        x = preprocess_input(x)

        data[field_number, ...] = x

        field_number += 1
        if field_number > nb_samples - 1:
            break

    np.save(to_file, data)

    return data 
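A hypothetical call for the training split; the directory layout follows the docstring rather than the original script. Note that preprocess_input returns floating-point values, so assigning them into the uint8 buffer above casts them back to uint8.

# Paths and input size are assumptions; adjust to wherever the EMOTIC images and CSVs live.
x_train = save_img_to_numpy(base_img_dir='EMOTIC_database/',
                            base_csv_dir='annotations/',
                            input_size=224,
                            mode='train',
                            to_file='numpy_annotations/x_train')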
Example 77
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0 4 votes vote down vote up
def predict():
    # load the class_indices saved in the earlier step
    class_dictionary = np.load('class_indices.npy').item()

    num_classes = len(class_dictionary)

    # add the path to your test image below
    image_path = 'path/to/your/test_image'

    orig = cv2.imread(image_path)

    print("[INFO] loading and preprocessing image...")
    image = load_img(image_path, target_size=(224, 224))
    image = img_to_array(image)

    # important! otherwise the predictions will be '0'
    image = image / 255

    image = np.expand_dims(image, axis=0)

    # build the VGG16 network
    model = applications.VGG16(include_top=False, weights='imagenet')

    # get the bottleneck prediction from the pre-trained VGG16 model
    bottleneck_prediction = model.predict(image)

    # build top model
    model = Sequential()
    model.add(Flatten(input_shape=bottleneck_prediction.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='sigmoid'))

    model.load_weights(top_model_weights_path)

    # use the bottleneck prediction on the top model to get the final
    # classification
    class_predicted = model.predict_classes(bottleneck_prediction)

    probabilities = model.predict_proba(bottleneck_prediction)

    inID = class_predicted[0]

    inv_map = {v: k for k, v in class_dictionary.items()}

    label = inv_map[inID]

    # get the prediction label
    print("Image ID: {}, Label: {}".format(inID, label))

    # display the predictions with the image
    cv2.putText(orig, "Predicted: {}".format(label), (10, 30),
                cv2.FONT_HERSHEY_PLAIN, 1.5, (43, 99, 255), 2)

    cv2.imshow("Classification", orig)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 78
Project: satellite-imagery-change-detection   Author: soroushhashemifar   File: feat.py    GNU General Public License v3.0 4 votes vote down vote up
def extra_feat(img_path):
    #Using a RESNET50 as feature extractor
    base_model = ResNet50(weights='imagenet',include_top=False)
    img = image.load_img(img_path, target_size=RESIZE_SIZE)
    x_img = image.img_to_array(img)
    x = np.expand_dims(x_img, axis=0)
    x = preprocess_input(x)
    block1_pool_features=get_activations(base_model, 10, x)
    block2_pool_features=get_activations(base_model, 15, x)
    block3_pool_features=get_activations(base_model, 17, x)
    #block4_pool_features=get_activations(base_model, 20, x)
    block5_pool_features=get_activations(base_model, 22, x)
    #block6_pool_features=get_activations(base_model, 25, x)
    block7_pool_features=get_activations(base_model, 30, x)
    #block8_pool_features=get_activations(base_model, 35, x)
    block9_pool_features=get_activations(base_model, 37, x)
    #block10_pool_features=get_activations(base_model, 39, x)
    block11_pool_features=get_activations(base_model, 42, x)
    #block12_pool_features=get_activations(base_model, 45, x)
    block13_pool_features=get_activations(base_model, 46, x)
    block14_pool_features=get_activations(base_model, 47, x)
    block15_pool_features=get_activations(base_model, 50, x)
    #block16_pool_features=get_activations(base_model, 23, x)
    block17_pool_features=get_activations(base_model, 27, x)
    block18_pool_features=get_activations(base_model, 33, x)
    #block19_pool_features=get_activations(base_model, 38, x)
    block20_pool_features=get_activations(base_model, 43, x)
    block21_pool_features=get_activations(base_model, 49, x)

    x1 = tf.image.resize_images(block1_pool_features[0],RESIZE_SIZE)
    x2 = tf.image.resize_images(block2_pool_features[0],RESIZE_SIZE)
    x3 = tf.image.resize_images(block3_pool_features[0],RESIZE_SIZE)
    #x4 = tf.image.resize_images(block4_pool_features[0],RESIZE_SIZE)
    x5 = tf.image.resize_images(block5_pool_features[0],RESIZE_SIZE)
    #x6 = tf.image.resize_images(block6_pool_features[0],RESIZE_SIZE)
    x7 = tf.image.resize_images(block7_pool_features[0],RESIZE_SIZE)
    #x8 = tf.image.resize_images(block8_pool_features[0],RESIZE_SIZE)
    x9 = tf.image.resize_images(block9_pool_features[0],RESIZE_SIZE)
    #x10 = tf.image.resize_images(block10_pool_features[0],RESIZE_SIZE)
    x11 = tf.image.resize_images(block11_pool_features[0],RESIZE_SIZE)
    #x12 = tf.image.resize_images(block12_pool_features[0],RESIZE_SIZE)
    x13 = tf.image.resize_images(block13_pool_features[0],RESIZE_SIZE)
    x14 = tf.image.resize_images(block14_pool_features[0],RESIZE_SIZE)
    x15 = tf.image.resize_images(block15_pool_features[0],RESIZE_SIZE)
    #x16 = tf.image.resize_images(block16_pool_features[0],RESIZE_SIZE)
    x17 = tf.image.resize_images(block17_pool_features[0],RESIZE_SIZE)
    x18 = tf.image.resize_images(block18_pool_features[0],RESIZE_SIZE)
    #x19 = tf.image.resize_images(block19_pool_features[0],RESIZE_SIZE)
    x20 = tf.image.resize_images(block20_pool_features[0],RESIZE_SIZE)
    x21 = tf.image.resize_images(block21_pool_features[0],RESIZE_SIZE)

    F = tf.concat([x1,x2,x3,x5,x7,x9,x11,x13,x14,x15,x17,x18,x20,x21], 3)
    return F, x_img 
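get_activations() is not defined in this file; a common implementation built on keras.backend evaluates one layer's output for a batch, roughly as sketched below (the project's actual version may differ):

from keras import backend as K

def get_activations(model, layer_idx, X_batch):
    # Map the model input (in test mode) to the output of the requested layer.
    get_layer_output = K.function([model.layers[0].input, K.learning_phase()],
                                  [model.layers[layer_idx].output])
    return get_layer_output([X_batch, 0])   # a list holding one activation array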
Example 79
Project: ImageClassification   Author: nicoduj   File: ClassImageAugmentationSample.py    MIT License 4 votes vote down vote up
def generate(input_path = '',output_path = '', target_nb = 1000):
	count = 10
	fileOutput = '{}/Random_{}.jpg'



	for class_name in os.listdir(input_path.format('')) :

		if not class_name.startswith('.'):
		
			print ('Generating Training data for ', class_name)
	
			if not os.path.exists(output_path.format(class_name)) :
				os.makedirs(output_path.format(class_name))


			nbfiles = len([name for name in os.listdir(input_path.format(class_name)) if os.path.isfile(os.path.join(input_path.format(class_name), name)) and not name.startswith('.')])
			print ('NB existing images :', nbfiles)
			nbfilesRandom = len([name for name in os.listdir(output_path.format(class_name)) if os.path.isfile(os.path.join(output_path.format(class_name), name)) and not name.startswith('.')])
			total =  target_nb - (nbfiles+nbfilesRandom)
			print ('NB image to generate :', total)

			 
			gen = ImageDataGenerator(
				rotation_range=10,
				width_shift_range=0.1,
				height_shift_range=0.1,
				shear_range=0.1,
				zoom_range=0.1,
				horizontal_flip=False,
				fill_mode='nearest')

			j=nbfilesRandom
	
			while (j-nbfilesRandom)<total:
				# load image to array
				input_file = os.path.join(input_path.format(class_name),random.choice( [name for name in os.listdir(input_path.format(class_name)) if not name.startswith('.') ]) )
				print(input_file)
				image = img_to_array(load_img(input_file))

				# reshape to array rank 4
				image = image.reshape((1,) + image.shape)

				# let's create infinite flow of images
				images_flow = gen.flow(image, batch_size=1)
	
				for i, new_images in enumerate(images_flow):
					# we access only first image because of batch_size=1
					new_image = array_to_img(new_images[0], scale=True)
					new_image.save(output_path.format(fileOutput.format(class_name,j*1000 + i + 1)))
					print ('step:',i,j)
					j = j+1
					if i >= count:
						break
					if j >= total:
						break



#train directory to augment 
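A hypothetical call; both arguments are format strings whose '{}' placeholder is filled with each class sub-directory name inside the function:

generate(input_path='data/train/{}',
         output_path='data/train_augmented/{}',
         target_nb=1000)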
Example 80
Project: ssd_keras   Author: ndl-lab   File: inference_devided.py    MIT License 4 votes vote down vote up
def main():
    imgpathlist=list(glob.glob(os.path.join(path_test_prefix,"*")))
    cnt = 0
    while cnt<len(imgpathlist):
        inputs = []
        images = []
        filenames = []
        for img_path in imgpathlist[cnt:min(cnt+batch_size,len(imgpathlist))]:
            print(img_path)
            img = image.load_img(img_path, target_size=(300, 300))
            img = image.img_to_array(img)
            images.append(imread(img_path,mode="RGB"))
            inputs.append(img.copy())
            filenames.append(os.path.basename(img_path))
        inputs = preprocess_input(np.array(inputs))
        preds = model.predict(inputs, batch_size=1, verbose=1)
        results = bbox_util.detection_out(preds)
        cnt += batch_size
        for i, img in enumerate(images):
            print(i)
            # Parse the outputs.
            det_conf = results[i][:, 1]
            det_xmin = results[i][:, 2]
            det_xmax = results[i][:, 4]

            # Get detections with confidence higher than 0.2.
            top_indices = [k for k, conf in enumerate(det_conf) if conf >= 0.2]
            if len(top_indices)==0:
                cvimg = np.asarray(img)
                im = cv2pil(cvimg)
                im.save(os.path.join("output", filenames[i] + "_00.jpg"),
                         dpi=(dpiinfo["width_dpi"], dpiinfo["height_dpi"]), quality=100)
                continue
            top_conf = det_conf[top_indices]
            top_xmin = det_xmin[top_indices]
            top_xmax = det_xmax[top_indices]
            div_x=0
            for j in range(top_conf.shape[0]):
                print(top_conf[j])
                if j>=1:
                    break
                xmin = int(round(top_xmin[j] * img.shape[1]))
                xmax = int(round(top_xmax[j] * img.shape[1]))
                div_x=(xmin+xmax)//2
            cvimg = np.asarray(img)
            im1=cv2pil(cvimg[:,:div_x,::-1])
            im2=cv2pil(cvimg[:, div_x:, ::-1])
            im1.save(os.path.join("inference_output",filenames[i]+"_01.jpg"), dpi=(dpiinfo["width_dpi"],dpiinfo["height_dpi"]),quality=100)
            im2.save(os.path.join("inference_output", filenames[i] + "_02.jpg"), dpi=(dpiinfo["width_dpi"],dpiinfo["height_dpi"]),quality=100)

        del inputs,images
        gc.collect()