Python keras.preprocessing.image.img_to_array() Examples

The following are 29 code examples showing how to use keras.preprocessing.image.img_to_array(). They are extracted from open source projects; the originating project, author, file, and license are noted above each example.

You may also want to check out all available functions and classes of the module keras.preprocessing.image.
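
Before the project-specific examples, here is a minimal, generic sketch of the typical workflow around img_to_array(): load an image, convert it to a NumPy array, add a batch dimension, apply model-specific preprocessing, and run a prediction. The file path and the choice of ResNet50 are placeholders, not taken from any of the examples below.

import numpy as np
from keras.preprocessing import image
from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions

model = ResNet50(weights='imagenet')

img = image.load_img('example.jpg', target_size=(224, 224))  # hypothetical file
x = image.img_to_array(img)    # PIL image -> float32 array of shape (224, 224, 3)
x = np.expand_dims(x, axis=0)  # add a batch axis -> (1, 224, 224, 3)
x = preprocess_input(x)        # ResNet50-specific preprocessing
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])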

Example 1
Project: vergeml   Author: mme   File: imagenet.py    License: MIT License
def predict(self, f, k=5, resize_mode='fill'):
        from keras.preprocessing import image
        from vergeml.img import resize_image

        filename = os.path.basename(f)

        if not os.path.exists(f):
            return dict(filename=filename, prediction=[])

        img = image.load_img(f)
        img = resize_image(img, self.image_size, self.image_size, 'antialias', resize_mode)

        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = self.preprocess_input(x)
        preds = self.model.predict(x)
        pred = self._decode(preds, top=k)[0]
        prediction = [dict(probability=float(perc), label=klass) for _, klass, perc in pred]  # float(): np.asscalar was removed in newer NumPy

        return dict(filename=filename, prediction=prediction) 
Example 2
Project: Image-Caption-Generator   Author: dabasajay   File: preprocessing.py    License: MIT License
def extract_features(path, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Get CNN Model from model.py
	model = CNNModel(model_type)
	features = dict()
	# Extract features from each photo
	for name in tqdm(os.listdir(path)):
		# Loading and resizing image
		filename = path + name
		image = load_img(filename, target_size=target_size)
		# Convert the image pixels to a numpy array
		image = img_to_array(image)
		# Reshape data for the model
		image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
		# Prepare the image for the CNN model
		image = preprocess_input(image)
		# Pass image into model to get encoded features
		feature = model.predict(image, verbose=0)
		# Store encoded features for the image
		image_id = name.split('.')[0]
		features[image_id] = feature
	return features 
Example 3
Project: Image-Caption-Generator   Author: dabasajay   File: test.py    License: MIT License
def extract_features(filename, model, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Loading and resizing image
	image = load_img(filename, target_size=target_size)
	# Convert the image pixels to a numpy array
	image = img_to_array(image)
	# Reshape data for the model
	image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
	# Prepare the image for the CNN model
	image = preprocess_input(image)
	# Pass image into model to get encoded features
	features = model.predict(image, verbose=0)
	return features

# Load the tokenizer 
Example 4
Project: kaggle-carvana-2017   Author: killthekitten   File: ensemble_gpu.py    License: MIT License
def data_loader(q, ):
    for bi in batch_indices:
        start, end = bi
        x_batch = []
        filenames_batch = filenames[start:end]

        for filename in filenames_batch:
            imgs = []
            for d in dirs:
                img = img_to_array(load_img(os.path.join(d, filename), grayscale=True))
                imgs.append(np.squeeze(img))
            x_batch.append(np.array(imgs).transpose((1, 2, 0)))
        q.put((filenames_batch, np.array(x_batch)))

    for gpu in gpus:
        q.put((None, None)) 
Example 5
Project: ai-platform   Author: produvia   File: yolo_image.py    License: MIT License
def load_image_pixels(filename, shape):
	# load the image to get its shape
	image = load_img(filename)
	width, height = image.size
	# load the image with the required size
	image = load_img(filename, target_size=shape)
	# convert to numpy array
	image = img_to_array(image)
	# scale pixel values to [0, 1]
	image = image.astype('float32')
	image /= 255.0
	# add a dimension so that we have one sample
	image = expand_dims(image, 0)
	return image, width, height

# get all of the results above a threshold 
Example 6
Project: Car-Recognition   Author: foamliu   File: analyze.py    License: MIT License
def predict(img_dir, model):
    img_files = []
    for root, dirs, files in os.walk(img_dir, topdown=False):
        for name in files:
            img_files.append(os.path.join(root, name))
    img_files = sorted(img_files)

    y_pred = []
    y_test = []

    for img_path in tqdm(img_files):
        # print(img_path)
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        preds = model.predict(x[None, :, :, :])
        decoded = decode_predictions(preds, top=1)
        pred_label = decoded[0][0][0]
        # print(pred_label)
        y_pred.append(pred_label)
        tokens = img_path.split(os.path.sep)  # os.path.sep splits path components (os.pathsep is the PATH-list separator)
        class_id = int(tokens[-2])
        # print(str(class_id))
        y_test.append(class_id)

    return y_pred, y_test 
Example 7
Project: U-net   Author: DuFanXin   File: data_Keras.py    License: MIT License
def create_test_data(self):
		# generate .npy data for the test set
		i = 0
		print('-' * 30)
		print('Creating test images...')
		print('-' * 30)
		imgs = glob.glob(self.test_path + "/*." + self.img_type)           # ../data_set/train
		print(len(imgs))
		imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
		for imgname in imgs:
			midname = imgname[imgname.rindex("/") + 1:]   # image file name
			img = load_img(self.test_path + "/" + midname, grayscale=True)   # load the image as grayscale
			img = img_to_array(img)
			imgdatas[i] = img
			if i % 100 == 0:
				print('Done: {0}/{1} images'.format(i, len(imgs)))
			i += 1
		print('loading done', imgdatas.shape)
		np.save(self.npy_path + '/imgs_test.npy', imgdatas)            # generate .npy data from the 30 training images and 30 labels
		# np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
		print('Saving to .npy files done.') 
Example 8
Project: Transfer-Learning   Author: DhavalThkkar   File: predict.py    License: MIT License
def predict(model, img, target_size):
    """Run model prediction on image
    Args:
        model: keras model
        img: PIL format image
        target_size: (w,h) tuple
    Returns:
        list of predicted labels and their probabilities 
    """
    if img.size != target_size:
        img = img.resize(target_size)

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    return preds[0] 
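A minimal, hypothetical way to call the helper above, assuming a saved Keras classifier in 'model.h5' and an input image 'cat.jpg' (both placeholder paths, not from the original project):

from keras.models import load_model
from PIL import Image

model = load_model('model.h5')   # hypothetical weights file
img = Image.open('cat.jpg')      # hypothetical image; predict() resizes it to target_size
probs = predict(model, img, (224, 224))
print(probs)                     # class probabilities returned by model.predict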
Example 9
Project: lost   Author: l3p-cv   File: cluster_resnet.py    License: MIT License
def main(self):
        self.logger.info('Will load keras model')
        model = ResNet50(weights='imagenet')
        self.logger.info('Keras model loaded')
        feature_list = []
        img_path_list = []
        for raw_file in self.inp.raw_files:
            media_path = raw_file.path
            file_list = os.listdir(media_path)
            total = float(len(file_list))
            for index, img_file in enumerate(file_list):
                img_path = os.path.join(media_path, img_file)
                img_path_list.append(img_path)
                img = image.load_img(img_path, target_size=(224, 224))
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                # extract features
                scores = model.predict(x)
                sim_class = np.argmax(scores)
                print('Scores {}\nSimClass: {}'.format(scores, sim_class))
                self.outp.request_annos(img_path, img_sim_class=sim_class)
                self.logger.info('Requested annotation for: {} (cluster: {})'.format(img_path, sim_class))
                self.update_progress(index*100/total) 
Example 10
Project: neural-style-keras   Author: robertomest   File: utils.py    License: MIT License
def preprocess_image_crop(image_path, img_size):
    '''
    Preprocess the image scaling it so that its smaller size is img_size.
    The larger size is then cropped in order to produce a square image.
    '''
    img = load_img(image_path)
    scale = float(img_size) / min(img.size)
    new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
    # print('old size: %s,new size: %s' %(str(img.size), str(new_size)))
    img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    crop_h = img.shape[0] - img_size
    crop_v = img.shape[1] - img_size
    img = img[crop_h:img_size+crop_h, crop_v:img_size+crop_v, :]
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

# util function to open, resize and format pictures into appropriate tensors 
Example 11
Project: neural-style-keras   Author: robertomest   File: utils.py    License: MIT License
def preprocess_image_scale(image_path, img_size=None):
    '''
    Preprocess the image scaling it so that its larger size is max_size.
    This function preserves aspect ratio.
    '''
    img = load_img(image_path)
    if img_size:
        scale = float(img_size) / max(img.size)
        new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
        img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img


# util function to convert a tensor into a valid image 
Example 12
Project: imagecluster   Author: elcorto   File: io.py    License: BSD 3-Clause "New" or "Revised" License
def _image_worker(filename, size):
    # Handle PIL error "OSError: broken data stream when reading image file".
    # See https://github.com/python-pillow/Pillow/issues/1510 . We have this
    # issue with smartphone panorama JPG files. But instead of bluntly setting
    # ImageFile.LOAD_TRUNCATED_IMAGES = True and hoping for the best (is the
    # image read, and till the end?), we catch the OSError thrown by PIL and
    # ignore the file completely. This is better than reading potentially
    # undefined data and process it. A more specialized exception from PILs
    # side would be good, but let's hope that an OSError doesn't cover too much
    # ground when reading data from disk :-)
    try:
        print(filename)
        img = PIL.Image.open(filename).convert('RGB').resize(size, resample=3)
        arr = image.img_to_array(img, dtype=int)
        return filename, arr
    except OSError as ex:
        print(f"skipping {filename}: {ex}")
        return filename, None 
Example 13
Project: heatmaps   Author: gabrieldemarmiesse   File: demo.py    License: MIT License
def display_heatmap(new_model, img_path, ids, preprocessing=None):
    # The quality is reduced.
    # If you have more than 8GB of RAM, you can try to increase it.
    img = image.load_img(img_path, target_size=(800, 1280))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    if preprocessing is not None:
        x = preprocessing(x)  # apply the preprocessing function passed in

    out = new_model.predict(x)

    heatmap = out[0]  # Removing batch axis.

    if K.image_data_format() == 'channels_first':
        heatmap = heatmap[ids]
        if heatmap.ndim == 3:
            heatmap = np.sum(heatmap, axis=0)
    else:
        heatmap = heatmap[:, :, ids]
        if heatmap.ndim == 3:
            heatmap = np.sum(heatmap, axis=2)

    plt.imshow(heatmap, interpolation="none")
    plt.show() 
Example 14
Project: heatmaps   Author: gabrieldemarmiesse   File: helper.py    License: MIT License
def helper_test(model):
    img_path = "../examples/dog.jpg"
    new_model = to_heatmap(model)

    # Loading the image
    img = image.load_img(img_path, target_size=(800, 800))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    out = new_model.predict(x)

    s = "n02084071"  # Imagenet code for "dog"
    ids = synset_to_dfs_ids(s)
    heatmap = out[0]
    if K.image_data_format() == 'channels_first':
        heatmap = heatmap[ids]
        heatmap = np.sum(heatmap, axis=0)
    else:
        heatmap = heatmap[:, :, ids]
        heatmap = np.sum(heatmap, axis=2)
    print(heatmap.shape)
    assert heatmap.shape[0] == heatmap.shape[1]
    K.clear_session() 
Example 15
Project: fine-tuning   Author: holms-ur   File: predict.py    License: GNU General Public License v3.0
def predict(imagePath):
    img = load_img(imagePath)
    img = img_to_array(img)
    output = img.copy()
    # make prediction
    results = rcnn.detect([img], verbose=0)
    r = results[0]
    for (box, score) in zip(r['rois'], r['scores']):
          # filter out weak detections
          if score < 0.5:
               continue
          label = "{}: {:.2f}".format('table', score)
          cv2.rectangle(output, (box[1], box[0]), (box[3], box[2]),(0, 255, 0), 2)
          cv2.putText(output, label, (box[1], box[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.imwrite("prediction.jpg", output)
    return r['rois'] 
Example 16
Project: nyoka   Author: nyoka-pmml   File: testScoreWithAdapaKeras.py    License: Apache License 2.0
def test_01_image_classifier_with_image_as_input(self):
        
        cnn_pmml = KerasToPmml(self.model_final,model_name="MobileNetImage",description="Demo",\
            copyright="Internal User",dataSet='image',predictedClasses=['dogs','cats'])
        cnn_pmml.export(open('2classMBNet.pmml', "w"), 0)

        img = image.load_img('nyoka/tests/resizedCat.png')
        img = img_to_array(img)
        img = preprocess_input(img)
        imgtf = np.expand_dims(img, axis=0)
        model_pred=self.model_final.predict(imgtf)
        model_preds = {'dogs':model_pred[0][0],'cats':model_pred[0][1]}

        model_name  = self.adapa_utility.upload_to_zserver('2classMBNet.pmml')

        predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, 'nyoka/tests/resizedCat.png','DN')
  
        self.assertEqual(abs(probabilities['cats'] - model_preds['cats']) < 0.00001, True)
        self.assertEqual(abs(probabilities['dogs'] - model_preds['dogs']) < 0.00001, True) 
Example 17
Project: vess2ret   Author: costapt   File: data.py    License: MIT License
def _load_img_pair(self, idx, load_from_memory):
        """Get a pair of images with index idx."""
        if load_from_memory:
            a = self.a[idx]
            b = self.b[idx]
            return a, b

        fname = self.filenames[idx]

        a = load_img(os.path.join(self.a_dir, fname),
                     grayscale=self.is_a_grayscale,
                     target_size=self.target_size)
        b = load_img(os.path.join(self.b_dir, fname),
                     grayscale=self.is_b_grayscale,
                     target_size=self.target_size)

        a = img_to_array(a, self.dim_ordering)
        b = img_to_array(b, self.dim_ordering)

        return a, b 
Example 18
Project: sia-cog   Author: tech-quantum   File: objcls.py    License: MIT License
def predict(imagepath, target_x, target_y, name, model):
    if imagepath.startswith('http://') or imagepath.startswith('https://') or imagepath.startswith('ftp://'):
        response = requests.get(imagepath)
        img = Image.open(BytesIO(response.content))
        img = img.resize((target_x, target_y))
    else:
        if not os.path.exists(imagepath):
            raise Exception('Input image file does not exist')
        img = image.load_img(imagepath, target_size=(target_x, target_y))

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = processInputImage(name, x)
    preds = decodePrediction(name, model.predict(x))
    result = []
    for p in preds[0]:
        result.append({"synset": p[0], "text": p[1], "prediction": float("{0:.2f}".format((p[2] * 100)))})

    return json.loads(jsonpickle.encode(result, unpicklable=False)) 
Example 19
Project: DeepTL-Lane-Change-Classification   Author: Ekim-Yurtsever   File: dataset.py    License: MIT License
def load_images_for_keras(self, img_path, target_size=(224, 224)):

        features = []
        filenames = sorted(os.listdir(img_path))

        for filename in filenames:

            img = image.load_img(os.path.join(img_path, filename), target_size=target_size)
            img = image.img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img = preprocess_input(img)

            feature = self.model.predict(img)

            if img is not None:
                features.append(feature)

        return features 
Example 20
Project: detect-cell-edge-use-unet   Author: silencemao   File: data.py    License: GNU General Public License v2.0
def create_test_data(self):

        # generate .npy data for the test set
        i = 0
        print('-' * 30)
        print('Creating test images...')
        print('-' * 30)
        imgs = glob.glob(self.test_path + "/*." + self.img_type)           # deform/train
        print(len(imgs))
        imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
        for imgname in imgs:
            midname = imgname[imgname.rindex("/") + 1:]   # image file name
            img = load_img(self.test_path + "/" + midname, grayscale=True)   # load the image as grayscale
            img = img_to_array(img)
            imgdatas[i] = img
            if i % 100 == 0:
                print('Done: {0}/{1} images'.format(i, len(imgs)))
            i += 1
        print('loading done', imgdatas.shape)
        np.save(self.npy_path + '/imgs_test.npy', imgdatas)            # generate .npy data from the 30 training images and 30 labels
        # np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
        print('Saving to .npy files done.') 
Example 21
Project: Neural-Network-Projects-with-Python   Author: PacktPublishing   File: utils.py    License: MIT License
def get_data(dir):
    X_train, Y_train = [], []
    X_test, Y_test = [], []
    subfolders = sorted([file.path for file in os.scandir(dir) if file.is_dir()])
    for idx, folder in enumerate(subfolders):
        for file in sorted(os.listdir(folder)):
            img = load_img(folder+"/"+file, color_mode='grayscale')
            img = img_to_array(img).astype('float32')/255
            img = img.reshape(img.shape[0], img.shape[1],1)
            if idx < 35:
                X_train.append(img)
                Y_train.append(idx)
            else:
                X_test.append(img)
                Y_test.append(idx-35)

    X_train = np.array(X_train)
    X_test = np.array(X_test)
    Y_train = np.array(Y_train)
    Y_test = np.array(Y_test)
    return (X_train, Y_train), (X_test, Y_test) 
Example 22
Project: deep-learning-explorer   Author: waspinator   File: detector.py    License: Apache License 2.0
def detect(self, image, tolerance=2):
        """Detect objects in image

        inputs: PIL image, polygon fidelity tolerance
        """
        image = image.convert('RGB')
        image.thumbnail((IMAGE_SIZE, IMAGE_SIZE))
        image = img_to_array(image)
        result = self.model.detect([image])[0]
        width = np.shape(image)[1]
        height = np.shape(image)[0]

        coco = extra_utils.result_to_coco(result, CLASS_NAMES,
                                          (width, height), tolerance)

        return coco 
Example 23
Project: deep-learning-note   Author: wdxtub   File: utils.py    License: MIT License
def preprocess_image(image_path):
    img = image.load_img(image_path)
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img 
Example 24
Project: deep-learning-note   Author: wdxtub   File: 3_nerual_style_transfer.py    License: MIT License
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_height, img_width))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img 
Example 25
Project: kaggle-carvana-2017   Author: killthekitten   File: predict_multithreaded.py    License: MIT License
def data_loader(q, ):
    for start in tqdm(range(0, len(filenames), batch_size)):
        x_batch = []
        end = min(start + batch_size, len(filenames))
        filenames_batch = filenames[start:end]

        for filename in filenames_batch:
            img = load_img(filename)

            stacked_channels = []
            for i in range(args.stacked_channels):
                channel_path = os.path.join(args.stacked_channels_dir,
                                            str(i),
                                            filename.split('/')[-1].replace('.jpg', '.png'))
                stacked_channel = load_img(channel_path, grayscale=True)
                stacked_channels.append(stacked_channel)
            stacked_img = np.dstack((img, *stacked_channels))

            x_batch.append(img_to_array(stacked_img))

        x_batch = preprocess_input(np.array(x_batch, np.float32), mode=args.preprocessing_function)
        if args.pred_tta:
            x_batch = do_tta(x_batch, args.pred_tta)
        padded_x = np.zeros((batch_size, 1280, 1920, args.stacked_channels + 3))
        padded_x[:, :, 1:-1, :] = x_batch
        q.put((filenames_batch, padded_x))

    for gpu in gpus:
        q.put((None, None)) 
Example 26
Project: kaggle-carvana-2017   Author: killthekitten   File: predict_masks.py    License: MIT License
def predict():
    output_dir = args.pred_mask_dir
    model = make_model((None, None, 3))
    model.load_weights(args.weights)
    batch_size = args.pred_batch_size
    nbr_test_samples = 100064

    filenames = [os.path.join(args.test_data_dir, f) for f in sorted(os.listdir(args.test_data_dir))]

    start_time = clock()
    for i in range(int(nbr_test_samples / batch_size) + 1):
        x = []
        for j in range(batch_size):
            if i * batch_size + j < len(filenames):
                img = load_img(filenames[i * batch_size + j], target_size=(args.img_height, args.img_width))
                x.append(img_to_array(img))
        x = np.array(x)
        x = preprocess_input(x, args.preprocessing_function)
        x = do_tta(x, args.pred_tta)
        batch_x = np.zeros((x.shape[0], 1280, 1920, 3))
        batch_x[:, :, 1:-1, :] = x
        preds = model.predict_on_batch(batch_x)
        preds = undo_tta(preds, args.pred_tta)
        for j in range(batch_size):
            filename = filenames[i * batch_size + j]
            prediction = preds[j][:, 1:-1, :]
            array_to_img(prediction * 255).save(os.path.join(output_dir, filename.split('/')[-1][:-4] + ".png"))
        time_spent = clock() - start_time
        print("predicted batch ", str(i))
        print("Time spent: {:.2f}  seconds".format(time_spent))
        print("Speed: {:.2f}  ms per image".format(time_spent / (batch_size * (i + 1)) * 1000))
        print("Elapsed: {:.2f} hours  ".format(time_spent / (batch_size * (i + 1)) / 3600 * (nbr_test_samples - (batch_size * (i + 1))))) 
Example 27
Project: image-similarity-clustering   Author: zegami   File: features.py    License: MIT License
def _extract(fp, model):
        # Load the image, setting the size to 224 x 224
        img = image.load_img(fp, target_size=(224, 224))
        
        # Convert the image to a numpy array, add a batch dimension -> (1, 224, 224, 3), and preprocess it
        img_data = image.img_to_array(img)
        img_data = np.expand_dims(img_data, axis=0)
        img_data = preprocess_input(img_data)

        # Extract the features
        np_features = model.predict(img_data)[0]
        
        # Convert from Numpy to a list of values
        return np.char.mod('%f', np_features) 
Example 28
Project: dataiku-contrib   Author: dataiku   File: dl_image_toolbox_utils.py    License: Apache License 2.0
def preprocess_img(img_path, img_shape, preprocessing):
    img = load_img(img_path,target_size=img_shape)
    array = img_to_array(img)
    array = preprocessing(array)
    return array 
Example 29
Project: spark-deep-learning   Author: databricks   File: image_utils.py    License: Apache License 2.0
def loadAndPreprocessKerasInceptionV3(raw_uri):
    # this is the canonical way to load and prep images in keras
    uri = raw_uri[5:] if raw_uri.startswith("file:/") else raw_uri
    image = img_to_array(load_img(uri, target_size=InceptionV3Constants.INPUT_SHAPE))
    image = np.expand_dims(image, axis=0)
    return preprocess_input(image)