Python keras.applications.resnet50.preprocess_input() Examples

The following are code examples for showing how to use keras.applications.resnet50.preprocess_input(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: cbc_networks   Author: saralajew   File: imagenet_cnn_reasoning_trainable_patches.py    BSD 3-Clause "New" or "Revised" License 7 votes vote down vote up
def get_data_generators(args):
    """Create the ImageNet train and validation image generators.

    The ImageNet dataset is too large to load completely into memory, so
    the training and test images are streamed from disk through Keras
    ImageDataGenerator instances.

    # Arguments
        args: Namespace providing `train_path`, `test_path` and
            `batch_size`.

    # Returns
        Two data generators, one for training data and one for test data.
    """
    def _make_generator(directory):
        # Both generators share the same configuration: ResNet50-style
        # preprocessing, 224x224 RGB crops, categorical labels and the
        # project's custom interpolation mode.
        datagen = ImageDataGenerator(dtype='float32',
                                     preprocessing_function=preprocess_input)
        return datagen.flow_from_directory(
            directory,
            target_size=(224, 224),
            color_mode='rgb',
            batch_size=args.batch_size,
            class_mode='categorical',
            shuffle=True,
            interpolation='custom_imagenet')

    return _make_generator(args.train_path), _make_generator(args.test_path)
Example 2
Project: Sacred_Deep_Learning   Author: AAbercrombie0492   File: image_utilities.py    GNU General Public License v3.0 6 votes vote down vote up
def preprocess_input_resnet(x):
    """Wrapper around keras.applications.resnet50.preprocess_input()
    to make it compatible for use with keras.preprocessing.image.ImageDataGenerator's
    `preprocessing_function` argument.

    Parameters
    ----------
    x : a numpy 3darray (a single image to be preprocessed)

    Note we cannot pass keras.applications.resnet50.preprocess_input()
    directly to keras.preprocessing.image.ImageDataGenerator's
    `preprocessing_function` argument because the former expects a
    4D tensor whereas the latter expects a 3D tensor. Hence the
    existence of this wrapper.

    Returns a numpy 3darray (the preprocessed image).
    """
    import numpy as np
    from keras.applications.resnet50 import preprocess_input
    # Add a batch dimension, preprocess, then strip the batch dimension again.
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x[0]
Example 3
Project: visual_redactions   Author: tribhuvanesh   File: wsl.py    Apache License 2.0 6 votes vote down vote up
def img_to_features(X, model, batch_size=64):
    """Run `model` over `X` in batches and return a feature matrix.

    # Arguments
        X: image array of shape (n_img, h, w, c).
        model: Keras model; its last output dimension is the feature size.
        batch_size: number of images per predict() call.

    # Returns
        numpy array of shape (n_img, model.output_shape[-1]).
    """
    n_img, n_h, n_w, n_c = X.shape
    # Integer division: the original `/` yields a float under Python 3,
    # which is not a valid target count for Progbar.
    n_batches = n_img // batch_size + 1
    n_feat = model.output_shape[-1]

    feat_mat = np.zeros((n_img, n_feat))

    pbar = Progbar(n_batches)

    for b_idx, start_idx in enumerate(range(0, n_img, batch_size)):
        end_idx = min(start_idx + batch_size, n_img)
        this_batch_size = end_idx - start_idx

        bx = X[start_idx:end_idx]
        # Copy so preprocess_input cannot mutate the caller's array in place.
        bx = preprocess_input(bx.copy())
        batch_feat = model.predict(bx)

        feat_mat[start_idx:end_idx] = batch_feat.reshape((this_batch_size, n_feat))
        pbar.update(b_idx)

    return feat_mat
Example 4
Project: lost   Author: l3p-cv   File: cluster_resnet.py    MIT License 6 votes vote down vote up
def main(self):
        """Classify every raw input image with ResNet50 and request a MIA
        annotation tagged with the predicted ImageNet class.
        """
        self.logger.info('Will load keras model')
        model = ResNet50(weights='imagenet')
        self.logger.info('Keras model loaded')
        # NOTE(review): feature_list is never used in this method.
        feature_list = []
        img_path_list = []
        for raw_file in self.inp.raw_files:
            media_path = raw_file.path
            file_list = os.listdir(media_path)
            total = float(len(file_list))
            for index, img_file in enumerate(file_list):
                img_path = os.path.join(media_path, img_file)
                img_path_list.append(img_path)
                # Resize to the 224x224 input expected by ResNet50.
                img = image.load_img(img_path, target_size=(224, 224))
                x = keras_image.img_to_array(img)
                # Add the batch dimension: the model expects (1, 224, 224, 3).
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                # extract features
                scores = model.predict(x)
                # Use the most probable ImageNet class as a similarity class.
                sim_class = np.argmax(scores)
                print('Scores {}\nSimClass: {}'.format(scores, sim_class))
                self.outp.request_annos(img_path, img_sim_class=sim_class)
                self.logger.info('Requested annotation for: {} (cluster: {})'.format(img_path, sim_class))
                self.update_progress(index*100/total)
Example 5
Project: documents   Author: AuTURBO   File: classify.py    GNU General Public License v3.0 6 votes vote down vote up
def callback(image_msg):
    """ROS image callback: classify the incoming frame with the global Keras
    model and publish the top-1 class name and its probability.
    """
    #First convert the image to OpenCV image 
    cv_image = bridge.imgmsg_to_cv2(image_msg, desired_encoding="passthrough")
    cv_image = cv2.resize(cv_image, target_size)  # resize image
    np_image = np.asarray(cv_image)               # read as np array
    np_image = np.expand_dims(np_image, axis=0)   # Add another dimension for tensorflow
    np_image = np_image.astype(float)  # preprocess needs float64 and img is uint8
    np_image = preprocess_input(np_image)         # Regularize the data
    
    global graph                                  # This is a workaround for asynchronous execution
    with graph.as_default():
       preds = model.predict(np_image)            # Classify the image
       # decode returns a list  of tuples [(class,description,probability),(class, descrip ...
       pred_string = decode_predictions(preds, top=1)[0]   # Decode top 1 predictions
       msg_string.data = pred_string[0][1]
       msg_float.data = float(pred_string[0][2])
       pub.publish(msg_string)
       pub1.publish(msg_float)
Example 6
Project: jenkins-x-seldon-core-sandbox   Author: SeldonIO   File: ONNXResNet.py    Apache License 2.0 6 votes vote down vote up
def __init__(self):
        """Load the ResNet50 ONNX model, compile it with nGraph on CPU and
        run one warm-up prediction on the bundled zebra image.
        """
        print("Loading model")
        # Import the ONNX file
        models = import_onnx_file('resnet50/model.onnx')
        # Create an nGraph runtime environment
        runtime = ng.runtime(backend_name='CPU')
        # Select the first model and compile it to a callable function
        model = models[0]
        self.resnet = runtime.computation(model['output'], *model['inputs'])
        print("Model loaded")

        #Do a test run to warm up and check all is ok
        print("Running test on img of Zebra as warmup")
        img = image.load_img('zebra.jpg', target_size=(224, 224))
        img = image.img_to_array(img)
        x = np.expand_dims(img.copy(), axis=0)
        # NOTE(review): 'torch'-mode preprocessing (scale to [0,1] plus
        # ImageNet mean/std) is used instead of the Keras default 'caffe'
        # mode — presumably to match how the ONNX model was exported; confirm.
        x = preprocess_input(x,mode='torch')
        # NHWC -> NCHW, the layout the ONNX graph expects.
        x = x.transpose(0,3,1,2)
        preds = self.resnet(x)
        print(decode_predictions(preds[0], top=5))
Example 7
Project: DeepTL-Lane-Change-Classification   Author: Ekim-Yurtsever   File: dataset.py    MIT License 6 votes vote down vote up
def load_images_for_keras(self, img_path, target_size=(224, 224)):
        """Load every image under ``img_path``, preprocess it for the CNN and
        return one feature array per image from ``self.model``.

        # Arguments
            img_path: directory containing the image files.
            target_size: (height, width) the images are resized to.

        # Returns
            List of model prediction arrays, in sorted-filename order.
        """
        features = []
        filenames = sorted(os.listdir(img_path))

        for filename in filenames:
            img = image.load_img(os.path.join(img_path, filename), target_size=target_size)
            img = image.img_to_array(img)
            img = np.expand_dims(img, axis=0)
            img = preprocess_input(img)

            feature = self.model.predict(img)

            # The original guarded on `img is not None`, which is always true
            # at this point (load_img raises on failure), so the feature is
            # appended unconditionally.
            features.append(feature)

        return features
Example 8
Project: CarND-Transfer-Learning-Lab   Author: udacity   File: run_bottleneck.py    MIT License 6 votes vote down vote up
def gen(session, data, labels, batch_size):
    """Return a generator factory yielding preprocessed (X, y) batches forever.

    Each batch is resized through the global `resize_op` (fed via
    `img_placeholder`), run through preprocess_input, and paired with the
    matching slice of `labels`. The cursor wraps back to the start of the
    data once an epoch is exhausted.

    # Arguments
        session: TensorFlow session used to evaluate the resize op.
        data: array of input images.
        labels: array of labels aligned with `data`.
        batch_size: number of samples per yielded batch.
    """
    def _f():
        start = 0
        end = start + batch_size
        n = data.shape[0]

        while True:
            X_batch = session.run(resize_op, {img_placeholder: data[start:end]})
            X_batch = preprocess_input(X_batch)
            y_batch = labels[start:end]
            start += batch_size
            end += batch_size
            if start >= n:
                # Wrap around for the next epoch.
                start = 0
                end = batch_size

            # (leftover debug print of the batch cursor removed)
            yield (X_batch, y_batch)

    return _f
Example 9
Project: deep_learning   Author: jarvisqi   File: cat_dog.py    MIT License 6 votes vote down vote up
def pred_data():
    """Restore the serialized cat/dog classifier and print the predicted
    class for every image found under ./data/test/.
    """
    with open('./models/cat_dog.yaml') as yamlfile:
        loaded_model_yaml = yamlfile.read()
    model = model_from_yaml(loaded_model_yaml)
    model.load_weights('./models/cat_dog.h5')

    # Recompile so predict_classes can be used on the restored model.
    sgd = Adam(lr=0.0003)
    model.compile(loss='categorical_crossentropy',optimizer=sgd, metrics=['accuracy'])

    images = []
    path='./data/test/'
    for f in os.listdir(path):
        loaded = image.load_img(path + f, target_size=image_size)
        batch = np.expand_dims(image.img_to_array(loaded), axis=0)
        batch = preprocess_input(batch)
        result = model.predict_classes(batch,verbose=0)
        print(f,result[0])
Example 10
Project: Kernelized_Correlation_Filter   Author: stevenwudi   File: KCFpy_debug.py    GNU General Public License v3.0 6 votes vote down vote up
def get_scale_sample_dnn(self, im, scaleFactors):
        """Extract DNN features for patches of `im` sampled at several scales.

        For each scale factor a patch of `first_patch_sz * s` pixels around
        `self.pos` is cropped, resized to the network input size, passed
        through the feature-extraction model, min-max normalised and
        multiplied by the cosine window.
        """
        from keras.applications.vgg19 import preprocess_input
        resized_im_array = np.zeros(shape=(len(scaleFactors), self.resize_size[0], self.resize_size[1], 3))
        for i, s in enumerate(scaleFactors):
            # patch_sz = np.floor(self.first_target_sz * s)
            patch_sz = np.rint(self.first_patch_sz * s)
            im_patch = self.get_subwindow(im, self.pos, patch_sz)  # extract image
            # resize image to model size
            resized_im_array[i] = imresize(im_patch, self.resize_size)

        # NHWC -> NCHW, the layout the extraction model expects.
        dnn_input = resized_im_array.transpose(0, 3, 1, 2).astype(np.float64)
        dnn_input = preprocess_input(dnn_input)
        features_list = self.extract_model_function(dnn_input)
        features = features_list[0]
        # Min-max normalise to [0, 1], then apply the cosine window
        # (broadcast over the batch and channel dimensions).
        features = (features.transpose(0, 2, 3, 1) - features.min()) / (features.max() - features.min())
        features = np.multiply(features, self.cos_window[0][None, :, :, None])
        return features
Example 11
Project: HistologyCancerDiagnosisDeepPersistenceHomology   Author: KitwareMedical   File: rgb_data_loader.py    Apache License 2.0 6 votes vote down vote up
def preprocess(self, img_path):
        """Read an image, colour-normalise it (Reinhard) and run the ResNet
        preprocessing; returns the preprocessed array.
        """
        img = skimage.io.imread(img_path)
        # 1024x1024 slides are downsampled 4x to 256x256 before processing.
        if img.shape == (1024, 1024, 3):
                img = img[::4, ::4, :]

        # The image id is encoded as a float-looking number inside the path.
        image_id = int(float(re.findall("\d+\.\d+", img_path)[0]))

        if image_id in self.stats.keys():
            # Use the cached per-image source statistics when available.
            [src_mu, src_sigma] = self.stats[image_id]
            img_nmzd = htk_cnorm.reinhard(img, self.ref_mu_lab, self.ref_std_lab, src_mu=src_mu, src_sigma=src_sigma).astype('float')
        else:
            # Fall back to letting reinhard() estimate source stats itself.
            print '#### stats for %d not present' % (image_id)
            img_nmzd = htk_cnorm.reinhard(img, self.ref_mu_lab, self.ref_std_lab).astype('float')


        img = preprocess_resnet(img_nmzd)

        return img
Example 12
Project: seldon-core   Author: SeldonIO   File: ONNXResNet.py    Apache License 2.0 6 votes vote down vote up
def __init__(self):
        """Load the ResNet50 ONNX model, compile it with nGraph on CPU and
        run one warm-up prediction on the bundled zebra image.
        """
        print("Loading model")
        # Import the ONNX file
        models = import_onnx_file('resnet50/model.onnx')
        # Create an nGraph runtime environment
        runtime = ng.runtime(backend_name='CPU')
        # Select the first model and compile it to a callable function
        model = models[0]
        self.resnet = runtime.computation(model['output'], *model['inputs'])
        print("Model loaded")

        #Do a test run to warm up and check all is ok
        print("Running test on img of Zebra as warmup")
        img = image.load_img('zebra.jpg', target_size=(224, 224))
        img = image.img_to_array(img)
        x = np.expand_dims(img.copy(), axis=0)
        # NOTE(review): 'torch'-mode preprocessing (scale to [0,1] plus
        # ImageNet mean/std) instead of the default 'caffe' mode — presumably
        # matching how the ONNX model was exported; confirm.
        x = preprocess_input(x,mode='torch')
        # NHWC -> NCHW, the layout the ONNX graph expects.
        x = x.transpose(0,3,1,2)
        preds = self.resnet(x)
        print(decode_predictions(preds[0], top=5))
Example 13
Project: image-similarity-clustering   Author: zegami   File: extract.py    MIT License 5 votes vote down vote up
def get_feature(metadata):
    """Return {'id', 'features'} for one metadata record, or None on failure.

    The feature vector is produced by `pargs.model` on the 224x224 image
    named by metadata['image'] under `source_dir`/images, serialised as a
    comma-joined string of floats.
    """
    print('{}'.format(metadata['id']))
    try:
        img_path = os.path.join(source_dir, 'images', metadata['image'])
        if os.path.isfile(img_path):
            print('is file: {}'.format(img_path))
            try:
                # load image setting the image size to 224 x 224
                img = image.load_img(img_path, target_size=(224, 224))
                # convert image to numpy array
                x = image.img_to_array(img)
                # the image is now an array of shape (224, 224, 3)
                # but we need to expand it to (1, 224, 224, 3) as Keras is expecting a batch of images
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)

                # extract the features
                features = pargs.model.predict(x)[0]
                # convert from Numpy to a list of values
                features_arr = np.char.mod('%f', features)

                return {"id": metadata['id'], "features": ','.join(features_arr)}
            except Exception as ex:
                # skip all exceptions for now
                print(ex)
                pass
    except Exception as ex:
        # skip all exceptions for now
        print(ex)
        pass
    return None
Example 14
Project: spark-deep-learning   Author: databricks   File: named_image_test.py    Apache License 2.0 5 votes vote down vote up
def test_imagenet_preprocess_input(self):
        # compare our tf implementation to the np implementation in keras
        image = np.zeros((256, 256, 3))

        sess = tf.Session()
        with sess.as_default():
            x = tf.placeholder(tf.float32, shape=[256, 256, 3])
            # NOTE(review): the trailing comma makes `processed` a 1-tuple, so
            # sess.run returns a 1-element list — that extra leading dimension
            # lines up with keras' batched output below; confirm intentional.
            processed = keras_apps._imagenet_preprocess_input(x, (256, 256)),
            sparkdl_preprocessed_input = sess.run(processed, {x: image})

        # Keras reference: preprocess the same (batched) zero image.
        keras_preprocessed_input = resnet50.preprocess_input(np.expand_dims(image, axis=0))

        # NOTE: precision errors occur for decimal > 5
        np.testing.assert_array_almost_equal(sparkdl_preprocessed_input, keras_preprocessed_input,
                                             decimal=5)
Example 15
Project: spark-deep-learning   Author: databricks   File: test_pieces.py    Apache License 2.0 5 votes vote down vote up
def test_spimage_converter_module(self):
        """ spimage converter module must preserve original image """
        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        def exec_gfn_spimg_decode(spimg_dict, img_dtype):
            # Build a BGR image-converter graph and run it on the struct data.
            gfn = gfac.buildSpImageConverter('BGR', img_dtype)
            with IsolatedSession() as issn:
                feeds, fetches = issn.importGraphFunction(gfn, prefix="")
                feed_dict = dict(
                    (tnsr, spimg_dict[tfx.op_name(tnsr, issn.graph)]) for tnsr in feeds)
                img_out = issn.run(fetches[0], feed_dict=feed_dict)
            return img_out

        def check_image_round_trip(img_arr):
            # Array -> Spark image struct -> array must be lossless.
            spimg_dict = imageArrayToStruct(img_arr).asDict()
            spimg_dict['data'] = bytes(spimg_dict['data'])
            img_arr_out = exec_gfn_spimg_decode(
                spimg_dict, imageTypeByOrdinal(spimg_dict['mode']).dtype)
            self.assertTrue(np.all(img_arr_out == img_arr))

        for fp in img_fpaths:
            img = load_img(fp)

            # Round-trip must hold for uint8, float32 and preprocessed inputs.
            img_arr_byte = img_to_array(img).astype(np.uint8)
            check_image_round_trip(img_arr_byte)

            img_arr_float = img_to_array(img).astype(np.float32)
            check_image_round_trip(img_arr_float)

            img_arr_preproc = iv3.preprocess_input(img_to_array(img))
            check_image_round_trip(img_arr_preproc)
Example 16
Project: spark-deep-learning   Author: databricks   File: test_pieces.py    Apache License 2.0 5 votes vote down vote up
def test_bare_keras_module(self):
        """ Keras GraphFunctions should give the same result as standard Keras models """
        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        # Check three architectures, each with its own preprocessor and size.
        for model_gen, preproc_fn, target_size in [(InceptionV3, iv3.preprocess_input, model_sizes['InceptionV3']),
                                      (Xception, xcpt.preprocess_input, model_sizes['Xception']),
                                      (ResNet50, rsnt.preprocess_input, model_sizes['ResNet50'])]:

            keras_model = model_gen(weights="imagenet")
            _preproc_img_list = []
            for fpath in img_fpaths:
                img = load_img(fpath, target_size=target_size)
                # WARNING: must apply expand dimensions first, or ResNet50 preprocessor fails
                img_arr = np.expand_dims(img_to_array(img), axis=0)
                _preproc_img_list.append(preproc_fn(img_arr))

            imgs_input = np.vstack(_preproc_img_list)

            # Reference predictions from the plain Keras model.
            preds_ref = keras_model.predict(imgs_input)

            # Same model exported as a GraphFunction, executed in isolation.
            gfn_bare_keras = GraphFunction.fromKeras(keras_model)

            with IsolatedSession(using_keras=True) as issn:
                K.set_learning_phase(0)
                feeds, fetches = issn.importGraphFunction(gfn_bare_keras)
                preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_input})

            np.testing.assert_array_almost_equal(preds_tgt,
                                                 preds_ref,
                                                 decimal=self.featurizerCompareDigitsExact)
Example 17
Project: spark-deep-learning   Author: databricks   File: test_pieces.py    Apache License 2.0 5 votes vote down vote up
def test_pipeline(self):
        """ Pipeline should provide correct function composition """
        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        # Compose (spark-image decoder) -> (Xception) into one GraphFunction.
        xcpt_model = Xception(weights="imagenet")
        stages = [('spimage', gfac.buildSpImageConverter('BGR', 'float32')),
                  ('xception', GraphFunction.fromKeras(xcpt_model))]
        piped_model = GraphFunction.fromList(stages)

        for fpath in img_fpaths:
            target_size = model_sizes['Xception']
            img = load_img(fpath, target_size=target_size)
            img_arr = np.expand_dims(img_to_array(img), axis=0)
            img_input = xcpt.preprocess_input(img_arr)
            # Reference: predictions from the plain Keras model.
            preds_ref = xcpt_model.predict(img_input)

            spimg_input_dict = imageArrayToStruct(img_input).asDict()
            spimg_input_dict['data'] = bytes(spimg_input_dict['data'])
            with IsolatedSession() as issn:
                # Need blank import scope name so that spimg fields match the input names
                feeds, fetches = issn.importGraphFunction(piped_model, prefix="")
                feed_dict = dict(
                    (tnsr, spimg_input_dict[tfx.op_name(tnsr, issn.graph)]) for tnsr in feeds)
                preds_tgt = issn.run(fetches[0], feed_dict=feed_dict)
                # Uncomment the line below to see the graph
                # tfx.write_visualization_html(issn.graph,
                # NamedTemporaryFile(prefix="gdef", suffix=".html").name)

            np.testing.assert_array_almost_equal(preds_tgt,
                                                 preds_ref,
                                                 decimal=self.featurizerCompareDigitsExact)
Example 18
Project: videofeatures   Author: jonasrothfuss   File: CNNFeatures.py    MIT License 5 votes vote down vote up
def computeFeatures(self, video):
    """Apply VGG16 preprocessing to `video` and return the model's features."""
    preprocessed = vgg16.preprocess_input(video)
    return self.model.predict(preprocessed)
Example 19
Project: videofeatures   Author: jonasrothfuss   File: CNNFeatures.py    MIT License 5 votes vote down vote up
def computeFeatures(self, video):
    """Apply ResNet50 preprocessing to `video` and return the model's
    features flattened to shape (-1, 2048)."""
    preprocessed = resnet50.preprocess_input(video)
    predictions = self.model.predict(preprocessed)
    return predictions.reshape((-1, 2048))
Example 20
Project: Sacred_Deep_Learning   Author: AAbercrombie0492   File: image_utilities.py    GNU General Public License v3.0 5 votes vote down vote up
def load_images_from_directory(directory, n_samples):
    '''
    Load up to n_samples images from a directory, preprocessed for ResNet50.

    Returns
    -------
    image_arrays : list of (1, 224, 224, 3) arrays.
    files : list of every file name found in the directory.
    fail_log : newline-separated names of files that failed to load.
    '''
    from os import listdir
    from os.path import isfile, join
    from keras.preprocessing import image
    from tqdm import tqdm
    from keras.applications.resnet50 import preprocess_input

    fail_log = ''
    image_arrays = []
    files = [f for f in listdir(directory) if isfile(join(directory, f))]

    for img_path in tqdm(files[:n_samples]):
        try:
            # Use the locally imported join (the function does not import os).
            full_path = join(directory, img_path)
            img = image.load_img(full_path, target_size=(224, 224))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            image_arrays.append(x)
        except Exception:
            # fail_log was returned but never populated because this handler
            # was commented out; restore the intended failure logging so a
            # single unreadable file does not abort the whole batch.
            fail_log += '{}\n'.format(img_path)
            continue

    return image_arrays, files, fail_log
Example 21
Project: MassImageRetrieval   Author: liuguiyangnwpu   File: nn_feature_extraction.py    Apache License 2.0 5 votes vote down vote up
def __fetch_nn_feature(batch_image, batch_file_name):
    """Extract ResNet50 features for one image batch and cache each file's
    (squeezed) feature vector in image_nn_feature_dict, keyed by file name."""
    stacked = np.concatenate(batch_image, axis=0)
    preprocessed = preprocess_input(stacked)

    raw_features = res50_model.predict(preprocessed)
    squeezed = raw_features.squeeze()
    for idx, file_name in enumerate(batch_file_name):
        image_nn_feature_dict[file_name] = squeezed[idx]
    # print(squeezed)
    print(squeezed.shape)
Example 22
Project: visual_redactions   Author: tribhuvanesh   File: seq.py    Apache License 2.0 5 votes vote down vote up
def img_to_features(X, image_list, model, batch_size=64):
    """Run `model` over `X` in batches and map each image id to its features.

    # Arguments
        X: image array of shape (n_img, h, w, c), aligned with image_list.
        image_list: iterable of (image_id, image_path) pairs.
        model: Keras model; its last output dimension is the feature size.
        batch_size: number of images per predict() call.

    # Returns
        dict mapping image_id -> 1-D feature vector.
    """
    n_img, n_h, n_w, n_c = X.shape
    # Integer division: the original `/` yields a float under Python 3,
    # which is not a valid target count for Progbar.
    n_batches = n_img // batch_size + 1
    n_feat = model.output_shape[-1]

    feat_mat = np.zeros((n_img, n_feat))

    pbar = Progbar(n_batches)

    for b_idx, start_idx in enumerate(range(0, n_img, batch_size)):
        end_idx = min(start_idx + batch_size, n_img)
        this_batch_size = end_idx - start_idx

        bx = X[start_idx:end_idx]
        bx = preprocess_input(bx)
        batch_feat = model.predict(bx)

        feat_mat[start_idx:end_idx] = batch_feat.reshape((this_batch_size, n_feat))
        pbar.update(b_idx)

    # Create a dict: image_id -> feat
    image_id_to_visfeat = dict()
    for i, (image_id, image_path) in enumerate(image_list):
        image_id_to_visfeat[image_id] = feat_mat[i]

    return image_id_to_visfeat
Example 23
Project: visual_redactions   Author: tribhuvanesh   File: nn.py    Apache License 2.0 5 votes vote down vote up
def img_to_features(X, image_list, model, batch_size=64):
    """Run `model` over `X` in batches and map each image id to its features.

    # Arguments
        X: image array of shape (n_img, h, w, c), aligned with image_list.
        image_list: iterable of (image_id, image_path) pairs.
        model: Keras model; its last output dimension is the feature size.
        batch_size: number of images per predict() call.

    # Returns
        dict mapping image_id -> feature row of shape (model.output_shape[-1],).
    """
    n_img, n_h, n_w, n_c = X.shape
    # Integer division: the original `/` yields a float under Python 3,
    # which is not a valid target count for Progbar.
    n_batches = n_img // batch_size + 1
    n_feat = model.output_shape[-1]

    feat_mat = np.zeros((n_img, n_feat))

    pbar = Progbar(n_batches)

    for b_idx, start_idx in enumerate(range(0, n_img, batch_size)):
        end_idx = min(start_idx + batch_size, n_img)
        this_batch_size = end_idx - start_idx

        bx = X[start_idx:end_idx]
        bx = preprocess_input(bx)
        batch_feat = model.predict(bx)

        feat_mat[start_idx:end_idx] = batch_feat
        pbar.update(b_idx)

    # Create a dict: image_id -> feat
    image_id_to_visfeat = dict()
    for i, (image_id, image_path) in enumerate(image_list):
        image_id_to_visfeat[image_id] = feat_mat[i]

    return image_id_to_visfeat
Example 24
Project: Video-decorruption   Author: SyPRX   File: decorruption.py    Apache License 2.0 5 votes vote down vote up
def _get_features(self, lst_files):
        """
        Extract a VGG16 feature vector for every frame.
        :param lst_files: iterable of image arrays (frames)
        :return: list of feature vectors, one per frame
        """
        lst_features = []

        # Load VGG16 model
        self.model = self._model()

        for frame in tqdm(lst_files, desc='Extracting features'):
            # VGG16 requires 224x224 inputs.
            resized = cv2.resize(frame, (224, 224))

            batch = image.img_to_array(resized)
            batch = np.expand_dims(batch, axis=0)
            batch = preprocess_input(batch)

            # Feature vector from the layer selected in _model().
            frame_features = self.model.predict(batch)
            lst_features.append(frame_features[0])

        return lst_features
Example 25
Project: fresh_eyes   Author: ksteinfe   File: server.py    MIT License 5 votes vote down vote up
def do_predict(mdl, data_dict):
    """Classify every image path in `data_dict` with `mdl`.

    Returns a dict keyed like `data_dict`, where each value maps a
    lowercased ImageNet class name to its rounded likelihood.
    """
    keys = data_dict.keys()
    pth_imgs = [data_dict[k] for k in keys]

    # Load and preprocess each image; the model expects 224x224 inputs.
    arr_imgs = []
    for pth_img in pth_imgs:
        img = image.load_img(pth_img)
        arr = image.img_to_array(img)
        arr_imgs.append(resnet50.preprocess_input(arr))

    # Batch all images and run one forward pass.
    x = np.stack(arr_imgs, axis=0)
    predictions = mdl.predict(x)

    # Decode every class for each prediction, not just the top few.
    decoded_predictions = resnet50.decode_predictions(predictions, top=len(predictions[0]))
    response = {}
    for key, prediction in zip(keys, decoded_predictions):
        class_scores = {}
        for imagenet_id, name, likelihood in prediction:
            class_scores[name.lower()] = float(round(likelihood, ROUND_FLOATS_TO))
        response[key] = class_scores

    return response
Example 26
Project: fresh_eyes   Author: ksteinfe   File: serve_keras.py    MIT License 5 votes vote down vote up
def do_predict(mdl, pred_data):
    """Classify every image path in `pred_data` with `mdl`.

    Returns a dict keyed like `pred_data`, where each value maps a
    lowercased ImageNet class name to its rounded likelihood.
    """
    st = time.time()  # kept for the (disabled) timing report
    keys = pred_data.keys()
    pth_imgs = [pred_data[k] for k in keys]

    # Load and preprocess each image; the model expects 224x224 inputs.
    arr_imgs = []
    for pth_img in pth_imgs:
        img = image.load_img(pth_img)
        arr = image.img_to_array(img)
        arr_imgs.append(resnet50.preprocess_input(arr))

    # Batch all images and run one forward pass.
    x = np.stack(arr_imgs, axis=0)
    predictions = mdl.predict(x)

    # Decode every class for each prediction, not just the top few.
    decoded_predictions = resnet50.decode_predictions(predictions, top=len(predictions[0]))
    response = {}
    for key, prediction in zip(keys, decoded_predictions):
        class_scores = {}
        for imagenet_id, name, likelihood in prediction:
            class_scores[name.lower()] = float(round(likelihood, ROUND_FLOATS_TO))
        response[key] = class_scores

    return response
Example 27
Project: lost   Author: l3p-cv   File: cluster_resnet.py    MIT License 5 votes vote down vote up
def main(self):
        """For each image annotated in the current iteration, crop every
        annotation box, classify the crop with ResNet50 and request MIA
        annotations tagged with the predicted similarity classes.
        """
        self.logger.info('Will load keras model')
        model = ResNet50(weights='imagenet')
        self.logger.info('Keras model loaded')
        # NOTE(review): feature_list and img_path_list are never used here.
        feature_list = []
        img_path_list = []
        # Request only MIA annotations for annotations of first stage
        # that have been annotated in current iteration cycle.
        img_annos = list(filter(lambda x: x.iteration == self.iteration, 
            self.inp.img_annos))
        total = len(img_annos)
        for index, img_anno in enumerate(img_annos):
            annos = img_anno.to_vec('anno.data')
            if annos:
                types = img_anno.to_vec('anno.dtype')
                img = skimage.io.imread(self.get_abs_path(img_anno.img_path))
                # Crop each annotated box with a small context margin.
                crops, anno_boxes = anno_helper.crop_boxes(annos, types, 
                    img, context=0.01)
                sim_classes = []
                for crop in crops:
                    # img = image.load_img(img_path, target_size=(224, 224))
                    # Resize each crop to the 224x224 input ResNet50 expects.
                    crop_img = image.img_to_array(image.array_to_img(crop, scale=False).resize((224,224)))
                    x = keras_image.img_to_array(crop_img)
                    x = np.expand_dims(x, axis=0)
                    x = preprocess_input(x)
                    # extract features
                    scores = model.predict(x)
                    # Most probable ImageNet class acts as a similarity class.
                    sim_classes.append(np.argmax(scores))
                self.outp.request_annos(img_anno.img_path, 
                    annos=annos, anno_types=types, anno_sim_classes=sim_classes)
                self.logger.info('Requested annotation for: {}\n{}\n{}'.format(img_anno.img_path, types, sim_classes))
                self.update_progress(index*100/total)
Example 28
Project: lost   Author: l3p-cv   File: cluster_kmeans.py    MIT License 5 votes vote down vote up
def main(self):
        """Cluster CNN features of all raw input images with KMeans and
        request an annotation for every image tagged with its cluster id.

        Features are the 'avg_pool' activations of an ImageNet-pretrained
        ResNet50; the cluster count comes from the 'n-clusters' argument.
        """
        n_cluster = int(self.get_arg('n-clusters'))
        self.logger.info('Will load keras model')
        base_model = ResNet50(weights='imagenet')
        self.logger.info('Keras model loaded')
        layer_code = 'avg_pool'
        # base_model.summary()
        model = Model(inputs=base_model.input, outputs=base_model.get_layer(layer_code).output)
        feature_list = []
        img_path_list = []
        self.logger.info('Will compute CNN features')
        for raw_file in self.inp.raw_files:
            media_path = raw_file.path  # (duplicate assignment removed)
            file_list = os.listdir(media_path)
            total = float(len(file_list))
            for index, img_file in enumerate(file_list):
                img_path = os.path.join(media_path, img_file)
                img_path_list.append(img_path)
                img = image.load_img(img_path, target_size=(224, 224))
                x = keras_image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                # extract features
                features = model.predict(x)
                feature_list.append(features[0].flatten())
                # Feature extraction accounts for the first 70% of progress.
                self.update_progress(index*70/total)
        self.logger.info('Computed CNN feature!')
        self.logger.info('Start KMeans clustering')
        kmeans = KMeans(n_clusters=n_cluster, random_state=0).fit(feature_list)
        self.logger.info('Clustering completed!')
        counter = 0
        for sim_class, img_path in zip(kmeans.labels_, img_path_list):
            self.outp.request_annos(img_path, img_sim_class=sim_class)
            self.logger.info('Requested annotation for: {} (cluster: {})'.format(img_path, sim_class))
            counter += 1
            # The remaining 30% of progress covers the annotation requests.
            self.update_progress(70 + (counter*30/len(img_path_list)))
Example 29
Project: dog-breed-web-classifier   Author: JacobPolloreno   File: util.py    GNU General Public License v3.0 5 votes vote down vote up
def preprocess_inception(img):
    """Prepare image for the Inception model.

    :param img: PIL Image object
    :return: preprocessed input tensor for the Inception model
    """
    from keras.applications.inception_v3 import preprocess_input
    img = preprocess_input(img_to_tensor(img))
    return img
Example 30
Project: dog-breed-web-classifier   Author: JacobPolloreno   File: util.py    GNU General Public License v3.0 5 votes vote down vote up
def preprocess_resnet(img):
    """Prepare an image for the Resnet50 model and return its bottleneck
    features."""
    from keras.applications.resnet50 import preprocess_input
    tensor = preprocess_input(img_to_tensor(img))
    return extract_bottleneck_features_resnet(tensor)
Example 31
Project: Luz-Vision   Author: general-labs   File: imageclassifier.py    GNU General Public License v3.0 5 votes vote down vote up
def classify_image(path):
    """Classify the image saved at 'temp/temp.png' with a fresh ResNet50.

    # Arguments
        path: URL of the original image; only echoed back in the response.

    # Returns
        Dict with the top-1 ImageNet class name, its confidence in percent,
        and the original URL.
    """
    # Start from a clean graph so repeated requests do not leak state.
    K.clear_session()
    classifier = ResNet50()
    # NOTE(review): the image is always read from 'temp/temp.png'; the
    # 'path' argument is only returned to the caller — confirm the caller
    # saves the upload to this location beforehand.
    new_image = image.load_img('temp/temp.png', target_size=(224, 224))
    transformed_image = image.img_to_array(new_image)
    # ResNet50 expects a 4D batch: (1, 224, 224, 3).
    transformed_image = np.expand_dims(transformed_image, axis=0)
    # RGB -> BGR conversion and ImageNet mean subtraction.
    transformed_image = preprocess_input(transformed_image)
    y_pred = classifier.predict(transformed_image)

    # Decode once (a redundant, discarded decode_predictions call was removed).
    label = decode_predictions(y_pred)
    # retrieve the most likely result, i.e. highest probability
    decoded_label = label[0][0]

    print("######===============########")
    # print the classification
    print('%s (%.2f%%)' % (decoded_label[1], decoded_label[2]*100 ))
    print("######===============########")

    # Drop references before clearing the session to release memory.
    del classifier, new_image, transformed_image, y_pred, label
    K.clear_session()
    return ({"Prediction": decoded_label[1], "confidence": decoded_label[2] * 100, "url": path})
Example 32
Project: Image-AI   Author: general-labs   File: utils.py    GNU General Public License v3.0 5 votes vote down vote up
def classify_image(path):
    """Download the image at 'path', classify it with a fresh ResNet50 and
    return the top-1 prediction.

    # Arguments
        path: URL of the image to download and classify.

    # Returns
        Dict with the top-1 ImageNet class name, its confidence in percent,
        and the original URL.
    """
    # Fetch the remote image to a local temp file for Keras to read.
    with urllib.request.urlopen(path) as url:
        with open('temp.jpg', 'wb') as f:
            f.write(url.read())
    # Start from a clean graph so repeated requests do not leak state.
    K.clear_session()
    classifier = ResNet50()
    new_image = image.load_img('temp.jpg', target_size=(224, 224))
    transformed_image = image.img_to_array(new_image)
    # ResNet50 expects a 4D batch: (1, 224, 224, 3).
    transformed_image = np.expand_dims(transformed_image, axis=0)
    # RGB -> BGR conversion and ImageNet mean subtraction.
    transformed_image = preprocess_input(transformed_image)
    y_pred = classifier.predict(transformed_image)

    # Decode once (a redundant, discarded decode_predictions call was removed).
    label = decode_predictions(y_pred)
    # retrieve the most likely result, i.e. highest probability
    decoded_label = label[0][0]

    print("######===============########")
    # print the classification
    print('%s (%.2f%%)' % (decoded_label[1], decoded_label[2]*100 ))
    print("######===============########")

    # Drop references before clearing the session to release memory.
    del classifier, new_image, transformed_image, y_pred, label
    K.clear_session()
    return ({"Prediction": decoded_label[1], "confidence": decoded_label[2] * 100, "url": path})
Example 33
Project: Image-AI   Author: general-labs   File: imageclassifier.py    GNU General Public License v3.0 5 votes vote down vote up
def classify_image(path):
    """Download the image at 'path', classify it with a fresh ResNet50 and
    return the top-1 prediction.

    # Arguments
        path: URL of the image to download and classify.

    # Returns
        Dict with the top-1 ImageNet class name, its confidence in percent,
        and the original URL.
    """
    # Fetch the remote image to a local temp file for Keras to read.
    with urllib.request.urlopen(path) as url:
        with open('temp/temp.jpg', 'wb') as f:
            f.write(url.read())
    # Start from a clean graph so repeated requests do not leak state.
    K.clear_session()
    classifier = ResNet50()
    new_image = image.load_img('temp/temp.jpg', target_size=(224, 224))
    transformed_image = image.img_to_array(new_image)
    # ResNet50 expects a 4D batch: (1, 224, 224, 3).
    transformed_image = np.expand_dims(transformed_image, axis=0)
    # RGB -> BGR conversion and ImageNet mean subtraction.
    transformed_image = preprocess_input(transformed_image)
    y_pred = classifier.predict(transformed_image)

    # Decode once (a redundant, discarded decode_predictions call was removed).
    label = decode_predictions(y_pred)
    # retrieve the most likely result, i.e. highest probability
    decoded_label = label[0][0]

    print("######===============########")
    # print the classification
    print('%s (%.2f%%)' % (decoded_label[1], decoded_label[2]*100 ))
    print("######===============########")

    # Drop references before clearing the session to release memory.
    del classifier, new_image, transformed_image, y_pred, label
    K.clear_session()
    return ({"Prediction": decoded_label[1], "confidence": decoded_label[2] * 100, "url": path})
Example 34
Project: jenkins-x-seldon-core-sandbox   Author: SeldonIO   File: ONNXResNet.py    Apache License 2.0 5 votes vote down vote up
def predict(self,X,features_names):
        """Run the wrapped ResNet on a batch and log its shape and top-5."""
        print(X.shape)
        # Torch-style normalization as expected by the wrapped model.
        batch = preprocess_input(X, mode='torch')
        scores = self.resnet(batch)
        print(decode_predictions(scores[0], top=5))
        return scores
Example 35
Project: DeepTL-Lane-Change-Classification   Author: Ekim-Yurtsever   File: dataset.py    MIT License 5 votes vote down vote up
def extract_feature(self, img_path):
        """Load an image, preprocess it and store the CNN features on self."""
        # Load at the model's 331x331 input size and build a 1-image batch.
        pil_img = image.load_img(img_path, target_size=(331, 331))
        batch = np.expand_dims(image.img_to_array(pil_img), axis=0)
        batch = preprocess_input(batch)

        self.features = self.model.predict(batch)
Example 36
Project: DeepTL-Lane-Change-Classification   Author: Ekim-Yurtsever   File: visualization.py    MIT License 5 votes vote down vote up
def visualize_activation_with_image(self, image_path, filter_id=0, layer_name='activation_49', save_option=0, save_path='default'):
        """Show or save one activation map of 'layer_name' for an image.

        save_option: 1 -> write the feature map to save_path,
                     0 -> draw it on the current axes.
        """
        # Truncate the network at the requested layer to expose activations.
        self.activation_model = Model(inputs=self.model.input, outputs=self.model.get_layer(layer_name).output)
        pil_img = image.load_img(image_path, target_size=(224, 224))
        batch = np.expand_dims(image.img_to_array(pil_img), axis=0)
        batch = preprocess_input(batch)

        activation_output = self.activation_model.predict(batch)
        axes = plt.subplot(111)
        axes.axis('off')

        if save_option == 1:
            plt.imsave(save_path, activation_output[0, :, :, filter_id])
        elif save_option == 0:
            axes.imshow(activation_output[0, :, :, filter_id])
Example 37
Project: tensorflow_serving_examples   Author: movchan74   File: serving_test.py    MIT License 5 votes vote down vote up
def preprocess_image(img_path):
    """Load img_path as a preprocessed (1, 224, 224, 3) batch."""
    pil_img = image.load_img(img_path, target_size=(224, 224))
    tensor = np.expand_dims(image.img_to_array(pil_img), axis=0)
    return preprocess_input(tensor)
Example 38
Project: tensorflow_serving_examples   Author: movchan74   File: serving_service.py    MIT License 5 votes vote down vote up
def preprocess_image(img):
    """Load the given image file as a preprocessed (1, 224, 224, 3) batch."""
    loaded = image.load_img(img, target_size=(224, 224))
    batch = np.expand_dims(image.img_to_array(loaded), axis=0)
    return preprocess_input(batch)
Example 39
Project: tensorflow_serving_examples   Author: movchan74   File: keras_test.py    MIT License 5 votes vote down vote up
def preprocess_image(img_path):
    """Read an image from disk and prepare it as a model-ready 4D batch."""
    arr = image.img_to_array(image.load_img(img_path, target_size=(224, 224)))
    return preprocess_input(np.expand_dims(arr, axis=0))
Example 40
Project: ICIAR2018   Author: alexander-rakhlin   File: models.py    MIT License 5 votes vote down vote up
def predict(self, x):
        """Run the wrapped model on a batch after ResNet50 preprocessing."""
        if self.data_format == "channels_first":
            # Move the channel axis to position 1 for this backend layout.
            x = x.transpose(0, 3, 1, 2)
        batch = preprocess_resnet(x.astype(K.floatx()))
        return self.model.predict(batch, batch_size=self.batch_size)
Example 41
Project: smart_categorizer   Author: kostyaev   File: categorize.py    Apache License 2.0 5 votes vote down vote up
def get_features(paths):
    """Extract pooled CNN features for every image path, 16 images at a time."""
    features = []
    for batch in tqdm.tqdm(chunks(paths, 16)):
        arrays = np.array([image.img_to_array(image.load_img(f, target_size=(224, 224)))
                           for f in batch])
        preds = model.predict(preprocess_input(arrays))
        # Squeeze the 1x1 spatial dims, keeping the channel vector per image.
        features.extend(preds[:, 0, 0, :])
    return np.array(features)
Example 42
Project: dMazeRunner   Author: cmlasu   File: run_optimizer.py    MIT License 5 votes vote down vote up
def download_block_from_keras(args):
    """Fetch pretrained ResNet50 weights plus a sample image, build the
    NNVM graph and return its per-layer dataflow description."""
    import keras
    supported_models = ["resnet50"]

    # Guard clause: bail out immediately on unsupported models.
    if args.model != "resnet50":
        print("not supported model; supported models:", supported_models)
        exit()

    weights_url = ''.join(['https://github.com/fchollet/deep-learning-models/releases/',
                        'download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'])
    weights_file = 'resnet50_weights.h5'
    download(weights_url, weights_file)
    keras_resnet50 = keras.applications.resnet50.ResNet50(include_top=True, weights=None,
                                                        input_shape=(224, 224, 3), classes=1000)
    keras_resnet50.load_weights('resnet50_weights.h5')

    from PIL import Image
    from keras.applications.resnet50 import preprocess_input
    img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
    download(img_url, 'cat.png')
    img = Image.open('cat.png').resize((224, 224))
    # Preprocess, then reorder NHWC -> NCHW for the NNVM frontend.
    data = np.array(img)[np.newaxis, :].astype('float32')
    data = preprocess_input(data).transpose([0, 3, 1, 2])

    shape_dict = {'input_1': data.shape}
    sym, params = nnvm.frontend.from_keras(keras_resnet50)
    target = 'llvm'

    model_layers = get_dataflow(sym, target, shape_dict, params, args.batch_size)
    return model_layers
Example 43
Project: seldon-core   Author: SeldonIO   File: ONNXResNet.py    Apache License 2.0 5 votes vote down vote up
def predict(self,X,features_names):
        """Score a batch with the wrapped ONNX ResNet and log the top-5."""
        print(X.shape)
        inputs = preprocess_input(X, mode='torch')
        outputs = self.resnet(inputs)
        print(decode_predictions(outputs[0], top=5))
        return outputs
Example 44
Project: fasnet   Author: ycfang-lab   File: extract_feature.py    Apache License 2.0 5 votes vote down vote up
def extract(nb_feature=2048):
    """Extract per-frame CNN features for every dataset/split/scale factor
    and store them as LSTM-ready sequences in HDF5 files.

    # Arguments
        nb_feature: length of the feature vector the Extractor produces for
            a single frame.

    Side effects: writes '{data_save_dir}/{dataset}-{split}-lstm.h5', with a
    '{factor}/data' set of shape (N, 25, nb_feature) and a '{factor}/label'
    set of shape (N, 1) per scale factor.
    """
    dataset = ['casia', 'replayattack']
    datatype = ['train', 'devel', 'test']
    factors = (1.8, 2.0, 2.2)

    model = Extractor()

    # One HDF5 file per (dataset, split) pair, one group per scale factor.
    for ds, dt in product(dataset, datatype):

        save_path = '{}/{}-{}-lstm.h5'.format(data_save_dir, ds, dt)

        with hp.File(save_path, 'w') as h:
            for fct in factors:
                # Resizable datasets; the initial length of 1 is grown below
                # once the CSV row count is known.
                # NOTE(review): maxshape uses cfg.nb_feature while shape and
                # chunks use the nb_feature argument — confirm these agree.
                x = h.create_dataset(
                    '{}/data'.format(fct), (1, 25, nb_feature), maxshape=(None, 25, cfg.nb_feature),
                    chunks=(64, 25, nb_feature), dtype='float32'
                )
                y = h.create_dataset(
                    '{}/label'.format(fct), (1, 1), chunks=(64, 1), maxshape=(None, 1),
                    dtype='float32'
                )

                # The CSV lists one video (path + label) per row.
                csv_path = 'csv/{}-{}-{}-lstm.csv'.format(ds, dt, fct)
                csv_data = pd.read_csv(csv_path, sep=',')

                length = len(csv_data)
                x.resize([length, 25, nb_feature])
                y.resize([length, 1])

                for i in tqdm(range(length)):
                    label = csv_data['label'][i]
                    video = csv_data['path'][i]
                    frames = read_video(video)
                    # Reverse the channel axis (RGB<->BGR) before preprocessing.
                    frames = frames[:, :, :, ::-1]
                    frames = preprocess_input(frames)
                    features = model.predict(frames)
                    # Store each video as a 25-frame feature sequence.
                    x[i] = features.reshape((-1, 25, nb_feature))
                    y[i] = np.asarray([label]).reshape((-1, 1))
Example 45
Project: ResNetCAM-keras   Author: alexisbcook   File: ResNet_CAM.py    MIT License 5 votes vote down vote up
def pretrained_path_to_tensor(img_path):
    """Load img_path as a preprocessed 4D tensor (1, 224, 224, 3)."""
    # PIL image -> (224, 224, 3) array.
    pil_img = image.load_img(img_path, target_size=(224, 224))
    arr = image.img_to_array(pil_img)
    # Add the batch axis, then apply RGB->BGR + ImageNet mean subtraction.
    batch = np.expand_dims(arr, axis=0)
    return preprocess_input(batch)
Example 46
Project: Classify-ResNetCAM-keras   Author: jiye-ML   File: ResNet_CAM.py    MIT License 5 votes vote down vote up
def pretrained_path_to_tensor(img_path):
    """Read an image file and return it as a ResNet-ready 4D tensor."""
    # Load at 224x224, convert to an array and prepend the batch axis.
    loaded = image.load_img(img_path, target_size=(224, 224))
    tensor = np.expand_dims(image.img_to_array(loaded), axis=0)
    # RGB -> BGR plus mean-pixel subtraction happens inside preprocess_input.
    return preprocess_input(tensor)
Example 47
Project: satellite-imagery-change-detection   Author: soroushhashemifar   File: feat.py    GNU General Public License v3.0 4 votes vote down vote up
def extra_feat(img_path):
    """Extract multi-layer ResNet50 activations for an image, resize every
    map to RESIZE_SIZE and concatenate them along the channel axis.

    Returns the concatenated tensor and the raw image array.
    """
    #Using a RESNET50 as feature extractor
    base_model = ResNet50(weights='imagenet',include_top=False)
    img = image.load_img(img_path, target_size=RESIZE_SIZE)
    x_img = image.img_to_array(img)
    batch = preprocess_input(np.expand_dims(x_img, axis=0))

    # Layer indices whose activations are kept, in concatenation order
    # (matches the original hand-unrolled x1..x21 selection).
    layer_ids = (10, 15, 17, 22, 30, 37, 42, 46, 47, 50, 27, 33, 43, 49)
    resized_maps = [
        tf.image.resize_images(get_activations(base_model, layer_id, batch)[0], RESIZE_SIZE)
        for layer_id in layer_ids
    ]

    F = tf.concat(resized_maps, 3)
    return F, x_img
Example 48
Project: nyoka   Author: nyoka-pmml   File: testScoreWithAdapaRetinaNet.py    Apache License 2.0 4 votes vote down vote up
def test_01(self):
        """End-to-end check: zserver-scored RetinaNet PMML output matches
        the local Keras model's predictions."""
        RetinanetToPmml(
            model=self.model,
            input_shape=(224,224,3),
            input_format='image',
            backbone_name='resnet',
            trained_classes=self.classes,
            pmml_file_name="RetinaNet.pmml"
        )
        model_name  = self.adapa_utility.upload_to_zserver('RetinaNet.pmml')
        z_boxes, z_scores, z_labels = self.adapa_utility.score_in_zserver(model_name, 'nyoka/tests/test_image_retinanet.png','RN')
        img = load_img('nyoka/tests/test_image_retinanet.png')
        img = preprocess_input(img_to_array(img))
        batch = np.expand_dims(img, axis=0)
        boxes, scores, labels = self.model.predict(batch)
        scores = scores.flatten()
        boxes = boxes.reshape(-1, 4)
        labels = labels.flatten()

        # Count disagreements after rounding to the precision zserver reports.
        scores_cnt = sum(1 for a, b in zip(scores, z_scores)
                         if "{:.4f}".format(a) != "{:.4f}".format(b))

        # zserver returns class names; map them back to indices to compare.
        labels_cnt = sum(1 for a, b in zip(labels, z_labels)
                         if a != self.classes.index(b))

        # Compare box coordinates element-wise at 2-decimal precision.
        boxes_cnt = sum(1 for a, b in zip(boxes, z_boxes)
                        for a_, b_ in zip(a, b)
                        if "{:.2f}".format(a_) != "{:.2f}".format(b_))

        self.assertEqual(scores_cnt, 0)
        self.assertEqual(labels_cnt, 0)
        self.assertEqual(boxes_cnt, 0)
Example 49
Project: globus_project   Author: LutzFabio   File: cnn_globus.py    GNU General Public License v3.0 4 votes vote down vote up
def create_generators(self):
        """Build the train/test ImageDataGenerators and the combined
        multi-output generators derived from them."""

        # Training generator: preprocessing plus the full augmentation set.
        augmenting_gen = ImageDataGenerator(
            preprocessing_function=preprocess_input,
            rescale=self.im_resc,
            rotation_range=self.rot_range,
            width_shift_range=self.width_shift,
            height_shift_range=self.height_shift,
            shear_range=self.shear_rng,
            zoom_range=self.zoom_rng,
            horizontal_flip=self.hor_flip,
            vertical_flip=self.vert_flip,
            fill_mode=self.fill_mod)

        # Test generator: preprocessing and rescaling only, no augmentation.
        plain_gen = ImageDataGenerator(
            preprocessing_function=preprocess_input,
            rescale=self.im_resc)

        # Wrap the base generators into multi-output generators.
        self.train_gen = self.combine_generators(augmenting_gen,
                                                 self.train_df,
                                                 self.batch_size_train)
        self.test_gen = self.combine_generators(plain_gen,
                                                self.test_df,
                                                self.batch_size_test)

        # The custom generators cannot be reset, so keep a separate copy
        # exclusively for prediction runs.
        self.test_gen_pred = self.combine_generators(plain_gen,
                                                     self.test_df,
                                                     self.batch_size_test)

        # Derive attributes / CSV side-products that the custom generators
        # cannot provide themselves.
        self.create_pseudo_generators(augmenting_gen, plain_gen)

        return
Example 50
Project: Kernelized_Correlation_Filter   Author: stevenwudi   File: KCFpy_saliency.py    GNU General Public License v3.0 4 votes vote down vote up
def get_subwindow(self, im, pos, sz):
        """Extract a replication-padded sub-window from image IM.

        The window is centered at POS ([y, x]) with size SZ ([height,
        width]); indices outside the image are clamped to the borders so
        border pixels are replicated.
        """
        if np.isscalar(sz):  # square sub-window
            sz = [sz, sz]

        ys = (np.floor(pos[0]) + np.arange(sz[0], dtype=int) - np.floor(sz[0] / 2)).astype(int)
        xs = (np.floor(pos[1]) + np.arange(sz[1], dtype=int) - np.floor(sz[1] / 2)).astype(int)

        # Replication padding: clamp out-of-bounds indices to the borders.
        ys = np.clip(ys, 0, self.im_sz[0] - 1)
        xs = np.clip(xs, 0, self.im_sz[1] - 1)

        if self.feature_type in ('raw', 'dsst'):
            patch = im[np.ix_(ys, xs)]
            # Rescale to the first patch's size if the window size changed.
            if np.all(self.first_patch_sz == patch.shape[:2]):
                return patch
            return imresize(patch, self.first_patch_sz) / 255.
        elif self.feature_type in ('vgg', 'resnet50', 'vgg_rnn', 'cnn', 'multi_cnn'):
            # Channels-first layout: slice all 3 channels at the window.
            channels = np.array(range(3))
            return im[np.ix_(channels, ys, xs)]
Example 51
Project: Kernelized_Correlation_Filter   Author: stevenwudi   File: KCFpy_saliency.py    GNU General Public License v3.0 4 votes vote down vote up
def get_features(self):
        """Compute cosine-windowed feature maps of self.im_crop for the
        configured self.feature_type.

        # Returns
            A feature array (or, for 'multi_cnn', a list of feature arrays),
            each min-max normalized and multiplied by the cosine window.

        # Raises
            NotImplementedError: for an unrecognized feature_type.
        """
        if self.feature_type == 'raw':
            #using only grayscale:
            # NOTE(review): a 2D (already-gray) crop leaves 'features'
            # undefined and would raise at the final return — confirm crops
            # are always 3-channel here.
            if len(self.im_crop.shape) == 3:
                if self.sub_feature_type == 'gray':
                    img_gray = np.mean(self.im_crop, axis=2)
                    img_gray = img_gray - img_gray.mean()
                    features = np.multiply(img_gray, self.cos_window)
                else:
                    img_colour = self.im_crop - self.im_crop.mean()
                    features = np.multiply(img_colour, self.cos_window[:, :, None])

        elif self.feature_type == 'dsst':
            img_colour = self.im_crop - self.im_crop.mean()
            features = np.multiply(img_colour, self.cos_window[:, :, None])

        elif self.feature_type == 'vgg' or self.feature_type == 'resnet50':
            # Each backbone has its own channel-mean preprocessing.
            if self.feature_type == 'vgg':
                from keras.applications.vgg19 import preprocess_input
            elif self.feature_type == 'resnet50':
                from keras.applications.resnet50 import preprocess_input
            x = np.expand_dims(self.im_crop.copy(), axis=0)
            x = preprocess_input(x)
            features = self.extract_model.predict(x)
            features = np.squeeze(features)
            # Min-max normalize, channels-last, then apply the cosine window.
            features = (features.transpose(1, 2, 0) - features.min()) / (features.max() - features.min())
            features = np.multiply(features, self.cos_window[:, :, None])

        elif self.feature_type == 'vgg_rnn' or self.feature_type=='cnn':
            from keras.applications.vgg19 import preprocess_input
            x = imresize(self.im_crop.copy(), self.resize_size)
            x = x.transpose((2, 0, 1)).astype(np.float64)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            features = self.extract_model.predict(x)
            features = np.squeeze(features)
            features = (features.transpose(1, 2, 0) - features.min()) / (features.max() - features.min())
            features = np.multiply(features, self.cos_window[:, :, None])

        elif self.feature_type == "multi_cnn":
            from keras.applications.vgg19 import preprocess_input
            x = imresize(self.im_crop.copy(), self.resize_size)
            x = x.transpose((2, 0, 1)).astype(np.float64)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            features_list = self.extract_model_function(x)
            for i, features in enumerate(features_list):
                features = np.squeeze(features)
                features = (features.transpose(1, 2, 0) - features.min()) / (features.max() - features.min())
                # Optionally re-weight channels by the saliency correlation.
                if self.saliency_method==1 and self.feature_correlation is not None:
                    features = np.multiply(features, self.feature_correlation[i][None, None, :])
                features_list[i] = np.multiply(features, self.cos_window[i][:, :, None])
            return features_list
        else:
            # BUG FIX: the original "assert 'Non implemented!'" always passed
            # (a non-empty string is truthy) and execution fell through to an
            # undefined 'features'. Fail loudly instead.
            raise NotImplementedError('feature type {} is not implemented'.format(self.feature_type))

        return features
Example 52
Project: Kernelized_Correlation_Filter   Author: stevenwudi   File: KCF_CNN_RNN.py    GNU General Public License v3.0 4 votes vote down vote up
def get_subwindow(self, im, pos, sz):
        """Return a sub-window of IM centered at POS ([y, x]) of size SZ
        ([height, width]), replicating border values for out-of-image
        pixels."""
        if np.isscalar(sz):  # square sub-window
            sz = [sz, sz]

        row_idx = (np.floor(pos[0]) + np.arange(sz[0], dtype=int) - np.floor(sz[0] / 2)).astype(int)
        col_idx = (np.floor(pos[1]) + np.arange(sz[1], dtype=int) - np.floor(sz[1] / 2)).astype(int)

        # Clamp indices into the image so border pixels are replicated.
        row_idx = np.clip(row_idx, 0, self.im_sz[0] - 1)
        col_idx = np.clip(col_idx, 0, self.im_sz[1] - 1)

        if self.feature_type in ('raw', 'dsst'):
            window = im[np.ix_(row_idx, col_idx)]
            # Keep the very first patch size; rescale later windows to it.
            if np.all(self.first_patch_sz == window.shape[:2]):
                return window
            return imresize(window, self.first_patch_sz) / 255.
        elif self.feature_type in ('vgg', 'resnet50', 'vgg_rnn', 'cnn', 'multi_cnn'):
            # Channels-first input: take all 3 channels at the window.
            return im[np.ix_(np.array(range(3)), row_idx, col_idx)]
Example 53
Project: Kernelized_Correlation_Filter   Author: stevenwudi   File: KCFpy_debug.py    GNU General Public License v3.0 4 votes vote down vote up
def get_subwindow(self, im, pos, sz):
        """Extract a replication-padded sub-window of IM centered at POS
        ([y, x]) with size SZ ([height, width]); out-of-image indices are
        clamped to the borders."""
        if np.isscalar(sz):  # square sub-window
            sz = [sz, sz]

        ys = (np.floor(pos[0]) + np.arange(sz[0], dtype=int) - np.floor(sz[0] / 2)).astype(int)
        xs = (np.floor(pos[1]) + np.arange(sz[1], dtype=int) - np.floor(sz[1] / 2)).astype(int)

        # Clamp coordinates into the image to emulate replication padding.
        ys = np.clip(ys, 0, self.im_sz[0] - 1)
        xs = np.clip(xs, 0, self.im_sz[1] - 1)

        if self.feature_type in ('raw', 'dsst'):
            patch = im[np.ix_(ys, xs)]
            # Rescale to the first patch's size when the window has grown.
            if np.all(self.first_patch_sz == patch.shape[:2]):
                return patch
            return imresize(patch, self.first_patch_sz) / 255.
        elif self.feature_type in ('vgg', 'resnet50', 'vgg_rnn', 'cnn',
                                   'multi_cnn', 'HDT'):
            # Channels-first layout: index all 3 channels at the window.
            chans = np.array(range(3))
            return im[np.ix_(chans, ys, xs)]
Example 54
Project: HistologyCancerDiagnosisDeepPersistenceHomology   Author: KitwareMedical   File: rgb_data_loader.py    Apache License 2.0 4 votes vote down vote up
def RGBTestData(config):

    print 'loading RGB data'
    path_mal_test, _, files_malignant_test = next(os.walk( os.path.join(config.test_dir, 'malignant', 'rgb') ))
    path_ben_test, _, files_benign_test = next(os.walk( os.path.join(config.test_dir, 'benign', 'rgb') ))

    mal_paths_test = glob.glob( os.path.join(path_mal_test, '*') )
    ben_paths_test = glob.glob( os.path.join(path_ben_test, '*') )

    mal_outputs_test = [config.label.malignant] * len(mal_paths_test)
    ben_outputs_test = [config.label.benign] * len(ben_paths_test)

    test_paths = mal_paths_test + ben_paths_test
    test_outputs = mal_outputs_test + ben_outputs_test

    z = zip(test_paths, test_outputs)
    random.shuffle(z)
    test_paths, test_outputs = zip(*z)

    ref_std_lab=(0.57506023, 0.10403329, 0.01364062)
    ref_mu_lab=(8.63234435, -0.11501964, 0.03868433)

    if os.path.isfile('configs/stats.pkl'):
        with open('configs/stats.pkl', 'rb') as f:
            stats = pickle.load(f)
        print '###################  Stats loaded Test ####################'
        config['stats'] = stats
    else:
        print 'No stats file found (To obtain Mu and Sigma from original whole image).'

    len_test = len(test_outputs)

    X = np.zeros((len_test, 256, 256, 3))
    Y = [-1] * len_test


    for i in range(len_test):

        img = skimage.io.imread(test_paths[i])
        if img.shape == (1024, 1024, 3):
                img = img[::4, ::4, :]

        image_id = int(float(re.findall("\d+\.\d+", test_paths[i])[0]))

        if image_id in stats.keys():
            [src_mu, src_sigma] = stats[image_id]
            img_nmzd = htk_cnorm.reinhard(img, ref_mu_lab, ref_std_lab, src_mu=src_mu, src_sigma=src_sigma).astype('float')
        else:
            print '#### stats for %d not present' % (image_id)
            img_nmzd = htk_cnorm.reinhard(img, ref_mu_lab, ref_std_lab).astype('float')

        img = preprocess_resnet(img_nmzd)

        X[i] = img
        Y[i] = test_outputs[i]

    return (X, Y) 
Example 55
Project: HistologyCancerDiagnosisDeepPersistenceHomology   Author: KitwareMedical   File: combined_data_loader.py    Apache License 2.0 4 votes vote down vote up
def __getitem__(self, idx):
    """Assemble training batch *idx* for the two-input (RGB + persistence) model.

    Loads each tile's JPEG image and pickled persistence image, color-normalizes
    the RGB tile with Reinhard normalization, and applies the ResNet
    preprocessing function before stacking into batch arrays.

    # Arguments
        idx: Integer batch index into self.train_files / self.train_outputs.

    # Returns
        ([X_RGB, X_Per], Y): a (batch_size, 256, 256, 3) ResNet-preprocessed
        RGB batch, a (batch_size, 32, 32, 1) persistence-image batch, and
        one-hot labels of shape (batch_size, 2).
    """
    batchx = self.train_files[idx * self.batch_size:(idx + 1) * self.batch_size]
    batchy = self.train_outputs[idx * self.batch_size:(idx + 1) * self.batch_size]

    X_RGB = np.zeros((self.batch_size, 256, 256, 3))
    X_Per = np.zeros((self.batch_size, 32, 32, 1))
    Y = np.zeros((self.batch_size, 2))

    for i in range(self.batch_size):
        fnameRGB = batchx[i] + '.jpg'
        fnamePer = batchx[i] + '.pkl'

        if batchy[i] == self.label['malignant']:
            pathRGB = os.path.join(self.path_mal_train, fnameRGB)
            pathPer = os.path.join(self.path_mal_train_per, fnamePer)
        elif batchy[i] == self.label['benign']:
            pathRGB = os.path.join(self.path_ben_train, fnameRGB)
            pathPer = os.path.join(self.path_ben_train_per, fnamePer)
        # NOTE(review): a label outside {malignant, benign} would leave
        # pathRGB/pathPer unbound (NameError) — presumed unreachable here.

        img = skimage.io.imread(pathRGB)
        # Full-resolution 1024x1024 tiles are downsampled 4x to the
        # expected 256x256 input size.
        if img.shape == (1024, 1024, 3):
            img = img[::4, ::4, :]

        # The source-image id is encoded as a float-looking token in the path.
        image_id = int(float(re.findall(r"\d+\.\d+", pathRGB)[0]))

        # Reinhard color normalization: use cached per-image (mu, sigma)
        # when available, otherwise let the normalizer estimate them.
        # Fix: guard and lookup now both use self.stats (the original
        # checked self.config['stats'] but indexed self.stats, which can
        # raise KeyError if the two ever diverge).
        if image_id in self.stats:
            [src_mu, src_sigma] = self.stats[image_id]
            img_nmzd = htk_cnorm.reinhard(img, self.ref_mu_lab, self.ref_std_lab,
                                          src_mu=src_mu, src_sigma=src_sigma).astype('float')
        else:
            print('#### stats for %d not present' % image_id)
            img_nmzd = htk_cnorm.reinhard(img, self.ref_mu_lab, self.ref_std_lab).astype('float')

        imgRGB = preprocess_resnet(img_nmzd)

        # Persistence image is stored pickled alongside the tile.
        with open(pathPer, 'rb') as f:
            per_raw = pickle.load(f)
        imgPer = self.preprocess_persistence(per_raw)

        X_RGB[i] = imgRGB
        X_Per[i] = imgPer
        Y[i] = to_categorical(batchy[i], num_classes=2)

    return ([X_RGB, X_Per], Y)
Example 56
Project: HistologyCancerDiagnosisDeepPersistenceHomology   Author: KitwareMedical   File: combined_data_loader.py    Apache License 2.0
def __getitem__(self, idx):
    """Assemble cross-validation batch *idx* for the two-input model.

    Mirrors the training loader: reads each tile's JPEG and pickled
    persistence image, Reinhard-normalizes the RGB tile, applies ResNet
    preprocessing, and stacks the results into batch arrays.

    # Arguments
        idx: Integer batch index into self.cv_files / self.cv_outputs.

    # Returns
        ([X_RGB, X_Per], Y): a (batch_size, 256, 256, 3) ResNet-preprocessed
        RGB batch, a (batch_size, 32, 32, 1) persistence-image batch, and
        one-hot labels of shape (batch_size, 2).
    """
    batchx = self.cv_files[idx * self.batch_size:(idx + 1) * self.batch_size]
    batchy = self.cv_outputs[idx * self.batch_size:(idx + 1) * self.batch_size]

    X_RGB = np.zeros((self.batch_size, 256, 256, 3))
    X_Per = np.zeros((self.batch_size, 32, 32, 1))
    Y = np.zeros((self.batch_size, 2))

    for i in range(self.batch_size):
        fnameRGB = batchx[i] + '.jpg'
        fnamePer = batchx[i] + '.pkl'

        if batchy[i] == self.label['malignant']:
            pathRGB = os.path.join(self.path_mal_cv, fnameRGB)
            pathPer = os.path.join(self.path_mal_cv_per, fnamePer)
        elif batchy[i] == self.label['benign']:
            pathRGB = os.path.join(self.path_ben_cv, fnameRGB)
            pathPer = os.path.join(self.path_ben_cv_per, fnamePer)
        # NOTE(review): a label outside {malignant, benign} would leave
        # pathRGB/pathPer unbound (NameError) — presumed unreachable here.

        img = skimage.io.imread(pathRGB)
        # Full-resolution 1024x1024 tiles are downsampled 4x to the
        # expected 256x256 input size.
        if img.shape == (1024, 1024, 3):
            img = img[::4, ::4, :]

        # The source-image id is encoded as a float-looking token in the path.
        image_id = int(float(re.findall(r"\d+\.\d+", pathRGB)[0]))

        # Reinhard color normalization: use cached per-image (mu, sigma)
        # when available, otherwise let the normalizer estimate them.
        if image_id in self.stats:
            [src_mu, src_sigma] = self.stats[image_id]
            img_nmzd = htk_cnorm.reinhard(img, self.ref_mu_lab, self.ref_std_lab,
                                          src_mu=src_mu, src_sigma=src_sigma).astype('float')
        else:
            # Fix: the original referenced the undefined name `image_ids[i]`
            # here, which raised NameError whenever stats were missing.
            print('#### stats for %d not present' % image_id)
            img_nmzd = htk_cnorm.reinhard(img, self.ref_mu_lab, self.ref_std_lab).astype('float')

        imgRGB = preprocess_resnet(img_nmzd)

        # Persistence image is stored pickled alongside the tile.
        with open(pathPer, 'rb') as f:
            per_raw = pickle.load(f)
        imgPer = self.preprocess_persistence(per_raw)

        X_RGB[i] = imgRGB
        X_Per[i] = imgPer
        Y[i] = to_categorical(batchy[i], num_classes=2)

    return ([X_RGB, X_Per], Y)