Python keras.applications.inception_resnet_v2.preprocess_input() Examples

The following are 6 code examples of keras.applications.inception_resnet_v2.preprocess_input(), drawn from open-source projects; the source file and project for each example are listed above it. You may also want to check out the other available functions and classes of the module keras.applications.inception_resnet_v2.
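Before the project examples, here is a minimal sketch of the typical call pattern for this function. preprocess_input scales raw RGB pixel values from the [0, 255] range to the [-1, 1] range expected by InceptionResNetV2; the file name 'elephant.jpg' below is only a placeholder:

import numpy as np
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input, decode_predictions
from keras.preprocessing import image

model = InceptionResNetV2(weights='imagenet')  # expects 299x299 RGB inputs

img = image.load_img('elephant.jpg', target_size=(299, 299))  # placeholder file name
x = image.img_to_array(img)        # (299, 299, 3) float array in [0, 255]
x = np.expand_dims(x, axis=0)      # add the batch dimension
x = preprocess_input(x)            # scale to [-1, 1]

preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])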
Example #1
Source File: hp_search.py    From Scene-Classification with MIT License
def data():
    train_datagen = ImageDataGenerator(shear_range=0.2,
                                       rotation_range=20.,
                                       width_shift_range=0.3,
                                       height_shift_range=0.3,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       preprocessing_function=preprocess_input)
    test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_directory(train_data, (img_width, img_height), batch_size=batch_size,
                                                        class_mode='categorical', shuffle=True)
    validation_generator = test_datagen.flow_from_directory(valid_data, (img_width, img_height), batch_size=batch_size,
                                                            class_mode='categorical', shuffle=True)

    return train_generator, validation_generator 
Example #2
Source File: data_generator.py    From FaceNet with Apache License 2.0
def __getitem__(self, idx):
        i = idx * batch_size

        length = min(batch_size, (len(self.samples) - i))
        batch_inputs = np.empty((3, length, img_size, img_size, channel), dtype=np.float32)
        batch_dummy_target = np.zeros((length, embedding_size * 3), dtype=np.float32)

        for i_batch in range(length):
            sample = self.samples[i + i_batch]
            for j, role in enumerate(['a', 'p', 'n']):
                image_name = sample[role]
                filename = os.path.join(self.image_folder, image_name)
                image = cv.imread(filename)  # BGR
                image = image[:, :, ::-1]  # RGB
                dets = self.detector(image, 1)

                num_faces = len(dets)
                if num_faces > 0:
                    # Find the 5 face landmarks we need to do the alignment.
                    faces = dlib.full_object_detections()
                    for detection in dets:
                        faces.append(self.sp(image, detection))
                    image = dlib.get_face_chip(image, faces[0], size=img_size)
                else:
                    image = cv.resize(image, (img_size, img_size), interpolation=cv.INTER_CUBIC)

                if self.usage == 'train':
                    image = aug_pipe.augment_image(image)

                batch_inputs[j, i_batch] = preprocess_input(image)

        return [batch_inputs[0], batch_inputs[1], batch_inputs[2]], batch_dummy_target 
Example #3
Source File: inference.py    From FaceNet with Apache License 2.0
def run(self):
        # set environment
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
        print("InferenceWorker init, GPU ID: {}".format(self.gpuid))

        from model import build_model

        # load models
        model_weights_path = 'models/model.00-0.0296.hdf5'
        model = build_model()
        model.load_weights(model_weights_path)

        while True:
            try:
                try:
                    item = self.in_queue.get(block=False)
                except queue.Empty:
                    continue

                image_name_0, image_name_1, image_name_2 = item

                filename = os.path.join(image_folder, image_name_0)
                image_bgr = cv.imread(filename)
                image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
                image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                image_rgb_0 = preprocess_input(image_rgb)
                filename = os.path.join(image_folder, image_name_1)
                image_bgr = cv.imread(filename)
                image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
                image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                image_rgb_1 = preprocess_input(image_rgb)
                filename = os.path.join(image_folder, image_name_2)
                image_bgr = cv.imread(filename)
                image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
                image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                image_rgb_2 = preprocess_input(image_rgb)

                batch_inputs = np.empty((3, 1, img_size, img_size, 3), dtype=np.float32)
                batch_inputs[0] = image_rgb_0
                batch_inputs[1] = image_rgb_1
                batch_inputs[2] = image_rgb_2
                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])

                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]

                self.out_queue.put({'image_name': image_name_0, 'embedding': a})
                self.out_queue.put({'image_name': image_name_1, 'embedding': p})
                self.out_queue.put({'image_name': image_name_2, 'embedding': n})
                if self.in_queue.qsize() == 0:
                    break
            except Exception as e:
                print(e)

        import keras.backend as K
        K.clear_session()
        print('InferenceWorker done, GPU ID {}'.format(self.gpuid)) 
Example #4
Source File: train_eval.py    From FaceNet with Apache License 2.0
def run(self):
        # set environment
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
        print("InferenceWorker init, GPU ID: {}".format(self.gpuid))

        from model import build_model

        # load models
        model = build_model()
        model.load_weights(get_best_model())

        while True:
            try:
                sample = {}
                try:
                    sample['a'] = self.in_queue.get(block=False)
                    sample['p'] = self.in_queue.get(block=False)
                    sample['n'] = self.in_queue.get(block=False)
                except queue.Empty:
                    break

                batch_inputs = np.empty((3, 1, img_size, img_size, channel), dtype=np.float32)

                for j, role in enumerate(['a', 'p', 'n']):
                    image_name = sample[role]
                    filename = os.path.join(image_folder, image_name)
                    image_bgr = cv.imread(filename)
                    image_bgr = cv.resize(image_bgr, (img_size, img_size), interpolation=cv.INTER_CUBIC)
                    image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)
                    batch_inputs[j, 0] = preprocess_input(image_rgb)

                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])
                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]

                self.out_queue.put({'image_name': sample['a'], 'embedding': a})
                self.out_queue.put({'image_name': sample['p'], 'embedding': p})
                self.out_queue.put({'image_name': sample['n'], 'embedding': n})
                self.signal_queue.put(SENTINEL)

                if self.in_queue.qsize() == 0:
                    break
            except Exception as e:
                print(e)

        import keras.backend as K
        K.clear_session()
        print('InferenceWorker done, GPU ID {}'.format(self.gpuid)) 
Example #5
Source File: Multitask_train.py    From FashionAI_Tianchi_2018 with MIT License
def train(task):
    if (task == 'design'):
        task_list = task_list_design
    else:
        task_list = task_list_length

    label_names = list(task_list.keys())
    print(n)
    y = [np.zeros((n, task_list[x])) for x in task_list.keys()]
    for i in range(n):
        label_name = df.label_name[i]
        label = df.label[i]
        y[label_names.index(label_name)][i, label.find('y')] = 1

    X = getX()
    n_train = int(n * 0.9)
    X_train = X[:n_train]
    X_valid = X[n_train:]
    y_train = [x[:n_train] for x in y]
    y_valid = [x[n_train:] for x in y]
    gen_train = Generator(X_train, y_train, batch_size=40, aug=True)

    base_model = inception_v4.create_model(weights='imagenet', width=width, include_top=False)
    input_tensor = Input((width, width, 3))
    x = input_tensor
    x = Lambda(preprocess_input, name='preprocessing')(x)
    x = base_model(x)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x)
    x = [Dense(count, activation='softmax', name=name)(x) for name, count in task_list.items()]

    model = Model(input_tensor, x)
    # model.load_weights('models/base.h5',by_name=True)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
    model2 = multi_gpu_model(model, 2)

    model2.compile(optimizer=Adam(0.0001), loss='categorical_crossentropy', metrics=[acc])
    model2.fit_generator(gen_train.generator, steps_per_epoch=gen_train.steps, epochs=3, validation_data=(X_valid, y_valid))

    model2.compile(optimizer=Adam(0.000025), loss='categorical_crossentropy', metrics=[acc])
    model2.fit_generator(gen_train.generator, steps_per_epoch=gen_train.steps, epochs=2, validation_data=(X_valid, y_valid))

    model2.compile(optimizer=Adam(0.00000625), loss='categorical_crossentropy', metrics=[acc])
    model2.fit_generator(gen_train.generator, steps_per_epoch=gen_train.steps, epochs=3, validation_data=(X_valid, y_valid))

    model2.compile(optimizer=Adam(0.00000425), loss='categorical_crossentropy', metrics=[acc])
    model2.fit_generator(gen_train.generator, steps_per_epoch=gen_train.steps, epochs=1, validation_data=(X_valid, y_valid))

    model2.compile(optimizer=Adam(0.000001), loss='categorical_crossentropy', metrics=[acc])
    model2.fit_generator(gen_train.generator, steps_per_epoch=gen_train.steps, epochs=1, validation_data=(X_valid, y_valid))
    model.save_weights('models/%s.h5' % model_name)

    del X
    del model
    gc.collect()

# load the label file and split it into two portions 
Example #6
Source File: run.py    From Generative-Adversarial-Networks-Projects with MIT License
def calculate_inception_score(images_path, batch_size=1, splits=10):
    # Create an instance of InceptionResNetV2
    model = InceptionResNetV2()

    images = None
    for image_ in glob.glob(images_path):
        # Load image
        loaded_image = image.load_img(image_, target_size=(299, 299))

        # Convert PIL image to numpy ndarray
        loaded_image = image.img_to_array(loaded_image)

        # Add another dimension (the batch dimension)
        loaded_image = np.expand_dims(loaded_image, axis=0)

        # Concatenate all images into one tensor
        if images is None:
            images = loaded_image
        else:
            images = np.concatenate([images, loaded_image], axis=0)

    # Calculate number of batches
    num_batches = (images.shape[0] + batch_size - 1) // batch_size

    probs = None

    # Use InceptionResNetV2 to calculate class probabilities
    for i in range(num_batches):
        image_batch = images[i * batch_size:(i + 1) * batch_size, :, :, :]
        prob = model.predict(preprocess_input(image_batch))

        if probs is None:
            probs = prob
        else:
            probs = np.concatenate([prob, probs], axis=0)

    # Calculate Inception scores
    divs = []
    split_size = probs.shape[0] // splits

    for i in range(splits):
        prob_batch = probs[(i * split_size):((i + 1) * split_size), :]
        p_y = np.expand_dims(np.mean(prob_batch, 0), 0)
        div = prob_batch * (np.log(prob_batch / p_y))
        div = np.mean(np.sum(div, 1))
        divs.append(np.exp(div))

    return np.mean(divs), np.std(divs)
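For reference, the split loop above implements the Inception Score, IS = exp( E_x[ KL(p(y|x) || p(y)) ] ): prob_batch holds p(y|x) for each image in a split, p_y is the marginal p(y) averaged over that split, and div is the mean KL divergence before exponentiation. The function returns the mean and standard deviation of the per-split scores.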