Python keras.utils Examples

The following are code examples showing how to use keras.utils. They are taken from open-source Python projects.
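
Most of the examples below use keras.utils.to_categorical, which one-hot encodes a vector of integer class labels. A minimal sketch, with purely illustrative label values:

import numpy as np
from keras.utils import to_categorical

labels = np.array([0, 2, 1, 2])                  # integer class labels
one_hot = to_categorical(labels, num_classes=3)  # (4, 3) binary class matrix
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]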

Example 1
Project: DeepFashion   Author: abhishekrana   File: cnn.py    Apache License 2.0    7 votes
def load_and_preprocess_data_3():
    # The data, shuffled and split between train and test sets:
    (X_train, y_train), (x_test, y_test) = cifar10.load_data()
    logging.debug('X_train shape: {}'.format(X_train.shape))
    logging.debug('train samples: {}'.format(X_train.shape[0]))
    logging.debug('test samples: {}'.format(x_test.shape[0]))

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    X_train = X_train.astype('float32')
    x_test = x_test.astype('float32')
    X_train /= 255
    x_test /= 255

    input_shape = X_train[0].shape
    logging.debug('input_shape {}'.format(input_shape))
    input_shape = X_train.shape[1:]
    logging.debug('input_shape {}'.format(input_shape))

    return X_train, x_test, y_train, y_test, input_shape 
Example 2
Project: humpback-whale-4th-place   Author: daustingm1   File: densenet121_422_train.py    Apache License 2.0    6 votes
def __data_generation(self, list_IDs_temp, list_labels_temp):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        y = np.empty((self.batch_size), dtype=int)
        x_batch = []
        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            # Store sample
            #X[i,] = np.load('data/' + ID + '.npy')
            img = cv2.imread('../class_problem_only/all_traintest2/{}'.format(ID), 0)
            img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
            img = cv2.resize(img, (self.dim, self.dim))
            x_batch.append(img)

            # Store class
            y[i] = list_labels_temp[i]
        x_batch = np.array(x_batch) #, np.float32) / 255
        X = preprocess_input(x_batch)
        return X, keras.utils.to_categorical(y, num_classes=self.n_classes) 
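
The __data_generation method above is written as a member of a custom data generator class; in Keras such generators are usually subclasses of keras.utils.Sequence. The following is a minimal, hypothetical sketch of the surrounding class (the class name, shapes, and random data are assumptions, not code from the project):

import numpy as np
import keras

class SketchGenerator(keras.utils.Sequence):
    # Hypothetical minimal generator showing where a __data_generation method plugs in.
    def __init__(self, list_IDs, labels, batch_size=4, dim=8, n_classes=3):
        self.list_IDs = list_IDs
        self.labels = labels
        self.batch_size = batch_size
        self.dim = dim
        self.n_classes = n_classes

    def __len__(self):
        # Number of batches per epoch
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        # Slice out one batch of IDs/labels and delegate to __data_generation
        ids = self.list_IDs[index * self.batch_size:(index + 1) * self.batch_size]
        labs = self.labels[index * self.batch_size:(index + 1) * self.batch_size]
        return self.__data_generation(ids, labs)

    def __data_generation(self, list_IDs_temp, list_labels_temp):
        # Stand-in for the image loading above; random data keeps the sketch self-contained.
        X = np.random.rand(len(list_IDs_temp), self.dim, self.dim, 3).astype('float32')
        y = keras.utils.to_categorical(list_labels_temp, num_classes=self.n_classes)
        return X, y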
Example 3
Project: humpback-whale-4th-place   Author: daustingm1   File: densenet121_200_train.py    Apache License 2.0    6 votes
def __data_generation(self, list_IDs_temp, list_labels_temp):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        y = np.empty((self.batch_size), dtype=int)
        x_batch = []
        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            img = cv2.imread('../class_problem_only/all_traintest2/{}'.format(ID), 0)
            img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
            img = cv2.resize(img, (self.dim, self.dim))
            x_batch.append(img)

            # Store class
            y[i] = list_labels_temp[i]
        x_batch = np.array(x_batch)
        X = preprocess_input(x_batch)
        return X, keras.utils.to_categorical(y, num_classes=self.n_classes) 
Example 4
Project: PPRL-VGAN   Author: yushuinanrong   File: train.py    MIT License    6 votes
def generate_dataset(ee):
    ## save to numpyz###############
    c = np.random.randint(num_pp, size=x_train.shape[0])
    c_train = keras.utils.to_categorical(c, num_pp)
    c = np.random.randint(num_pp, size=x_test.shape[0])
    c_test = keras.utils.to_categorical(c, num_pp)

    [z_train, mean_var_train] = encoder.predict(x_train)
    encoded_xtrain = decoder.predict([z_train, c_train])

    [z_test, mean_var_test] = encoder.predict(x_test)
    encoded_xtest = decoder.predict([z_test, c_test])

    np.savez('/Z_' + str(date) + 'epoch'+str(ee)+'_64_64_VAE_GAN_labelfull_v2.npz',
             encoded_xtrain, y_train1, y_train2, c_train, encoded_xtest, y_test1, y_test2, c_test)
    np.savez('/X_' + str(date) + 'epoch'+str(ee)+ '_fi_512_VAE_GAN_labelfull_v2.npz',
             z_train, y_train1, y_train2, c_train, z_test, y_test1, y_test2, c_test) 
Example 5
Project: SeqGAN   Author: tyo-yo   File: models.py    MIT License    6 votes
def generate_samples(self, T, g_data, num, output_file):
        '''
        Generate sample sentences to output file
        # Arguments:
            T: int, max time steps
            g_data: SeqGAN.utils.GeneratorPretrainingGenerator
            num: int, number of sentences
            output_file: str, path
        '''
        sentences=[]
        for _ in range(num // self.B + 1):
            actions = self.sampling_sentence(T)
            actions_list = actions.tolist()
            for sentence_id in actions_list:
                sentence = [g_data.id2word[action] for action in sentence_id]
                sentences.append(sentence)
        output_str = ''
        for i in range(num):
            output_str += ' '.join(sentences[i]) + '\n'
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(output_str) 
Example 6
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_reg.py    MIT License    6 votes
def __data_generation(self,list_files,labels):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization

        X = np.empty((len(list_files),self.dim[0],self.dim[1],self.dim[2]))
        y = np.empty((len(list_files)),dtype=int)
     #   print(X.shape,y.shape)

        # Generate data
        k = -1 
        for i,f in enumerate(list_files):
            #      print(f)
            img = get_im_cv2(f,dim=self.dim[0])
            img = pre_process(img)
            label = labels[i]
            #label = keras.utils.np_utils.to_categorical(label,self.n_classes)
            X[i,] = img
            y[i,] = label
       # print(X.shape,y.shape)    
        return X,y 
Example 7
Project: Bella   Author: apmoore1   File: tdlstm.py    MIT License    6 votes
def create_training_y(self, train_y: np.ndarray, validation_y: np.ndarray,
                          ) -> Tuple[np.ndarray, np.ndarray]:
        '''
        Converts the training and validation target values from a vector of
        class labels into a matrix of binary values of shape [n_samples,
        n_classes].

        To convert the vector of classes to a matrix we use the
        :py:func:`keras.utils.to_categorical` function.

        :param train_y: Vector of class labels, shape = [n_samples]
        :param validation_y: Vector of class labels, shape = [n_samples]
        :return: A tuple of length two containing the train and validation
                 matrices respectively. The shape of each matrix is:
                 [n_samples, n_classes]
        '''
        train_y = to_categorical(train_y).astype(np.float32)
        validation_y = to_categorical(validation_y).astype(np.float32)
        return train_y, validation_y 
Example 8
Project: gumpy-deeplearning   Author: gumpy-bci   File: utils.py    MIT License    5 votes
def load_preprocess_data(data, debug, lowcut, highcut, w0, Q, anti_drift, class_count, cutoff, axis, fs):
    """Load and preprocess data.

    The routine loads data with the use of gumpy's Dataset objects, and
    subsequently applies some post-processing filters to improve the data.
    """
    # TODO: improve documentation

    data_loaded = data.load()

    if debug:
        print('Band-pass filtering the data in frequency range from %.1f Hz to %.1f Hz... '
          %(lowcut, highcut))

        data_notch_filtered = gumpy.signal.notch(data_loaded.raw_data, cutoff, axis)
        data_hp_filtered = gumpy.signal.butter_highpass(data_notch_filtered, anti_drift, axis)
        data_bp_filtered = gumpy.signal.butter_bandpass(data_hp_filtered, lowcut, highcut, axis)

        # Split data into classes.
        # TODO: as soon as gumpy.utils.extract_trials2 is merged with the
        #       regular extract_trials, change here accordingly!
        class1_mat, class2_mat = gumpy.utils.extract_trials2(data_bp_filtered, data_loaded.trials,
                                                             data_loaded.labels, data_loaded.trial_total,
                                                             fs, nbClasses = 2)

        # concatenate data for training and create labels
        x_train = np.concatenate((class1_mat, class2_mat))
        labels_c1 = np.zeros((class1_mat.shape[0], ))
        labels_c2 = np.ones((class2_mat.shape[0], ))
        y_train = np.concatenate((labels_c1, labels_c2))

        # for categorical crossentropy
        y_train = ku.to_categorical(y_train)

        print("Data loaded and processed successfully!")
        return x_train, y_train 
Example 9
Project: gumpy-deeplearning   Author: gumpy-bci   File: utils.py    MIT License    5 votes
def load_preprocess_data(data, debug, lowcut, highcut, w0, Q, anti_drift, class_count, cutoff, axis, fs):
    """Load and preprocess data.
    The routine loads data with the use of gumpy's Dataset objects, and
    subsequently applies some post-processing filters to improve the data.
    """
    # TODO: improve documentation

    data_loaded = data.load()

    if debug:
        print('Band-pass filtering the data in frequency range from %.1f Hz to %.1f Hz... '
          %(lowcut, highcut))

        data_notch_filtered = gumpy.signal.notch(data_loaded.raw_data, cutoff, axis)
        data_hp_filtered = gumpy.signal.butter_highpass(data_notch_filtered, anti_drift, axis)
        data_bp_filtered = gumpy.signal.butter_bandpass(data_hp_filtered, lowcut, highcut, axis)

        # Split data into classes.
        # TODO: as soon as gumpy.utils.extract_trials2 is merged with the
        #       regular extract_trials, change here accordingly!
        class1_mat, class2_mat = gumpy.utils.extract_trials2(data_bp_filtered, data_loaded.trials,
                                                             data_loaded.labels, data_loaded.trial_total,
                                                             fs, nbClasses = 2)

        # concatenate data for training and create labels
        x_train = np.concatenate((class1_mat, class2_mat))
        labels_c1 = np.zeros((class1_mat.shape[0], ))
        labels_c2 = np.ones((class2_mat.shape[0], ))
        y_train = np.concatenate((labels_c1, labels_c2))

        # for categorical crossentropy
        y_train = ku.to_categorical(y_train)

        print("Data loaded and processed successfully!")
        return x_train, y_train 
Example 10
Project: kutils   Author: subpic   File: applications.py    MIT License    5 votes
def model_inceptionresnet_pooled(input_shape=(None, None, 3), pool_size=(5, 5),
                                 name='', return_sizes=False):
    """
    Returns the wide MLSP features, spatially pooled, from InceptionResNetV2.

    :param input_shape: shape of the input images
    :param pool_size: spatial extent of the MLSP features
    :param name: name of the model
    :param return_sizes: return the sizes of each layer: (model, pool_sizes)
    :return: model or (model, pool_sizes)
    """
    
    print('Loading InceptionResNetV2 multi-pooled with input_shape:', input_shape)
    model_base = InceptionResNetV2(weights     = 'imagenet', 
                                   include_top = False, 
                                   input_shape = input_shape)
    print('Creating multi-pooled model')
    
    ImageResizer = Lambda(lambda x: K.tf.image.resize_area(x, pool_size),
                          name='feature_resizer') 

    feature_layers = [l for l in model_base.layers if 'mixed' in l.name]
    pools = [ImageResizer(l.output) for l in feature_layers]
    conc_pools = Concatenate(name='conc_pools', axis=3)(pools)

    model = Model(inputs  = model_base.input, 
                  outputs = conc_pools)
    if name: model.name = name

    if return_sizes:
        pool_sizes = [[np.int32(x) for x in f.get_shape()[1:]] for f in pools]
        return model, pool_sizes
    else:
        return model    


# ------------------
# RATING model utils
# ------------------ 
Example 11
Project: PPRL-VGAN   Author: yushuinanrong   File: train.py    MIT License    5 votes
def plotGeneratedImages(epoch, idx=0, examples=10, dim=(10, 10), figsize=(10, 10)):
    n = num_pp  # how many digits we will display
    pp_avg = 4500
    plt.figure(figsize=(16, 4))


    sample = x_ori[idx:idx + n, :, :, :]
    c = np.asarray([0, 1, 2, 3, 4, 5, 6, 7])
    c = keras.utils.to_categorical(c, num_pp)

    [z, mean_var] = encoder.predict(sample)
    generated_images = decoder.predict([z, c])

    for i in range(n):
        # display original
        ax = plt.subplot(2, n, i + 1)
        ori = sample[i].reshape(img_rows, img_cols, 3)
        ori = np.uint8(ori * 127.5 + 127.5)
        plt.imshow(ori)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        # display reconstruction
        ax = plt.subplot(2, n, i + 1 + n)
        rec = generated_images[i].reshape(img_rows, img_cols, 3)
        rec = np.uint8(rec * 127.5 + 127.5)
        plt.imshow(rec)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

    # Path to be created
    plt.savefig(path + '/GAN_MUG_results_' + str(date) + '_generated_image_epoch_%d.tif' % epoch)
    plt.close() 
Example 12
Project: robust_physical_perturbations   Author: evtimovi   File: utils_mnist.py    MIT License    5 votes
def model_mnist(logits=False, input_ph=None, img_rows=28, img_cols=28,
                nb_filters=64, nb_classes=10):
    warnings.warn("`utils_mnist.model_mnist` is deprecated. Switch to "
                  "`utils.cnn_model`. `utils_mnist.model_mnist` will "
                  "be removed after 2017-08-17.")
    return utils.cnn_model(logits=logits, input_ph=input_ph,
                           img_rows=img_rows, img_cols=img_cols,
                           nb_filters=nb_filters, nb_classes=nb_classes) 
Example 13
Project: robust_physical_perturbations   Author: evtimovi   File: utils_mnist.py    MIT License    5 votes
def model_mnist(logits=False, input_ph=None, img_rows=28, img_cols=28,
                nb_filters=64, nb_classes=10):
    warnings.warn("`utils_mnist.model_mnist` is deprecated. Switch to "
                  "`utils.cnn_model`. `utils_mnist.model_mnist` will "
                  "be removed after 2017-08-17.")
    return utils.cnn_model(logits=logits, input_ph=input_ph,
                           img_rows=img_rows, img_cols=img_cols,
                           nb_filters=nb_filters, nb_classes=nb_classes) 
Example 14
Project: keras-efficientnets   Author: titu1994   File: test_build.py    MIT License    5 votes
def get_preds(model):
    size = model.input_shape[1]

    filename = os.path.join(os.path.dirname(__file__),
                            'data', '565727409_61693c5e14.jpg')

    batch = KE.preprocess_input(img_to_array(load_img(
                                filename, target_size=(size, size))))

    batch = np.expand_dims(batch, 0)

    pred = decode_predictions(model.predict(batch),
                              backend=K, utils=utils)

    return pred 
Example 15
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: captcha_solver.py    MIT License    5 votes
def train(dest_train,dest_val,outdir,batch_size,n_classes,dim,shuffle,epochs,lr):
    char_to_index_dict,index_to_char_dict = create_dict_char_to_index()
    model = _model_(n_classes)
    from keras.utils import plot_model
    plot_model(model, to_file=outdir + 'model.png')
    train_generator =  DataGenerator(dest_train,char_to_index_dict,batch_size,n_classes,dim,shuffle)
    val_generator =  DataGenerator(dest_val,char_to_index_dict,batch_size,n_classes,dim,shuffle)
    model.fit_generator(train_generator,epochs=epochs,validation_data=val_generator)
    model.save(outdir + 'captcha_breaker.h5') 
Example 16
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning.py    MIT License    5 votes
def read_data(self,class_folders,path,num_class,dim,train_val='train'):
		print(train_val)
		train_X,train_y = [],[] 
		for c in class_folders:
			path_class = path + str(train_val) + '/' + str(c)
			file_list = os.listdir(path_class) 
			for f in file_list:
				img = self.get_im_cv2(path_class + '/' + f)
				img = self.pre_process(img)
				train_X.append(img)
				label = int(c.split('class')[1])
				train_y.append(int(label))
		train_y = keras.utils.np_utils.to_categorical(np.array(train_y),num_class) 
		return np.array(train_X),train_y 
Example 17
Project: classification_models   Author: qubvel   File: keras.py    MIT License    5 votes
def get_kwargs():
        return {
            'backend': keras.backend,
            'layers': keras.layers,
            'models': keras.models,
            'utils': keras.utils,
        } 
Example 18
Project: certifiable-distributional-robustness   Author: duchi-lab   File: utils_mnist.py    Apache License 2.0    5 votes
def data_mnist():
    # These values are specific to MNIST
    img_rows = 28
    img_cols = 28
    nb_classes = 10

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    if keras.backend.image_dim_ordering() == 'th':
        X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
        X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    else:
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    from sklearn.utils import shuffle
    X_train, Y_train = shuffle(X_train, Y_train)
    return X_train, Y_train, X_test, Y_test 
Example 19
Project: kaggle-rsna18   Author: i-pan   File: train_kaggle.py    MIT License    5 votes
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0, freeze_backbone=False):
    """ Creates three models (model, training_model, prediction_model).

    Args
        backbone_retinanet : A function to call to create a retinanet model with a given backbone.
        num_classes        : The number of classes to train.
        weights            : The weights to load into the model.
        multi_gpu          : The number of GPUs to use for training.
        freeze_backbone    : If True, disables learning for the backbone.

    Returns
        model            : The base model. This is also the model that is saved in snapshots.
        training_model   : The training model. If multi_gpu=0, this is identical to model.
        prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).
    """
    modifier = freeze_model if freeze_backbone else None

    # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
    # optionally wrap in a parallel model
    if multi_gpu > 1:
        from keras.utils import multi_gpu_model
        with tf.device('/cpu:0'):
            model = model_with_weights(backbone_retinanet(num_classes, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = multi_gpu_model(model, gpus=multi_gpu)
    else:
        model          = model_with_weights(backbone_retinanet(num_classes, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = model

    # make prediction model
    prediction_model = retinanet_bbox(model=model)

    # compile model
    training_model.compile(
        loss={
            'regression'    : losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
    )

    return model, training_model, prediction_model 
Example 20
Project: kaggle-rsna18   Author: i-pan   File: train.py    MIT License    5 votes
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0, freeze_backbone=False):
    """ Creates three models (model, training_model, prediction_model).

    Args
        backbone_retinanet : A function to call to create a retinanet model with a given backbone.
        num_classes        : The number of classes to train.
        weights            : The weights to load into the model.
        multi_gpu          : The number of GPUs to use for training.
        freeze_backbone    : If True, disables learning for the backbone.

    Returns
        model            : The base model. This is also the model that is saved in snapshots.
        training_model   : The training model. If multi_gpu=0, this is identical to model.
        prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).
    """
    modifier = freeze_model if freeze_backbone else None

    # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
    # optionally wrap in a parallel model
    if multi_gpu > 1:
        from keras.utils import multi_gpu_model
        with tf.device('/cpu:0'):
            model = model_with_weights(backbone_retinanet(num_classes, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = multi_gpu_model(model, gpus=multi_gpu)
    else:
        model          = model_with_weights(backbone_retinanet(num_classes, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = model

    # make prediction model
    prediction_model = retinanet_bbox(model=model)

    # compile model
    training_model.compile(
        loss={
            'regression'    : losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
    )

    return model, training_model, prediction_model 
Example 21
Project: keras-retinanet   Author: ghada-soliman   File: train.py    Apache License 2.0    5 votes
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0, freeze_backbone=False):
    """ Creates three models (model, training_model, prediction_model).

    Args
        backbone_retinanet : A function to call to create a retinanet model with a given backbone.
        num_classes        : The number of classes to train.
        weights            : The weights to load into the model.
        multi_gpu          : The number of GPUs to use for training.
        freeze_backbone    : If True, disables learning for the backbone.

    Returns
        model            : The base model. This is also the model that is saved in snapshots.
        training_model   : The training model. If multi_gpu=0, this is identical to model.
        prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).
    """
    modifier = freeze_model if freeze_backbone else None

    # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
    # optionally wrap in a parallel model
    if multi_gpu > 1:
        from keras.utils import multi_gpu_model
        with tf.device('/cpu:0'):
            model = model_with_weights(backbone_retinanet(num_classes, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = multi_gpu_model(model, gpus=multi_gpu)
    else:
        model          = model_with_weights(backbone_retinanet(num_classes, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = model

    # make prediction model
    prediction_model = retinanet_bbox(model=model)

    # compile model
    training_model.compile(
        loss={
            'regression'    : losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
    )

    return model, training_model, prediction_model 
Example 22
Project: talos   Author: autonomio   File: datasets.py    MIT License    5 votes
def iris():

    import pandas as pd
    from keras.utils import to_categorical
    base = 'https://raw.githubusercontent.com/autonomio/datasets/master/autonomio-datasets/'
    df = pd.read_csv(base + 'iris.csv')
    df['species'] = df['species'].factorize()[0]
    df = df.sample(len(df))
    y = to_categorical(df['species'])
    x = df.iloc[:, :-1].values

    return x, y 
Example 23
Project: talos   Author: autonomio   File: datasets.py    MIT License    5 votes
def mnist():

    '''Note that this dataset, unlike other Talos datasets, returns:

    x_train, y_train, x_val, y_val'''

    import keras
    import numpy as np

    # the data, split between train and test sets
    (x_train, y_train), (x_val, y_val) = keras.datasets.mnist.load_data()

    # input image dimensions
    img_rows, img_cols = 28, 28

    if keras.backend.image_data_format() == 'channels_first':

        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_val = x_val.reshape(x_val.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)

    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_val = x_val.reshape(x_val.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_val = x_val.astype('float32')
    x_train /= 255
    x_val /= 255

    classes = len(np.unique(y_train))

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, classes)
    y_val = keras.utils.to_categorical(y_val, classes)

    print("Use input_shape %s" % str(input_shape))

    return x_train, y_train, x_val, y_val 
Example 24
Project: efficientnet   Author: qubvel   File: __init__.py    Apache License 2.0    5 votes
def get_submodules_from_kwargs(kwargs):
    backend = kwargs.get('backend', _KERAS_BACKEND)
    layers = kwargs.get('layers', _KERAS_LAYERS)
    models = kwargs.get('models', _KERAS_MODELS)
    utils = kwargs.get('utils', _KERAS_UTILS)
    for key in kwargs.keys():
        if key not in ['backend', 'layers', 'models', 'utils']:
            raise TypeError('Invalid keyword argument: %s' % key)
    return backend, layers, models, utils 
Example 25
Project: efficientnet   Author: qubvel   File: __init__.py    Apache License 2.0    5 votes
def inject_keras_modules(func):
    import keras
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        kwargs['backend'] = keras.backend
        kwargs['layers'] = keras.layers
        kwargs['models'] = keras.models
        kwargs['utils'] = keras.utils
        return func(*args, **kwargs)

    return wrapper 
Example 26
Project: efficientnet   Author: qubvel   File: __init__.py    Apache License 2.0    5 votes
def inject_tfkeras_modules(func):
    import tensorflow.keras as tfkeras
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        kwargs['backend'] = tfkeras.backend
        kwargs['layers'] = tfkeras.layers
        kwargs['models'] = tfkeras.models
        kwargs['utils'] = tfkeras.utils
        return func(*args, **kwargs)

    return wrapper 
Example 27
Project: efficientnet   Author: qubvel   File: __init__.py    Apache License 2.0    5 votes
def init_keras_custom_objects():
    import keras
    from . import model

    custom_objects = {
        'swish': inject_keras_modules(model.get_swish)(),
        'FixedDropout': inject_keras_modules(model.get_dropout)()
    }

    keras.utils.generic_utils.get_custom_objects().update(custom_objects) 
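
Once names are registered through get_custom_objects(), Keras can resolve them when deserializing a saved model. A hedged usage sketch (the weights file name is hypothetical, and it assumes init_keras_custom_objects is importable from the package root):

import keras
import efficientnet

efficientnet.init_keras_custom_objects()                # registers 'swish' and 'FixedDropout' globally
model = keras.models.load_model('efficientnet_b0.h5')   # custom objects now resolve by name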
Example 28
Project: efficientnet   Author: qubvel   File: __init__.py    Apache License 2.0    5 votes
def init_tfkeras_custom_objects():
    import tensorflow.keras as tfkeras
    from . import model

    custom_objects = {
        'swish': inject_tfkeras_modules(model.get_swish)(),
        'FixedDropout': inject_tfkeras_modules(model.get_dropout)()
    }

    tfkeras.utils.get_custom_objects().update(custom_objects) 
Example 29
Project: nnvmt   Author: verivital   File: kerasPrinter.py    MIT License    5 votes
def create_onnx_model(self):
        # Convert the Keras model into ONNX
        if self.no_json:
            model = models.load_model(self.pathToOriginalFile)
        else:
            model = self.load_files(self.jsonFile,self.pathToOriginalFile) 
        self.final_output_path=os.path.join(self.outputFilePath, self.originalFilename)+'.onnx'
        onnx_model = onnxmltools.convert_keras(model)
        # Save as protobuf
        onnxmltools.utils.save_model(onnx_model, self.final_output_path)
        self.originalFile.close()
    
    # Load the plant with parameters included 
Example 30
Project: Cerebro   Author: AmrSaber   File: emotions_model.py    GNU General Public License v3.0    5 votes
def __enhance_image__(self, img):
        # normalize image to wanted size
        img = utils.normalize_image(img, self.imageSize)

        # make sure image is always in grayscale
        img = utils.normalize_channels(img)

        # remove salt and pepper
        img = filters.median(img)

        # sharpen images
        img = filters.laplacian(img)

        return img 
Example 31
Project: funcom   Author: mcmillco   File: ast_attendgru_xtra.py    GNU General Public License v3.0    5 votes
def create_model(self):
        
        dat_input = Input(shape=(self.tdatlen,))
        com_input = Input(shape=(self.comlen,))
        sml_input = Input(shape=(self.smllen,))
        
        ee = Embedding(output_dim=self.embdims, input_dim=self.tdatvocabsize, mask_zero=False)(dat_input)
        se = Embedding(output_dim=self.smldims, input_dim=self.smlvocabsize, mask_zero=False)(sml_input)

        se_enc = CuDNNGRU(self.recdims, return_state=True, return_sequences=True)
        seout, state_sml = se_enc(se)

        enc = CuDNNGRU(self.recdims, return_state=True, return_sequences=True)
        encout, state_h = enc(ee, initial_state=state_sml)
        
        de = Embedding(output_dim=self.embdims, input_dim=self.comvocabsize, mask_zero=False)(com_input)
        dec = CuDNNGRU(self.recdims, return_sequences=True)
        decout = dec(de, initial_state=state_h)

        attn = dot([decout, encout], axes=[2, 2])
        attn = Activation('softmax')(attn)
        context = dot([attn, encout], axes=[2, 1])

        ast_attn = dot([decout, seout], axes=[2, 2])
        ast_attn = Activation('softmax')(ast_attn)
        ast_context = dot([ast_attn, seout], axes=[2, 1])

        context = concatenate([context, decout, ast_context])

        out = TimeDistributed(Dense(self.recdims, activation="relu"))(context)

        out = Flatten()(out)
        out = Dense(self.comvocabsize, activation="softmax")(out)
        
        model = Model(inputs=[dat_input, com_input, sml_input], outputs=out)

        if self.config['multigpu']:
            model = keras.utils.multi_gpu_model(model, gpus=2)
        
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        return self.config, model 
Example 32
Project: PhaseOfMatter   Author: longyangking   File: ml.py    GNU General Public License v3.0    5 votes
def plot_model(self, filename):
        from keras.utils import plot_model
        plot_model(self.model, show_shapes=True, to_file=filename) 
Example 33
Project: neural-fingerprinting   Author: StephanZheng   File: dataset.py    BSD 3-Clause "New" or "Revised" License    4 votes
def data_cifar10(train_start=0, train_end=50000, test_start=0, test_end=10000):
    """
    Preprocess CIFAR10 dataset
    :return:
    """

    global keras
    if keras is None:
        import keras
        from keras.datasets import cifar10
        from keras.utils import np_utils

    # These values are specific to CIFAR10
    img_rows = 32
    img_cols = 32
    nb_classes = 10

    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    if keras.backend.image_dim_ordering() == 'th':
        x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)

    x_train = x_train[train_start:train_end, :, :, :]
    y_train = y_train[train_start:train_end, :]
    x_test = x_test[test_start:test_end, :]
    y_test = y_test[test_start:test_end, :]

    return x_train, y_train, x_test, y_test 
Example 34
Project: trVAE   Author: theislab   File: _trvae.py    MIT License    4 votes
def predict(self, adata, encoder_labels, decoder_labels, return_adata=True):
        """
            Predicts the cell type provided by the user in stimulated condition.
            # Parameters
                adata: `~anndata.AnnData`
                    Annotated data matrix whether in primary space.
                encoder_labels: `numpy nd-array`
                    `numpy nd-array` of labels to be fed as encoder's condition array.
                decoder_labels: `numpy nd-array`
                    `numpy nd-array` of labels to be fed as decoder's condition array.
                return_adata: boolean
                    if `True`, will output as an `anndata` object or put the results in the `obsm` attribute of `adata`
            # Returns
                output: `~anndata.AnnData`
                    `anndata` object of predicted cells in primary space.
            # Example
            ```python
            import scanpy as sc
            import trvae
            train_adata = sc.read("train.h5ad")
            valid_adata = sc.read("validation.h5ad")
            n_conditions = len(train_adata.obs['condition'].unique().tolist())
            network = trvae.archs.trVAEMulti(train_adata.shape[1], n_conditions)
            network.train(train_adata, valid_adata, le=None,
                          condition_key="condition", cell_type_key="cell_label",
                          n_epochs=1000, batch_size=256
                          )
            encoder_labels, _ = trvae.utils.label_encoder(train_adata, condition_key="condition")
            decoder_labels, _ = trvae.utils.label_encoder(train_adata, condition_key="condition")
            pred_adata = network.predict(train_adata, encoder_labels, decoder_labels)
            ```
        """
        adata = remove_sparsity(adata)

        encoder_labels = to_categorical(encoder_labels, num_classes=self.n_conditions)
        decoder_labels = to_categorical(decoder_labels, num_classes=self.n_conditions)

        reconstructed = self.cvae_model.predict([adata.X, encoder_labels, decoder_labels])[0]
        reconstructed = np.nan_to_num(reconstructed)

        if return_adata:
            output = anndata.AnnData(X=reconstructed)
            output.obs = adata.obs.copy(deep=True)
            output.var_names = adata.var_names
        else:
            output = reconstructed

        return output 
Example 35
Project: GILA   Author: RParedesPalacios   File: deepmodels.py    MIT License    4 votes
def eval_class_model(args):
    print("Evaluation mode")

    if (args.load_model==None):
        print("No model name (-load_model)")
        sys.exit(0)


    model=load_json_model(args.load_model)
    if (args.summary==True):
        model.summary()


    if (args.chan=="rgb"):
        print ("setting depth to RGB")
        CHAN=3
    else:
        print ("setting depth to Gray")
        CHAN=1

    Xt=0
    Lt=0
    if (args.tsfile!=None):
        print("Loading test file:",args.tsfile)
        [Xt,Lt]=load_list_file_class_to_numpy(args.tsfile,args.height,args.width,CHAN,args.resize)
        num_classes=len(set(Lt))
        Lt = keras.utils.to_categorical(Lt, num_classes)
        numts=len(Lt)
    elif (args.tsdir!=None):
        print("Setting test dir to",args.tsdir)
        TEST=1
        gen=ImageDataGenerator().flow_from_directory(args.tsdir,target_size=(args.height,args.width),
            batch_size=args.batch,
            class_mode='categorical')
        numts=gen.samples

    batch_size=args.batch

    model.compile(loss='categorical_crossentropy',optimizer='sgd',metrics=['accuracy'])

    ts_steps=numts//batch_size

    print("Evaluating...")
    score=model.evaluate_generator(test_generator(args,Xt,Lt),ts_steps)
    print("Acc:%.2f%%" % (score[1]*100))



##################################
### TRAIN CLASS MODELS
################################## 
Example 36
Project: robust_physical_perturbations   Author: evtimovi   File: utils_mnist.py    MIT License    4 votes
def data_mnist(datadir='/tmp/', train_start=0, train_end=60000, test_start=0,
               test_end=10000):
    """
    Load and preprocess MNIST dataset
    :param datadir: path to folder where data should be stored
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :return: tuple of four arrays containing training data, training labels,
             testing data and testing labels.
    """
    assert isinstance(train_start, int)
    assert isinstance(train_end, int)
    assert isinstance(test_start, int)
    assert isinstance(test_end, int)

    if 'tensorflow' in sys.modules:
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets(datadir, one_hot=True, reshape=False)
        X_train = np.vstack((mnist.train.images, mnist.validation.images))
        Y_train = np.vstack((mnist.train.labels, mnist.validation.labels))
        X_test = mnist.test.images
        Y_test = mnist.test.labels
    else:
        warnings.warn("CleverHans support for Theano is deprecated and "
                      "will be dropped on 2017-11-08.")
        import keras
        from keras.datasets import mnist
        from keras.utils import np_utils

        # These values are specific to MNIST
        img_rows = 28
        img_cols = 28
        nb_classes = 10

        # the data, shuffled and split between train and test sets
        (X_train, y_train), (X_test, y_test) = mnist.load_data()

        if keras.backend.image_dim_ordering() == 'th':
            X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
            X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)

        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        X_train /= 255
        X_test /= 255

        # convert class vectors to binary class matrices
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train[train_start:train_end]
    Y_train = Y_train[train_start:train_end]
    X_test = X_test[test_start:test_end]
    Y_test = Y_test[test_start:test_end]

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    return X_train, Y_train, X_test, Y_test 
Example 37
Project: robust_physical_perturbations   Author: evtimovi   File: utils_mnist.py    MIT License    4 votes
def data_mnist(datadir='/tmp/', train_start=0, train_end=60000, test_start=0,
               test_end=10000):
    """
    Load and preprocess MNIST dataset
    :param datadir: path to folder where data should be stored
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :return: tuple of four arrays containing training data, training labels,
             testing data and testing labels.
    """
    assert isinstance(train_start, int)
    assert isinstance(train_end, int)
    assert isinstance(test_start, int)
    assert isinstance(test_end, int)

    if 'tensorflow' in sys.modules:
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets(datadir, one_hot=True, reshape=False)
        X_train = np.vstack((mnist.train.images, mnist.validation.images))
        Y_train = np.vstack((mnist.train.labels, mnist.validation.labels))
        X_test = mnist.test.images
        Y_test = mnist.test.labels
    else:
        warnings.warn("CleverHans support for Theano is deprecated and "
                      "will be dropped on 2017-11-08.")
        import keras
        from keras.datasets import mnist
        from keras.utils import np_utils

        # These values are specific to MNIST
        img_rows = 28
        img_cols = 28
        nb_classes = 10

        # the data, shuffled and split between train and test sets
        (X_train, y_train), (X_test, y_test) = mnist.load_data()

        if keras.backend.image_dim_ordering() == 'th':
            X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
            X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)

        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        X_train /= 255
        X_test /= 255

        # convert class vectors to binary class matrices
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train[train_start:train_end]
    Y_train = Y_train[train_start:train_end]
    X_test = X_test[test_start:test_end]
    Y_test = Y_test[test_start:test_end]

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    return X_train, Y_train, X_test, Y_test 
Example 38
Project: textcaps   Author: vinojjayasundara   File: textcaps_emnist_bal.py    MIT License    4 votes
def generate_data(self):
        """
        Generating new images and samples with the data generation technique 
        :return: the newly generated images and labels
        """
        data = self.data
        args = self.args   
        (x_train, y_train), (x_test, y_test) = data
        x_masked_inst = self.masked_inst_parameter
        pos = self.global_position
        retrained_decoder = self.retrained_decoder
        class_cov = self.class_variance
        class_max = self.class_max
        class_min = self.class_min
        samples_to_generate = self.samples_to_generate
        generated_images = np.empty([0,x_train.shape[1],x_train.shape[2],x_train.shape[3]])
        generated_images_with_ori = np.empty([0,x_train.shape[1],x_train.shape[2],x_train.shape[3]])
        generated_labels = np.empty([0])
        for cl in range(0,args.num_cls):
            count = 0
            for it in range(0,x_masked_inst.shape[0]): 
                if (count==samples_to_generate):
                    break
                if (pos[it]==cl):
                    count = count + 1
                    generated_images_with_ori = np.concatenate([generated_images_with_ori,x_train[it].reshape(1,x_train.shape[1],x_train.shape[2],x_train.shape[3])])
                    noise_vec = x_masked_inst[it][x_masked_inst[it].nonzero()]
                    for inst in range(int(class_cov.shape[1]/2)):
                        ind = np.where(class_cov[cl]==inst)[0][0]
                        noise = np.random.uniform(class_min[cl][ind],class_max[cl][ind])
                        noise_vec[ind] = noise
                    x_masked_inst[it][x_masked_inst[it].nonzero()] = noise_vec
                    new_image = retrained_decoder.predict(x_masked_inst[it].reshape(1,args.num_cls*class_cov.shape[1]))
                    generated_images = np.concatenate([generated_images,new_image])
                    generated_labels = np.concatenate([generated_labels,np.asarray([cl])])
                    generated_images_with_ori = np.concatenate([generated_images_with_ori,new_image])
        self.save_output_image(generated_images,"generated_images")
        self.save_output_image(generated_images_with_ori,"generated_images with originals")
        generated_labels = keras.utils.to_categorical(generated_labels, num_classes=args.num_cls)
        if not os.path.exists(args.save_dir+"/generated_data"):
            os.makedirs(args.save_dir+"/generated_data")
        np.save(args.save_dir+"/generated_data/generated_images",generated_images)
        np.save(args.save_dir+"/generated_data/generated_label",generated_labels)
        return generated_images,generated_labels 
Example 39
Project: pre-trained-keras-example   Author: innolitics   File: train.py    MIT License    4 votes
def get_model(pretrained_model, all_character_names):
    if pretrained_model == 'inception':
        model_base = keras.applications.inception_v3.InceptionV3(include_top=False, input_shape=(*IMG_SIZE, 3), weights='imagenet')
        output = Flatten()(model_base.output)
    elif pretrained_model == 'xception':
        model_base = keras.applications.xception.Xception(include_top=False, input_shape=(*IMG_SIZE, 3), weights='imagenet')
        output = Flatten()(model_base.output)
    elif pretrained_model == 'resnet50':
        model_base = keras.applications.resnet50.ResNet50(include_top=False, input_shape=(*IMG_SIZE, 3), weights='imagenet')
        output = Flatten()(model_base.output)
    elif pretrained_model == 'vgg19':
        model_base = keras.applications.vgg19.VGG19(include_top=False, input_shape=(*IMG_SIZE, 3), weights='imagenet')
        output = Flatten()(model_base.output)
    elif pretrained_model == 'all':
        input = Input(shape=(*IMG_SIZE, 3))
        inception_model = keras.applications.inception_v3.InceptionV3(include_top=False, input_tensor=input, weights='imagenet')
        xception_model = keras.applications.xception.Xception(include_top=False, input_tensor=input, weights='imagenet')
        resnet_model = keras.applications.resnet50.ResNet50(include_top=False, input_tensor=input, weights='imagenet')

        flattened_outputs = [Flatten()(inception_model.output),
                             Flatten()(xception_model.output),
                             Flatten()(resnet_model.output)]
        output = Concatenate()(flattened_outputs)
        model_base = Model(input, output)

    output = BatchNormalization()(output)
    output = Dropout(0.5)(output)
    output = Dense(128, activation='relu')(output)
    output = BatchNormalization()(output)
    output = Dropout(0.5)(output)
    output = Dense(len(all_character_names), activation='softmax')(output)
    model = Model(model_base.input, output)
    for layer in model_base.layers:
        layer.trainable = False
    model.summary(line_length=200)

    # Generate a plot of a model
    import pydot
    pydot.find_graphviz = lambda: True
    from keras.utils import plot_model
    plot_model(model, show_shapes=True, to_file='../model_pdfs/{}.pdf'.format(pretrained_model))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model 
Example 40
Project: MLL-machine-learning-language   Author: riccardobernardi   File: testMLL.py    MIT License    4 votes
def test_simple_net(self):

        simple_net = """
        relu := Activation relu
        soft := Activation 'softmax'

        padding $ same or valid

        criterion $ gini or entropy
        

        rf_clf  : RandomForestClassifier 10 entropy
        knn_clf : KNeighborsClassifier 2
        svc_clf : SVC with C=10000.0
        rg_clf  : RidgeClassifier 0.1
        dt_clf  : DecisionTreeClassifier gini
        lr      : LogisticRegression
        sclf : StackingClassifier with classifiers = [ rf_clf, dt_clf, knn_clf, svc_clf, rg_clf ] meta_classifier = lr

        net : Conv2D 32 (3, 3) with input_shape=(100, 100, 3) + relu + (@Flatten) + (Dense 256) + relu + (Dropout 0.5) + Dense 10 activation='relu'
        """

        self.mll = MLL(simple_net)
        self.mll.start()
        print(self.mll.get_string())
        cprint(self.mll.macros["relu"], "yellow")
        self.mll.execute()
        net = self.mll.last_model()

        # Generate dummy data
        x_train = np.random.random((100, 100, 100, 3))
        y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
        x_test = np.random.random((20, 100, 100, 3))
        y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        net.compile(loss='categorical_crossentropy', optimizer=sgd)

        net.fit(x_train, y_train, batch_size=32, epochs=10)
        score = net.evaluate(x_test, y_test, batch_size=32)

        print(net.summary()) 
Example 41
Project: cleverhans-models   Author: tiendzung-le   File: utils_mnist.py    MIT License    4 votes
def data_mnist(datadir='/tmp/', train_start=0, train_end=60000, test_start=0,
               test_end=10000):
    """
    Load and preprocess MNIST dataset
    :param datadir: path to folder where data should be stored
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :return: tuple of four arrays containing training data, training labels,
             testing data and testing labels.
    """
    assert isinstance(train_start, int)
    assert isinstance(train_end, int)
    assert isinstance(test_start, int)
    assert isinstance(test_end, int)

    if 'tensorflow' in sys.modules:
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets(datadir, one_hot=True, reshape=False)
        X_train = np.vstack((mnist.train.images, mnist.validation.images))
        Y_train = np.vstack((mnist.train.labels, mnist.validation.labels))
        X_test = mnist.test.images
        Y_test = mnist.test.labels
    else:
        warnings.warn("CleverHans support for Theano is deprecated and "
                      "will be dropped on 2017-11-08.")
        import keras
        from keras.datasets import mnist
        from keras.utils import np_utils

        # These values are specific to MNIST
        img_rows = 28
        img_cols = 28
        nb_classes = 10

        # the data, shuffled and split between train and test sets
        (X_train, y_train), (X_test, y_test) = mnist.load_data()

        if keras.backend.image_dim_ordering() == 'th':
            X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
            X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)

        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        X_train /= 255
        X_test /= 255

        # convert class vectors to binary class matrices
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train[train_start:train_end]
    Y_train = Y_train[train_start:train_end]
    X_test = X_test[test_start:test_end]
    Y_test = Y_test[test_start:test_end]

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    return X_train, Y_train, X_test, Y_test 
Example 42
Project: Ground-Plane-Polling   Author: arangesh   File: train.py    MIT License    4 votes
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0, freeze_backbone=False):
    """ Creates three models (model, training_model, prediction_model).

    Args
        backbone_retinanet : A function to call to create a retinanet model with a given backbone.
        num_classes        : The number of classes to train.
        weights            : The weights to load into the model.
        multi_gpu          : The number of GPUs to use for training.
        freeze_backbone    : If True, disables learning for the backbone.

    Returns
        model            : The base model. This is also the model that is saved in snapshots.
        training_model   : The training model. If multi_gpu=0, this is identical to model.
        prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).
    """
    modifier = freeze_model if freeze_backbone else None

    # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
    # optionally wrap in a parallel model
    if multi_gpu > 1:
        from keras.utils import multi_gpu_model
        with tf.device('/cpu:0'):
            model = model_with_weights(backbone_retinanet(num_classes, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = multi_gpu_model(model, gpus=multi_gpu)
    else:
        model          = model_with_weights(backbone_retinanet(num_classes, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = model

    # make prediction model
    prediction_model = retinanet_bbox(model=model)

    # compile model
    training_model.compile(
        loss={
            'regression'    : losses.smooth_l1(),
            'regression_dim': losses.smooth_l1_dim(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
    )

    return model, training_model, prediction_model 
Example 43
Project: OrgaQuant   Author: TKassis   File: train.py    MIT License    4 votes
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0,
                  freeze_backbone=False, lr=1e-5, config=None):
    """ Creates three models (model, training_model, prediction_model).

    Args
        backbone_retinanet : A function to call to create a retinanet model with a given backbone.
        num_classes        : The number of classes to train.
        weights            : The weights to load into the model.
        multi_gpu          : The number of GPUs to use for training.
        freeze_backbone    : If True, disables learning for the backbone.
        config             : Config parameters, None indicates the default configuration.

    Returns
        model            : The base model. This is also the model that is saved in snapshots.
        training_model   : The training model. If multi_gpu=0, this is identical to model.
        prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).
    """

    modifier = freeze_model if freeze_backbone else None

    # load anchor parameters, or pass None (so that defaults will be used)
    anchor_params = None
    num_anchors   = None
    if config and 'anchor_parameters' in config:
        anchor_params = parse_anchor_parameters(config)
        num_anchors   = anchor_params.num_anchors()

    # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
    # optionally wrap in a parallel model
    if multi_gpu > 1:
        from keras.utils import multi_gpu_model
        with tf.device('/cpu:0'):
            model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = multi_gpu_model(model, gpus=multi_gpu)
    else:
        model          = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = model

    # make prediction model
    prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)

    # compile model
    training_model.compile(
        loss={
            'regression'    : losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001)
    )

    return model, training_model, prediction_model 
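Where a config is supplied, parse_anchor_parameters(config) is expected to read an anchor_parameters section; in keras-retinanet this normally comes from an .ini file loaded with configparser. The sketch below builds such a config in code. The section keys and the space-separated value format are assumptions based on that project, and backbone.retinanet is the hypothetical backbone factory from the earlier sketch.

import configparser

config = configparser.ConfigParser()
config['anchor_parameters'] = {
    'sizes'  : '32 64 128 256 512',
    'strides': '8 16 32 64 128',
    'ratios' : '0.5 1 2',
    'scales' : '1 1.2 1.6',
}

model, training_model, prediction_model = create_models(
    backbone_retinanet=backbone.retinanet,   # hypothetical backbone factory
    num_classes=80, weights=None, multi_gpu=0, config=config)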
Example 44
Project: Sarcasm-Detection   Author: MirunaPislar   File: data_prep_for_visualization.py    MIT License 4 votes
def prepare_data(shuffle=False, labels_to_categorical=True):
    path = os.getcwd()[:os.getcwd().rfind("/")]
    to_write_filename = path + "/stats/data_prep_for_lstm_visualization.txt"
    utils.initialize_writer(to_write_filename)

    train_filename = "train.txt"
    test_filename = "test.txt"
    tokens_filename = "clean_original_"     # other types of tokens to experiment with in /res/tokens/
    data_path = path + "/res/tokens/tokens_"

    # Load the data
    train_data = utils.load_file(data_path + tokens_filename + train_filename)
    test_data = utils.load_file(data_path + tokens_filename + test_filename)

    if shuffle:
        train_data = utils.shuffle_words(train_data)
        test_data = utils.shuffle_words(test_data)
        print("DATA IS SHUFFLED")

    # Load the labels
    train_labels = [int(l) for l in utils.load_file(path + "/res/datasets/ghosh/labels_" + train_filename)]
    test_labels = [int(l) for l in utils.load_file(path + "/res/datasets/ghosh/labels_" + test_filename)]

    # Get the max length of the train tweets
    max_tweet_length = utils.get_max_len_info(train_data)

    # Convert all tweets into sequences of word indices
    tokenizer, train_indices, test_indices = utils.encode_text_as_word_indexes(train_data, test_data, lower=True)
    vocab_size = len(tokenizer.word_counts) + 1
    word_to_index = tokenizer.word_index
    print("There are %s unique tokens." % len(word_to_index))

    # Pad sequences with 0s (can do it post or pre - post works better here)
    x_train = pad_sequences(train_indices, maxlen=max_tweet_length, padding="post", truncating="post", value=0.)
    x_test = pad_sequences(test_indices, maxlen=max_tweet_length, padding="post", truncating="post", value=0.)

    # Transform the output into categorical data or just keep it as it is (in a numpy array)
    if labels_to_categorical:
        train_labels = to_categorical(np.asarray(train_labels))
        test_labels = to_categorical(np.asarray(test_labels))
    else:
        train_labels = np.array(train_labels)
        test_labels = np.array(test_labels)
    return x_train, train_labels, x_test, test_labels, vocab_size, tokenizer, max_tweet_length
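A minimal sketch of calling this loader; the flag value is an assumption, since the real script passes its own SHUFFLE constant:

x_train, y_train, x_test, y_test, vocab_size, tokenizer, max_len = prepare_data(shuffle=False)
print(x_train.shape)            # (num_train_tweets, max_len) -- padded word-index sequences
print(y_train.shape)            # (num_train_tweets, num_classes) when labels_to_categorical=True
print("Vocabulary size:", vocab_size)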


# Visualize the activations for one tweet 
Example 45
Project: Sarcasm-Detection   Author: MirunaPislar   File: data_prep_for_visualization.py    MIT License 4 votes
def train_lstm_for_visualization():
    checkpoints = glob(MODEL_PATH + "*.h5")
    if len(checkpoints) > 0:
        checkpoints = natsorted(checkpoints)
        assert len(checkpoints) != 0, "No checkpoints for visualization found."
        checkpoint_file = checkpoints[-1]
        print("Loading [{}]".format(checkpoint_file))
        model = load_model(checkpoint_file)
        model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy", utils.f1_score])
        print(model.summary())

        # Load the data
        x_train, y_train, x_test, y_test, vocab_size, tokenizer, max_tweet_length = prepare_data(SHUFFLE)

        # Get the word to index and the index to word mappings
        word_index = tokenizer.word_index
        index_to_word = {index: word for word, index in word_index.items()}

        # Evaluate the previously trained model on test data
        test_loss, test_acc, test_fscore = model.evaluate(x_test, y_test, verbose=1, batch_size=256)
        print("Loss: %.3f\nF-score: %.3f\n" % (test_loss, test_fscore))
        return model, index_to_word, x_test
    else:
        # Load the data
        x_train, y_train, x_test, y_test, vocab_size, tokenizer, max_tweet_length = prepare_data(SHUFFLE)

        # Get the word to index and the index to word mappings
        word_index = tokenizer.word_index
        index_to_word = {index: word for word, index in word_index.items()}

        # Build, evaluate and save the model
        model = Sequential()
        model.add(Embedding(input_dim=vocab_size, output_dim=EMBEDDING_DIM, input_length=max_tweet_length,
                            embeddings_initializer="glorot_normal", name="embedding_layer"))
        model.add(LSTM(units=HIDDEN_UNITS, name="recurrent_layer", activation="tanh", return_sequences=True))
        model.add(Flatten())
        model.add(Dense(DENSE_UNITS, activation="relu", name="dense_layer"))
        model.add(Dense(NO_OF_CLASSES, activation="softmax"))
        model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(),
                      metrics=["accuracy", utils.f1_score])
        model.summary()
        checkpoint = ModelCheckpoint(monitor="val_acc", filepath=MODEL_PATH + "model_{epoch:02d}_{val_acc:.3f}.h5",
                                     save_best_only=True, mode="max")
        model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS,
                  validation_data=(x_test, y_test), callbacks=[checkpoint])
        test_loss, test_acc, test_fscore = model.evaluate(x_test, y_test)
        print("Loss: %.3f\nF-score: %.3f\n" % (test_loss, test_fscore))
        return model, index_to_word, x_test 
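Both branches above rely on the labels already being one-hot encoded by prepare_data. As a standalone reminder, independent of the project code, keras.utils.to_categorical turns the binary sarcasm labels into two-column one-hot rows:

import numpy as np
from keras.utils import to_categorical

labels = np.asarray([0, 1, 1, 0])
print(to_categorical(labels))
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]
#  [1. 0.]]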
Example 46
Project: cleverhans-attacking-bnns   Author: AngusG   File: utils_mnist.py    MIT License 4 votes
def data_mnist(datadir='/tmp/', train_start=0, train_end=60000, test_start=0,
               test_end=10000):
    """
    Load and preprocess MNIST dataset
    :param datadir: path to folder where data should be stored
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :return: tuple of four arrays containing training data, training labels,
             testing data and testing labels.
    """
    assert isinstance(train_start, int)
    assert isinstance(train_end, int)
    assert isinstance(test_start, int)
    assert isinstance(test_end, int)

    if 'tensorflow' in sys.modules:
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets(datadir, one_hot=True, reshape=False)
        X_train = np.vstack((mnist.train.images, mnist.validation.images))
        Y_train = np.vstack((mnist.train.labels, mnist.validation.labels))
        X_test = mnist.test.images
        Y_test = mnist.test.labels
    else:
        warnings.warn("CleverHans support for Theano is deprecated and "
                      "will be dropped on 2017-11-08.")
        import keras
        from keras.datasets import mnist
        from keras.utils import np_utils

        # These values are specific to MNIST
        img_rows = 28
        img_cols = 28
        nb_classes = 10

        # the data, shuffled and split between train and test sets
        (X_train, y_train), (X_test, y_test) = mnist.load_data()

        if keras.backend.image_dim_ordering() == 'th':
            X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
            X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)

        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        X_train /= 255
        X_test /= 255

        # convert class vectors to binary class matrices
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train[train_start:train_end]
    Y_train = Y_train[train_start:train_end]
    X_test = X_test[test_start:test_end]
    Y_test = Y_test[test_start:test_end]

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    return X_train, Y_train, X_test, Y_test 
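A minimal usage sketch, assuming a TensorFlow (or Keras) installation so that one of the two branches above can run:

X_train, Y_train, X_test, Y_test = data_mnist(datadir='/tmp/', train_start=0, train_end=60000,
                                               test_start=0, test_end=10000)
print(X_train.shape, Y_train.shape)   # e.g. (60000, 28, 28, 1) and (60000, 10); labels are one-hot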
Example 47
Project: keras-retinanet   Author: fizyr   File: train.py    Apache License 2.0 4 votes
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0,
                  freeze_backbone=False, lr=1e-5, config=None):
    """ Creates three models (model, training_model, prediction_model).

    Args
        backbone_retinanet : A function to call to create a retinanet model with a given backbone.
        num_classes        : The number of classes to train.
        weights            : The weights to load into the model.
        multi_gpu          : The number of GPUs to use for training.
        freeze_backbone    : If True, disables learning for the backbone.
        config             : Config parameters, None indicates the default configuration.

    Returns
        model            : The base model. This is also the model that is saved in snapshots.
        training_model   : The training model. If multi_gpu=0, this is identical to model.
        prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).
    """

    modifier = freeze_model if freeze_backbone else None

    # load anchor parameters, or pass None (so that defaults will be used)
    anchor_params = None
    num_anchors   = None
    if config and 'anchor_parameters' in config:
        anchor_params = parse_anchor_parameters(config)
        num_anchors   = anchor_params.num_anchors()

    # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
    # optionally wrap in a parallel model
    if multi_gpu > 1:
        from keras.utils import multi_gpu_model
        with tf.device('/cpu:0'):
            model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = multi_gpu_model(model, gpus=multi_gpu)
    else:
        model          = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)
        training_model = model

    # make prediction model
    prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)

    # compile model
    training_model.compile(
        loss={
            'regression'    : losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001)
    )

    return model, training_model, prediction_model 
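When freeze_backbone is set, the modifier passed into the backbone factory is expected to make the backbone's layers non-trainable. A hedged sketch of what a freeze_model modifier of that kind usually looks like; the actual implementation used by this project may differ slightly:

def freeze_model(model):
    """ Set every layer of the given model to non-trainable and return it. """
    for layer in model.layers:
        layer.trainable = False
    return model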
Example 48
Project: funcom   Author: mcmillco   File: myutils.py    GNU General Public License v3.0 4 votes
def divideseqs(self, batchfids, seqdata, comvocabsize, tt):
        import keras.utils
        
        datseqs = list()
        comseqs = list()
        comouts = list()

        for fid in batchfids:
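            # note: input_datseq / input_comseq built here are never used; the second loop below rebuilds the sequences it needs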
            input_datseq = seqdata['dt%s' % (tt)][fid]
            input_comseq = seqdata['c%s' % (tt)][fid]

        limit = -1
        c = 0
        for fid in batchfids:
            wdatseq = seqdata['dt%s' % (tt)][fid]
            wcomseq = seqdata['c%s' % (tt)][fid]
            
            wdatseq = wdatseq[:self.config['tdatlen']]
            
            for i in range(len(wcomseq)):
                datseqs.append(wdatseq)
                comseq = wcomseq[:i]
                comout = keras.utils.to_categorical(wcomseq[i], num_classes=comvocabsize)
                #comout = np.asarray([wcomseq[i]])
                
                for j in range(0, len(wcomseq)):
                    try:
                        comseq[j]
                    except IndexError as ex:
                        comseq = np.append(comseq, 0)

                comseqs.append(np.asarray(comseq))
                comouts.append(np.asarray(comout))

            c += 1
            if(c == limit):
                break

        datseqs = np.asarray(datseqs)
        comseqs = np.asarray(comseqs)
        comouts = np.asarray(comouts)

        return [[datseqs, comseqs], comouts] 
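The inner loop turns each comment into one training pair per target word (a teacher-forcing style expansion). A small standalone sketch of that expansion on a toy sequence, independent of the seqdata structures above:

import numpy as np
import keras.utils

wcomseq = [4, 7, 2]            # toy comment: three word indices
comvocabsize = 10              # toy vocabulary size

for i in range(len(wcomseq)):
    comseq = np.append(wcomseq[:i], [0] * (len(wcomseq) - i)).astype(int)  # zero-pad the seen prefix
    comout = keras.utils.to_categorical(wcomseq[i], num_classes=comvocabsize)
    print(comseq, '->', np.argmax(comout))
# [0 0 0] -> 4
# [4 0 0] -> 7
# [4 7 0] -> 2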
Example 49
Project: funcom   Author: mcmillco   File: myutils.py    GNU General Public License v3.0 4 votes
def divideseqs_ast(self, batchfids, seqdata, comvocabsize, tt):
        import keras.utils
        
        datseqs = list()
        comseqs = list()
        smlseqs = list()
        comouts = list()

        limit = -1
        c = 0
        for fid in batchfids:

            wdatseq = seqdata['dt%s' % (tt)][fid]
            wcomseq = seqdata['c%s' % (tt)][fid]
            wsmlseq = seqdata['s%s' % (tt)][fid]

            wdatseq = wdatseq[:self.config['tdatlen']]

            for i in range(0, len(wcomseq)):
                datseqs.append(wdatseq)
                smlseqs.append(wsmlseq)
                # slice up whole comseq into seen sequence and current sequence
                # [a b c d] => [] [a], [a] [b], [a b] [c], [a b c] [d], ...
                comseq = wcomseq[0:i]
                comout = wcomseq[i]
                comout = keras.utils.to_categorical(comout, num_classes=comvocabsize)

                # extend length of comseq to expected sequence size
                # the model will be expecting all input vectors to have the same size
                for j in range(0, len(wcomseq)):
                    try:
                        comseq[j]
                    except IndexError as ex:
                        comseq = np.append(comseq, 0)

                comseqs.append(comseq)
                comouts.append(np.asarray(comout))

            c += 1
            if(c == limit):
                break

        datseqs = np.asarray(datseqs)
        smlseqs = np.asarray(smlseqs)
        comseqs = np.asarray(comseqs)
        comouts = np.asarray(comouts)

        return [[datseqs, comseqs, smlseqs], comouts]
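The try/except loop above simply right-pads the seen prefix with zeros up to the full comment length. An equivalent padding step using numpy directly, shown as a standalone sketch rather than as the project's own code:

import numpy as np

wcomseq = [4, 7, 2, 9]
i = 2
comseq = np.asarray(wcomseq[0:i])
comseq = np.pad(comseq, (0, len(wcomseq) - len(comseq)), mode='constant')  # pads with zeros
print(comseq)   # [4 7 0 0]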