Python keras.utils.np_utils.to_categorical() Examples

The following are 30 code examples of keras.utils.np_utils.to_categorical(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.utils.np_utils, or try the search function.
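
As a quick, minimal sketch of what the function does (the labels and num_classes below are invented for illustration): to_categorical() turns a vector of integer class labels into a binary class matrix with one column per class, the target format expected by categorical_crossentropy.

import numpy as np
from keras.utils.np_utils import to_categorical

# Invented integer labels for a 3-class problem.
labels = np.array([0, 2, 1, 2])

# Passing num_classes explicitly avoids inferring it as max(labels) + 1,
# which can differ between data splits.
one_hot = to_categorical(labels, num_classes=3)
print(one_hot)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]
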
Example #1
Source Project: DeepFashion   Author: abhishekrana   File: cnn.py    License: Apache License 2.0    7 votes
def load_and_preprocess_data_3():
    # The data, shuffled and split between train and test sets:
    (X_train, y_train), (x_test, y_test) = cifar10.load_data()
    logging.debug('X_train shape: {}'.format(X_train.shape))
    logging.debug('train samples: {}'.format(X_train.shape[0]))
    logging.debug('test samples: {}'.format(x_test.shape[0]))

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    X_train = X_train.astype('float32')
    x_test = x_test.astype('float32')
    X_train /= 255
    x_test /= 255

    input_shape = X_train[0].shape
    logging.debug('input_shape {}'.format(input_shape))
    input_shape = X_train.shape[1:]
    logging.debug('input_shape {}'.format(input_shape))

    return X_train, x_test, y_train, y_test, input_shape 
Example #2
Source Project: CAPTCHA-breaking   Author: lllcho   File: test_tasks.py    License: MIT License    7 votes
def test_temporal_clf(self):
        print('temporal classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5,10), 
                                                             classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(GRU(X_train.shape[-1], y_train.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adadelta')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        self.assertTrue(history.history['val_acc'][-1] > 0.9) 
Example #3
Source Project: super-simple-distributed-keras   Author: harvitronix   File: datasets.py    License: MIT License    7 votes
def get_mnist():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #4
Source Project: keras2pmml   Author: vaclavcadek   File: sequential.py    License: MIT License    6 votes
def setUp(self):
        iris = load_iris()

        theano.config.floatX = 'float32'
        X = iris.data.astype(theano.config.floatX)
        y = iris.target.astype(np.int32)
        y_ohe = np_utils.to_categorical(y)

        model = Sequential()
        model.add(Dense(input_dim=X.shape[1], output_dim=5, activation='tanh'))
        model.add(Dense(input_dim=5, output_dim=y_ohe.shape[1], activation='sigmoid'))
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        model.fit(X, y_ohe, nb_epoch=10, batch_size=1, verbose=3, validation_data=None)

        params = {'copyright': 'Václav Čadek', 'model_name': 'Iris Model'}
        self.model = model
        self.pmml = keras2pmml(self.model, **params)
        self.num_inputs = self.model.input_shape[1]
        self.num_outputs = self.model.output_shape[1]
        self.num_connection_layers = len(self.model.layers)
        self.features = ['x{}'.format(i) for i in range(self.num_inputs)]
        self.class_values = ['y{}'.format(i) for i in range(self.num_outputs)] 
Example #5
Source Project: Jtyoui   Author: jtyoui   File: HandWritingRecognition.py    License: MIT License    6 votes
def nn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize
    x_train = x_train.reshape(x_train.shape[0], -1) / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)
    # constant(value=1.) defines a custom constant initializer; constant(value=1.) is equivalent to one()
    # Build the model: 784 input neurons, 10 output neurons
    model = Sequential([
        Dense(units=200, input_dim=784, bias_initializer=constant(value=1.), activation=tanh),
        Dense(units=100, bias_initializer=one(), activation=tanh),
        Dense(units=10, bias_initializer=one(), activation=softmax),
    ])

    opt = SGD(lr=0.2, clipnorm=1.)  # optimizer
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['acc', 'mae'])  # compile
    model.fit(x_train, y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example #6
Source Project: CAPTCHA-breaking   Author: lllcho   File: test_tasks.py    License: MIT License    6 votes
def test_vector_clf(self):
        nb_hidden = 10

        print('vector classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(10,),
                                                             classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(Dense(X_train.shape[-1], nb_hidden))
        model.add(Activation('relu'))
        model.add(Dense(nb_hidden, y_train.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        print(history.history)
        self.assertTrue(history.history['val_acc'][-1] > 0.9) 
Example #7
Source Project: CAPTCHA-breaking   Author: lllcho   File: test_tasks.py    License: MIT License    6 votes
def test_img_clf(self):
        print('image classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(3, 32, 32),
                                                             classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(Convolution2D(32, 3, 32, 32))
        model.add(Activation('sigmoid'))
        model.add(Flatten())
        model.add(Dense(32, y_test.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        self.assertTrue(history.history['val_acc'][-1] > 0.9) 
Example #8
Source Project: super-simple-distributed-keras   Author: harvitronix   File: datasets.py    License: MIT License    6 votes
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #9
Source Project: VQA-Keras-Visual-Question-Answering   Author: anantzoid   File: prepare_data.py    License: MIT License    6 votes
def read_data(data_limit):
    print "Reading Data..."
    img_data = h5py.File(data_img)
    ques_data = h5py.File(data_prepo)
  
    img_data = np.array(img_data['images_train'])
    img_pos_train = ques_data['img_pos_train'][:data_limit]
    train_img_data = np.array([img_data[_-1,:] for _ in img_pos_train])
    # Normalizing images
    tem = np.sqrt(np.sum(np.multiply(train_img_data, train_img_data), axis=1))
    train_img_data = np.divide(train_img_data, np.transpose(np.tile(tem,(4096,1))))

    #shifting padding to left side
    ques_train = np.array(ques_data['ques_train'])[:data_limit, :]
    ques_length_train = np.array(ques_data['ques_length_train'])[:data_limit]
    ques_train = right_align(ques_train, ques_length_train)

    train_X = [train_img_data, ques_train]
    # NOTE: should've constructed the one-hots using an exhaustive list of answers, because some answers may not be in the dataset.
    # To temporarily rectify this, all such answer indices are set to 1 in the validation set.
    train_y = to_categorical(ques_data['answers'])[:data_limit, :]

    return train_X, train_y 
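
As the NOTE in the example above points out, building the one-hot targets from only the answers present in the current split can drop classes. A minimal sketch of the safer pattern (the answer vocabulary and labels below are invented): derive num_classes from the exhaustive answer list so every split yields matrices of the same width.

import numpy as np
from keras.utils.np_utils import to_categorical

# Hypothetical exhaustive answer vocabulary and training answers.
answer_vocab = ['yes', 'no', 'blue', 'two', 'cat']
train_answers = np.array([0, 2, 2, 4])  # classes 1 and 3 never appear in this split

# Fixing num_classes to the vocabulary size keeps the one-hot width stable
# across train/validation splits, even for answers missing from a split.
train_y = to_categorical(train_answers, num_classes=len(answer_vocab))
print(train_y.shape)  # (4, 5)
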
Example #10
def loadData(x_load_path, y_load_path):
    # load train data
    x_data_mat = sio.loadmat(x_load_path)
    x_data_complex = x_data_mat['train_data']
    x_data_real = x_data_complex.real
    x_data_imag = x_data_complex.imag
    x_data_real = x_data_real.reshape((x_data_real.shape[0], seqLen))
    x_data_imag = x_data_imag.reshape((x_data_imag.shape[0], seqLen))
    x_train = np.stack((x_data_real, x_data_imag), axis=2)
    y_data_mat = sio.loadmat(y_load_path)
    y_data = y_data_mat['train_label']
    y_train = np_utils.to_categorical(y_data, nClass)
    # train data shuffle
    index = np.arange(y_train.shape[0])
    np.random.shuffle(index)
    x_train = x_train[index,:]
    y_train = y_train[index]
    return [x_train, y_train]


# fix random seed 
Example #11
def loadData(x_load_path, y_load_path):
    # load train data
    x_data_mat = sio.loadmat(x_load_path)
    x_data_complex = x_data_mat['train_data']
    x_data_real = x_data_complex.real
    x_data_imag = x_data_complex.imag
    x_data_real = x_data_real.reshape((x_data_real.shape[0], seqLen))
    x_data_imag = x_data_imag.reshape((x_data_imag.shape[0], seqLen))
    x_train = np.stack((x_data_real, x_data_imag), axis=2)
    y_data_mat = sio.loadmat(y_load_path)
    y_data = y_data_mat['train_label']
    y_train = np_utils.to_categorical(y_data, nClass)
    # train data shuffle
    index = np.arange(y_train.shape[0])
    np.random.shuffle(index)
    x_train = x_train[index,:]
    y_train = y_train[index]
    return [x_train, y_train]


# fix random seed 
Example #12
def loadData(x_load_path, y_load_path):
    # load train data
    x_data_mat = sio.loadmat(x_load_path)
    x_data_complex = x_data_mat['train_data']
    x_data_real = x_data_complex.real
    x_data_imag = x_data_complex.imag
    x_data_real = x_data_real.reshape((x_data_real.shape[0], seqLen))
    x_data_imag = x_data_imag.reshape((x_data_imag.shape[0], seqLen))
    x_train = np.stack((x_data_real, x_data_imag), axis=2)
    y_data_mat = sio.loadmat(y_load_path)
    y_data = y_data_mat['train_label']
    y_train = np_utils.to_categorical(y_data, nClass)
    # train data shuffle
    index = np.arange(y_train.shape[0])
    np.random.shuffle(index)
    x_train = x_train[index,:]
    y_train = y_train[index]
    return [x_train, y_train]


# fix random seed 
Example #13
Source Project: neural-network-genetic-algorithm   Author: harvitronix   File: train.py    License: MIT License    6 votes
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #14
Source Project: neural-network-genetic-algorithm   Author: harvitronix   File: train.py    License: MIT License    6 votes
def get_mnist():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #15
Source Project: imgclsmob   Author: osmr   File: utils.py    License: MIT License    6 votes
def get_data_generator(data_iterator,
                       num_classes):
    def get_arrays(db):
        data = db.data[0].asnumpy()
        if K.image_data_format() == "channels_last":
            data = data.transpose((0, 2, 3, 1))
        labels = to_categorical(
            y=db.label[0].asnumpy(),
            num_classes=num_classes)
        return data, labels

    while True:
        try:
            db = data_iterator.next()

        except StopIteration:
            # logging.warning("get_data exception due to end of data - resetting iterator")
            data_iterator.reset()
            db = data_iterator.next()

        finally:
            yield get_arrays(db) 
Example #16
Source Project: speech-emotion-recognition   Author: harry-7   File: cnn_example.py    License: MIT License    6 votes
def cnn_example():
    to_flatten = False
    x_train, x_test, y_train, y_test, num_labels = extract_data(
        flatten=to_flatten)
    y_train = np_utils.to_categorical(y_train)
    y_test_train = np_utils.to_categorical(y_test)
    in_shape = x_train[0].shape
    x_train = x_train.reshape(x_train.shape[0], in_shape[0], in_shape[1], 1)
    x_test = x_test.reshape(x_test.shape[0], in_shape[0], in_shape[1], 1)
    model = CNN(input_shape=x_train[0].shape,
                num_classes=num_labels)
    model.train(x_train, y_train, x_test, y_test_train)
    model.evaluate(x_test, y_test)
    filename = '../dataset/Sad/09b03Ta.wav'
    print('prediction', model.predict_one(
        get_feature_vector_from_mfcc(filename, flatten=to_flatten)),
          'Actual 3')
    print('CNN Done') 
Example #17
Source Project: docker-python   Author: Kaggle   File: test_keras.py    License: Apache License 2.0    6 votes
def test_train(self):
        train = pd.read_csv("/input/tests/data/train.csv")

        x_train = train.iloc[:,1:].values.astype('float32')
        y_train = to_categorical(train.iloc[:,0].astype('int32'))

        model = Sequential()
        model.add(Dense(units=10, input_dim=784, activation='softmax'))

        model.compile(
            loss='categorical_crossentropy',
            optimizer=RMSprop(lr=0.001),
            metrics=['accuracy'])

        model.fit(x_train, y_train, epochs=1, batch_size=32)

    # Uses convnet which depends on libcudnn when running on GPU 
Example #18
Source Project: docker-python   Author: Kaggle   File: test_keras.py    License: Apache License 2.0    6 votes
def test_lstm(self):
        x_train = np.random.random((100, 100, 100))
        y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
        x_test = np.random.random((20, 100, 100))
        y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

        model = Sequential()
        model.add(LSTM(32, return_sequences=True, input_shape=(100, 100)))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))


        model.compile(loss='categorical_crossentropy', optimizer=sgd)
        model.fit(x_train, y_train, batch_size=32, epochs=1)
        model.evaluate(x_test, y_test, batch_size=32) 
Example #19
Source Project: recipe-summarization   Author: rtlee9   File: generate.py    License: MIT License    6 votes
def conv_seq_labels(xds, xhs, nflips, model, debug, oov0, glove_idx2idx, vocab_size, nb_unknown_words, idx2word):
    """Convert description and hedlines to padded input vectors; headlines are one-hot to label."""
    batch_size = len(xhs)
    assert len(xds) == batch_size
    x = [
        vocab_fold(lpadd(xd) + xh, oov0, glove_idx2idx, vocab_size, nb_unknown_words)
        for xd, xh in zip(xds, xhs)]  # the input does not have 2nd eos
    x = sequence.pad_sequences(x, maxlen=maxlen, value=empty, padding='post', truncating='post')
    x = flip_headline(x, nflips=nflips, model=model, debug=debug, oov0=oov0, idx2word=idx2word)

    y = np.zeros((batch_size, maxlenh, vocab_size))
    for i, xh in enumerate(xhs):
        xh = vocab_fold(xh, oov0, glove_idx2idx, vocab_size, nb_unknown_words) + [eos] + [empty] * maxlenh  # output does have an eos at the end
        xh = xh[:maxlenh]
        y[i, :, :] = np_utils.to_categorical(xh, vocab_size)

    return x, y 
Example #20
Source Project: elephas   Author: maxpumperla   File: hyperparam_optimization.py    License: MIT License    6 votes
def data():
    """Data providing function:

    Make sure to have every relevant import statement included here and return data as
    used in model function below. This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    """
    from keras.datasets import mnist
    from keras.utils import np_utils
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    nb_classes = 10
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    return x_train, y_train, x_test, y_test 
Example #21
Source Project: recognition_gender   Author: StevenKe8080   File: image_train.py    License: MIT License    6 votes
def extract_data(self,train_path):
        imgs, labels, counter = get_file(train_path)
        print(labels)
        # To avoid overfitting, use a cross-validation split: the validation set is 30% of the training data, with a fixed random seed (random_state)
        X_train, X_test, y_train, y_test = train_test_split(imgs, labels, test_size=0.3,
                                                            random_state=random.randint(0, 100))

        # Data preprocessing: the Keras backend is TensorFlow; grayscale images use 1 channel
        X_train = X_train.reshape(X_train.shape[0], 1, self.img_size, self.img_size) / 255.
        X_test = X_test.reshape(X_test.shape[0], 1, self.img_size, self.img_size) / 255.

        # Convert labels to one-hot vectors
        Y_train = np_utils.to_categorical(y_train, num_classes=counter)
        Y_test = np_utils.to_categorical(y_test, num_classes=counter)

        self.X_train = X_train
        self.X_test = X_test
        self.Y_train = Y_train
        self.Y_test = Y_test
        self.nb_classes = counter


# Build the model using a CNN (convolutional neural network)
Example #22
Source Project: Sign-Language-Interpreter-using-Deep-Learning   Author: harshbg   File: cnn_model_train.py    License: MIT License    6 votes
def train():
	with open("train_images", "rb") as f:
		train_images = np.array(pickle.load(f))
	with open("train_labels", "rb") as f:
		train_labels = np.array(pickle.load(f), dtype=np.int32)

	with open("val_images", "rb") as f:
		val_images = np.array(pickle.load(f))
	with open("val_labels", "rb") as f:
		val_labels = np.array(pickle.load(f), dtype=np.int32)

	train_images = np.reshape(train_images, (train_images.shape[0], image_x, image_y, 1))
	val_images = np.reshape(val_images, (val_images.shape[0], image_x, image_y, 1))
	train_labels = np_utils.to_categorical(train_labels)
	val_labels = np_utils.to_categorical(val_labels)

	print(val_labels.shape)

	model, callbacks_list = cnn_model()
	model.summary()
	model.fit(train_images, train_labels, validation_data=(val_images, val_labels), epochs=15, batch_size=500, callbacks=callbacks_list)
	scores = model.evaluate(val_images, val_labels, verbose=0)
	print("CNN Error: %.2f%%" % (100-scores[1]*100))
	#model.save('cnn_model_keras2.h5') 
Example #23
Source Project: Jtyoui   Author: jtyoui   File: HandWritingRecognition.py    License: MIT License    5 votes
def cnn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize
    x_train = x_train.reshape(-1, 28, 28, 1) / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)

    model = Sequential([
        # input_shape: the input shape, set only on the first layer
        # filters: number of convolution kernels (filters)
        # kernel_size: size of the convolution kernel
        # strides: stride
        # padding: one of two modes, 'same' or 'valid'
        # activation: activation function
        Convolution2D(input_shape=(28, 28, 1), filters=32, kernel_size=5, strides=1, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Convolution2D(filters=64, kernel_size=5, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Flatten(),  # flatten
        Dense(units=1024, activation=relu),
        Dropout(0.5),
        Dense(units=10, activation=softmax),
    ])
    opt = Adam(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example #24
Source Project: Jtyoui   Author: jtyoui   File: HandWritingRecognition.py    License: MIT License    5 votes
def rnn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize
    x_train = x_train / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)

    model = Sequential([
        SimpleRNN(units=50, input_shape=(28, 28)),
        Dense(units=10, activation=softmax),
    ])
    opt = RMSprop(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example #25
Source Project: recaptcha-cracker   Author: nocturnaltortoise   File: preprocessors.py    License: GNU General Public License v3.0    5 votes
def convert_to_one_hot(labels):
        return np_utils.to_categorical(labels) 
Example #26
Source Project: blackbox-attacks   Author: sunblaze-ucb   File: mnist.py    License: MIT License    5 votes
def data_mnist(one_hot=True):
    """
    Preprocess MNIST dataset
    """
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    y_train = y_train


    X_train = X_train.reshape(X_train.shape[0],
                              FLAGS.IMAGE_ROWS,
                              FLAGS.IMAGE_COLS,
                              FLAGS.NUM_CHANNELS)

    X_test = X_test.reshape(X_test.shape[0],
                            FLAGS.IMAGE_ROWS,
                            FLAGS.IMAGE_COLS,
                            FLAGS.NUM_CHANNELS)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    print "Loaded MNIST test data."

    if one_hot:
        # convert class vectors to binary class matrices
        y_train = np_utils.to_categorical(y_train, FLAGS.NUM_CLASSES).astype(np.float32)
        y_test = np_utils.to_categorical(y_test, FLAGS.NUM_CLASSES).astype(np.float32)

    return X_train, y_train, X_test, y_test 
Example #27
Source Project: kaggle_Otto   Author: puyokw   File: kerasNN2_2nd.py    License: MIT License    5 votes
def preprocess_labels(labels, encoder=None, categorical=True):
    if not encoder:
        encoder = LabelEncoder()
        encoder.fit(labels)
    y = encoder.transform(labels).astype(np.int32)
    if categorical:
        y = np_utils.to_categorical(y)
    return y, encoder 
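
A brief usage sketch of the helper above (the label values are invented): fit the encoder on the training labels once, then pass it back in for later splits so the class-to-column mapping stays consistent.

# Hypothetical labels for illustration only.
train_labels = ['Class_1', 'Class_3', 'Class_1', 'Class_2']
valid_labels = ['Class_2', 'Class_3']

y_train, encoder = preprocess_labels(train_labels)             # fits a new LabelEncoder
y_valid, _ = preprocess_labels(valid_labels, encoder=encoder)  # reuses the fitted encoder
print(y_train.shape, y_valid.shape)  # (4, 3) (2, 3)
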
Example #28
Source Project: kaggle_Otto   Author: puyokw   File: kerasNN4_tfidf.py    License: MIT License    5 votes
def preprocess_labels(labels, encoder=None, categorical=True):
    if not encoder:
        encoder = LabelEncoder()
        encoder.fit(labels)
    y = encoder.transform(labels).astype(np.int32)
    if categorical:
        y = np_utils.to_categorical(y)
    return y, encoder 
Example #29
Source Project: kaggle_Otto   Author: puyokw   File: kerasNN3.py    License: MIT License    5 votes
def preprocess_labels(labels, encoder=None, categorical=True):
    if not encoder:
        encoder = LabelEncoder()
        encoder.fit(labels)
    y = encoder.transform(labels).astype(np.int32)
    if categorical:
        y = np_utils.to_categorical(y)
    return y, encoder 
Example #30
Source Project: kaggle_Otto   Author: puyokw   File: kerasNN2_tfidf.py    License: MIT License    5 votes
def preprocess_labels(labels, encoder=None, categorical=True):
    if not encoder:
        encoder = LabelEncoder()
        encoder.fit(labels)
    y = encoder.transform(labels).astype(np.int32)
    if categorical:
        y = np_utils.to_categorical(y)
    return y, encoder