Python keras.utils.np_utils.to_categorical() Examples

The following are 29 code examples of keras.utils.np_utils.to_categorical(), collected from open-source projects. You can go to the original project or source file by following the links above each example, check out all available functions/classes of the module keras.utils.np_utils, or try the search function.
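Before the examples, a minimal illustration of what the function does: to_categorical converts a vector of integer class labels into a binary (one-hot) matrix. When num_classes is omitted, it is inferred as the largest label plus one.

import numpy as np
from keras.utils.np_utils import to_categorical

labels = np.array([0, 2, 1, 2])
one_hot = to_categorical(labels, num_classes=3)
# one_hot is a float32 array of shape (4, 3):
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]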
Example #1
Source File: test_tasks.py    From CAPTCHA-breaking with MIT License
def test_temporal_clf(self):
        print('temporal classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5,10), 
                                                             classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(GRU(X_train.shape[-1], y_train.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adadelta')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        self.assertTrue(history.history['val_acc'][-1] > 0.9) 
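Note: this and the other test_tasks.py snippets below (Examples #7 and #9) target the pre-1.0 Keras API, in which layers took (input_dim, output_dim) and fit() accepted nb_epoch and show_accuracy. A rough Keras 2 port of the model above might look like the following sketch (same variable names as the example; an untested assumption, not the original test code):

model = Sequential()
model.add(GRU(y_train.shape[-1], input_shape=X_train.shape[1:]))  # units = number of classes
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=12, batch_size=16,
                    validation_data=(X_test, y_test), verbose=2)
# validation accuracy then lives in history.history['val_acc'] ('val_accuracy' in recent versions)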
Example #2
Source File: cnn.py    From DeepFashion with Apache License 2.0
def load_and_preprocess_data_3():
    # The data, shuffled and split between train and test sets:
    (X_train, y_train), (x_test, y_test) = cifar10.load_data()
    logging.debug('X_train shape: {}'.format(X_train.shape))
    logging.debug('train samples: {}'.format(X_train.shape[0]))
    logging.debug('test samples: {}'.format(x_test.shape[0]))

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    X_train = X_train.astype('float32')
    x_test = x_test.astype('float32')
    X_train /= 255
    x_test /= 255

    input_shape = X_train[0].shape
    logging.debug('input_shape {}'.format(input_shape))
    input_shape = X_train.shape[1:]
    logging.debug('input_shape {}'.format(input_shape))

    return X_train, x_test, y_train, y_test, input_shape 
Example #3
Source File: datasets.py    From super-simple-distributed-keras with MIT License
def get_mnist():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #4
Source File: cnn_example.py    From speech-emotion-recognition with MIT License
def cnn_example():
    to_flatten = False
    x_train, x_test, y_train, y_test, num_labels = extract_data(
        flatten=to_flatten)
    y_train = np_utils.to_categorical(y_train)
    y_test_train = np_utils.to_categorical(y_test)  # one-hot test labels; passed to train() below as validation targets
    in_shape = x_train[0].shape
    x_train = x_train.reshape(x_train.shape[0], in_shape[0], in_shape[1], 1)
    x_test = x_test.reshape(x_test.shape[0], in_shape[0], in_shape[1], 1)
    model = CNN(input_shape=x_train[0].shape,
                num_classes=num_labels)
    model.train(x_train, y_train, x_test, y_test_train)
    model.evaluate(x_test, y_test)
    filename = '../dataset/Sad/09b03Ta.wav'
    print('prediction', model.predict_one(
        get_feature_vector_from_mfcc(filename, flatten=to_flatten)),
          'Actual 3')
    print('CNN Done') 
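One caveat worth flagging in calls like the two above: when num_classes is omitted, each to_categorical call infers it independently as the largest label plus one, so if the test split happens to lack the highest class, y_train and y_test_train end up with different widths. A defensive sketch (num_labels is already returned by extract_data above):

y_train = np_utils.to_categorical(y_train, num_classes=num_labels)
y_test_train = np_utils.to_categorical(y_test, num_classes=num_labels)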
Example #5
Source File: cnn_model_train.py    From Sign-Language-Interpreter-using-Deep-Learning with MIT License
def train():
	with open("train_images", "rb") as f:
		train_images = np.array(pickle.load(f))
	with open("train_labels", "rb") as f:
		train_labels = np.array(pickle.load(f), dtype=np.int32)

	with open("val_images", "rb") as f:
		val_images = np.array(pickle.load(f))
	with open("val_labels", "rb") as f:
		val_labels = np.array(pickle.load(f), dtype=np.int32)

	train_images = np.reshape(train_images, (train_images.shape[0], image_x, image_y, 1))
	val_images = np.reshape(val_images, (val_images.shape[0], image_x, image_y, 1))
	train_labels = np_utils.to_categorical(train_labels)
	val_labels = np_utils.to_categorical(val_labels)

	print(val_labels.shape)

	model, callbacks_list = cnn_model()
	model.summary()
	model.fit(train_images, train_labels, validation_data=(val_images, val_labels), epochs=15, batch_size=500, callbacks=callbacks_list)
	scores = model.evaluate(val_images, val_labels, verbose=0)
	print("CNN Error: %.2f%%" % (100-scores[1]*100))
	#model.save('cnn_model_keras2.h5') 
Example #6
Source File: HandWritingRecognition.py    From Jtyoui with MIT License
def nn_model():
    (x_train, y_train), _ = mnist.load_data()
    # normalize inputs to [0, 1]
    x_train = x_train.reshape(x_train.shape[0], -1) / 255.
    # one-hot encode the labels
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)
    # constant(value=1.) is a user-defined constant initializer; constant(value=1.) is equivalent to one()
    # build the model: 784 input neurons, 10 output neurons
    model = Sequential([
        Dense(units=200, input_dim=784, bias_initializer=constant(value=1.), activation=tanh),
        Dense(units=100, bias_initializer=one(), activation=tanh),
        Dense(units=10, bias_initializer=one(), activation=softmax),
    ])

    opt = SGD(lr=0.2, clipnorm=1.)  # optimizer
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['acc', 'mae'])  # compile
    model.fit(x_train, y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example #7
Source File: test_tasks.py    From CAPTCHA-breaking with MIT License
def test_vector_clf(self):
        nb_hidden = 10

        print('vector classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(10,),
                                                             classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(Dense(X_train.shape[-1], nb_hidden))
        model.add(Activation('relu'))
        model.add(Dense(nb_hidden, y_train.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        print(history.history)
        self.assertTrue(history.history['val_acc'][-1] > 0.9) 
Example #8
Source File: sequential.py    From keras2pmml with MIT License
def setUp(self):
        iris = load_iris()

        theano.config.floatX = 'float32'
        X = iris.data.astype(theano.config.floatX)
        y = iris.target.astype(np.int32)
        y_ohe = np_utils.to_categorical(y)

        model = Sequential()
        model.add(Dense(input_dim=X.shape[1], output_dim=5, activation='tanh'))
        model.add(Dense(input_dim=5, output_dim=y_ohe.shape[1], activation='sigmoid'))
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        model.fit(X, y_ohe, nb_epoch=10, batch_size=1, verbose=3, validation_data=None)

        params = {'copyright': 'Václav Čadek', 'model_name': 'Iris Model'}
        self.model = model
        self.pmml = keras2pmml(self.model, **params)
        self.num_inputs = self.model.input_shape[1]
        self.num_outputs = self.model.output_shape[1]
        self.num_connection_layers = len(self.model.layers)
        self.features = ['x{}'.format(i) for i in range(self.num_inputs)]
        self.class_values = ['y{}'.format(i) for i in range(self.num_outputs)] 
Example #9
Source File: test_tasks.py    From CAPTCHA-breaking with MIT License
def test_img_clf(self):
        print('image classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(3, 32, 32),
                                                             classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(Convolution2D(32, 3, 32, 32))
        model.add(Activation('sigmoid'))
        model.add(Flatten())
        model.add(Dense(32, y_test.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        self.assertTrue(history.history['val_acc'][-1] > 0.9) 
Example #10
Source File: image_train.py    From recognition_gender with MIT License
def extract_data(self,train_path):
        imgs, labels, counter = get_file(train_path)
        print(labels)
        # to avoid overfitting, use cross-validation: the validation set is 30% of the data, seeded via random_state
        X_train, X_test, y_train, y_test = train_test_split(imgs, labels, test_size=0.3,
                                                            random_state=random.randint(0, 100))

        # data preprocessing: the Keras backend here is TensorFlow; grayscale images, so channels = 1
        X_train = X_train.reshape(X_train.shape[0], 1, self.img_size, self.img_size) / 255.
        X_test = X_test.reshape(X_test.shape[0], 1, self.img_size, self.img_size) / 255.

        # convert labels to one-hot vectors
        Y_train = np_utils.to_categorical(y_train, num_classes=counter)
        Y_test = np_utils.to_categorical(y_test, num_classes=counter)

        self.X_train = X_train
        self.X_test = X_test
        self.Y_train = Y_train
        self.Y_test = Y_test
        self.nb_classes = counter


# build the model, using a CNN (convolutional neural network)
Example #11
Source File: hyperparam_optimization.py    From elephas with MIT License
def data():
    """Data providing function:

    Make sure to have every relevant import statement included here and return data as
    used in model function below. This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    """
    from keras.datasets import mnist
    from keras.utils import np_utils
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    nb_classes = 10
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    return x_train, y_train, x_test, y_test 
Example #12
Source File: test_keras.py    From docker-python with Apache License 2.0
def test_lstm(self):
        x_train = np.random.random((100, 100, 100))
        y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
        x_test = np.random.random((20, 100, 100))
        y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

        model = Sequential()
        model.add(LSTM(32, return_sequences=True, input_shape=(100, 100)))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))


        model.compile(loss='categorical_crossentropy', optimizer=sgd)
        model.fit(x_train, y_train, batch_size=32, epochs=1)
        model.evaluate(x_test, y_test, batch_size=32) 
Example #13
Source File: datasets.py    From super-simple-distributed-keras with MIT License
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #14
Source File: utils.py    From imgclsmob with MIT License
def get_data_generator(data_iterator,
                       num_classes):
    def get_arrays(db):
        data = db.data[0].asnumpy()
        if K.image_data_format() == "channels_last":
            data = data.transpose((0, 2, 3, 1))
        labels = to_categorical(
            y=db.label[0].asnumpy(),
            num_classes=num_classes)
        return data, labels

    while True:
        try:
            db = data_iterator.next()

        except StopIteration:
            # logging.warning("get_data exception due to end of data - resetting iterator")
            data_iterator.reset()
            db = data_iterator.next()

        finally:
            yield get_arrays(db) 
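A generator like this is meant to be handed straight to Keras training. A hypothetical usage, assuming a compiled model and an MXNet-style data_iterator; steps_per_epoch is a placeholder for the number of batches per pass:

gen = get_data_generator(data_iterator, num_classes=num_classes)
model.fit_generator(gen, steps_per_epoch=steps_per_epoch, epochs=1)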
Example #15
Source File: test_keras.py    From docker-python with Apache License 2.0
def test_train(self):
        train = pd.read_csv("/input/tests/data/train.csv")

        x_train = train.iloc[:,1:].values.astype('float32')
        y_train = to_categorical(train.iloc[:,0].astype('int32'))

        model = Sequential()
        model.add(Dense(units=10, input_dim=784, activation='softmax'))

        model.compile(
            loss='categorical_crossentropy',
            optimizer=RMSprop(lr=0.001),
            metrics=['accuracy'])

        model.fit(x_train, y_train, epochs=1, batch_size=32)

    # Uses convnet which depends on libcudnn when running on GPU 
Example #16
Source File: train.py    From neural-network-genetic-algorithm with MIT License
def get_mnist():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #17
Source File: train.py    From neural-network-genetic-algorithm with MIT License
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example #18
Source File: train_LSTM_memLess_8class.py    From Automatic-Modulation-Classification with GNU General Public License v3.0
def loadData(x_load_path, y_load_path):
    # load train data
    x_data_mat = sio.loadmat(x_load_path)
    x_data_complex = x_data_mat['train_data']
    x_data_real = x_data_complex.real
    x_data_imag = x_data_complex.imag
    x_data_real = x_data_real.reshape((x_data_real.shape[0], seqLen))
    x_data_imag = x_data_imag.reshape((x_data_imag.shape[0], seqLen))
    x_train = np.stack((x_data_real, x_data_imag), axis=2)
    y_data_mat = sio.loadmat(y_load_path)
    y_data = y_data_mat['train_label']
    y_train = np_utils.to_categorical(y_data, nClass)
    # train data shuffle
    index = np.arange(y_train.shape[0])
    np.random.shuffle(index)
    x_train = x_train[index,:]
    y_train = y_train[index]
    return [x_train, y_train]


# fix random seed 
Example #19
Source File: train_LSTM_memLess.py    From Automatic-Modulation-Classification with GNU General Public License v3.0
def loadData(x_load_path, y_load_path):
    # load train data
    x_data_mat = sio.loadmat(x_load_path)
    x_data_complex = x_data_mat['train_data']
    x_data_real = x_data_complex.real
    x_data_imag = x_data_complex.imag
    x_data_real = x_data_real.reshape((x_data_real.shape[0], seqLen))
    x_data_imag = x_data_imag.reshape((x_data_imag.shape[0], seqLen))
    x_train = np.stack((x_data_real, x_data_imag), axis=2)
    y_data_mat = sio.loadmat(y_load_path)
    y_data = y_data_mat['train_label']
    y_train = np_utils.to_categorical(y_data, nClass)
    # train data shuffle
    index = np.arange(y_train.shape[0])
    np.random.shuffle(index)
    x_train = x_train[index,:]
    y_train = y_train[index]
    return [x_train, y_train]


# fix random seed 
Example #20
Source File: prepare_data.py    From VQA-Keras-Visual-Question-Answering with MIT License
def read_data(data_limit):
    print "Reading Data..."
    img_data = h5py.File(data_img)
    ques_data = h5py.File(data_prepo)
  
    img_data = np.array(img_data['images_train'])
    img_pos_train = ques_data['img_pos_train'][:data_limit]
    train_img_data = np.array([img_data[_-1,:] for _ in img_pos_train])
    # Normalizing images
    tem = np.sqrt(np.sum(np.multiply(train_img_data, train_img_data), axis=1))
    train_img_data = np.divide(train_img_data, np.transpose(np.tile(tem,(4096,1))))

    #shifting padding to left side
    ques_train = np.array(ques_data['ques_train'])[:data_limit, :]
    ques_length_train = np.array(ques_data['ques_length_train'])[:data_limit]
    ques_train = right_align(ques_train, ques_length_train)

    train_X = [train_img_data, ques_train]
    # NOTE: one-hots should have been constructed from an exhaustive list of answers, because some answers may not be in the dataset.
    # To temporarily rectify this, all those answer indices are set to 1 in the validation set
    train_y = to_categorical(ques_data['answers'])[:data_limit, :]

    return train_X, train_y 
Example #21
Source File: generate.py    From recipe-summarization with MIT License
def conv_seq_labels(xds, xhs, nflips, model, debug, oov0, glove_idx2idx, vocab_size, nb_unknown_words, idx2word):
    """Convert description and hedlines to padded input vectors; headlines are one-hot to label."""
    batch_size = len(xhs)
    assert len(xds) == batch_size
    x = [
        vocab_fold(lpadd(xd) + xh, oov0, glove_idx2idx, vocab_size, nb_unknown_words)
        for xd, xh in zip(xds, xhs)]  # the input does not have 2nd eos
    x = sequence.pad_sequences(x, maxlen=maxlen, value=empty, padding='post', truncating='post')
    x = flip_headline(x, nflips=nflips, model=model, debug=debug, oov0=oov0, idx2word=idx2word)

    y = np.zeros((batch_size, maxlenh, vocab_size))
    for i, xh in enumerate(xhs):
        xh = vocab_fold(xh, oov0, glove_idx2idx, vocab_size, nb_unknown_words) + [eos] + [empty] * maxlenh  # the output does have an eos at the end
        xh = xh[:maxlenh]
        y[i, :, :] = np_utils.to_categorical(xh, vocab_size)

    return x, y 
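The y construction above leans on the fact that to_categorical applied to a 1-D sequence of token ids returns one one-hot row per timestep. A minimal illustration:

from keras.utils import np_utils

token_ids = [3, 1, 4, 1]                          # one (padded) headline as integer ids
one_hot = np_utils.to_categorical(token_ids, 5)   # shape (4, 5): one row per timestep
# stacking one such matrix per headline yields the (batch_size, maxlenh, vocab_size) tensor y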
Example #22
Source File: model.py    From ancient-Chinese-poem-generator with MIT License
def one_hot_encode(data_X,data_Y,n_vocab):
	# n_vocab: 		size of vocabulary
	n_patterns = len(data_X)
	# reshape X to be [samples, time steps, features]
	X = numpy.reshape(data_X, (n_patterns, seq_len, 1))
	# normalize
	X = X / float(n_vocab)
	# one hot encode the output variable
	Y = np_utils.to_categorical(data_Y)
	return X,Y 
Example #23
Source File: voc_generator.py    From keras-fcn with MIT License
def next(self):
        """Next batch."""
        with self.lock:
            index_array, current_index, current_batch_size = next(
                self.index_generator)
        batch_x = np.zeros(
            (current_batch_size,) + self.image_shape,
            dtype=K.floatx())
        batch_y = np.zeros(
            (current_batch_size,) + self.label_shape,
            dtype=np.int8)
        #batch_y = np.reshape(batch_y, (current_batch_size, -1, self.classes))

        for i, j in enumerate(index_array):
            fn = self.filenames[j]
            x = self.image_set_loader.load_img(fn)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
            y = self.image_set_loader.load_seg(fn)
            y = to_categorical(y, self.classes).reshape(self.label_shape)
            #y = np.reshape(y, (-1, self.classes))
            batch_y[i] = y

        # save augmented images to disk for debugging
        #if self.image_set_loader.save_to_dir:
        #    for i in range(current_batch_size):
        #        x = batch_x[i]
        #        y = batch_y[i].argmax(
        #            self.image_data_generator.channel_axis - 1)
        #        if self.image_data_generator.data_format == 'channels_first':
        #            y = y[np.newaxis, ...]
        #        else:
        #            y = y[..., np.newaxis]
        #        self.image_set_loader.save(x, y, current_index + i)

        return batch_x, batch_y 
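The to_categorical(...).reshape(...) line above builds per-pixel one-hot targets for segmentation. A minimal illustration with a 2x2 mask and 3 classes:

import numpy as np
from keras.utils.np_utils import to_categorical

mask = np.array([[0, 1],
                 [2, 1]])                               # class id per pixel
one_hot = to_categorical(mask.ravel(), 3).reshape(2, 2, 3)
# one_hot[i, j] is the one-hot vector for the class of pixel (i, j)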
Example #24
Source File: snli_rnn.py    From keras_snli with MIT License
def get_data(fn, limit=None):
  raw_data = list(yield_examples(fn=fn, limit=limit))
  left = [s1 for _, s1, s2 in raw_data]
  right = [s2 for _, s1, s2 in raw_data]
  print(max(len(x.split()) for x in left))
  print(max(len(x.split()) for x in right))

  LABELS = {'contradiction': 0, 'neutral': 1, 'entailment': 2}
  Y = np.array([LABELS[l] for l, s1, s2 in raw_data])
  Y = np_utils.to_categorical(Y, len(LABELS))

  return left, right, Y 
Example #25
Source File: test_keras.py    From docker-python with Apache License 2.0
def test_conv2d(self):
        # Generate dummy data
        x_train = np.random.random((100, 100, 100, 3))
        y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
        x_test = np.random.random((20, 100, 100, 3))
        y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

        model = Sequential()
        # input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
        # this applies 32 convolution filters of size 3x3 each.
        model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))

        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

        # This throws if libcudnn is not properly installed with on a GPU
        model.compile(loss='categorical_crossentropy', optimizer=sgd)
        model.fit(x_train, y_train, batch_size=32, epochs=1)
        
        model.evaluate(x_test, y_test, batch_size=32) 
Example #26
Source File: AlexNet.py    From cnn_evaluation_smoke with GNU General Public License v3.0
def generate_arrays(train_filename, batch_size, max_sample, new_size):
    batch_features = np.zeros((batch_size, new_size, new_size, 3))
    batch_labels = np.zeros((batch_size,1))
    
    current_sample_idx = 0
    combined_num = 0

    print('GENERATOR: Train file = {}, batch = {}, total samples = {}'.format(train_filename,  batch_size, max_sample))
    while 1:
        reached_end = False
        start_idx = current_sample_idx
        end_idx = batch_size + start_idx
        
        if (end_idx > max_sample):
            end_idx = batch_size
            reached_end = True
        
        print('GENERATOR: Start idx = {}, end_idx = {}, total samples = {}'.format(start_idx,  end_idx, max_sample))    
        x = HDF5Matrix(train_filename, 'data', start=start_idx, end=end_idx)
        y = HDF5Matrix(train_filename, 'labels', start=start_idx, end=end_idx)
        x = np.array(x)
        y = np.array(y)
        y = np_utils.to_categorical(y, NUMBER_OF_CLASSES)
        
        current_sample_idx = end_idx
        if reached_end:
            current_sample_idx = 0
        
        print("Shapes. x = {}, y = {}".format(x.shape, y.shape))
        
        #batch_labels = np_utils.to_categorical(batch_labels, NUMBER_OF_CLASSES)
        yield(x,y) 
Example #27
Source File: ResNet.py    From cnn_evaluation_smoke with GNU General Public License v3.0
def generate_arrays(train_filename, batch_size, max_sample, new_size):
    batch_features = np.zeros((batch_size, new_size, new_size, 3))
    batch_labels = np.zeros((batch_size,1))
    
    current_sample_idx = 0
    combined_num = 0
   

    print('GENERATOR: Train file = {}, batch = {}, total samples = {}'.format(train_filename,  batch_size, max_sample))
    while 1:
        reached_end = False
        start_idx = current_sample_idx
        end_idx = batch_size + start_idx
        
        if (end_idx > max_sample):
            end_idx = batch_size
            reached_end = True
        
        print('GENERATOR: Start idx = {}, end_idx = {}, total samples = {}'.format(start_idx,  end_idx, max_sample))    
        x = HDF5Matrix(train_filename, 'data', start=start_idx, end=end_idx)
        y = HDF5Matrix(train_filename, 'labels', start=start_idx, end=end_idx)
        x = np.array(x)
        y = np.array(y)
        y = np_utils.to_categorical(y, NUMBER_OF_CLASSES)
        
        current_sample_idx = end_idx
        if reached_end:
            current_sample_idx = 0
        
        print("Shapes. x = {}, y = {}".format(x.shape, y.shape))
        
        #batch_labels = np_utils.to_categorical(batch_labels, NUMBER_OF_CLASSES)
        yield(x,y) 
Example #28
Source File: InceptionV3.py    From cnn_evaluation_smoke with GNU General Public License v3.0
def generate_arrays(train_filename, batch_size, max_sample, new_size):
    batch_features = np.zeros((batch_size, new_size, new_size, 3))
    batch_labels = np.zeros((batch_size,1))
    
    current_sample_idx = 0
    combined_num = 0
   

    print('GENERATOR: Train file = {}, batch = {}, total samples = {}'.format(train_filename,  batch_size, max_sample))
    while 1:
        reached_end = False
        start_idx = current_sample_idx
        end_idx = batch_size + start_idx
        
        if (end_idx > max_sample):
            end_idx = batch_size
            reached_end = True
        
        print('GENERATOR: Start idx = {}, end_idx = {}, total samples = {}'.format(start_idx,  end_idx, max_sample))    
        x = HDF5Matrix(train_filename, 'data', start=start_idx, end=end_idx)
        y = HDF5Matrix(train_filename, 'labels', start=start_idx, end=end_idx)
        x = np.array(x)
        y = np.array(y)
        y = np_utils.to_categorical(y, NUMBER_OF_CLASSES)
        
        current_sample_idx = end_idx
        if reached_end:
            current_sample_idx = 0
        
        print("Shapes. x = {}, y = {}".format(x.shape, y.shape))
        
        #batch_labels = np_utils.to_categorical(batch_labels, NUMBER_OF_CLASSES)
        yield(x,y) 
Example #29
Source File: lstm_example.py    From speech-emotion-recognition with MIT License
def lstm_example():
    to_flatten = False
    x_train, x_test, y_train, y_test, num_labels = extract_data(
        flatten=to_flatten)
    y_train = np_utils.to_categorical(y_train)
    y_test_train = np_utils.to_categorical(y_test)
    print('Starting LSTM')
    model = LSTM(input_shape=x_train[0].shape,
                 num_classes=num_labels)
    model.train(x_train, y_train, x_test, y_test_train, n_epochs=50)
    model.evaluate(x_test, y_test)
    filename = '../dataset/Sad/09b03Ta.wav'
    print('prediction', model.predict_one(
        get_feature_vector_from_mfcc(filename, flatten=to_flatten)),
          'Actual 3')
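Finally, the encoding is easy to invert: since to_categorical produces one-hot rows, np.argmax recovers the integer class ids, both from encoded labels and from a model's softmax output. A minimal sketch:

import numpy as np

class_ids = np.argmax(y_test_train, axis=1)  # back from one-hot matrix to integer labels
# the same argmax, applied to softmax probabilities, yields predicted class ids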