Python keras.datasets.mnist.load_data() Examples

The following are 30 code examples showing how to use keras.datasets.mnist.load_data(). They are extracted from open-source projects; the project, author, source file, and license are noted above each example.

You may also want to check out the other available functions and classes of the keras.datasets.mnist module.
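
For reference, here is a minimal sketch of the call itself; the shapes and dtypes shown are what Keras returns for MNIST (the file is downloaded once and, by default, cached under ~/.keras/datasets):

from keras.datasets import mnist

# load_data() returns two (images, labels) tuples of NumPy uint8 arrays.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)    # (10000, 28, 28) (10000,)
print(x_train.dtype)                 # uint8, pixel values in [0, 255]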

Example 1
Project: super-simple-distributed-keras   Author: harvitronix   File: datasets.py   License: MIT License
def get_mnist():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example 2
Project: gandlf   Author: codekansas   File: mnist_gan.py   License: MIT License
def get_mnist_data(binarize=False):
    """Puts the MNIST data in the right format."""

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    if binarize:
        X_test = np.where(X_test >= 10, 1, -1)
        X_train = np.where(X_train >= 10, 1, -1)
    else:
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_test = (X_test.astype(np.float32) - 127.5) / 127.5

    X_train = np.expand_dims(X_train, axis=-1)
    X_test = np.expand_dims(X_test, axis=-1)

    y_train = np.expand_dims(y_train, axis=-1)
    y_test = np.expand_dims(y_test, axis=-1)

    return (X_train, y_train), (X_test, y_test) 
Example 3
Project: gandlf   Author: codekansas   File: reversing_gan.py   License: MIT License
def get_mnist_data(binarize=False):
    """Puts the MNIST data in the right format."""

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    if binarize:
        X_test = np.where(X_test >= 10, 1, -1)
        X_train = np.where(X_train >= 10, 1, -1)
    else:
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_test = (X_test.astype(np.float32) - 127.5) / 127.5

    X_train = np.expand_dims(X_train, axis=-1)
    X_test = np.expand_dims(X_test, axis=-1)

    y_train = np.eye(10)[y_train]
    y_test = np.eye(10)[y_test]

    return (X_train, y_train), (X_test, y_test) 
Example 4
Project: Keras-GAN   Author: eriklindernoren   File: data_loader.py   License: MIT License
def setup_mnist(self, img_res):

        print("Setting up MNIST...")

        if not os.path.exists('datasets/mnist_x.npy'):
            # Load the dataset
            (mnist_X, mnist_y), (_, _) = mnist.load_data()

            # Normalize and rescale images
            mnist_X = self.normalize(mnist_X)
            mnist_X = np.array([imresize(x, img_res) for x in mnist_X])
            mnist_X = np.expand_dims(mnist_X, axis=-1)
            mnist_X = np.repeat(mnist_X, 3, axis=-1)

            self.mnist_X, self.mnist_y = mnist_X, mnist_y

            # Save formatted images
            np.save('datasets/mnist_x.npy', self.mnist_X)
            np.save('datasets/mnist_y.npy', self.mnist_y)
        else:
            self.mnist_X = np.load('datasets/mnist_x.npy')
            self.mnist_y = np.load('datasets/mnist_y.npy')

        print("+ Done.")
Example 5
Project: Jtyoui   Author: jtyoui   File: HandWritingRecognition.py   License: MIT License
def nn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize to [0, 1]
    x_train = x_train.reshape(x_train.shape[0], -1) / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)
    # constant(value=1.) is a custom constant initializer; constant(value=1.) is equivalent to one()
    # Build the model: 784 input neurons, 10 output neurons
    model = Sequential([
        Dense(units=200, input_dim=784, bias_initializer=constant(value=1.), activation=tanh),
        Dense(units=100, bias_initializer=one(), activation=tanh),
        Dense(units=10, bias_initializer=one(), activation=softmax),
    ])

    opt = SGD(lr=0.2, clipnorm=1.)  # optimizer
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['acc', 'mae'])  # compile
    model.fit(x_train, y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example 6
Project: brainforge   Author: csxeba   File: xp_elm.py   License: GNU General Public License v3.0
def pull_mnist(split=0.1, flatten=True):
    learning, testing = mnist.load_data()
    X = np.concatenate([learning[0], testing[0]]).astype(typing.floatX)
    Y = np.concatenate([learning[1], testing[1]]).astype("uint8")
    X -= X.mean()
    X /= X.std()
    if flatten:
        X = X.reshape(-1, 784)
    else:
        X = X[:, None, ...]
    Y = np.eye(10)[Y]

    if split:
        arg = np.arange(len(X))
        np.random.shuffle(arg)
        div = int(len(X) * split)
        targ, larg = arg[:div], arg[div:]
        return X[larg], Y[larg], X[targ], Y[targ]

    return X, Y 
Example 7
Project: deep_architect   Author: negrinho   File: test_hyperband.py   License: MIT License
def main():

    num_classes = 10
    num_samples = 3  # number of architectures to sample
    metric = 'val_accuracy' # evaluation metric
    resource_type = 'epoch'
    max_resource = 81 # max resource that a configuration can have

    # load and normalize data
    (x_train, y_train),(x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # defining searcher and evaluator
    evaluator = SimpleClassifierEvaluator((x_train, y_train), num_classes,
                                        max_num_training_epochs=5)
    searcher = se.RandomSearcher(get_search_space(num_classes))
    hyperband = SimpleArchitectureSearchHyperBand(searcher, evaluator, metric, resource_type)
    (best_config, best_perf) = hyperband.evaluate(max_resource)
    print("Best %s is %f with architecture %d" % (metric, best_perf[0], best_config[0])) 
Example 8
Project: CAPTCHA-breaking   Author: lllcho   File: test_datasets.py   License: MIT License
def test_cifar(self):
        print('cifar10')
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape)

        print('cifar100 fine')
        (X_train, y_train), (X_test, y_test) = cifar100.load_data('fine')
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape)

        print('cifar100 coarse')
        (X_train, y_train), (X_test, y_test) = cifar100.load_data('coarse')
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape) 
Example 9
Project: CAPTCHA-breaking   Author: lllcho   File: test_datasets.py   License: MIT License
def test_imdb(self):
        print('imdb')
        (X_train, y_train), (X_test, y_test) = imdb.load_data() 
Example 10
Project: super-simple-distributed-keras   Author: harvitronix   File: datasets.py   License: MIT License
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example 11
Project: MassImageRetrieval   Author: liuguiyangnwpu   File: DataSampler.py   License: Apache License 2.0
def mnist_dataset_reader():
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255  # normalize to [0, 1]
    X_test /= 255

    digit_indices = [np.where(y_train == i)[0] for i in range(10)]
    tr_pairs, tr_y = create_pairs(X_train, digit_indices)

    digit_indices = [np.where(y_test == i)[0] for i in range(10)]
    te_pairs, te_y = create_pairs(X_test, digit_indices)

    input_dim = 784

    return input_dim, tr_pairs, tr_y, te_pairs, te_y 
Example 12
Project: neural-network-genetic-algorithm   Author: harvitronix   File: train.py   License: MIT License
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example 13
Project: neural-network-genetic-algorithm   Author: harvitronix   File: train.py   License: MIT License
def get_mnist():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Example 14
Project: elephas   Author: maxpumperla   File: hyperparam_optimization.py   License: MIT License
def data():
    """Data providing function:

    Make sure to have every relevant import statement included here and return data as
    used in model function below. This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    """
    from keras.datasets import mnist
    from keras.utils import np_utils
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    nb_classes = 10
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    return x_train, y_train, x_test, y_test 
Example 15
Project: Self-Attention-GAN-Tensorflow   Author: taki0112   File: utils.py   License: MIT License
def load_mnist(size=64):
    (train_data, train_labels), (test_data, test_labels) = mnist.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)
    # x = np.expand_dims(x, axis=-1)

    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
    x = np.expand_dims(x, axis=-1)
    return x 
Example 16
Project: Self-Attention-GAN-Tensorflow   Author: taki0112   File: utils.py   License: MIT License
def load_cifar10(size=64):
    (train_data, train_labels), (test_data, test_labels) = cifar10.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)

    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])

    return x 
Example 17
Project: RelativisticGAN-Tensorflow   Author: taki0112   File: utils.py   License: MIT License
def load_mnist(size=64):
    (train_data, train_labels), (test_data, test_labels) = mnist.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)
    # x = np.expand_dims(x, axis=-1)

    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
    x = np.expand_dims(x, axis=-1)
    return x 
Example 18
Project: RelativisticGAN-Tensorflow   Author: taki0112   File: utils.py   License: MIT License
def load_cifar10(size=64):
    (train_data, train_labels), (test_data, test_labels) = cifar10.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)

    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])

    return x 
Example 19
Project: DiscriminativeActiveLearning   Author: dsgissin   File: main.py   License: MIT License
def load_mnist():
    """
    load and pre-process the MNIST data
    """

    from keras.datasets import mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    if K.image_data_format() == 'channels_last':
        x_train = x_train.reshape((x_train.shape[0], 28, 28, 1))
        x_test = x_test.reshape((x_test.shape[0], 28, 28, 1))
    else:
        x_train = x_train.reshape((x_train.shape[0], 1, 28, 28))
        x_test = x_test.reshape((x_test.shape[0], 1, 28, 28))

    # standardise the dataset:
    x_train = np.array(x_train).astype('float32') / 255
    x_test = np.array(x_test).astype('float32') / 255

    # shuffle the data:
    perm = np.random.permutation(x_train.shape[0])
    x_train = x_train[perm]
    y_train = y_train[perm]

    return (x_train, y_train), (x_test, y_test) 
Example 20
Project: gandlf   Author: codekansas   File: upsample_gan.py   License: MIT License
def generate_training_data(data='mnist'):
    if data == 'mnist':
        (X_train, _), (_, _) = mnist.load_data()
        X_train = np.expand_dims(X_train, -1) / 255.
    elif data == 'cifar':
        (X_train, _), (_, _) = cifar10.load_data()
        X_train = X_train / 255.
    else:
        raise ValueError('data should be "mnist" or "cifar", got '
                         '"%s".' % data)

    # Downsamples by averaging adjacent pixels.
    X_low_dim = mean_bins(X_train)

    return X_low_dim, X_train 
Example 21
Project: Jtyoui   Author: jtyoui   File: HandWritingRecognition.py   License: MIT License
def cnn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize to [0, 1]
    x_train = x_train.reshape(-1, 28, 28, 1) / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)

    model = Sequential([
        # input_shape: shape of the input plane; set it on the first layer only
        # filters: number of convolution kernels (filters)
        # kernel_size: size of each convolution kernel
        # strides: stride of the convolution
        # padding: one of 'same' or 'valid'
        # activation: activation function
        Convolution2D(input_shape=(28, 28, 1), filters=32, kernel_size=5, strides=1, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Convolution2D(filters=64, kernel_size=5, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Flatten(),  # flatten the feature maps to a vector
        Dense(units=1024, activation=relu),
        Dropout(0.5),
        Dense(units=10, activation=softmax),
    ])
    opt = Adam(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example 22
Project: Jtyoui   Author: jtyoui   File: HandWritingRecognition.py   License: MIT License
def rnn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize to [0, 1]
    x_train = x_train / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)

    model = Sequential([
        SimpleRNN(units=50, input_shape=(28, 28)),
        Dense(units=10, activation=softmax),
    ])
    opt = RMSprop(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Example 23
Project: deep_architect   Author: negrinho   File: main_keras.py   License: MIT License
def main():
    num_classes = 10
    num_samples = 4
    num_training_epochs = 2
    validation_frac = 0.2
    # NOTE: change to True for graph visualization
    show_graph = False

    # load the data.
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    fn = lambda X: X.reshape((X.shape[0], -1))
    X_train = fn(X_train) / 255.0
    X_test = fn(X_test) / 255.0
    num_train = int((1.0 - validation_frac) * X_train.shape[0])
    X_train, X_val = X_train[:num_train], X_train[num_train:]
    y_train, y_val = y_train[:num_train], y_train[num_train:]

    # define the search space and the evaluator
    evaluator = SimpleClassifierEvaluator(
        X_train,
        y_train,
        X_val,
        y_val,
        num_classes,
        num_training_epochs=num_training_epochs)
    search_space_fn = lambda: dnn_net(num_classes)
    searcher = se.RandomSearcher(search_space_fn)

    for i in range(num_samples):
        (inputs, outputs, hyperp_value_lst,
         searcher_eval_token) = searcher.sample()
        if show_graph:
            # try setting draw_module_hyperparameter_info=False and
            # draw_hyperparameters=True for a different visualization.
            vi.draw_graph(outputs,
                          draw_module_hyperparameter_info=False,
                          draw_hyperparameters=True)
        results = evaluator.evaluate(inputs, outputs)
        # updating the searcher. no-op for the random searcher.
        searcher.update(results['validation_accuracy'], searcher_eval_token) 
Example 24
Project: deep_architect   Author: negrinho   File: loaders.py   License: MIT License
def load_mnist(flatten=False, one_hot=True, validation_frac=0.1):
    from keras.datasets import mnist
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train / 255.0
    X_test = X_test / 255.0
    if flatten:
        X_train = X_train.reshape((X_train.shape[0], -1))
        X_test = X_test.reshape((X_test.shape[0], -1))
    if one_hot:
        y_train = au.idx_to_onehot(y_train, 10)
        y_test = au.idx_to_onehot(y_test, 10)
    num_train = int((1.0 - validation_frac) * X_train.shape[0])
    X_train, X_val = X_train[:num_train], X_train[num_train:]
    y_train, y_val = y_train[:num_train], y_train[num_train:]
    return (X_train, y_train, X_val, y_val, X_test, y_test) 
Example 25
Project: blackbox-attacks   Author: sunblaze-ucb   File: mnist.py   License: MIT License
def data_mnist(one_hot=True):
    """
    Preprocess MNIST dataset
    """
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train = X_train.reshape(X_train.shape[0],
                              FLAGS.IMAGE_ROWS,
                              FLAGS.IMAGE_COLS,
                              FLAGS.NUM_CHANNELS)

    X_test = X_test.reshape(X_test.shape[0],
                            FLAGS.IMAGE_ROWS,
                            FLAGS.IMAGE_COLS,
                            FLAGS.NUM_CHANNELS)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    print("Loaded MNIST test data.")

    if one_hot:
        # convert class vectors to binary class matrices
        y_train = np_utils.to_categorical(y_train, FLAGS.NUM_CLASSES).astype(np.float32)
        y_test = np_utils.to_categorical(y_test, FLAGS.NUM_CLASSES).astype(np.float32)

    return X_train, y_train, X_test, y_test 
Example 26
Project: CAPTCHA-breaking   Author: lllcho   File: test_datasets.py   License: MIT License
def test_reuters(self):
        print('reuters')
        (X_train, y_train), (X_test, y_test) = reuters.load_data() 
Example 27
Project: CAPTCHA-breaking   Author: lllcho   File: test_datasets.py   License: MIT License
def test_mnist(self):
        print('mnist')
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape) 
Example 28
Project: deepJDOT   Author: bbdamodaran   File: da_dataload.py   License: MIT License
def usps_to_mnist():
    from DatasetLoad import usps_digit_dataload
    source_traindata, source_trainlabel, source_testdata, source_testlabel = usps_digit_dataload()
    source_trainlabel = source_trainlabel - 1
    source_testlabel = source_testlabel - 1

    # 2d to 3d for CNN
    source_traindata = source_traindata.reshape(-1, 16, 16, 1)
    source_testdata = source_testdata.reshape(-1, 16, 16, 1)

    from preprocess import zero_mean_unitvarince, resize_data

    source_traindata = zero_mean_unitvarince(source_traindata, scaling=True)
    source_testdata = zero_mean_unitvarince(source_testdata, scaling=True)

    from keras.datasets import mnist
    (target_traindata, target_trainlabel), (target_testdata, target_testlabel) = mnist.load_data()
    target_size = target_traindata.shape

    resize = True
    resize_size = 16

    if resize:
        target_traindata = resize_data(target_traindata, resize_size=resize_size)
        target_testdata = resize_data(target_testdata, resize_size=resize_size)

    target_size = target_traindata.shape

    target_traindata = zero_mean_unitvarince(target_traindata, scaling=True)
    target_testdata = zero_mean_unitvarince(target_testdata, scaling=True)

    target_traindata = target_traindata.reshape(-1, target_size[1], target_size[2], 1)
    target_testdata = target_testdata.reshape(-1, target_size[1], target_size[2], 1)

    return (source_traindata, source_trainlabel, source_testdata, source_testlabel), (target_traindata, target_trainlabel, target_testdata, target_testlabel)

#%% MNIST MNISTM 
Example 29
Project: deepJDOT   Author: bbdamodaran   File: da_dataload.py   License: MIT License
def mnist_to_mnistm():
    from keras.datasets import mnist
    (source_traindata, source_trainlabel), (source_testdata, source_testlabel) = mnist.load_data()

    source_size = source_traindata.shape
    resize = False
    resize_size = 32
    from preprocess import zero_mean_unitvarince, resize_data
    if resize:
        source_traindata = resize_data(source_traindata, resize_size=resize_size)
        source_testdata = resize_data(source_testdata, resize_size=resize_size)

    source_size = source_traindata.shape

    source_traindata = zero_mean_unitvarince(source_traindata, scaling=True)
    source_testdata = zero_mean_unitvarince(source_testdata, scaling=True)

    convert_rgb = 1
    if convert_rgb:
        source_traindata = np.stack((source_traindata, source_traindata, source_traindata), axis=3)
        source_testdata = np.stack((source_testdata, source_testdata, source_testdata), axis=3)

    from DatasetLoad import mnist_m_dataload
    from skimage.color import rgb2gray
    target_traindata, target_trainlabel, target_testdata, target_testlabel = mnist_m_dataload()
    target_size = target_traindata.shape
    resize = False
    resize_size = 28

    if resize:
        target_traindata = resize_data(target_traindata, resize_size=resize_size)
        target_testdata = resize_data(target_testdata, resize_size=resize_size)

    target_size = target_traindata.shape

    target_traindata = zero_mean_unitvarince(target_traindata, scaling=True)
    target_testdata = zero_mean_unitvarince(target_testdata, scaling=True)

    return (source_traindata, source_trainlabel, source_testdata, source_testlabel), (target_traindata, target_trainlabel, target_testdata, target_testlabel)
#%% 
Example 30
Project: lottery-ticket-hypothesis   Author: google-research   File: download_data.py   License: Apache License 2.0
def download(location=locations.MNIST_LOCATION):
  d = {}
  (d['x_train'], d['y_train']), (d['x_test'], d['y_test']) = mnist.load_data()
  save_restore.save_network(location, d)