Python tensorflow.examples.tutorials.mnist.input_data.read_data_sets() Examples

The following are 30 code examples of tensorflow.examples.tutorials.mnist.input_data.read_data_sets(), drawn from open-source projects. The source file, originating project, and license are listed above each example. You may also want to check out all available functions and classes of the module tensorflow.examples.tutorials.mnist.input_data.
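For orientation, here is a minimal, self-contained sketch of the call itself before the project examples (the data directory below is illustrative): on first use it downloads the four MNIST archives into the given directory and returns train, validation, and test splits.

from tensorflow.examples.tutorials.mnist import input_data

# Downloads MNIST into the directory on first use; later calls reuse the files.
mnist = input_data.read_data_sets('/tmp/MNIST_data', one_hot=True)

print(mnist.train.images.shape)       # (55000, 784), float32 scaled to [0, 1]
print(mnist.validation.images.shape)  # (5000, 784)
print(mnist.test.images.shape)        # (10000, 784)

batch_xs, batch_ys = mnist.train.next_batch(100)  # (100, 784) and (100, 10)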
Example #1
Source File: prepro.py    From dl-uncertainty with MIT License
def main():
    mnist = input_data.read_data_sets(train_dir='mnist')

    train = {'X': resize_images(mnist.train.images.reshape(-1, 28, 28)),
             'y': mnist.train.labels}
    
    test = {'X': resize_images(mnist.test.images.reshape(-1, 28, 28)),
            'y': mnist.test.labels}
    #~ train = {'X': mnist.train.images,
             #~ 'y': mnist.train.labels}
    
    #~ test = {'X': mnist.test.images,
            #~ 'y': mnist.test.labels}
        
    save_pickle(train, 'mnist/train.pkl')
    save_pickle(test, 'mnist/test.pkl') 
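The helpers resize_images and save_pickle are defined elsewhere in prepro.py and not shown here. A minimal sketch of what they might look like, assuming 32x32 output and standard pickle (the project's actual implementations may differ):

import pickle

import numpy as np
from scipy.ndimage import zoom

def resize_images(images, size=32):
    # images: (N, 28, 28) array; returns (N, size, size).
    factor = float(size) / images.shape[1]
    return np.stack([zoom(img, factor) for img in images])

def save_pickle(obj, path):
    with open(path, 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)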
Example #2
Source File: download_and_process_mnist.py    From adversarial-feature-augmentation with MIT License
def download_and_process_mnist():
    if not os.path.exists('./data/mnist'):
        os.makedirs('./data/mnist')

    mnist = input_data.read_data_sets(train_dir='./data/mnist')

    train = {'X': resize_images(mnist.train.images.reshape(-1, 28, 28)),
             'y': mnist.train.labels}
    
    test = {'X': resize_images(mnist.test.images.reshape(-1, 28, 28)),
            'y': mnist.test.labels}
        
    # 'wb': HIGHEST_PROTOCOL pickles are binary, so the files must be opened in binary mode.
    with open('./data/mnist/train.pkl', 'wb') as f:
        cPickle.dump(train, f, cPickle.HIGHEST_PROTOCOL)

    with open('./data/mnist/test.pkl', 'wb') as f:
        cPickle.dump(test, f, cPickle.HIGHEST_PROTOCOL) 
Example #3
Source File: task.py    From cloudml-samples with Apache License 2.0
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set, images_placeholder, labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  precision = true_count / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision)) 
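In the tutorial this file is adapted from, do_eval is invoked once per split after training; a usage sketch with model construction and session setup omitted (all names are assumed from that tutorial):

# Evaluate against each of the three splits returned by read_data_sets().
do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train)
do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation)
do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)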
Example #4
Source File: mnist_data.py    From forge with GNU General Public License v3.0
def load(config, **unused_kwargs):

    del unused_kwargs

    if not os.path.exists(config.data_folder):
        os.makedirs(config.data_folder)

    dataset = input_data.read_data_sets(config.data_folder)

    train_data = {'imgs': dataset.train.images, 'labels': dataset.train.labels}
    valid_data = {'imgs': dataset.validation.images, 'labels': dataset.validation.labels}

    # This function turns a dictionary of numpy.ndarrays into tensors.
    train_tensors = tensors_from_data(train_data, config.batch_size, shuffle=True)
    valid_tensors = tensors_from_data(valid_data, config.batch_size, shuffle=False)

    data_dict = AttrDict(
        train_img=train_tensors['imgs'],
        valid_img=valid_tensors['imgs'],
        train_label=train_tensors['labels'],
        valid_label=valid_tensors['labels'],
    )

    return data_dict 
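tensors_from_data and AttrDict are forge utilities not shown above. A rough, hypothetical stand-in built on the TF 1.x tf.data API conveys the idea (forge's real implementation differs):

import tensorflow as tf

class AttrDict(dict):
    # Dictionary with attribute-style access, as used in load() above.
    __getattr__ = dict.__getitem__

def tensors_from_data(data_dict, batch_size, shuffle=True):
    # Wrap a dict of numpy arrays as an endless stream of minibatch tensors.
    n = len(next(iter(data_dict.values())))
    dataset = tf.data.Dataset.from_tensor_slices(dict(data_dict))
    if shuffle:
        dataset = dataset.shuffle(buffer_size=n)
    dataset = dataset.repeat().batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()  # dict of tensors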
Example #5
Source File: prepro.py    From minimal-entropy-correlation-alignment with MIT License
def main():
    mnist = input_data.read_data_sets(train_dir='mnist')

    train = {'X': resize_images(mnist.train.images.reshape(-1, 28, 28)),
             'y': mnist.train.labels}
    
    test = {'X': resize_images(mnist.test.images.reshape(-1, 28, 28)),
            'y': mnist.test.labels}
    #~ train = {'X': mnist.train.images,
             #~ 'y': mnist.train.labels}
    
    #~ test = {'X': mnist.test.images,
            #~ 'y': mnist.test.labels}
        
    save_pickle(train, 'mnist/train.pkl')
    save_pickle(test, 'mnist/test.pkl') 
Example #6
Source File: mnist_data.py    From forge with GNU General Public License v3.0
def load(config, **unused_kwargs):

    del unused_kwargs

    if not os.path.exists(config.data_folder):
        os.makedirs(config.data_folder)

    dataset = input_data.read_data_sets(config.data_folder)

    train_data = {'imgs': dataset.train.images, 'labels': dataset.train.labels}
    valid_data = {'imgs': dataset.validation.images, 'labels': dataset.validation.labels}

    train_tensors = tensors_from_data(train_data, config.batch_size, shuffle=True)
    valid_tensors = tensors_from_data(valid_data, config.batch_size, shuffle=False)

    data_dict = AttrDict(
        train_img=train_tensors['imgs'],
        valid_img=valid_tensors['imgs'],
        train_label=train_tensors['labels'],
        valid_label=valid_tensors['labels'],
    )

    return data_dict 
Example #7
Source File: mnist.py    From deep_image_model with Apache License 2.0
def fill_feed_dict(data_set, images_pl, labels_pl, batch_size):
  """Fills the feed_dict for training the given step.

  Args:
    data_set: The set of images and labels, from input_data.read_data_sets()
    images_pl: The images placeholder, from placeholder_inputs().
    labels_pl: The labels placeholder, from placeholder_inputs().
    batch_size: Batch size of data to feed.

  Returns:
    feed_dict: The feed dictionary mapping from placeholders to values.
  """
  # Create the feed_dict for the placeholders filled with the next
  # `batch_size` examples.
  images_feed, labels_feed = data_set.next_batch(batch_size, FLAGS.fake_data)
  feed_dict = {
      images_pl: images_feed,
      labels_pl: labels_feed,
  }
  return feed_dict 
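A usage sketch for the training loop this function feeds; placeholder_inputs, the ops, and FLAGS come from the surrounding tutorial and are assumed here:

images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
# ... build loss and train_op, create a session, initialize variables ...
for step in xrange(FLAGS.max_steps):
    feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                               labels_placeholder, FLAGS.batch_size)
    _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)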
Example #8
Source File: AGNModel.py    From Machine-Learning-Study-Notes with Apache License 2.0
def load_model(self):
        tf.train.Saver().restore(self._sess, tf.train.latest_checkpoint("/home/ilmare/Desktop/FaceReplace/model/"))
        mnist = input_data.read_data_sets("/home/ilmare/dataSet/mnist", one_hot=True)
        source = np.reshape(mnist.train.images[0], [1, 784])
        dest = self.reconstrct(source)
        source = np.reshape(source, [28, 28])
        dest = np.reshape(dest, [28, 28])
        print(source.shape, dest.shape)
        # fig = plt.figure("test")
        # ax = fig.add_subplot(121)
        # ax.imshow(source)
        # bx = fig.add_subplot(122)
        # bx.imshow(dest)
        # plt.show()
        cv2.imshow("test", dest)
        cv2.waitKey(0) 
Example #9
Source File: mnist.py    From lightnn with Apache License 2.0
def model_mlp_mnist():
    """test MLP with MNIST data and Model

    """
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
    training_data = np.array([image.flatten() for image in mnist.train.images])
    training_label = mnist.train.labels
    valid_data = np.array([image.flatten() for image in mnist.validation.images])
    valid_label = mnist.validation.labels
    input_dim = training_data.shape[1]
    label_size = training_label.shape[1]

    dense_1 = Dense(300, input_dim=input_dim, activator=None)
    dense_2 = Activation('selu')(dense_1)
    dropout_1 = Dropout(0.2)(dense_2)
    softmax_1 = Softmax(label_size)(dropout_1)
    model = Model(dense_1, softmax_1)
    model.compile('CCE', optimizer=Adadelta())
    model.fit(training_data, training_label, validation_data=(valid_data, valid_label)) 
Example #10
Source File: mnist.py    From lightnn with Apache License 2.0
def cnn_mnist():
    """test CNN with MNIST data and Sequential

    """
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
    training_data = np.array([image.reshape(28, 28, 1) for image in mnist.train.images])
    training_label = mnist.train.labels
    valid_data = np.array([image.reshape(28, 28, 1) for image in mnist.validation.images])
    valid_label = mnist.validation.labels
    label_size = training_label.shape[1]

    model = Sequential()
    model.add(Input(batch_input_shape=(None, 28, 28, 1)))
    model.add(Conv2d((3, 3), 1, activator='selu'))
    model.add(AvgPooling((2, 2), stride=2))
    model.add(Conv2d((4, 4), 2, activator='selu'))
    model.add(AvgPooling((2, 2), stride=2))
    model.add(Flatten())
    model.add(Softmax(label_size))
    model.compile('CCE', optimizer=SGD(lr=1e-2))
    model.fit(training_data, training_label, validation_data=(valid_data, valid_label), verbose=2) 
Example #11
Source File: mnist.py    From lightnn with Apache License 2.0
def mlp_mnist():
    """test MLP with MNIST data and Sequential

    """
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
    training_data = np.array([image.flatten() for image in mnist.train.images])
    training_label = mnist.train.labels
    valid_data = np.array([image.flatten() for image in mnist.validation.images])
    valid_label = mnist.validation.labels
    input_dim = training_data.shape[1]
    label_size = training_label.shape[1]

    model = Sequential()
    model.add(Input(input_shape=(input_dim, )))
    model.add(Dense(300, activator='selu'))
    model.add(Dropout(0.2))
    model.add(Softmax(label_size))
    model.compile('CCE', optimizer=SGD())
    model.fit(training_data, training_label, validation_data=(valid_data, valid_label)) 
Example #12
Source File: mnist_data.py    From gated-pixel-cnn with Apache License 2.0
def get_dataset(data_dir, preprocess_fcn=None, dtype=tf.float32, reshape=True):
  """Construct a DataSet.
  `dtype` can be either
  `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
  `[0, 1]`.
   `reshape` Convert shape from [num examples, rows, columns, depth]
    to [num examples, rows*columns] (assuming depth == 1)    
  """
  from tensorflow.examples.tutorials.mnist import input_data

  datasets = input_data.read_data_sets(data_dir, dtype=dtype, reshape=reshape)
  
  if preprocess_fcn is not None:
    train = _preprocess_dataset(datasets.train, preprocess_fcn, dtype, reshape)
    validation = _preprocess_dataset(datasets.validation, preprocess_fcn, dtype, reshape)
    test = _preprocess_dataset(datasets.test, preprocess_fcn, dtype, reshape)
  else:
    train = datasets.train
    validation = datasets.validation
    test = datasets.test

  height, width, channels = 28, 28, 1 
  return Datasets(train, validation, test, height, width, channels) 
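A hypothetical preprocess_fcn one might pass in: stochastic binarization, a common preprocessing step for PixelCNN-style models (this particular function is illustrative, not from the project):

import numpy as np

def binarize(images):
    # Draw each pixel as a Bernoulli sample with probability equal to its intensity.
    return (np.random.uniform(size=images.shape) < images).astype(np.float32)

datasets = get_dataset('/tmp/MNIST_data', preprocess_fcn=binarize)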
Example #13
Source File: autoencoder_t-sne.py    From Autoencoder-TensorBoard-t-SNE with MIT License
def generate_metadata_file():
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir,
                                      one_hot=True)
    # The ".tsv" file will contain one number per row to point to the good label
    # for each test example in the dataset.
    # For example, labels could be saved as plain text on those lines if needed.
    # In our case we have only 10 possible different labels, so their
    # "uniqueness" is recognised to later associate colors automatically in
    # TensorBoard.
    def save_metadata(file):
        with open(file, 'w') as f:
            for i in range(NB_TEST_DATA):
                c = np.nonzero(mnist.test.labels[::1])[1:][0][i]
                f.write('{}\n'.format(c))

    save_metadata(FLAGS.log_dir + '/projector/metadata.tsv') 
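The indexing expression np.nonzero(mnist.test.labels[::1])[1:][0][i] recovers the class index of the i-th one-hot row; an equivalent, clearer formulation of the same loop would be:

import numpy as np

classes = np.argmax(mnist.test.labels, axis=1)  # one class index per test row
with open(FLAGS.log_dir + '/projector/metadata.tsv', 'w') as f:
    for c in classes[:NB_TEST_DATA]:
        f.write('{}\n'.format(c))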
Example #14
Source File: mnist_env.py    From HardRLWithYoutube with MIT License
def __init__(
            self,
            seed=0,
            episode_len=None,
            no_images=None
    ):
        from tensorflow.examples.tutorials.mnist import input_data
        # We could use a temporary directory for this with a context manager and
        # TemporaryDirectory, but then each test that uses MNIST would re-download
        # the data. This way the data is not cleaned up, but we only download it
        # once per machine.
        mnist_path = osp.join(tempfile.gettempdir(), 'MNIST_data')
        with filelock.FileLock(mnist_path + '.lock'):
            self.mnist = input_data.read_data_sets(mnist_path)

        self.np_random = np.random.RandomState()
        self.np_random.seed(seed)

        self.observation_space = Box(low=0.0, high=1.0, shape=(28,28,1))
        self.action_space = Discrete(10)
        self.episode_len = episode_len
        self.time = 0
        self.no_images = no_images

        self.train_mode()
        self.reset() 
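A usage sketch, assuming the surrounding class is the Gym-style MnistEnv from the original baselines code this mirrors (the constructor arguments are illustrative):

env = MnistEnv(episode_len=100)
ob = env.reset()                            # a (28, 28, 1) digit image in [0, 1]
action = env.action_space.sample()          # guess a digit, 0-9
ob, reward, done, info = env.step(action)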
Example #15
Source File: tf_mnist_example.py    From telegrad with GNU General Public License v3.0
def fill_feed_dict(data_set, images_pl, labels_pl):
  """Fills the feed_dict for training the given step.
  A feed_dict takes the form of:
  feed_dict = {
      <placeholder>: <tensor of values to be passed for placeholder>,
      ....
  }
  Args:
    data_set: The set of images and labels, from input_data.read_data_sets()
    images_pl: The images placeholder, from placeholder_inputs().
    labels_pl: The labels placeholder, from placeholder_inputs().
  Returns:
    feed_dict: The feed dictionary mapping from placeholders to values.
  """
  # Create the feed_dict for the placeholders filled with the next
  # `batch size` examples.
  images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
                                                 FLAGS.fake_data)
  feed_dict = {
      images_pl: images_feed,
      labels_pl: labels_feed,
  }
  return feed_dict 
Example #16
Source File: mnist_with_summaries.py    From mnist-tensorboard-embeddings with MIT License
def generate_metadata_file():
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir,
                                      one_hot=True,
                                      fake_data=FLAGS.fake_data)
    def save_metadata(file):
        with open(file, 'w') as f:
#            f.write('id\tchar\n')
            for i in range(FLAGS.max_steps):
                c = np.nonzero(mnist.test.labels[::1])[1:][0][i]
                f.write('{}\n'.format(c))
    # save metadata file
    save_metadata(FLAGS.log_dir + '/projector/metadata.tsv') 
Example #17
Source File: InceptionNet.py    From Machine-Learning-Study-Notes with Apache License 2.0
def test(self):
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        count = 0
        i = 0
        for img, label in zip(mnist.test.images, mnist.test.labels):
            img = np.reshape(img, [1, 784])
            label = np.reshape(label, [1, 10])
            pre = self._sess.run(self._pre, 
                                 feed_dict={self._x: img, self._y: label, self._keep_prob:1.0})
            if np.equal(np.argmax(pre, 1), np.argmax(label, 1)):
                count += 1
            i += 1
            if i % 100 == 0:
                print("step: {0:d}/{1:d}, accuracy: {2:.3f}".format(i, len(mnist.test.images), count / i))
        print("accuracy: ", (count / i)) 
Example #18
Source File: InceptionNet.py    From Machine-Learning-Study-Notes with Apache License 2.0
def train(self):
        try:
#             mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
#             fig = plt.figure("cross-entropy")
#             mpl.rcParams['xtick.labelsize'] = 8
#             mpl.rcParams['ytick.labelsize'] = 8
#             ax = fig.add_subplot(111)
#             ax.grid(True)
            ac = []
            aac = []
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                for i in range(self._maxIter):
#                     train, label = mnist.train.next_batch(50)
                    train, label = self._imageObject.nextBatch(24)
                    _, accuracy, loss = sess.run([self._train, self._accuracy, self._cross_entry], feed_dict={self._x: train, 
                                                     self._y: label, self._keep_prob: 0.5})
                    ac.append(accuracy)
                    aac.append(np.mean(np.array(ac)))
#                     ax.plot(np.arange(len(ac)), np.array(ac), linewidth=0.8, color="b")
#                     ax.plot(np.arange(len(aac)), np.array(aac), linewidth=0.8, color="r")
#                     plt.pause(0.1)
                    if i % 10 == 0:
                        print("step {0:d}/{1:d},accuracy: {2:.3f}, loss: {3:.3f}".format(i, self._maxIter, accuracy, loss))
                    if i % 250 == 0:    
                        tf.train.Saver().save(sess, "{0}model".format(save_path), global_step=i)
        except Exception as e:
            print(e)
        finally:
            fig = plt.figure("cross-entropy")
            mpl.rcParams['xtick.labelsize'] = 8
            mpl.rcParams['ytick.labelsize'] = 8
            ax = fig.add_subplot(111)
            ax.plot(np.arange(len(ac)), np.array(ac), linewidth=0.8, color="b")
            ax.plot(np.arange(len(aac)), np.array(aac), linewidth=0.8, color="r")
            plt.show() 
Example #19
Source File: SimpleCNN.py    From Machine-Learning-Study-Notes with Apache License 2.0
def train(self):
        try:
            mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
            fig = plt.figure("cross-entropy")
            mpl.rcParams['xtick.labelsize'] = 8
            mpl.rcParams['ytick.labelsize'] = 8
            ax = fig.add_subplot(111)
            ax.grid(True)
            ac = []
            aac = []
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                for i in range(self._maxIter):
                    train, label = mnist.train.next_batch(50)
                    _, accuracy, loss = sess.run([self._train_step, self._accuracy, self._cross_entry], feed_dict={self._x: train, 
                                                     self._y: label, self._keep_prob: 0.5})
                    ac.append(accuracy)
                    aac.append(np.mean(np.array(ac)))
                    ax.plot(np.arange(len(ac)), np.array(ac), linewidth=0.8, color="b")
                    ax.plot(np.arange(len(aac)), np.array(aac), linewidth=0.8, color="r")
                    plt.pause(0.1)
                    if i % 10 == 0:
                        print("step {0:d}/{1:d},accuracy: {2:.3f}, loss: {3:.3f}".format(i, self._maxIter, accuracy, loss))
                    if i % 100 == 0:    
                        tf.train.Saver().save(sess, "{0}model".format(save_path), global_step=i)
        except Exception as e:
            print(e)
        finally:
            plt.show() 
Example #20
Source File: utils.py    From Transforming-Autoencoder-TF with MIT License
def load_validation_data():
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    return mnist.validation.images 
Example #21
Source File: mnist_correctness_test.py    From gradient-checkpointing with MIT License
def train_dataset(data_dir):
  """Returns a tf.data.Dataset yielding (image, label) pairs for training."""
  data = input_data.read_data_sets(data_dir, one_hot=True).train
  return tf.data.Dataset.from_tensor_slices((data.images, data.labels)) 
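A sketch of how the returned tf.data.Dataset would typically be consumed in TF 1.x (shuffle buffer and batch size are illustrative):

dataset = train_dataset('/tmp/MNIST_data').shuffle(55000).batch(128).repeat()
images, labels = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    batch_images, batch_labels = sess.run([images, labels])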
Example #22
Source File: MNIST.py    From TensorFlow-VAE with MIT License
def load_data():
    """ Download MNIST data from TensorFlow package """
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
    train_data = mnist.train.images
    test_data = mnist.test.images
    valid_data = mnist.validation.images
    train_label = mnist.train.labels
    test_label = mnist.test.labels
    valid_label = mnist.validation.labels
    all_data = [train_data, test_data, valid_data]
    all_labels = [train_label, test_label, valid_label]
    return all_data, all_labels 
Example #23
Source File: mnist_t-sne.py    From mnist-tensorboard-embeddings with MIT License
def generate_metadata_file():
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir,
                                      one_hot=True,
                                      fake_data=FLAGS.fake_data)
    def save_metadata(file):
        with open(file, 'w') as f:
            for i in range(FLAGS.max_steps):
                c = np.nonzero(mnist.test.labels[::1])[1:][0][i]
                f.write('{}\n'.format(c))

    save_metadata(FLAGS.log_dir + '/projector/metadata.tsv') 
Example #24
Source File: mnist_t-sne.py    From mnist-tensorboard-embeddings with MIT License
def generate_embeddings():
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir,
                                      one_hot=True,
                                      fake_data=FLAGS.fake_data)
    sess = tf.InteractiveSession()

    # Input set for Embedded TensorBoard visualization
    # Performed with cpu to conserve memory and processing power
    with tf.device("/cpu:0"):
        embedding = tf.Variable(tf.stack(mnist.test.images[:FLAGS.max_steps], axis=0), trainable=False, name='embedding')

    tf.global_variables_initializer().run()

    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(FLAGS.log_dir + '/projector', sess.graph)

    # Add embedding tensorboard visualization. Need tensorflow version
    # >= 0.12.0RC0
    config = projector.ProjectorConfig()
    embed = config.embeddings.add()
    embed.tensor_name = 'embedding:0'
    embed.metadata_path = os.path.join(FLAGS.log_dir, 'projector/metadata.tsv')
    embed.sprite.image_path = os.path.join(FLAGS.data_dir, 'mnist_10k_sprite.png')

    # Specify the width and height of a single thumbnail.
    embed.sprite.single_image_dim.extend([28, 28])
    projector.visualize_embeddings(writer, config)

    saver.save(sess, os.path.join(
        FLAGS.log_dir, 'projector/a_model.ckpt'), global_step=FLAGS.max_steps) 
Example #25
Source File: lshutils.py    From Fly-LSH with MIT License
def __init__(self,name,path='./datasets/'):
        self.path=path
        self.name=name.upper()
        if self.name=='MNIST' or self.name=='FMNIST':
            self.indim=784
            try:
                self.data=read_data_sets(self.path+self.name)
            except OSError as err:
                print(str(err))
                raise ValueError('Try again')

        elif self.name=='CIFAR10':
            self.indim=(32,32,3)
            if self.name not in os.listdir(self.path):
                print('Data not in path')
                raise ValueError()
        elif self.name=='GLOVE':
            self.indim=300
            self.data=pickle.load(open(self.path+'glove30k.p','rb'))

        elif self.name=='SIFT':
            self.indim=128
            self.data=loadmat(self.path+self.name+'/siftvecs.mat')['vecs']

        elif self.name=='GIST':
            self.indim=960
            self.data=loadmat(self.path+self.name+'/gistvecs.mat')['vecs']

        elif self.name=='LMGIST':
            self.indim=512
            self.data=loadmat(self.path+self.name+'/LabelMe_gist.mat')['gist']

        elif self.name=='RANDOM':
            self.indim=128
            self.data=np.random.random(size=(100_000,self.indim)) #np.random.randn(100_000,self.indim) 
Example #26
Source File: utils.py    From VAE-GMVAE with Apache License 2.0
def load_MNIST():
    data_path = '../data/MNIST_data'
    data = input_data.read_data_sets(data_path, one_hot=False)
    x_train_aux = data.train.images
    x_test = data.test.images
    data_dim = data.train.images.shape[1]
    n_train = data.train.images.shape[0]

    train_size = int(n_train * 0.8)
    valid_size = n_train - train_size
    x_valid, x_train = merge_datasets(x_train_aux, data_dim, train_size, valid_size)
    print('Data loaded. ', time.localtime().tm_hour,
          ':', time.localtime().tm_min, 'h')
    # logs.write('\tData loaded ' + str(time.localtime().tm_hour) +':' + str(time.localtime().tm_min) + 'h\n')

    x_train = np.reshape(x_train, [-1, 28, 28, 1])
    x_valid = np.reshape(x_valid, [-1, 28, 28, 1])
    x_test = np.reshape(x_test, [-1, 28, 28, 1])


    train_dataset = Dataset(x_train, data.train.labels)
    valid_dataset = Dataset(x_valid, data.train.labels)  # NOTE: reuses the train labels; their size may not match x_valid
    test_dataset = Dataset(x_test, data.test.labels)

    print('Train Data: ', train_dataset.x.shape)
    print('Valid Data: ', valid_dataset.x.shape)
    print('Test Data: ', test_dataset.x.shape)

    return train_dataset, valid_dataset, test_dataset 
Example #27
Source File: fully_connected_feed.py    From deep_image_model with Apache License 2.0
def fill_feed_dict(data_set, images_pl, labels_pl):
  """Fills the feed_dict for training the given step.

  A feed_dict takes the form of:
  feed_dict = {
      <placeholder>: <tensor of values to be passed for placeholder>,
      ....
  }

  Args:
    data_set: The set of images and labels, from input_data.read_data_sets()
    images_pl: The images placeholder, from placeholder_inputs().
    labels_pl: The labels placeholder, from placeholder_inputs().

  Returns:
    feed_dict: The feed dictionary mapping from placeholders to values.
  """
  # Create the feed_dict for the placeholders filled with the next
  # `batch size` examples.
  images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
                                                 FLAGS.fake_data)
  feed_dict = {
      images_pl: images_feed,
      labels_pl: labels_feed,
  }
  return feed_dict 
Example #28
Source File: random_forest_mnist.py    From deep_image_model with Apache License 2.0
def train_and_eval():
  """Train and evaluate the model."""
  model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir
  print('model directory = %s' % model_dir)

  estimator = build_estimator(model_dir)

  # TensorForest's loss hook allows training to terminate early if the
  # forest is no longer growing.
  early_stopping_rounds = 100
  monitor = random_forest.TensorForestLossHook(early_stopping_rounds)

  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)

  estimator.fit(x=mnist.train.images, y=mnist.train.labels,
                batch_size=FLAGS.batch_size, monitors=[monitor])

  metric_name = 'accuracy'
  metric = {metric_name:
            metric_spec.MetricSpec(
                eval_metrics.get_metric(metric_name),
                prediction_key=eval_metrics.get_prediction_key(metric_name))}

  results = estimator.evaluate(x=mnist.test.images, y=mnist.test.labels,
                               batch_size=FLAGS.batch_size,
                               metrics=metric)
  for key in sorted(results):
    print('%s: %s' % (key, results[key])) 
Example #29
Source File: fully_connected_feed.py    From deep_image_model with Apache License 2.0
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  precision = true_count / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision)) 
Example #30
Source File: mnist_softmax.py    From deep_image_model with Apache License 2.0
def main(_):
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, W) + b

  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])

  # The raw formulation of cross-entropy,
  #
  #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
  #                                 reduction_indices=[1]))
  #
  # can be numerically unstable.
  #
  # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
  # outputs of 'y', and then average across the batch.
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  sess = tf.InteractiveSession()
  # Train
  tf.global_variables_initializer().run()
  for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                      y_: mnist.test.labels}))
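Note: the tensorflow.examples.tutorials.mnist module was deprecated during TensorFlow 1.x and removed in TensorFlow 2.x. A rough replacement for the reads above, using tf.keras.datasets and converting to the flat, one-hot format these examples expect:

import numpy as np
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0  # flat floats in [0, 1]
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0
y_train = np.eye(10, dtype='float32')[y_train]  # equivalent of one_hot=True
y_test = np.eye(10, dtype='float32')[y_test]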