Python tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets() Examples

The following are 6 code examples of tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets(), drawn from open-source projects; the original source file and license are listed above each example. Note that tf.contrib was removed in TensorFlow 2.x, so these snippets target TensorFlow 1.x.
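As a quick orientation before the examples, here is a minimal usage sketch; the download directory and the one_hot flag are illustrative assumptions, not taken from the projects below:

from tensorflow.contrib.learn.python.learn.datasets import mnist

# Downloads MNIST into the given directory (if not already present) and returns
# an object with .train, .validation, and .test splits; each split exposes
# .images, .labels, and a next_batch() method.
data_sets = mnist.read_data_sets('/tmp/mnist_data', one_hot=True)  # assumed path/flag
batch_images, batch_labels = data_sets.train.next_batch(100)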
Example #1
Source File: MNIST.py    From dynamic-training-bench with Mozilla Public License 2.0
# Method of a dataset wrapper class; requires import os, import tensorflow as tf,
# and from tensorflow.contrib.learn.python.learn.datasets import mnist.
# convert_to_tfrecords is a helper defined elsewhere in the same project.
def _maybe_download_and_extract(self):
        """Download and extract the MNIST dataset."""
        data_sets = mnist.read_data_sets(
            self._data_dir,
            dtype=tf.uint8,
            reshape=False,
            validation_size=self._num_examples_per_epoch_for_eval)

        # Convert to Examples and write the result to TFRecords.
        if not tf.gfile.Exists(os.path.join(self._data_dir, 'train.tfrecords')):
            convert_to_tfrecords(data_sets.train, 'train', self._data_dir)

        if not tf.gfile.Exists(
                os.path.join(self._data_dir, 'validation.tfrecords')):
            convert_to_tfrecords(data_sets.validation, 'validation',
                                 self._data_dir)

        if not tf.gfile.Exists(os.path.join(self._data_dir, 'test.tfrecords')):
            convert_to_tfrecords(data_sets.test, 'test', self._data_dir) 
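The convert_to_tfrecords helper is defined elsewhere in the project. As a hedged sketch, such helpers usually follow the TFRecord conversion pattern from TensorFlow's own convert_to_records.py; the feature keys and exact signature below are assumptions, not the project's actual code:

import os
import tensorflow as tf

def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def convert_to_tfrecords(data_set, name, directory):
    """Writes one split (e.g. data_sets.train) to <directory>/<name>.tfrecords."""
    images, labels = data_set.images, data_set.labels
    # With reshape=False, images keep shape (num_examples, rows, cols, depth).
    rows, cols, depth = images.shape[1], images.shape[2], images.shape[3]
    filename = os.path.join(directory, name + '.tfrecords')
    with tf.python_io.TFRecordWriter(filename) as writer:
        for index in range(data_set.num_examples):
            example = tf.train.Example(features=tf.train.Features(feature={
                'height': _int64_feature(rows),
                'width': _int64_feature(cols),
                'depth': _int64_feature(depth),
                'label': _int64_feature(int(labels[index])),
                'image_raw': _bytes_feature(images[index].tobytes())}))
            writer.write(example.SerializeToString())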
Example #2
Source File: create_records.py    From cloudml-dist-mnist-example with Apache License 2.0
# Requires import tensorflow as tf and
# from tensorflow.contrib.learn.python.learn.datasets import mnist.
# FLAGS and convert_to are defined elsewhere in this script.
def main(unused_argv):
  # Get the data.
  data_sets = mnist.read_data_sets(FLAGS.directory,
                                   dtype=tf.uint8,
                                   reshape=False,
                                   validation_size=FLAGS.validation_size)

  # Convert to Examples and write the result to TFRecords.
  convert_to(data_sets.train, 'train')
  convert_to(data_sets.validation, 'validation')
  convert_to(data_sets.test, 'test') 
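FLAGS is defined elsewhere in the script. A plausible definition, assuming TF 1.x's tf.app.flags; the default values here are illustrative only:

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('directory', '/tmp/mnist_data',
                           'Directory to download the data to and write TFRecords in.')
tf.app.flags.DEFINE_integer('validation_size', 5000,
                            'Number of training examples to hold out for validation.')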
Example #3
Source File: convert_to_records.py    From deep_image_model with Apache License 2.0
# Same pattern as Example #2; FLAGS and convert_to are defined elsewhere in the script.
def main(unused_argv):
  # Get the data.
  data_sets = mnist.read_data_sets(FLAGS.directory,
                                   dtype=tf.uint8,
                                   reshape=False,
                                   validation_size=FLAGS.validation_size)

  # Convert to Examples and write the result to TFRecords.
  convert_to(data_sets.train, 'train')
  convert_to(data_sets.validation, 'validation')
  convert_to(data_sets.test, 'test') 
Example #4
Source File: vq_vae.py    From ml-on-gcp with Apache License 2.0
# Requires import numpy as np and import tensorflow as tf; MnistType,
# build_fake_data, load_bernoulli_mnist_dataset, and IMAGE_SHAPE are defined
# elsewhere in the same file.
def build_input_pipeline(data_dir, batch_size, heldout_size, mnist_type):
  """Builds an Iterator switching between train and heldout data."""
  # Build an iterator over training batches.
  if mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]:
    if mnist_type == MnistType.FAKE_DATA:
      mnist_data = build_fake_data()
    else:
      mnist_data = mnist.read_data_sets(data_dir)
    training_dataset = tf.data.Dataset.from_tensor_slices(
        (mnist_data.train.images, np.int32(mnist_data.train.labels)))
    heldout_dataset = tf.data.Dataset.from_tensor_slices(
        (mnist_data.validation.images,
         np.int32(mnist_data.validation.labels)))
  elif mnist_type == MnistType.BERNOULLI:
    training_dataset = load_bernoulli_mnist_dataset(data_dir, "train")
    heldout_dataset = load_bernoulli_mnist_dataset(data_dir, "valid")
  else:
    raise ValueError("Unknown MNIST type.")

  training_batches = training_dataset.repeat().batch(batch_size)
  training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)

  # Build an iterator over the heldout set with batch_size=heldout_size,
  # i.e., return the entire heldout set as a constant.
  heldout_frozen = (heldout_dataset.take(heldout_size).
                    repeat().batch(heldout_size))
  heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_frozen)

  # Combine these into a feedable iterator that can switch between training
  # and validation inputs.
  handle = tf.compat.v1.placeholder(tf.string, shape=[])
  feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle(
      handle, training_batches.output_types, training_batches.output_shapes)
  images, labels = feedable_iterator.get_next()
  # Reshape as a pixel image and binarize pixels.
  images = tf.reshape(images, shape=[-1] + IMAGE_SHAPE)
  if mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]:
    images = tf.cast(images > 0.5, dtype=tf.int32)

  return images, labels, handle, training_iterator, heldout_iterator 
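The returned handle placeholder and iterators implement the feedable-iterator pattern: you fetch a string handle for each iterator once, then feed the matching handle to switch pipelines. A hedged usage sketch follows; the session setup and values are assumed, not part of the original file:

with tf.compat.v1.Session() as sess:
    # Fetch a string handle per iterator once, outside the training loop.
    train_handle = sess.run(training_iterator.string_handle())
    heldout_handle = sess.run(heldout_iterator.string_handle())
    # Feed the matching handle to choose which pipeline get_next() reads from.
    train_images, train_labels = sess.run(
        [images, labels], feed_dict={handle: train_handle})
    heldout_images, heldout_labels = sess.run(
        [images, labels], feed_dict={handle: heldout_handle})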
Example #5
Source File: mnist_to_tfrecords.py    From AmusingPythonCodes with MIT License
# Same pattern as Example #2; FLAGS and convert_to are defined elsewhere in the script.
def main():
    # Get the data.
    data_sets = mnist.read_data_sets(FLAGS.directory, dtype=tf.uint8, reshape=False,
                                     validation_size=FLAGS.validation_size)
    # Convert to Examples and write the result to TFRecords.
    convert_to(data_sets.train, 'train')
    convert_to(data_sets.validation, 'validation')
    convert_to(data_sets.test, 'test') 
Example #6
Source File: convert_to_records.py    From mnist-multi-gpu with Apache License 2.0
# Same pattern as Example #2; FLAGS and convert_to are defined elsewhere in the script.
def main(unused_argv):
  # Get the data.
  data_sets = mnist.read_data_sets(FLAGS.directory,
                                   dtype=tf.uint8,
                                   reshape=False,
                                   validation_size=FLAGS.validation_size)

  # Convert to Examples and write the result to TFRecords.
  convert_to(data_sets.train, 'train')
  convert_to(data_sets.validation, 'validation')
  convert_to(data_sets.test, 'test')