Python tensorflow.WholeFileReader() Examples

The following are 30 code examples of tensorflow.WholeFileReader(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
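Before the examples, here is a minimal end-to-end sketch of the typical tf.WholeFileReader() pipeline, assuming TensorFlow 1.x graph mode (the file list and PNG decoding below are illustrative placeholders, not taken from any of the projects): filenames go into a queue, the reader returns (filename, raw bytes) pairs, and queue runners must be started before anything can be evaluated.

import tensorflow as tf

# Placeholder file list; replace with real image paths.
filenames = ['image_0.png', 'image_1.png']

filename_queue = tf.train.string_input_producer(filenames, shuffle=False)
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)        # key: filename, value: raw file contents
image = tf.image.decode_png(value, channels=3)  # decode the bytes into a uint8 image tensor

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    decoded = sess.run(image)                   # one decoded image per run call
    coord.request_stop()
    coord.join(threads)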
Example #1
Source File: kpn_data_provider.py    From burst-denoising with Apache License 2.0    6 votes
def load_batch_noised(depth, dataset_dir, batch_size=32, height=64, width=64, degamma=1., sig_range=20.):

  filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
  filename_queue = tf.train.string_input_producer(filenames)

  noised_stack = None
  while noised_stack is None:
    _, image_file = tf.WholeFileReader().read(filename_queue)
    image = tf.image.decode_image(image_file)
    noised_stack, denoised_stack, sig_stack = make_stack_noised((tf.cast(image[0], tf.float32) / 255.)**degamma, height, width, depth, sig_range)

  # Batch it up.
  noised, denoised, sig = tf.train.shuffle_batch(
        [noised_stack, denoised_stack, sig_stack],
        batch_size=batch_size,
        num_threads=2,
        capacity=1024 + 3 * batch_size,
        enqueue_many=True,
        min_after_dequeue=500)

  return noised, denoised, sig 
Example #2
Source File: input.py    From UnFlow with MIT License    6 votes
def _read_flow(filenames, num_epochs=None):
    """Given a list of filenames, constructs a reader op for ground truth flow files."""
    filename_queue = tf.train.string_input_producer(filenames,
        shuffle=False, capacity=len(filenames), num_epochs=num_epochs)
    reader = tf.WholeFileReader()
    _, value = reader.read(filename_queue)
    value = tf.reshape(value, [1])
    value_width = tf.substr(value, 4, 4)
    value_height = tf.substr(value, 8, 4)
    width = tf.reshape(tf.decode_raw(value_width, out_type=tf.int32), [])
    height = tf.reshape(tf.decode_raw(value_height, out_type=tf.int32), [])

    value_flow = tf.substr(value, 12, 8 * width * height)
    flow = tf.decode_raw(value_flow, out_type=tf.float32)
    flow = tf.reshape(flow, [height, width, 2])
    mask = tf.to_float(tf.logical_and(flow[:, :, 0] < 1e9, flow[:, :, 1] < 1e9))
    mask = tf.reshape(mask, [height, width, 1])

    return flow, mask 
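For context, the byte offsets above follow the Middlebury .flo layout: a 4-byte magic tag, a 4-byte int32 width, a 4-byte int32 height, then width * height * 2 float32 values. A hedged sketch of how the returned ops might be evaluated (flow_files is a hypothetical list of .flo paths; this assumes the TF 1.x queue-runner pattern):

flow_op, mask_op = _read_flow(flow_files)  # flow_files: hypothetical list of .flo paths

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # required when num_epochs is set
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    flow, mask = sess.run([flow_op, mask_op])   # arrays of shape [H, W, 2] and [H, W, 1]
    coord.request_stop()
    coord.join(threads)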
Example #3
Source File: validate.py    From tf-vaegan with MIT License    6 votes
def SingleFileReader(filename, shape, rtype='tanh', ext='jpg'):    
    n, h, w, c = shape
    if ext == 'jpg' or ext == 'jpeg':
        decoder = tf.image.decode_jpeg
    elif ext == 'png':
        decoder = tf.image.decode_png
    else:
        raise ValueError('Unsupported file type: {:s}.'.format(ext) +
            ' (only *.png and *.jpg are supported)')

    filename_queue = tf.train.string_input_producer(filename, shuffle=False)
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    img = decoder(value, channels=c)
    img = tf.image.crop_to_bounding_box(img, 0, 0, h, w)
    img = tf.to_float(img)
    if rtype == 'tanh':
        img = tf.div(img, 127.5) - 1.

    imgs = tf.train.batch(
        [img],
        batch_size=n,
        capacity=1)
    return imgs, key 
Example #4
Source File: reader_ops_test.py    From deep_image_model with Apache License 2.0    6 votes
def testInfiniteEpochs(self):
    with self.test_session() as sess:
      reader = tf.WholeFileReader("test_reader")
      queue = tf.FIFOQueue(99, [tf.string], shapes=())
      enqueue = queue.enqueue_many([self._filenames])
      key, value = reader.read(queue)

      enqueue.run()
      self._ExpectRead(sess, key, value, 0)
      self._ExpectRead(sess, key, value, 1)
      enqueue.run()
      self._ExpectRead(sess, key, value, 2)
      self._ExpectRead(sess, key, value, 0)
      self._ExpectRead(sess, key, value, 1)
      enqueue.run()
      self._ExpectRead(sess, key, value, 2)
      self._ExpectRead(sess, key, value, 0) 
Example #5
Source File: data.py    From layered-scene-inference with Apache License 2.0    6 votes
def img_queue_loader(self, img_list, nc=3):
    """Load images into queue."""
    with tf.name_scope('queued_data_loader'):
      filename_queue = tf.train.string_input_producer(
          img_list, seed=0, shuffle=True)
      image_reader = tf.WholeFileReader()
      _, image_file = image_reader.read(filename_queue)
      image = tf.cast(tf.image.decode_image(image_file), 'float32')
      image *= 1.0 / 255  # since images are loaded in [0, 255]
      image = tf.slice(image, [0, 0, 0], [-1, -1, nc])

      orig_shape = tf.shape(image)
      orig_shape.set_shape((3,))

      image = tf.image.resize_images(
          image, [self.h, self.w], method=tf.image.ResizeMethod.AREA)
      image.set_shape((self.h, self.w, nc))

      return image, orig_shape 
Example #6
Source File: input.py    From UnFlow with MIT License    6 votes
def _read_flow(filenames, num_epochs=None):
    """Given a list of filenames, constructs a reader op for ground truth flow files."""
    filename_queue = tf.train.string_input_producer(filenames,
        shuffle=False, capacity=len(filenames), num_epochs=num_epochs)
    reader = tf.WholeFileReader()
    _, value = reader.read(filename_queue)
    value = tf.reshape(value, [1])
    value_width = tf.substr(value, 4, 4)
    value_height = tf.substr(value, 8, 4)
    width = tf.reshape(tf.decode_raw(value_width, out_type=tf.int32), [])
    height = tf.reshape(tf.decode_raw(value_height, out_type=tf.int32), [])

    value_flow = tf.substr(value, 12, 8 * 436 * 1024)
    flow = tf.decode_raw(value_flow, out_type=tf.float32)

    return tf.reshape(flow, [436, 1024, 2]) 
Example #7
Source File: tensorcheck_test.py    From in-silico-labeling with Apache License 2.0    6 votes
def setUp(self):
    super(ShapeTest, self).setUp()

    filename_op = tf.train.string_input_producer([
        os.path.join(os.environ['TEST_SRCDIR'],
                     'isl/testdata/research_logo.jpg')
    ])

    reader = tf.WholeFileReader()
    _, encoded_image_op = reader.read(filename_op)
    image_op = tf.image.decode_jpeg(encoded_image_op, channels=3)

    self.correct_shape_op = tf.identity(image_op)
    self.correct_shape_op.set_shape([250, 250, 3])
    self.correct_lt = lt.LabeledTensor(self.correct_shape_op,
                                       ['x', 'y', 'color'])

    self.incorrect_shape_op = tf.identity(image_op)
    self.incorrect_shape_op.set_shape([50, 50, 3])
    self.incorrect_lt = lt.LabeledTensor(self.incorrect_shape_op,
                                         ['x', 'y', 'color'])

    self.okay_lt = tensorcheck.shape(self.correct_lt)
    self.error_lt = tensorcheck.shape(self.incorrect_lt) 
Example #8
Source File: test_util.py    From in-silico-labeling with Apache License 2.0    6 votes
def load_tensorflow_image(self, channel_label: str,
                            image_name: str) -> lt.LabeledTensor:
    # All images will be cropped to this size.
    crop_size = 1024

    filename_op = tf.train.string_input_producer([self.data_path(image_name)])
    wfr = tf.WholeFileReader()
    _, encoded_png_op = wfr.read(filename_op)
    image_op = tf.image.decode_png(
        tf.reshape(encoded_png_op, shape=[]), channels=1, dtype=tf.uint16)
    image_op = image_op[:crop_size, :crop_size, :]
    image_op = tf.to_float(image_op) / np.iinfo(np.uint16).max
    image_op = tf.reshape(image_op, [1, 1024, 1024, 1])

    return lt.LabeledTensor(
        image_op, ['batch', 'row', 'column', ('channel', [channel_label])]) 
Example #9
Source File: word2vec.py    From tensorflow_nlp with Apache License 2.0    6 votes
def read_word_freq(filename):
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    lines = tf.string_split([value], "\n")

    with tf.Session() as sess:
        # Start populating the filename queue.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        sess.run([lines])
        lines_eval = lines.eval()
        result = []
        for line in lines_eval.values:
            s = line.split()
            result.append((s[0], int(s[1])))
        coord.request_stop()
        coord.join(threads)
    return result 
Example #10
Source File: GAN_models.py    From WassersteinGAN.tensorflow with MIT License    6 votes
def _read_input(self, filename_queue):
        class DataRecord(object):
            pass

        reader = tf.WholeFileReader()
        key, value = reader.read(filename_queue)
        record = DataRecord()
        decoded_image = tf.image.decode_jpeg(value,
                                             channels=3)  # Assumption:Color images are read and are to be generated

        # decoded_image_4d = tf.expand_dims(decoded_image, 0)
        # resized_image = tf.image.resize_bilinear(decoded_image_4d, [self.target_image_size, self.target_image_size])
        # record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])

        cropped_image = tf.cast(
            tf.image.crop_to_bounding_box(decoded_image, 55, 35, self.crop_image_size, self.crop_image_size),
            tf.float32)
        decoded_image_4d = tf.expand_dims(cropped_image, 0)
        resized_image = tf.image.resize_bilinear(decoded_image_4d, [self.resized_image_size, self.resized_image_size])
        record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
        return record 
Example #11
Source File: kpn_data_provider.py    From burst-denoising with Apache License 2.0    6 votes
def load_batch_hqjitter(dataset_dir, patches_per_img=32, min_queue=8, BURST_LENGTH=1, batch_size=32,
                        repeats=1, height=64, width=64, degamma=1.,
                        to_shift=1., upscale=1, jitter=1, smalljitter=1):

  filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
  filename_queue = tf.train.string_input_producer(filenames)

  _, image_file = tf.WholeFileReader().read(filename_queue)
  image = tf.image.decode_image(image_file)
  patches = make_stack_hqjitter((tf.cast(image[0], tf.float32) / 255.)**degamma,
                                                    height, width, patches_per_img, BURST_LENGTH, to_shift, upscale, jitter)
  unique = batch_size//repeats
  # Batch it up.
  patches  = tf.train.shuffle_batch(
        [patches],
        batch_size=unique,
        num_threads=2,
        capacity=min_queue + 3 * batch_size,
        enqueue_many=True,
        min_after_dequeue=min_queue)

  print('PATCHES =================',patches.get_shape().as_list())

  patches = make_batch_hqjitter(patches, BURST_LENGTH, batch_size, repeats, height, width, to_shift, upscale, jitter, smalljitter)
  return patches 
Example #12
Source File: kpn_data_provider.py    From burst-denoising with Apache License 2.0    6 votes
def load_batch_demosaic(BURST_LENGTH, dataset_dir, batch_size=32, height=64, width=64, degamma=1., to_shift=1., upscale=1, jitter=1):

  filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
  filename_queue = tf.train.string_input_producer(filenames)

  mosaic = None
  while mosaic is None:
    _, image_file = tf.WholeFileReader().read(filename_queue)
    image = tf.image.decode_image(image_file)
    mosaic, demosaic, shift = make_stack_demosaic((tf.cast(image[0], tf.float32) / 255.)**degamma,
                                                  height, width, 128, BURST_LENGTH, to_shift, upscale, jitter)

  # Batch it up.
  mosaic, demosaic, shift = tf.train.shuffle_batch(
        [mosaic, demosaic, shift],
        batch_size=batch_size,
        num_threads=2,
        capacity=500 + 3 * batch_size,
        enqueue_many=True,
        min_after_dequeue=100)

  return mosaic, demosaic, shift 
Example #13
Source File: TrainLSP.py    From deeppose with GNU General Public License v3.0    6 votes
def read_my_file_format(filename_queue):
    image_reader = tf.WholeFileReader()
    _, image_data = image_reader.read(filename_queue)
    
    # Convert from a string to a vector of uint8 that is record_bytes long.
    record_bytes = tf.decode_raw(image_data, tf.uint8)
    
    # The first bytes represent the label, which we convert from uint8->float32.
    labels_ = tf.cast(tf.slice(record_bytes, [0], [LSPGlobals.TotalLabels]), tf.float32)
    
    # The remaining bytes after the label represent the image, which we reshape
    # from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(tf.slice(record_bytes, [LSPGlobals.TotalLabels], [LSPGlobals.TotalImageBytes]),
                          [FLAGS.input_size, FLAGS.input_size, FLAGS.input_depth])
    # Convert from [depth, height, width] to [height, width, depth].
    #processed_example = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
    

    return depth_major, labels_ 
Example #14
Source File: load_folder_images.py    From Generative-Adversarial-Network-based-Synthesis-for-Supervised-Medical-Image-Segmentation with MIT License    6 votes
def load_images_from_idlist(idlist, batch_size, num_preprocess_threads, min_queue_examples, shift_param = -128, rescale_param = 128, resized_image_size = [128, 128], shuffle = True):
  # Make a queue of file names including all the image files in the relative
  # image directory.
  filename_queue = tf.train.string_input_producer(idlist,
                                                  shuffle=shuffle)
  
  # Read an entire image file. If the images
  # are too large they could be split in advance to smaller files or use the Fixed
  # reader to split up the file.
  image_reader = tf.WholeFileReader()
  
  # Read a whole file from the queue, the first returned value in the tuple is the
  # filename which we are ignoring.
  _, image_file = image_reader.read(filename_queue)

  return _load_images(image_file, batch_size, num_preprocess_threads, min_queue_examples, shift_param, rescale_param, resized_image_size, shuffle) 
Example #15
Source File: load_folder_images.py    From Generative-Adversarial-Network-based-Synthesis-for-Supervised-Medical-Image-Segmentation with MIT License    6 votes
def load_images(folder_path_match, batch_size, num_preprocess_threads, min_queue_examples, shift_param = -128, rescale_param = 128, resized_image_size = [128, 128], shuffle = True):
  # Make a queue of file names including all the image files in the relative
  # image directory.
  filename_queue = tf.train.string_input_producer(
    tf.train.match_filenames_once(folder_path_match),
    shuffle=shuffle)
  
  # Read an entire image file. If the images
  # are too large they could be split in advance to smaller files or use the Fixed
  # reader to split up the file.
  image_reader = tf.WholeFileReader()
  
  # Read a whole file from the queue, the first returned value in the tuple is the
  # filename which we are ignoring.
  _, image_file = image_reader.read(filename_queue)

  return _load_images(image_file, batch_size, num_preprocess_threads, min_queue_examples, shift_param, rescale_param, resized_image_size, shuffle) 
Example #16
Source File: inputs.py    From TheNumericsOfGANs with MIT License    6 votes
def get_input_image(filename_queue, output_size, image_size, c_dim):
    # Read a record, getting filenames from the filename_queue.
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    image = tf.image.decode_image(value, channels=c_dim)
    image = tf.cast(image, tf.float32)/255.

    image_shape = tf.shape(image)
    image_height, image_width = image_shape[0], image_shape[1]
    offset_height = tf.cast((image_height - image_size)/2, tf.int32)
    offset_width = tf.cast((image_width - image_size)/2, tf.int32)
    image = tf.image.crop_to_bounding_box(
        image, offset_height, offset_width, image_size, image_size)
    image = tf.image.resize_images(image, [output_size, output_size])

    image.set_shape([output_size, output_size, c_dim])

    return image 
Example #17
Source File: swivel.py    From models with Apache License 2.0    5 votes
def _count_matrix_input(self, filenames, submatrix_rows, submatrix_cols):
    """Creates ops that read submatrix shards from disk."""
    random.shuffle(filenames)
    filename_queue = tf.train.string_input_producer(filenames)
    reader = tf.WholeFileReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
            'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
            'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
            'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
            'sparse_value': tf.VarLenFeature(dtype=tf.float32)
        })

    global_row = features['global_row']
    global_col = features['global_col']

    sparse_local_row = features['sparse_local_row'].values
    sparse_local_col = features['sparse_local_col'].values
    sparse_count = features['sparse_value'].values

    sparse_indices = tf.concat(
        axis=1, values=[tf.expand_dims(sparse_local_row, 1),
                        tf.expand_dims(sparse_local_col, 1)])

    count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
                               sparse_count)

    return global_row, global_col, count 
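Note that each shard file here is read in full and parsed as a single serialized tf.train.Example proto, not as a TFRecord. A hedged sketch of how such a shard file could be produced (illustrative only; the project's actual prep tooling may differ):

import tensorflow as tf

def write_submatrix_shard(path, global_rows, global_cols, local_rows, local_cols, values):
    """Serializes one submatrix as a single tf.train.Example and writes it to `path`."""
    example = tf.train.Example(features=tf.train.Features(feature={
        'global_row': tf.train.Feature(int64_list=tf.train.Int64List(value=global_rows)),
        'global_col': tf.train.Feature(int64_list=tf.train.Int64List(value=global_cols)),
        'sparse_local_row': tf.train.Feature(int64_list=tf.train.Int64List(value=local_rows)),
        'sparse_local_col': tf.train.Feature(int64_list=tf.train.Int64List(value=local_cols)),
        'sparse_value': tf.train.Feature(float_list=tf.train.FloatList(value=values)),
    }))
    with open(path, 'wb') as f:
        f.write(example.SerializeToString())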
Example #18
Source File: input.py    From DeepLearningImplementations with MIT License    5 votes
def input_data(sess):

    FLAGS = tf.app.flags.FLAGS

    list_images = glob.glob(os.path.join(FLAGS.dataset, "*.jpg"))

    # Read each JPEG file
    reader = tf.WholeFileReader()
    filename_queue = tf.train.string_input_producer(list_images)
    key, value = reader.read(filename_queue)
    channels = FLAGS.channels
    image = tf.image.decode_jpeg(value, channels=channels, name="dataset_image")
    image.set_shape([None, None, channels])

    # Crop and other random augmentations
    image = tf.image.random_flip_left_right(image)
    # image = tf.image.random_saturation(image, .95, 1.05)
    # image = tf.image.random_brightness(image, .05)
    # image = tf.image.random_contrast(image, .95, 1.05)

    # Center crop
    image = tf.image.central_crop(image, FLAGS.central_fraction)

    # Resize
    image = tf.image.resize_images(image, (FLAGS.img_size, FLAGS.img_size), method=tf.image.ResizeMethod.AREA)

    # Normalize
    image = normalize_image(image)

    # Format image to correct ordering
    if FLAGS.data_format == "NCHW":
        image = tf.transpose(image, (2,0,1))

    # Using asynchronous queues
    img_batch = tf.train.batch([image],
                               batch_size=FLAGS.batch_size,
                               num_threads=FLAGS.num_threads,
                               capacity=FLAGS.capacity_factor * FLAGS.batch_size,
                               name='batch_input')

    return img_batch 
Example #19
Source File: swivel.py    From Action_Recognition_Zoo with MIT License    5 votes
def count_matrix_input(filenames, submatrix_rows, submatrix_cols):
  """Reads submatrix shards from disk."""
  filename_queue = tf.train.string_input_producer(filenames)
  reader = tf.WholeFileReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      features={
          'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
          'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
          'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
          'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
          'sparse_value': tf.VarLenFeature(dtype=tf.float32)
      })

  global_row = features['global_row']
  global_col = features['global_col']

  sparse_local_row = features['sparse_local_row'].values
  sparse_local_col = features['sparse_local_col'].values
  sparse_count = features['sparse_value'].values

  sparse_indices = tf.concat(1, [tf.expand_dims(sparse_local_row, 1),
                                 tf.expand_dims(sparse_local_col, 1)])
  count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
                             sparse_count)

  queued_global_row, queued_global_col, queued_count = tf.train.batch(
      [global_row, global_col, count],
      batch_size=1,
      num_threads=4,
      capacity=32)

  queued_global_row = tf.reshape(queued_global_row, [submatrix_rows])
  queued_global_col = tf.reshape(queued_global_col, [submatrix_cols])
  queued_count = tf.reshape(queued_count, [submatrix_rows, submatrix_cols])

  return queued_global_row, queued_global_col, queued_count 
Example #20
Source File: create_cityscapes_tf_record.py    From motion-rcnn with MIT License    5 votes
def _read_raw(paths):
    path_queue = tf.train.string_input_producer(
        paths, shuffle=False, capacity=len(paths), num_epochs=1)
    reader = tf.WholeFileReader()
    _, raw = reader.read(path_queue)
    return raw 
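Because num_epochs=1 is passed, string_input_producer is backed by a local epoch-counter variable, so tf.local_variables_initializer() must run before the queue runners start, and an OutOfRangeError signals that every path has been read once. A minimal usage sketch (png_paths is a hypothetical list of file paths):

raw_op = _read_raw(png_paths)  # png_paths: hypothetical list of file paths

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # initializes the epoch counter
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        while True:
            raw_bytes = sess.run(raw_op)  # raw, undecoded file contents
    except tf.errors.OutOfRangeError:
        pass  # every file has been read exactly once
    finally:
        coord.request_stop()
        coord.join(threads)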
Example #21
Source File: swivel.py    From multilabel-image-classification-tensorflow with MIT License    5 votes
def _count_matrix_input(self, filenames, submatrix_rows, submatrix_cols):
    """Creates ops that read submatrix shards from disk."""
    random.shuffle(filenames)
    filename_queue = tf.train.string_input_producer(filenames)
    reader = tf.WholeFileReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
            'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
            'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
            'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
            'sparse_value': tf.VarLenFeature(dtype=tf.float32)
        })

    global_row = features['global_row']
    global_col = features['global_col']

    sparse_local_row = features['sparse_local_row'].values
    sparse_local_col = features['sparse_local_col'].values
    sparse_count = features['sparse_value'].values

    sparse_indices = tf.concat(
        axis=1, values=[tf.expand_dims(sparse_local_row, 1),
                        tf.expand_dims(sparse_local_col, 1)])

    count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
                               sparse_count)

    return global_row, global_col, count 
Example #22
Source File: swivel.py    From g-tensorflow-models with Apache License 2.0    5 votes
def _count_matrix_input(self, filenames, submatrix_rows, submatrix_cols):
    """Creates ops that read submatrix shards from disk."""
    random.shuffle(filenames)
    filename_queue = tf.train.string_input_producer(filenames)
    reader = tf.WholeFileReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
            'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
            'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
            'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
            'sparse_value': tf.VarLenFeature(dtype=tf.float32)
        })

    global_row = features['global_row']
    global_col = features['global_col']

    sparse_local_row = features['sparse_local_row'].values
    sparse_local_col = features['sparse_local_col'].values
    sparse_count = features['sparse_value'].values

    sparse_indices = tf.concat(
        axis=1, values=[tf.expand_dims(sparse_local_row, 1),
                        tf.expand_dims(sparse_local_col, 1)])

    count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
                               sparse_count)

    return global_row, global_col, count 
Example #23
Source File: 07_basic_filters.py    From stanford-tensorflow-tutorials with MIT License    5 votes
def read_one_image(filename):
    """ This is just to demonstrate how to open an image in TensorFlow,
    but it's actually a lot easier to use Pillow 
    """
    filename_queue = tf.train.string_input_producer([filename])
    image_reader = tf.WholeFileReader()
    _, image_file = image_reader.read(filename_queue)
    image = tf.image.decode_jpeg(image_file, channels=3)
    image = tf.cast(image, tf.float32) / 256.0 # cast to float to make conv2d work
    return image 
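As the docstring suggests, the same read is simpler with Pillow. A rough equivalent outside the TensorFlow graph (a sketch, not part of the original tutorial) would be:

import numpy as np
from PIL import Image

def read_one_image_pillow(filename):
    """Pillow equivalent: returns a float32 array scaled to roughly [0, 1)."""
    image = np.asarray(Image.open(filename).convert('RGB'), dtype=np.float32)
    return image / 256.0  # match the 256.0 divisor used in the graph version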
Example #24
Source File: data_provider.py    From g-tensorflow-models with Apache License 2.0    5 votes
def provide_data_from_image_files(file_pattern,
                                  batch_size=32,
                                  shuffle=True,
                                  num_threads=1,
                                  patch_height=32,
                                  patch_width=32,
                                  colors=3):
  """Provides a batch of image data from image files.

  Args:
    file_pattern: A file pattern (glob), or 1D `Tensor` of file patterns.
    batch_size: The number of images in each minibatch.  Defaults to 32.
    shuffle: Whether to shuffle the read images. Defaults to True.
    num_threads: Number of prefetching threads. Defaults to 1.
    patch_height: A Python integer. The read images height. Defaults to 32.
    patch_width: A Python integer. The read images width. Defaults to 32.
    colors: Number of channels. Defaults to 3.

  Returns:
    A float `Tensor` of shape [batch_size, patch_height, patch_width, 3]
    representing a batch of images.
  """
  filename_queue = tf.train.string_input_producer(
      tf.train.match_filenames_once(file_pattern),
      shuffle=shuffle,
      capacity=5 * batch_size)
  _, image_bytes = tf.WholeFileReader().read(filename_queue)
  return batch_images(
      image=normalize_image(tf.image.decode_image(image_bytes)),
      patch_height=patch_height,
      patch_width=patch_width,
      colors=colors,
      batch_size=batch_size,
      shuffle=shuffle,
      num_threads=num_threads) 
Example #25
Source File: data_utils.py    From DeepLearningImplementations with MIT License    5 votes
def input_data_mnist(sess):

    FLAGS = tf.app.flags.FLAGS

    list_images = glob.glob("/home/tmain/Desktop/DeepLearning/Data/mnist/*.jpg")

    # Read each JPEG file

    with tf.device('/cpu:0'):

        reader = tf.WholeFileReader()
        filename_queue = tf.train.string_input_producer(list_images)
        key, value = reader.read(filename_queue)
        channels = FLAGS.channels
        image = tf.image.decode_jpeg(value, channels=channels, name="dataset_image")
        image.set_shape([28, 28, 1])

        # Crop and other random augmentations
        # image = tf.image.random_flip_left_right(image)
        # image = tf.image.random_saturation(image, .95, 1.05)
        # image = tf.image.random_brightness(image, .05)
        # image = tf.image.random_contrast(image, .95, 1.05)

        # Normalize
        image = normalize_image(image)

        # Format image to correct ordering
        if FLAGS.data_format == "NCHW":
            image = tf.transpose(image, (2,0,1))

        # Using asynchronous queues
        img_batch = tf.train.batch([image],
                                   batch_size=FLAGS.batch_size,
                                   num_threads=FLAGS.num_threads,
                                   capacity=2 * FLAGS.num_threads * FLAGS.batch_size,
                                   name='X_real_input')

        return img_batch 
Example #26
Source File: playground.py    From ImageFlow with Apache License 2.0    5 votes
def _read_jpg():


  dumm = glob.glob('/Users/HANEL/Desktop/' + '*.png')
  print(len(dumm))
  filename_queue = tf.train.string_input_producer(dumm)
  # filename_queue = tf.train.string_input_producer(['/Users/HANEL/Desktop/tf.png', '/Users/HANEL/Desktop/ft.png'])

  reader = tf.WholeFileReader()
  key, value = reader.read(filename_queue)

  my_img = tf.image.decode_png(value)
  # my_img_flip = tf.image.flip_up_down(my_img)

  init_op = tf.initialize_all_variables()
  with tf.Session() as sess:
    sess.run(init_op)

    # Start populating the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    for i in range(1):
      gunel = my_img.eval()

      print(gunel.shape)

    Image._showxv(Image.fromarray(np.asarray(gunel)))
    coord.request_stop()
    coord.join(threads)

#
# _read_jpg() 
Example #27
Source File: swivel.py    From hands-detection with MIT License    5 votes
def _count_matrix_input(self, filenames, submatrix_rows, submatrix_cols):
    """Creates ops that read submatrix shards from disk."""
    random.shuffle(filenames)
    filename_queue = tf.train.string_input_producer(filenames)
    reader = tf.WholeFileReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
            'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
            'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
            'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
            'sparse_value': tf.VarLenFeature(dtype=tf.float32)
        })

    global_row = features['global_row']
    global_col = features['global_col']

    sparse_local_row = features['sparse_local_row'].values
    sparse_local_col = features['sparse_local_col'].values
    sparse_count = features['sparse_value'].values

    sparse_indices = tf.concat(
        axis=1, values=[tf.expand_dims(sparse_local_row, 1),
                        tf.expand_dims(sparse_local_col, 1)])

    count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
                               sparse_count)

    return global_row, global_col, count 
Example #28
Source File: swivel.py    From ECO-pytorch with BSD 2-Clause "Simplified" License    5 votes
def count_matrix_input(filenames, submatrix_rows, submatrix_cols):
  """Reads submatrix shards from disk."""
  filename_queue = tf.train.string_input_producer(filenames)
  reader = tf.WholeFileReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      features={
          'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
          'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
          'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
          'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
          'sparse_value': tf.VarLenFeature(dtype=tf.float32)
      })

  global_row = features['global_row']
  global_col = features['global_col']

  sparse_local_row = features['sparse_local_row'].values
  sparse_local_col = features['sparse_local_col'].values
  sparse_count = features['sparse_value'].values

  sparse_indices = tf.concat(1, [tf.expand_dims(sparse_local_row, 1),
                                 tf.expand_dims(sparse_local_col, 1)])
  count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
                             sparse_count)

  queued_global_row, queued_global_col, queued_count = tf.train.batch(
      [global_row, global_col, count],
      batch_size=1,
      num_threads=4,
      capacity=32)

  queued_global_row = tf.reshape(queued_global_row, [submatrix_rows])
  queued_global_col = tf.reshape(queued_global_col, [submatrix_cols])
  queued_count = tf.reshape(queued_count, [submatrix_rows, submatrix_cols])

  return queued_global_row, queued_global_col, queued_count 
Example #29
Source File: input.py    From DF-Net with MIT License    5 votes
def read_png_image(filenames, num_epochs=None):
    """Given a list of filenames, constructs a reader op for images."""
    filename_queue = tf.train.string_input_producer(filenames,
        shuffle=False, capacity=len(filenames))
    reader = tf.WholeFileReader()
    _, value = reader.read(filename_queue)
    image_uint8 = tf.image.decode_png(value, channels=3)
    image = tf.cast(image_uint8, tf.float32)
    return image 
Example #30
Source File: image_data_loader.py    From style_swap_tensorflow with Apache License 2.0    5 votes
def get_data(self):
        data_files = get_data_files(self.config.Image_files)
        # print("data files", data_files)
        filename_queue = tf.train.string_input_producer(
            data_files, num_epochs=self.config.num_epochs, shuffle=self.shuffle,
            name='filenames')
        reader = tf.WholeFileReader()
        _, value = reader.read(filename_queue)
        image = tf.image.decode_image(value)

        return image