Python tensorflow.parse_single_example() Examples

The following are 30 code examples of tensorflow.parse_single_example(). You can go to the original project or source file noted above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
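All of the examples below follow the same pattern: a parsing function receives one serialized tf.Example string, describes the expected features with tf.FixedLenFeature (or a related feature spec), calls tf.parse_single_example(), and post-processes the resulting tensors. The following is a minimal, self-contained sketch of that pattern; the feature keys 'image_raw' and 'label' and the file name 'train.tfrecords' are illustrative assumptions, not taken from any project below.

import tensorflow as tf

def _parse_fn(serialized_example):
    # The feature spec must match what was written into the TFRecord file
    # (hypothetical keys here, for illustration only).
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    # Decode the raw bytes, scale to [0, 1], and cast the label to int32.
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.cast(image, tf.float32) * (1. / 255)
    label = tf.cast(features['label'], tf.int32)
    return image, label

# Apply the parser per record with the tf.data API (hypothetical file name).
dataset = tf.data.TFRecordDataset('train.tfrecords').map(_parse_fn).batch(32)

Note that tf.parse_single_example() parses one serialized record at a time; tf.parse_example() handles a batch, and in TensorFlow 2.x the same functionality lives under tf.io.parse_single_example().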
Example #1
Source File: dataset.py    From DNA-GAN with MIT License
def parse_fn(self, serialized_example):
        features={
            'image/id_name': tf.FixedLenFeature([], tf.string),
            'image/height' : tf.FixedLenFeature([], tf.int64),
            'image/width'  : tf.FixedLenFeature([], tf.int64),
            'image/encoded': tf.FixedLenFeature([], tf.string),
        }
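        # Add one int64 feature per attribute name in self.feature_list.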
        for name in self.feature_list:
            features[name] = tf.FixedLenFeature([], tf.int64)

        example = tf.parse_single_example(serialized_example, features=features)
        image = tf.decode_raw(example['image/encoded'], tf.uint8)
        raw_height = tf.cast(example['image/height'], tf.int32)
        raw_width = tf.cast(example['image/width'], tf.int32)
        image = tf.reshape(image, [raw_height, raw_width, 3])
        image = tf.image.resize_images(image, size=[self.height, self.width])
        # from IPython import embed; embed(); exit()

        feature_val_list = [tf.cast(example[name], tf.float32) for name in self.feature_list]
        return image, feature_val_list 
Example #2
Source File: model.py    From cloudml-dist-mnist-example with Apache License 2.0
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)

  features = tf.parse_single_example(
      serialized_example,
      features={
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label': tf.FixedLenFeature([], tf.int64),
      })

  image = tf.decode_raw(features['image_raw'], tf.uint8)
  image.set_shape([784])
  image = tf.cast(image, tf.float32) * (1. / 255)
  label = tf.cast(features['label'], tf.int32)

  return image, label 
Example #3
Source File: vfn_train.py    From view-finding-network with GNU General Public License v3.0
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
        })

    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, [227, 227, 6])

    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    return tf.split(image, 2, 2) # 3rd dimension two parts 
Example #4
Source File: vfn_train.py    From view-finding-network with GNU General Public License v3.0
def read_and_decode_aug(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
        })

    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.image.random_flip_left_right(tf.reshape(image, [227, 227, 6]))
    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    image = tf.image.random_brightness(image, 0.01)
    image = tf.image.random_contrast(image, 0.95, 1.05)
    return tf.split(image, 2, 2) # 3rd dimension two parts 
Example #5
Source File: dump_tfrecord.py    From cwavegan with MIT License
def _mapper(example_proto):
  features = {
      'samples': tf.FixedLenSequenceFeature([1], tf.float32, allow_missing=True),
      'label': tf.FixedLenSequenceFeature([], tf.string, allow_missing=True)
  }
  example = tf.parse_single_example(example_proto, features)

  wav = example['samples'][:, 0]

  wav = wav[:16384]
  wav_len = tf.shape(wav)[0]
  wav = tf.pad(wav, [[0, 16384 - wav_len]])

  label = tf.reduce_join(example['label'], 0)

  return wav, label 
Example #6
Source File: get_data.py    From glow with MIT License
def parse_tfrecord_tf(record, res, rnd_crop):
    features = tf.parse_single_example(record, features={
        'shape': tf.FixedLenFeature([3], tf.int64),
        'data': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([1], tf.int64)})
    # label is always 0 if unconditional
    # to get CelebA attr, add 'attr': tf.FixedLenFeature([40], tf.int64)
    data, label, shape = features['data'], features['label'], features['shape']
    label = tf.cast(tf.reshape(label, shape=[]), dtype=tf.int32)
    img = tf.decode_raw(data, tf.uint8)
    if rnd_crop:
        # For LSUN Realnvp only - random crop
        img = tf.reshape(img, shape)
        img = tf.random_crop(img, [res, res, 3])
    img = tf.reshape(img, [res, res, 3])
    return img, label  # to get CelebA attr, also return attr 
Example #7
Source File: inputs_test.py    From tensorflow_fasttext with MIT License
def test_parse_spec():
    fc = FeatureColumns(
        True,
        False,
        VOCAB_FILE,
        VOCAB_SIZE,
        10,
        10,
        1000,
        10)
    parse_spec = tf.feature_column.make_parse_example_spec(fc)
    print(parse_spec)
    reader = tf.python_io.tf_record_iterator(INPUT_FILE)
    sess = tf.Session()
    for record in reader:
        example = tf.parse_single_example(
            record,
            parse_spec)
        print(sess.run(example))
        break 
Example #8
Source File: input_fn.py    From 3D-Unet--Tensorflow with GNU General Public License v3.0
def decode_pred(serialized_example):
	"""Parses prediction data from the given `serialized_example`."""

	features = tf.parse_single_example(
					serialized_example,
					features={
						'T1':tf.FixedLenFeature([],tf.string),
						'T2':tf.FixedLenFeature([], tf.string)
					})

	patch_shape = [conf.patch_size, conf.patch_size, conf.patch_size]

	# Convert from a scalar string tensor
	image_T1 = tf.decode_raw(features['T1'], tf.int16)
	image_T1 = tf.reshape(image_T1, patch_shape)
	image_T2 = tf.decode_raw(features['T2'], tf.int16)
	image_T2 = tf.reshape(image_T2, patch_shape)

	# Convert dtype.
	image_T1 = tf.cast(image_T1, tf.float32)
	image_T2 = tf.cast(image_T2, tf.float32)
	label = tf.zeros(image_T1.shape) # pseudo label

	return image_T1, image_T2, label 
Example #9
Source File: trainer.py    From cloudml-samples with Apache License 2.0
def dataset_parser(value):
    keys_to_features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, ''),
        'image/format': tf.FixedLenFeature((), tf.string, 'jpeg'),
        'image/class/label': tf.FixedLenFeature([], tf.int64, -1)
    }

    parsed = tf.parse_single_example(value, keys_to_features)
    image_bytes = tf.reshape(parsed['image/encoded'], shape=[])

    # Preprocess the images.
    image = tf.image.decode_jpeg(image_bytes)
    image = tf.image.random_flip_left_right(image)
    image = tf.image.resize_images(image, [IMAGE_SIZE, IMAGE_SIZE])
    image = tf.image.convert_image_dtype(
      image, dtype=tf.bfloat16)

    # Subtract one so that labels are in [0, 1000).
    label = tf.cast(
        tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32) - 1

    return image, label 
Example #10
Source File: reader.py    From CapsLayer with Apache License 2.0
def parse_fun(serialized_example):
    """ Data parsing function.
    """
    features = tf.parse_single_example(serialized_example,
                                       features={'image': tf.FixedLenFeature([], tf.string),
                                                 'label': tf.FixedLenFeature([], tf.int64),
                                                 'height': tf.FixedLenFeature([], tf.int64),
                                                 'width': tf.FixedLenFeature([], tf.int64),
                                                 'depth': tf.FixedLenFeature([], tf.int64)})
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    depth = tf.cast(features['depth'], tf.int32)
    image = tf.decode_raw(features['image'], tf.float32)
    image = tf.reshape(image, shape=[height * width * depth])
    image.set_shape([28 * 28 * 1])
    image = tf.cast(image, tf.float32) * (1. / 255)
    label = tf.cast(features['label'], tf.int32)
    features = {'images': image, 'labels': label}
    return(features) 
Example #11
Source File: reader.py    From CapsLayer with Apache License 2.0
def parse_fun(serialized_example):
    """ Data parsing function.
    """
    features = tf.parse_single_example(serialized_example,
                                       features={'image': tf.FixedLenFeature([], tf.string),
                                                 'label': tf.FixedLenFeature([], tf.int64),
                                                 'height': tf.FixedLenFeature([], tf.int64),
                                                 'width': tf.FixedLenFeature([], tf.int64),
                                                 'depth': tf.FixedLenFeature([], tf.int64)})
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    depth = tf.cast(features['depth'], tf.int32)
    image = tf.decode_raw(features['image'], tf.float32)
    image = tf.reshape(image, shape=[height * width * depth])
    image.set_shape([28 * 28 * 1])
    image = tf.cast(image, tf.float32) * (1. / 255)
    label = tf.cast(features['label'], tf.int32)
    features = {'images': image, 'labels': label}
    return(features) 
Example #12
Source File: read_tfrecord_data.py    From tensorflow_input_image_by_tfrecord with Apache License 2.0
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example, features = {
        "image/encoded": tf.FixedLenFeature([], tf.string),
        "image/height": tf.FixedLenFeature([], tf.int64),
        "image/width": tf.FixedLenFeature([], tf.int64),
        "image/filename": tf.FixedLenFeature([], tf.string),
        "image/class/label": tf.FixedLenFeature([], tf.int64),})

    image_encoded = features["image/encoded"]
    image_raw = tf.image.decode_jpeg(image_encoded, channels=3)

    current_image_object = image_object()

    current_image_object.image = tf.image.resize_image_with_crop_or_pad(image_raw, FLAGS.image_height, FLAGS.image_width) # crop or pad to FLAGS.image_height x FLAGS.image_width
#    current_image_object.image = tf.cast(image_crop, tf.float32) * (1./255) - 0.5
    current_image_object.height = features["image/height"] # height of the raw image
    current_image_object.width = features["image/width"] # width of the raw image
    current_image_object.filename = features["image/filename"] # filename of the raw image
    current_image_object.label = tf.cast(features["image/class/label"], tf.int32) # label of the raw image
    
    return current_image_object 
Example #13
Source File: inputs.py    From ffn with Apache License 2.0
def load_patch_coordinates_from_filename_queue(filename_queue):
  """Loads coordinates and volume names from filename queue.

  Args:
    filename_queue: Tensorflow queue created from create_filename_queue()

  Returns:
    Tuple of coordinates (shape `[1, 3]`) and volume name (shape `[1]`) tensors.
  """
  record_options = tf.python_io.TFRecordOptions(
      tf.python_io.TFRecordCompressionType.GZIP)
  keys, protos = tf.TFRecordReader(options=record_options).read(filename_queue)
  examples = tf.parse_single_example(protos, features=dict(
      center=tf.FixedLenFeature(shape=[1, 3], dtype=tf.int64),
      label_volume_name=tf.FixedLenFeature(shape=[1], dtype=tf.string),
  ))
  coord = examples['center']
  volname = examples['label_volume_name']
  return coord, volname 
Example #14
Source File: train.py    From centernet_tensorflow_wilderface_voc with MIT License
def parse_color_data(example_proto):
    features = {"img_raw": tf.FixedLenFeature([], tf.string),
                "label": tf.FixedLenFeature([], tf.string),
                "width": tf.FixedLenFeature([], tf.int64),
                "height": tf.FixedLenFeature([], tf.int64)}
    parsed_features = tf.parse_single_example(example_proto, features)
    img = parsed_features["img_raw"]
    img = tf.decode_raw(img, tf.uint8)
    width = parsed_features["width"]
    height = parsed_features["height"]
    img = tf.reshape(img, [height, width, 3])
    img = tf.cast(img, tf.float32) * (1. / 255.) - 0.5
    label = parsed_features["label"]
    label = tf.decode_raw(label, tf.float32)

    return img, label 
Example #15
Source File: loader.py    From SketchCNN with MIT License
def _read_raw(self):
        """Read raw data from TFRecord.

        Returns:
            :return: data list [input_raw, label_raw].
        """
        self._reader = tf.TFRecordReader()

        _, serialized_example = self._reader.read(self._queue)

        features = tf.parse_single_example(serialized_example,
                                           features={
                                               'name': tf.FixedLenFeature([], tf.string),
                                               'block': tf.FixedLenFeature([], tf.string)
                                           })

        input_raw, label_raw = decode_block(features['block'], tensor_size=self._raw_size)

        if self._with_key:
            return input_raw, label_raw, features['name']
        return input_raw, label_raw 
Example #16
Source File: util.py    From R-Net with MIT License
def get_record_parser(config, is_test=False):
    def parse(example):
        para_limit = config.test_para_limit if is_test else config.para_limit
        ques_limit = config.test_ques_limit if is_test else config.ques_limit
        char_limit = config.char_limit
        features = tf.parse_single_example(example,
                                           features={
                                               "context_idxs": tf.FixedLenFeature([], tf.string),
                                               "ques_idxs": tf.FixedLenFeature([], tf.string),
                                               "context_char_idxs": tf.FixedLenFeature([], tf.string),
                                               "ques_char_idxs": tf.FixedLenFeature([], tf.string),
                                               "y1": tf.FixedLenFeature([], tf.string),
                                               "y2": tf.FixedLenFeature([], tf.string),
                                               "id": tf.FixedLenFeature([], tf.int64)
                                           })
        context_idxs = tf.reshape(tf.decode_raw(
            features["context_idxs"], tf.int32), [para_limit])
        ques_idxs = tf.reshape(tf.decode_raw(
            features["ques_idxs"], tf.int32), [ques_limit])
        context_char_idxs = tf.reshape(tf.decode_raw(
            features["context_char_idxs"], tf.int32), [para_limit, char_limit])
        ques_char_idxs = tf.reshape(tf.decode_raw(
            features["ques_char_idxs"], tf.int32), [ques_limit, char_limit])
        y1 = tf.reshape(tf.decode_raw(
            features["y1"], tf.float32), [para_limit])
        y2 = tf.reshape(tf.decode_raw(
            features["y2"], tf.float32), [para_limit])
        qa_id = features["id"]
        return context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id
    return parse 
Example #17
Source File: resnet.py    From DeepFolding with BSD 3-Clause "New" or "Revised" License
def build_input(self):
        with tf.device('/cpu:0'):
            def parser(record):
                keys_to_features = {
                'x1d' :tf.FixedLenFeature([], tf.string),
                'x2d' :tf.FixedLenFeature([], tf.string),
                'y'   :tf.FixedLenFeature([], tf.string),
                'size':tf.FixedLenFeature([], tf.int64)}
                parsed = tf.parse_single_example(record, keys_to_features)
                x1d = tf.decode_raw(parsed['x1d'], tf.float32)
                x2d = tf.decode_raw(parsed['x2d'], tf.float32)
                size = parsed['size']
                x1d = tf.reshape(x1d, tf.stack([size, -1]))
                x2d = tf.reshape(x2d, tf.stack([size, size, -1]))
                y = tf.decode_raw(parsed['y'], tf.int16)
                y = tf.cast(y, tf.float32)
                y = tf.reshape(y, tf.stack([size, size]))
                return x1d, x2d, y, size

            dataset = tf.data.TFRecordDataset(self.input_tfrecord_files)
            dataset = dataset.map(parser, num_parallel_calls=64)
            dataset = dataset.prefetch(1024)
            dataset = dataset.shuffle(buffer_size=512)
            dataset = dataset.padded_batch(self.train_config.batch_size,
                    padded_shapes=([PADDING_FULL_LEN, self.x1d_channel_dim],
                        [PADDING_FULL_LEN, PADDING_FULL_LEN, self.x2d_channel_dim],
                        [PADDING_FULL_LEN, PADDING_FULL_LEN], []),
                    padding_values=(0.0, 0.0, -1.0, np.int64(PADDING_FULL_LEN)))
            iterator = dataset.make_initializable_iterator()
            x1d, x2d, y, size = iterator.get_next()
            return  x1d, x2d, y, size, iterator 
Example #18
Source File: OxFlowers_BCNN_85.py    From OxFlowers_BCNN with MIT License
def parse_function(example_proto):
	"""parse function is used to parse a single TFRecord example in the dataset."""
	# Parses a single Example proto.
	# Returns a dict mapping feature keys to Tensor and SparseTensor values.
	features = tf.parse_single_example(example_proto,features={
	'label': tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string),})
	# Reinterpret the bytes of a string as a vector of numbers.
	imgs = tf.decode_raw(features['img_raw'], tf.uint8)
	# Reshapes a tensor.
	imgs = tf.reshape(imgs, [ImageWidth, ImageHeight, 3])  
	# cast the data from (0, 255) to (-0.5, 0.5)
	# (-0.5, 0.5) may be better than (0, 1).
	imgs = tf.cast(imgs, tf.float32) * (1. / 255) - 0.5
	labels = tf.cast(features['label'], tf.int64) 
	return {'x': imgs}, labels 
Example #19
Source File: run_bert_open_qa_eval.py    From XQA with MIT License
def input_fn_builder(input_file, seq_length, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  name_to_features = {
      "unique_ids": tf.FixedLenFeature([], tf.int64),
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
  }

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t

    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

    return d

  return input_fn 
Example #20
Source File: OxFlowers_CNN_75.py    From OxFlowers_BCNN with MIT License
def read_and_decode(filename):
	"""
	read and decode a TFRecords file.
	returns numpy array objects.
	pipeline: TFRecords --> queue --> serialized_example --> dict.
	"""
	# Output strings (e.g. filenames) to a queue for an input pipeline.
	filename_queue = tf.train.string_input_producer([filename])
	# print(filename_queue)
	# A Reader that outputs the records from a TFRecords file.
	reader = tf.TFRecordReader()
	# reader.read(queue)
	# Args queue: A Queue or a mutable string Tensor representing a handle to a Queue, with string work items.
	# Returns: A tuple of Tensors (key, value). key: A string scalar Tensor. value: A string scalar Tensor.
	_, serialized_example = reader.read(filename_queue)
	# print(serialized_example)
	
	# Parses a single Example proto.
	# Returns a dict mapping feature keys to Tensor and SparseTensor values.
	features = tf.parse_single_example(serialized_example,features={
	'label': tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string),})
	# Reinterpret the bytes of a string as a vector of numbers.
	imgs = tf.decode_raw(features['img_raw'], tf.uint8)
	# print(img.dtype)
	# print(img.shape)
	# Reshapes a tensor.
	imgs = tf.reshape(imgs, [-1, ImageWidth, ImageHeight, ImageChannels])  
	# cast the data from (0, 255) to (-0.5, 0.5)
	# (-0.5, 0.5) may be better than (0, 1).
	imgs = tf.cast(imgs, tf.float32) * (1. / 255) - 0.5
	labels = tf.cast(features['label'], tf.int64)  
	
	# print(type(imgs))
	# print(imgs.shape)
	# print(type(labels))
	# print(labels.shape)
	return imgs, labels 
Example #21
Source File: detection_inference.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def build_input(tfrecord_paths):
  """Builds the graph's input.

  Args:
    tfrecord_paths: List of paths to the input TFRecords

  Returns:
    serialized_example_tensor: The next serialized example. String scalar Tensor
    image_tensor: The decoded image of the example. Uint8 tensor,
        shape=[1, None, None,3]
  """
  filename_queue = tf.train.string_input_producer(
      tfrecord_paths, shuffle=False, num_epochs=1)

  tf_record_reader = tf.TFRecordReader()
  _, serialized_example_tensor = tf_record_reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example_tensor,
      features={
          standard_fields.TfExampleFields.image_encoded:
              tf.FixedLenFeature([], tf.string),
      })
  encoded_image = features[standard_fields.TfExampleFields.image_encoded]
  image_tensor = tf.image.decode_image(encoded_image, channels=3)
  image_tensor.set_shape([None, None, 3])
  image_tensor = tf.expand_dims(image_tensor, 0)

  return serialized_example_tensor, image_tensor 
Example #22
Source File: coco.py    From FastMaskRCNN with Apache License 2.0
def read(tfrecords_filename):

  if not isinstance(tfrecords_filename, list):
    tfrecords_filename = [tfrecords_filename]
  filename_queue = tf.train.string_input_producer(
    tfrecords_filename, num_epochs=100)

  options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
  reader = tf.TFRecordReader(options=options)
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
    serialized_example,
    features={
      'image/img_id': tf.FixedLenFeature([], tf.int64),
      'image/encoded': tf.FixedLenFeature([], tf.string),
      'image/height': tf.FixedLenFeature([], tf.int64),
      'image/width': tf.FixedLenFeature([], tf.int64),
      'label/num_instances': tf.FixedLenFeature([], tf.int64),
      'label/gt_masks': tf.FixedLenFeature([], tf.string),
      'label/gt_boxes': tf.FixedLenFeature([], tf.string),
      'label/encoded': tf.FixedLenFeature([], tf.string),
      })
  # image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
  img_id = tf.cast(features['image/img_id'], tf.int32)
  ih = tf.cast(features['image/height'], tf.int32)
  iw = tf.cast(features['image/width'], tf.int32)
  num_instances = tf.cast(features['label/num_instances'], tf.int32)
  image = tf.decode_raw(features['image/encoded'], tf.uint8)
  imsize = tf.size(image)
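  # If the decoded buffer has only ih*iw elements the image is grayscale,
  # so convert it to RGB; otherwise reshape it directly to (ih, iw, 3).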
  image = tf.cond(tf.equal(imsize, ih * iw), \
          lambda: tf.image.grayscale_to_rgb(tf.reshape(image, (ih, iw, 1))), \
          lambda: tf.reshape(image, (ih, iw, 3)))

  gt_boxes = tf.decode_raw(features['label/gt_boxes'], tf.float32)
  gt_boxes = tf.reshape(gt_boxes, [num_instances, 5])
  gt_masks = tf.decode_raw(features['label/gt_masks'], tf.uint8)
  gt_masks = tf.cast(gt_masks, tf.int32)
  gt_masks = tf.reshape(gt_masks, [num_instances, ih, iw])
  
  return image, ih, iw, gt_boxes, gt_masks, num_instances, img_id 
Example #23
Source File: fully_connected_reader.py    From tensorflow_input_image_by_tfrecord with Apache License 2.0
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      # Defaults are not specified since both keys are required.
      features={
          'image/encoded': tf.FixedLenFeature([], tf.string),
          'image/class/label': tf.FixedLenFeature([], tf.int64),
      })

  # Convert from a scalar string tensor (whose single string has
  # length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
  # [mnist.IMAGE_PIXELS].
  image = tf.decode_raw(features['image/encoded'], tf.uint8)
  image.set_shape([128*128])

  # OPTIONAL: Could reshape into a 28x28 image and apply distortions
  # here.  Since we are not applying any distortions in this
  # example, and the next step expects the image to be flattened
  # into a vector, we don't bother.

  # Convert from [0, 255] -> [-0.5, 0.5] floats.
  image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

  # Convert label from a scalar uint8 tensor to an int32 scalar.
  label = tf.cast(features['image/class/label'], tf.int32)

  return image, label 
Example #24
Source File: OxFlowers_CNN_75.py    From OxFlowers_BCNN with MIT License
def parse_function(example_proto):
	"""parse function is used to parse a single TFRecord example in the dataset."""
	# Parses a single Example proto.
	# Returns a dict mapping feature keys to Tensor and SparseTensor values.
	features = tf.parse_single_example(example_proto,features={
	'label': tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string),})
	# Reinterpret the bytes of a string as a vector of numbers.
	imgs = tf.decode_raw(features['img_raw'], tf.uint8)
	# Reshapes a tensor.
	imgs = tf.reshape(imgs, [ImageWidth, ImageHeight, 3])  
	# cast the data from (0, 255) to (-0.5, 0.5)
	# (-0.5, 0.5) may be better than (0, 1).
	imgs = tf.cast(imgs, tf.float32) * (1. / 255) - 0.5
	labels = tf.cast(features['label'], tf.int64) 
	return {'x': imgs}, labels 
Example #25
Source File: read_tfrecord.py    From R2CNN_Faster-RCNN_Tensorflow with MIT License
def read_single_example_and_decode(filename_queue):

    # tfrecord_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)

    # reader = tf.TFRecordReader(options=tfrecord_options)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    features = tf.parse_single_example(
        serialized=serialized_example,
        features={
            'img_name': tf.FixedLenFeature([], tf.string),
            'img_height': tf.FixedLenFeature([], tf.int64),
            'img_width': tf.FixedLenFeature([], tf.int64),
            'img': tf.FixedLenFeature([], tf.string),
            'gtboxes_and_label': tf.FixedLenFeature([], tf.string),
            'num_objects': tf.FixedLenFeature([], tf.int64)
        }
    )
    img_name = features['img_name']
    img_height = tf.cast(features['img_height'], tf.int32)
    img_width = tf.cast(features['img_width'], tf.int32)
    img = tf.decode_raw(features['img'], tf.uint8)

    img = tf.reshape(img, shape=[img_height, img_width, 3])

    gtboxes_and_label = tf.decode_raw(features['gtboxes_and_label'], tf.int32)
    gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 9])

    num_objects = tf.cast(features['num_objects'], tf.int32)
    return img_name, img, gtboxes_and_label, num_objects 
Example #26
Source File: generate.py    From glow with MIT License
def parse_image(max_res):
    def _process_image(img):
        img = centre_crop(img)
        img = tf.image.resize_images(
            img, [max_res, max_res], method=_DOWNSAMPLING)
        img = tf.cast(img, 'float32')
        resolution_log2 = int(np.log2(max_res))
        q_imgs = []
        for lod in range(resolution_log2 - 1):
            if lod:
                img = downsample(img)
            quant = x_to_uint8(img)
            q_imgs.append(quant)
        return q_imgs

    def _parse_image(example):
        feature_map = {
            'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                                default_value=''),
            'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
                                                    default_value=-1)
        }
        features = tf.parse_single_example(example, feature_map)
        img, label = features['image/encoded'], features['image/class/label']
        label = tf.cast(tf.reshape(label, shape=[]), dtype=tf.int32) - 1
        img = tf.image.decode_jpeg(img, channels=_NUM_CHANNELS)
        imgs = _process_image(img)
        parsed = (label, *imgs)
        return parsed

    return _parse_image 
Example #27
Source File: reader.py    From CycleGAN-TensorFlow with MIT License
def feed(self):
    """
    Returns:
      images: 4D tensor [batch_size, image_width, image_height, image_depth]
    """
    with tf.name_scope(self.name):
      filename_queue = tf.train.string_input_producer([self.tfrecords_file])
      reader = tf.TFRecordReader()

      _, serialized_example = reader.read(filename_queue)
      features = tf.parse_single_example(
          serialized_example,
          features={
            'image/file_name': tf.FixedLenFeature([], tf.string),
            'image/encoded_image': tf.FixedLenFeature([], tf.string),
          })

      image_buffer = features['image/encoded_image']
      image = tf.image.decode_jpeg(image_buffer, channels=3)
      image = self._preprocess(image)
      images = tf.train.shuffle_batch(
            [image], batch_size=self.batch_size, num_threads=self.num_threads,
            capacity=self.min_queue_examples + 3*self.batch_size,
            min_after_dequeue=self.min_queue_examples
          )

      tf.summary.image('_input', images)
    return images 
Example #28
Source File: generate.py    From glow with MIT License
def parse_celeba_image(max_res, transpose=False):
    def _process_image(img):
        img = tf.cast(img, 'float32')
        resolution_log2 = int(np.log2(max_res))
        q_imgs = []
        for lod in range(resolution_log2 - 1):
            if lod:
                img = downsample(img)
            quant = x_to_uint8(img)
            q_imgs.append(quant)
        return q_imgs

    def _parse_image(example):
        features = tf.parse_single_example(example, features={
            'shape': tf.FixedLenFeature([3], tf.int64),
            'data': tf.FixedLenFeature([], tf.string),
            'attr': tf.FixedLenFeature([40], tf.int64)})
        shape = features['shape']
        data = features['data']
        attr = features['attr']
        data = tf.decode_raw(data, tf.uint8)
        img = tf.reshape(data, shape)
        if transpose:
            img = tf.transpose(img, (1, 2, 0))  # CHW -> HWC
        imgs = _process_image(img)
        parsed = (attr, *imgs)
        return parsed

    return _parse_image 
Example #29
Source File: pretrain_on_vcr.py    From HGL-pytorch with MIT License
def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
        t = example[name]
        if t.dtype == tf.int64:
            t = tf.to_int32(t)
        example[name] = t

    return example 
Example #30
Source File: OxFlowers_BCNN_85.py    From OxFlowers_BCNN with MIT License
def read_and_decode(filename):
	"""
	read and decode a TFRecords file.
	returns numpy array objects.
	pipeline: TFRecords --> queue --> serialized_example --> dict.
	"""
	# Output strings (e.g. filenames) to a queue for an input pipeline.
	filename_queue = tf.train.string_input_producer([filename])
	# print(filename_queue)
	# A Reader that outputs the records from a TFRecords file.
	reader = tf.TFRecordReader()
	# reader.read(queue)
	# Args queue: A Queue or a mutable string Tensor representing a handle to a Queue, with string work items.
	# Returns: A tuple of Tensors (key, value). key: A string scalar Tensor. value: A string scalar Tensor.
	_, serialized_example = reader.read(filename_queue)
	# print(serialized_example)
	
	# Parses a single Example proto.
	# Returns a dict mapping feature keys to Tensor and SparseTensor values.
	features = tf.parse_single_example(serialized_example,features={
	'label': tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string),})
	# Reinterpret the bytes of a string as a vector of numbers.
	imgs = tf.decode_raw(features['img_raw'], tf.uint8)
	# print(img.dtype)
	# print(img.shape)
	# Reshapes a tensor.
	imgs = tf.reshape(imgs, [-1, ImageWidth, ImageHeight, ImageChannels])  
	# cast the data from (0, 255) to (-0.5, 0.5)
	# (-0.5, 0.5) may be better than (0, 1).
	imgs = tf.cast(imgs, tf.float32) * (1. / 255) - 0.5
	labels = tf.cast(features['label'], tf.int64)  
	
	# print(type(imgs))
	# print(imgs.shape)
	# print(type(labels))
	# print(labels.shape)
	return imgs, labels