Python tensorflow.parallel_stack() Examples

The following are 30 code examples of tensorflow.parallel_stack(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
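As a quick orientation before the project examples, here is a minimal usage sketch (not taken from any of the projects below). It assumes TensorFlow 1.x graph mode, matching the style of the examples on this page; the tensor values are only illustrative.

import tensorflow as tf

x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])

# All inputs must share the same statically known shape; the result adds a new
# leading dimension, so three length-2 vectors become a (3, 2) tensor.
stacked = tf.parallel_stack([x, y, z])

with tf.Session() as sess:
    print(sess.run(stacked))  # [[1 4] [2 5] [3 6]]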
Example #1
Source File: grasp_dataset.py    From costar_plan with Apache License 2.0
def to_training_tensor(time_ordered_feature_tensor_dicts, feature_name):
        """ Calls to_tensors plus converts the data to a single large tensor.

        This returns a single tf tensor for a single feature
        in a format you can pass directly to a Keras model.

        # Arguments

        time_ordered_feature_tensor_dicts: A dictionary with keys which are strings and values which are lists of tensors.
        feature_name: A string identifying which specific feature in the dictionary to convert.
        """
        if feature_name is None or feature_name == '':
            return None
        # Image of a clear scene view, originally from the 'view_clear_scene' step.
        # There are also move_to_grasp versions copied from view_clear_scene and then repeated once for each time step.
        op_batch = GraspDataset.to_tensors(time_ordered_feature_tensor_dicts, feature_name)
        # make one long list from each list of lists
        op_batch = list(itertools.chain.from_iterable(op_batch))
        # stack all the data in a way that will let it run in parallel
        op_batch = tf.parallel_stack(op_batch)
        return op_batch 
Example #2
Source File: cifar10_main.py    From object_detection_with_tensorflow with MIT License
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
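The sharding logic above (and in several of the CIFAR-10 examples that follow) boils down to: unstack the batch, deal the per-example tensors round-robin across shards, and reassemble each shard with tf.parallel_stack. The condensed sketch below isolates just that pattern; the shard_batch helper name, the tf.zeros stand-ins for dataset.make_batch(), and the sizes are illustrative assumptions, and it again assumes TensorFlow 1.x graph mode.

import tensorflow as tf

def shard_batch(image_batch, label_batch, batch_size, num_shards):
    # tf.unstack needs a static batch size to split into per-example tensors.
    images = tf.unstack(image_batch, num=batch_size, axis=0)
    labels = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for _ in range(num_shards)]
    label_shards = [[] for _ in range(num_shards)]
    for i in range(batch_size):
        # Deal examples round-robin across the shards.
        feature_shards[i % num_shards].append(images[i])
        label_shards[i % num_shards].append(labels[i])
    # Reassemble each shard; parallel_stack copies inputs into the output as
    # they become available instead of waiting for the whole list.
    return ([tf.parallel_stack(shard) for shard in feature_shards],
            [tf.parallel_stack(shard) for shard in label_shards])

# Synthetic stand-ins for image_batch, label_batch = dataset.make_batch(batch_size).
image_batch = tf.zeros([8, 32, 32, 3])
label_batch = tf.zeros([8], dtype=tf.int32)
feature_shards, label_shards = shard_batch(image_batch, label_batch,
                                           batch_size=8, num_shards=2)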
Example #3
Source File: preprocessing.py    From dlcookbook-dlbs with Apache License 2.0
def minibatch(self, dataset, subset, use_datasets, cache_data,
                shift_ratio=0):
    """Get test image batches."""
    del dataset, use_datasets, cache_data
    if (not hasattr(self, 'fake_images') or
        not hasattr(self, 'fake_labels')):
      raise ValueError('Must call set_fake_data() before calling minibatch '
                       'on TestImagePreprocessor')
    if self.expected_subset is not None:
      assert subset == self.expected_subset

    shift_ratio = shift_ratio or self.shift_ratio
    fake_images = cnn_util.roll_numpy_batches(self.fake_images, self.batch_size,
                                              shift_ratio)
    fake_labels = cnn_util.roll_numpy_batches(self.fake_labels, self.batch_size,
                                              shift_ratio)

    with tf.name_scope('batch_processing'):
      image_slice, label_slice = tf.train.slice_input_producer(
          [fake_images, fake_labels],
          shuffle=False,
          name='image_slice')
      raw_images, raw_labels = tf.train.batch(
          [image_slice, label_slice], batch_size=self.batch_size,
          name='image_batch')
      images = [[] for _ in range(self.num_splits)]
      labels = [[] for _ in range(self.num_splits)]
      for i in xrange(self.batch_size):
        split_index = i % self.num_splits
        raw_image = tf.cast(raw_images[i], self.dtype)
        images[split_index].append(raw_image)
        labels[split_index].append(raw_labels[i])
      for split_index in xrange(self.num_splits):
        images[split_index] = tf.parallel_stack(images[split_index])
        labels[split_index] = tf.parallel_stack(labels[split_index])

      return images, labels 
Example #4
Source File: interp.py    From nngp with Apache License 2.0
def _get_interp_idxs_weights_2d(x, xp, y, yp, x_log_spacing=False):
  with tf.name_scope('get_interp_idxs_weights_2d'):
    if x_log_spacing:
      x = tf.log(x)
      xp = tf.log(xp)

    with tf.control_dependencies([yp]):
      xp = tf.tile(xp, yp.shape)
    xyp = tf.expand_dims(tf.parallel_stack([xp, yp]), 1)
    xy0 = tf.reshape(tf.parallel_stack([x[0], y[0]]), [2, 1, 1])
    xy1 = tf.reshape(tf.parallel_stack([x[1], y[1]]), [2, 1, 1])

    spacing = xy1 - xy0
    ind_grid = (xyp - xy0) / spacing
    ind = tf.cast(ind_grid, tf.int32) + [[[0], [1]]]

    max_ind = [[[x.shape[0].value - 1]], [[y.shape[0].value - 1]]]
    ind = tf.minimum(ind, max_ind)
    ind_float = tf.cast(ind, tf.float64)

    xy_grid = ind_float * spacing + xy0

    weight = tf.abs(xyp - xy_grid) / spacing
    if x_log_spacing:
      weight = tf.parallel_stack([tf.exp(weight[0]), weight[1]])
    weight = 1. - weight

    weight_sum = tf.reduce_sum(weight, axis=1, keep_dims=True)
    weight /= weight_sum

    return ind, weight 
Example #5
Source File: nvcnn.py    From dlcookbook-dlbs with Apache License 2.0
def device_minibatches(self, total_batch_size):
        record_input = data_flow_ops.RecordInput(
            file_pattern=os.path.join(FLAGS.data_dir, '%s-*' % self.subset),
            parallelism=64,
            # Note: This causes deadlock during init if larger than dataset
            buffer_size=FLAGS.input_buffer_size,
            batch_size=total_batch_size)
        records = record_input.get_yield_op()
        # Split batch into individual images
        records = tf.split(records, total_batch_size, 0)
        records = [tf.reshape(record, []) for record in records]
        # Deserialize and preprocess images into batches for each device
        images = defaultdict(list)
        labels = defaultdict(list)
        with tf.name_scope('input_pipeline'):
            for i, record in enumerate(records):
                imgdata, label, bbox, text = deserialize_image_record(record)
                image = self.preprocess(imgdata, bbox, thread_id=i)
                label -= 1 # Change to 0-based (don't use background class)
                device_num = i % self.num_devices
                images[device_num].append(image)
                labels[device_num].append(label)
            # Stack images back into a sub-batch for each device
            for device_num in range(self.num_devices):
                images[device_num] = tf.parallel_stack(images[device_num])
                labels[device_num] = tf.concat(labels[device_num], 0)
                images[device_num] = tf.reshape(images[device_num],
                                                [-1, self.height, self.width, 3])
                images[device_num] = tf.clip_by_value(images[device_num], 0., 255.)
                images[device_num] = tf.cast(images[device_num], self.dtype)
        return images, labels 
Example #6
Source File: cifar10_main.py    From object_detection_kitti with Apache License 2.0
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #7
Source File: cifar10_main.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #8
Source File: cifar10_myMain.py    From nasbot with MIT License
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #9
Source File: cifar10_main.py    From hands-detection with MIT License
def input_fn(subset, num_shards):
  """Create input graph for model.

  Args:
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  if subset == 'train':
    batch_size = FLAGS.train_batch_size
  elif subset == 'validate' or subset == 'eval':
    batch_size = FLAGS.eval_batch_size
  else:
    raise ValueError('Subset must be one of \'train\', \'validate\' and \'eval\'')
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and FLAGS.use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(FLAGS.data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #10
Source File: cifar10_main.py    From uai-sdk with Apache License 2.0
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #11
Source File: preprocessing_synthetic.py    From tf-imagenet with Apache License 2.0
def minibatch(self, dataset, subset, use_datasets, cache_data,
                  shift_ratio=-1):
        del dataset, use_datasets, cache_data, shift_ratio
        if (not hasattr(self, 'fake_images') or not hasattr(self, 'fake_labels')):
            raise ValueError('Must call set_fake_data() before calling minibatch '
                             'on TestImagePreprocessor')
        if self.expected_subset is not None:
            assert subset == self.expected_subset

        with tf.name_scope('batch_processing'):
            image_slice, label_slice = tf.train.slice_input_producer(
                [self.fake_images, self.fake_labels],
                shuffle=False,
                name='image_slice')
            raw_images, raw_labels = tf.train.batch(
                [image_slice, label_slice], batch_size=self.batch_size,
                name='image_batch')
            images = [[] for _ in range(self.num_splits)]
            labels = [[] for _ in range(self.num_splits)]
            for i in xrange(self.batch_size):
                split_index = i % self.num_splits
                raw_image = tf.cast(raw_images[i], self.dtype)
                images[split_index].append(raw_image)
                labels[split_index].append(raw_labels[i])
            for split_index in xrange(self.num_splits):
                images[split_index] = tf.parallel_stack(images[split_index])
                labels[split_index] = tf.parallel_stack(labels[split_index])

            return images, labels 
Example #12
Source File: cifar10_myMain.py    From dragonfly with MIT License
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #13
Source File: preprocessing.py    From deeplearning-benchmark with Apache License 2.0
def minibatch(self, dataset, subset, use_datasets, cache_data,
                shift_ratio=-1):
    del dataset, use_datasets, cache_data, shift_ratio
    if (not hasattr(self, 'fake_images') or
        not hasattr(self, 'fake_labels')):
      raise ValueError('Must call set_fake_data() before calling minibatch '
                       'on TestImagePreprocessor')
    if self.expected_subset is not None:
      assert subset == self.expected_subset

    with tf.name_scope('batch_processing'):
      image_slice, label_slice = tf.train.slice_input_producer(
          [self.fake_images, self.fake_labels],
          shuffle=False,
          name='image_slice')
      raw_images, raw_labels = tf.train.batch(
          [image_slice, label_slice], batch_size=self.batch_size,
          name='image_batch')
      images = [[] for _ in range(self.num_splits)]
      labels = [[] for _ in range(self.num_splits)]
      for i in xrange(self.batch_size):
        split_index = i % self.num_splits
        raw_image = tf.cast(raw_images[i], self.dtype)
        images[split_index].append(raw_image)
        labels[split_index].append(raw_labels[i])
      for split_index in xrange(self.num_splits):
        images[split_index] = tf.parallel_stack(images[split_index])
        labels[split_index] = tf.parallel_stack(labels[split_index])

      return images, labels 
Example #14
Source File: cifar10_main.py    From Gun-Detector with Apache License 2.0
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #15
Source File: preprocessing.py    From parallax with Apache License 2.0
def minibatch(self, dataset, subset, use_datasets, cache_data,
                  shift_ratio=-1):
        del dataset, use_datasets, cache_data, shift_ratio
        if (not hasattr(self, 'fake_images') or
                not hasattr(self, 'fake_labels')):
            raise ValueError(
                'Must call set_fake_data() before calling minibatch '
                'on TestImagePreprocessor')
        if self.expected_subset is not None:
            assert subset == self.expected_subset

        with tf.name_scope('batch_processing'):
            image_slice, label_slice = tf.train.slice_input_producer(
                [self.fake_images, self.fake_labels],
                shuffle=False,
                name='image_slice')
            raw_images, raw_labels = tf.train.batch(
                [image_slice, label_slice], batch_size=self.batch_size,
                name='image_batch')
            images = [[] for _ in range(self.num_splits)]
            labels = [[] for _ in range(self.num_splits)]
            for i in xrange(self.batch_size):
                split_index = i % self.num_splits
                raw_image = tf.cast(raw_images[i], self.dtype)
                images[split_index].append(raw_image)
                labels[split_index].append(raw_labels[i])
            for split_index in xrange(self.num_splits):
                images[split_index] = tf.parallel_stack(images[split_index])
                labels[split_index] = tf.parallel_stack(labels[split_index])

            return images, labels 
Example #16
Source File: cifar10_main.py    From g-tensorflow-models with Apache License 2.0
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #17
Source File: cifar10_main.py    From yolo_v2 with Apache License 2.0
def input_fn(data_dir,
             subset,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #18
Source File: image_preprocessing.py    From models with Apache License 2.0
def minibatch(self, dataset, subset):
    with tf.compat.v1.name_scope('batch_processing'):
      images = [[] for i in range(self.device_count)]
      labels = [[] for i in range(self.device_count)]
      record_input = data_flow_ops.RecordInput(
          file_pattern=dataset.tf_record_pattern(subset),
          seed=randint(0, 9000),
          parallelism=64,
          buffer_size=10000,
          batch_size=self.batch_size,
          name='record_input')
      records = record_input.get_yield_op()
      records = tf.split(records, self.batch_size, 0)
      records = [tf.reshape(record, []) for record in records]
      for i in xrange(self.batch_size):
        value = records[i]
        image_buffer, label_index, bbox, _ = parse_example_proto(value)
        image = self.preprocess(image_buffer, bbox, i % 4)

        device_index = i % self.device_count
        images[device_index].append(image)
        labels[device_index].append(label_index)
      label_index_batch = [None] * self.device_count
      for device_index in xrange(self.device_count):
        images[device_index] = tf.parallel_stack(images[device_index])
        label_index_batch[device_index] = tf.concat(labels[device_index], 0)

        # dynamic_pad=True) # HACK TESTING dynamic_pad=True
        images[device_index] = tf.cast(images[device_index], self.dtype)
        depth = 3
        images[device_index] = tf.reshape(
            images[device_index],
            shape=[self.batch_size_per_device, self.height, self.width, depth])
        label_index_batch[device_index] = tf.reshape(
            label_index_batch[device_index], [self.batch_size_per_device])
        # Display the training images in the visualizer.
        # tf.summary.image('images', images)

      return images, label_index_batch 
Example #19
Source File: preprocessing.py    From models with Apache License 2.0
def minibatch(self, dataset, subset):
    with tf.compat.v1.name_scope('batch_processing'):
      images = [[] for i in range(self.device_count)]
      labels = [[] for i in range(self.device_count)]
      record_input = data_flow_ops.RecordInput(
          file_pattern=dataset.tf_record_pattern(subset),
          seed=randint(0, 9000),
          parallelism=64,
          buffer_size=10000,
          batch_size=self.batch_size,
          name='record_input')
      records = record_input.get_yield_op()
      records = tf.split(records, self.batch_size, 0)
      records = [tf.reshape(record, []) for record in records]
      for i in xrange(self.batch_size):
        value = records[i]
        image_buffer, label_index, bbox, _ = parse_example_proto(value)
        image = self.preprocess(image_buffer, bbox, i % 4)

        device_index = i % self.device_count
        images[device_index].append(image)
        labels[device_index].append(label_index)
      label_index_batch = [None] * self.device_count
      for device_index in xrange(self.device_count):
        images[device_index] = tf.parallel_stack(images[device_index])
        label_index_batch[device_index] = tf.concat(labels[device_index], 0)

        # dynamic_pad=True) # HACK TESTING dynamic_pad=True
        images[device_index] = tf.cast(images[device_index], self.dtype)
        depth = 3
        images[device_index] = tf.reshape(
            images[device_index],
            shape=[self.batch_size_per_device, self.height, self.width, depth])
        label_index_batch[device_index] = tf.reshape(
            label_index_batch[device_index], [self.batch_size_per_device])
        # Display the training images in the visualizer.
        # tf.summary.image('images', images)

      return images, label_index_batch 
Example #20
Source File: preprocessing.py    From models with Apache License 2.0
def minibatch(self, dataset, subset):
    with tf.compat.v1.name_scope('batch_processing'):
      images = [[] for i in range(self.device_count)]
      labels = [[] for i in range(self.device_count)]
      record_input = data_flow_ops.RecordInput(
          file_pattern=dataset.tf_record_pattern(subset),
          seed=randint(0, 9000),
          parallelism=64,
          buffer_size=10000,
          batch_size=self.batch_size,
          name='record_input')
      records = record_input.get_yield_op()
      records = tf.split(records, self.batch_size, 0)
      records = [tf.reshape(record, []) for record in records]
      for i in xrange(self.batch_size):
        value = records[i]
        image_buffer, label_index, bbox, _ = parse_example_proto(value)
        image = self.preprocess(image_buffer, bbox, i % 4)
        device_index = i % self.device_count
        images[device_index].append(image)
        labels[device_index].append(label_index)
      label_index_batch = [None] * self.device_count
      for device_index in xrange(self.device_count):
        images[device_index] = tf.parallel_stack(images[device_index])
        label_index_batch[device_index] = tf.concat(labels[device_index], 0)

        # dynamic_pad=True) # HACK TESTING dynamic_pad=True
        images[device_index] = tf.cast(images[device_index], self.dtype)
        depth = 3
        images[device_index] = tf.reshape(
            images[device_index],
            shape=[self.batch_size_per_device, self.height, self.width, depth])
        label_index_batch[device_index] = tf.reshape(
            label_index_batch[device_index], [self.batch_size_per_device])
        # Display the training images in the visualizer.
        # tf.summary.image('images', images)

      return images, label_index_batch, records 
Example #21
Source File: preprocessing.py    From models with Apache License 2.0
def minibatch(self, dataset, subset):
    with tf.compat.v1.name_scope('batch_processing'):
      images = [[] for i in range(self.device_count)]
      labels = [[] for i in range(self.device_count)]
      record_input = data_flow_ops.RecordInput(
          file_pattern=dataset.tf_record_pattern(subset),
          seed=randint(0, 9000),
          parallelism=64,
          buffer_size=10000,
          batch_size=self.batch_size,
          name='record_input')
      records = record_input.get_yield_op()
      records = tf.split(records, self.batch_size, 0)
      records = [tf.reshape(record, []) for record in records]
      for i in xrange(self.batch_size):
        value = records[i]
        image_buffer, label_index, bbox, _ = parse_example_proto(value)
        image = self.preprocess(image_buffer, bbox, i % 4)
        device_index = i % self.device_count
        images[device_index].append(image)
        labels[device_index].append(label_index)
      label_index_batch = [None] * self.device_count
      for device_index in xrange(self.device_count):
        images[device_index] = tf.parallel_stack(images[device_index])
        label_index_batch[device_index] = tf.concat(labels[device_index], 0)

        # dynamic_pad=True) # HACK TESTING dynamic_pad=True
        images[device_index] = tf.cast(images[device_index], self.dtype)
        depth = 3
        images[device_index] = tf.reshape(
            images[device_index],
            shape=[self.batch_size_per_device, self.height, self.width, depth])
        label_index_batch[device_index] = tf.reshape(
            label_index_batch[device_index], [self.batch_size_per_device])
        # Display the training images in the visualizer.
        # tf.summary.image('images', images)

      return images, label_index_batch, records 
Example #22
Source File: categorical_distribution.py    From tf-example-models with Apache License 2.0
def get_parameter_updaters(self, data, gamma_weighted, gamma_sum):
        tf_parameter_updaters = []
        for dim in range(self.dims):
            tf_partition = tf.dynamic_partition(gamma_weighted, data[0][:, dim], self.counts[dim])
            tf_new_means = tf.parallel_stack([tf.reduce_sum(p) for p in tf_partition])
            tf_parameter_updaters.append(self.tf_means[dim].assign(tf_new_means))

        return tf_parameter_updaters 
Example #23
Source File: categorical_distribution.py    From tf-example-models with Apache License 2.0
def get_log_probabilities(self, data):
        tf_log_probabilities = []
        for dim in range(self.dims):
            tf_log_means = tf.log(self.tf_means[dim])
            tf_log_probabilities.append(
                tf.gather(tf_log_means, data[0][:, dim])
            )

        return tf.reduce_sum(tf.parallel_stack(tf_log_probabilities), axis=0) 
Example #24
Source File: cifar10_main.py    From DOTA_models with Apache License 2.0
def input_fn(subset, num_shards):
  """Create input graph for model.

  Args:
    subset: one of 'train', 'validate' and 'eval'.
    num_shards: num of towers participating in data-parallel training.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  if subset == 'train':
    batch_size = FLAGS.train_batch_size
  elif subset == 'validate' or subset == 'eval':
    batch_size = FLAGS.eval_batch_size
  else:
    raise ValueError('Subset must be one of \'train\', \'validate\' and \'eval\'')
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and FLAGS.use_distortion_for_training
    dataset = cifar10.Cifar10DataSet(FLAGS.data_dir, subset, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #25
Source File: array_ops.py    From keras-lambda with MIT License
def parallel_stack(values, name="parallel_stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.

  Requires that the shape of inputs be known at graph construction time.

  Packs the list of tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the first dimension.
  Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
  tensor will have the shape `(N, A, B, C)`.

  For example:

  ```prettyprint
  # 'x' is [1, 4]
  # 'y' is [2, 5]
  # 'z' is [3, 6]
  parallel_stack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]
  ```

  The difference between stack and parallel_stack is that stack requires all
  of the inputs be computed before the operation will begin but doesn't require
  that the input shapes be known during graph construction.  Parallel stack
  will copy pieces of the input into the output as they become available, in
  some situations this can provide a performance benefit.

  This is the opposite of unstack.  The numpy equivalent is

      tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.
  """
  with ops.name_scope(name):
    value_t = ops.convert_to_tensor(values[0])
    value_shape = ops.convert_to_tensor(value_t).get_shape()

    output_shape = tensor_shape.TensorShape([len(values)])
    output_shape = output_shape.concatenate(value_shape)
    # expand_dims converts concat to stack.
    return gen_array_ops._parallel_concat(
        [expand_dims(value, 0) for value in values], shape=output_shape) 
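One practical consequence of the docstring above: because parallel_stack bakes the output shape into the op at graph-construction time, every input shape must be fully defined, whereas tf.stack also accepts partially unknown shapes. A small sketch of that difference, assuming TensorFlow 1.x placeholders (the tensor names are illustrative):

import tensorflow as tf

known = tf.placeholder(tf.float32, shape=[2, 3])        # fully defined shape
unknown = tf.placeholder(tf.float32, shape=[None, 3])   # unknown leading dimension

ok = tf.parallel_stack([known, known])   # fine: output shape (2, 2, 3) is known
also_ok = tf.stack([unknown, unknown])   # fine: stack tolerates unknown dims

# tf.parallel_stack([unknown, unknown]) would fail at graph construction,
# since the output shape cannot be fully determined.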
Example #26
Source File: nngp.py    From nngp with Apache License 2.0
def k_full(self, input1, input2=None):
    """Iteratively building the full NNGP kernel.
    """
    input1 = self._input_layer_normalization(input1)
    if input2 is None:
      input2 = input1
    else:
      input2 = self._input_layer_normalization(input2)

    with tf.name_scope("k_full"):
      cov_init = tf.matmul(
          input1, input2, transpose_b=True) / input1.shape[1].value

      self.k_diag(input1)
      q_aa_init = self.layer_qaa_dict[0]

      q_ab = cov_init
      q_ab = self.weight_var * q_ab + self.bias_var
      corr = q_ab / q_aa_init[0]

      if FLAGS.fraction_of_int32 > 1:
        batch_size, batch_count = self._get_batch_size_and_count(input1, input2)
        with tf.name_scope("q_ab"):
          q_ab_all = []
          for b_x in range(batch_count):
            with tf.name_scope("batch_%d" % b_x):
              corr_flat_batch = corr[
                  batch_size * b_x : batch_size * (b_x + 1), :]
              corr_flat_batch = tf.reshape(corr_flat_batch, [-1])

              for l in xrange(self.depth):
                with tf.name_scope("layer_%d" % l):
                  q_aa = self.layer_qaa_dict[l]
                  q_ab = interp.interp_lin_2d(x=self.var_aa_grid,
                                              y=self.corr_ab_grid,
                                              z=self.qab_grid,
                                              xp=q_aa,
                                              yp=corr_flat_batch)

                  q_ab = self.weight_var * q_ab + self.bias_var
                  corr_flat_batch = q_ab / self.layer_qaa_dict[l + 1][0]

              q_ab_all.append(q_ab)

          q_ab_all = tf.parallel_stack(q_ab_all)
      else:
        with tf.name_scope("q_ab"):
          corr_flat = tf.reshape(corr, [-1])
          for l in xrange(self.depth):
            with tf.name_scope("layer_%d" % l):
              q_aa = self.layer_qaa_dict[l]
              q_ab = interp.interp_lin_2d(x=self.var_aa_grid,
                                          y=self.corr_ab_grid,
                                          z=self.qab_grid,
                                          xp=q_aa,
                                          yp=corr_flat)
              q_ab = self.weight_var * q_ab + self.bias_var
              corr_flat = q_ab / self.layer_qaa_dict[l+1][0]
            q_ab_all = q_ab

    return tf.reshape(q_ab_all, cov_init.shape, "qab") 
Example #27
Source File: cifar_main.py    From class-balanced-loss with MIT License
def input_fn(data_dir,
             subset,
             imbalance_factor,
             num_shards,
             batch_size,
             use_distortion_for_training=True):
  """Create input graph for model.

  Args:
    data_dir: Directory where TFRecords representing the dataset are located.
    subset: one of 'train', 'validate' and 'eval'.
    imbalance_factor: float, None if this dataset is not long tailed.
    num_shards: num of towers participating in data-parallel training.
    batch_size: total batch size for training to be divided by the number of
    shards.
    use_distortion_for_training: True to use distortions.
  Returns:
    two lists of tensors for features and labels, each of num_shards length.
  """
  with tf.device('/cpu:0'):
    use_distortion = subset == 'train' and use_distortion_for_training
    dataset = cifar.CifarDataSet(
        data_dir, dir2version(data_dir),
        subset, imbalance_factor, use_distortion)
    image_batch, label_batch = dataset.make_batch(batch_size)
    if num_shards <= 1:
      # No GPU available or only 1 GPU.
      return [image_batch], [label_batch]

    # Note that passing num=batch_size is safe here, even though
    # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
    # examples. This is because it does so only when repeating for a limited
    # number of epochs, but our dataset repeats forever.
    image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
    label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
    feature_shards = [[] for i in range(num_shards)]
    label_shards = [[] for i in range(num_shards)]
    for i in xrange(batch_size):
      idx = i % num_shards
      feature_shards[idx].append(image_batch[i])
      label_shards[idx].append(label_batch[i])
    feature_shards = [tf.parallel_stack(x) for x in feature_shards]
    label_shards = [tf.parallel_stack(x) for x in label_shards]
    return feature_shards, label_shards 
Example #28
Source File: array_ops.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def parallel_stack(values, name="parallel_stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.

  Requires that the shape of inputs be known at graph construction time.

  Packs the list of tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the first dimension.
  Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
  tensor will have the shape `(N, A, B, C)`.

  For example:

  ```python
  x = tf.constant([1, 4])
  y = tf.constant([2, 5])
  z = tf.constant([3, 6])
  tf.parallel_stack([x, y, z])  # [[1, 4], [2, 5], [3, 6]]
  ```

  The difference between `stack` and `parallel_stack` is that `stack` requires
  all the inputs be computed before the operation will begin but doesn't require
  that the input shapes be known during graph construction.

  `parallel_stack` will copy pieces of the input into the output as they become
  available, in some situations this can provide a performance benefit.

  Unlike `stack`, `parallel_stack` does NOT support backpropagation.

  This is the opposite of unstack.  The numpy equivalent is

      tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.
  """
  with ops.name_scope(name):
    value_t = ops.convert_to_tensor(values[0])
    value_shape = ops.convert_to_tensor(value_t).get_shape()

    output_shape = tensor_shape.TensorShape([len(values)])
    output_shape = output_shape.concatenate(value_shape)
    # expand_dims converts concat to stack.
    return gen_array_ops._parallel_concat(
        [expand_dims(value, 0) for value in values], shape=output_shape) 
Example #29
Source File: preprocessing.py    From dlcookbook-dlbs with Apache License 2.0
def minibatch(self, dataset, subset, use_datasets, cache_data,
                shift_ratio=-1):
    # TODO(jsimsa): Implement datasets code path
    del use_datasets, cache_data, shift_ratio
    with tf.name_scope('batch_processing'):
      all_images, all_labels = dataset.read_data_files(subset)
      all_images = tf.constant(all_images)
      all_labels = tf.constant(all_labels)
      input_image, input_label = tf.train.slice_input_producer(
          [all_images, all_labels])
      input_image = tf.cast(input_image, self.dtype)
      input_label = tf.cast(input_label, tf.int32)
      # Ensure that the random shuffling has good mixing properties.
      min_fraction_of_examples_in_queue = 0.4
      min_queue_examples = int(dataset.num_examples_per_epoch(subset) *
                               min_fraction_of_examples_in_queue)
      raw_images, raw_labels = tf.train.shuffle_batch(
          [input_image, input_label], batch_size=self.batch_size,
          capacity=min_queue_examples + 3 * self.batch_size,
          min_after_dequeue=min_queue_examples)

      images = [[] for i in range(self.num_splits)]
      labels = [[] for i in range(self.num_splits)]

      # Create a list of size batch_size, each containing one image of the
      # batch. Without the unstack call, raw_images[i] would still access the
      # same image via a strided_slice op, but would be slower.
      raw_images = tf.unstack(raw_images, axis=0)
      raw_labels = tf.unstack(raw_labels, axis=0)
      for i in xrange(self.batch_size):
        split_index = i % self.num_splits
        # The raw image read from data has the format [depth, height, width]
        # reshape to the format returned by minibatch.
        raw_image = tf.reshape(raw_images[i],
                               [dataset.depth, dataset.height, dataset.width])
        raw_image = tf.transpose(raw_image, [1, 2, 0])
        image = self.preprocess(raw_image)
        images[split_index].append(image)

        labels[split_index].append(raw_labels[i])

      for split_index in xrange(self.num_splits):
        images[split_index] = tf.parallel_stack(images[split_index])
        labels[split_index] = tf.parallel_stack(labels[split_index])
      return images, labels 
Example #30
Source File: resnet_common.py    From keras_experiments with The Unlicense
def device_minibatches(cls, num_devices, data_dir, total_batch_size,
                           height, width, distort_color,
                           val=False):
        dtype = tf.float32
        subset = 'validation' if val else 'train'

        nrecord = get_num_records(os.path.join(
            data_dir, '{}-*'.format(subset)))
        input_buffer_size = min(10000, nrecord)

        record_input = data_flow_ops.RecordInput(
            file_pattern=os.path.join(data_dir, '{}-*'.format(subset)),
            parallelism=64,
            # Note: This causes deadlock during init if
            # larger than dataset
            buffer_size=input_buffer_size,
            batch_size=total_batch_size,
            seed=0)

        records = record_input.get_yield_op()

        # Split batch into individual images
        records = tf.split(records, total_batch_size, 0)
        records = [tf.reshape(record, []) for record in records]
        # Deserialize and preprocess images into batches for each device
        images = defaultdict(list)
        labels = defaultdict(list)
        with tf.name_scope('input_pipeline'):
            for thread_id, record in enumerate(records):
                imgdata, label, bbox, _ = cls._deserialize_image_record(record)
                image = cls._preprocess(
                    imgdata, bbox, thread_id, height, width, distort_color,
                    val=val)
                label -= 1  # Change to 0-based (don't use background class)
                device_num = thread_id % num_devices
                images[device_num].append(image)
                labels[device_num].append(label)

            # Stack images back into a sub-batch for each device
            for device_num in xrange(num_devices):
                images[device_num] = tf.parallel_stack(images[device_num])
                labels[device_num] = tf.concat(labels[device_num], 0)
                images[device_num] = tf.reshape(
                    images[device_num], [-1, height, width, 3])
                images[device_num] = tf.clip_by_value(
                    images[device_num], 0., 255.)
                images[device_num] = tf.cast(images[device_num], dtype)

        return images, labels, nrecord