Python tensorflow.stack() Examples

The following are 28 code examples of tensorflow.stack(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
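Before the examples, a minimal sketch of what tf.stack() itself does (written for TensorFlow 2.x eager mode purely for illustration; most of the examples below use the TF 1.x graph API):

import tensorflow as tf

x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])

# axis=0 (the default) packs the three rank-1 tensors into a [3, 2] tensor.
print(tf.stack([x, y, z]))          # [[1, 4], [2, 5], [3, 6]]
# axis=1 interleaves them into a [2, 3] tensor instead.
print(tf.stack([x, y, z], axis=1))  # [[1, 2, 3], [4, 5, 6]]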
Example #1
Source File: unet.py    From spleeter with MIT License
def softmax_unet(input_tensor, instruments, params={}):
    """ Apply softmax to multitrack unet in order to have mask suming to one.

    :param input_tensor: Tensor to apply blstm to.
    :param instruments: Iterable that provides a collection of instruments.
    :param params: (Optional) dict of BLSTM parameters.
    :returns: Created output tensor dict.
    """
    logit_mask_list = []
    for instrument in instruments:
        out_name = f'{instrument}_spectrogram'
        logit_mask_list.append(
            apply_unet(
                input_tensor,
                output_name=out_name,
                params=params,
                output_mask_logit=True))
    masks = Softmax(axis=4)(tf.stack(logit_mask_list, axis=4))
    output_dict = {}
    for i, instrument in enumerate(instruments):
        out_name = f'{instrument}_spectrogram'
        output_dict[out_name] = Multiply(name=out_name)([
            masks[..., i],
            input_tensor])
    return output_dict 
Example #2
Source File: select_dim_value.py    From post--memorization-in-rnns with MIT License
def select_dim_value(x, indices, name=None):
    with tf.name_scope(name, "select-dim-value", values=[x, indices]):
        # x.shape = (rest..., dims)
        rest = tf.shape(x)[:-1]
        dims = tf.shape(x)[-1]
        size = tf.size(indices, out_type=indices.dtype)

        # reshape to (size, dims)
        t = tf.reshape(x, shape=[-1, dims])
        # then index as ([0, 1, ..., size-1], indices.ravel())
        nd_indices = tf.stack([
            tf.range(0, size, dtype=indices.dtype),
            tf.reshape(indices, shape=[-1])
        ], axis=1)
        t = tf.gather_nd(t, indices=nd_indices)

        # reshape back to (rest...)
        t = tf.reshape(t, rest)
        t.set_shape(x.get_shape()[:-1])
        return t 
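A hypothetical call (not part of the original file) makes the contract concrete: indices picks one element per row along the last axis.

# Hypothetical usage, assuming eager mode or a session to evaluate:
# x = tf.constant([[0.1, 0.9], [0.8, 0.2]])
# idx = tf.constant([1, 0])
# select_dim_value(x, idx)  # ==> [0.9, 0.8], i.e. x[i, idx[i]] for each row i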
Example #3
Source File: layers.py    From ARU-Net with GNU General Public License v2.0
def sequence_to_images(tensor, num_batches):
  """Convert a batch of sequences into a batch of images.

  Args:
    tensor: (num_steps, num_batchesRNN, depth) sequence tensor
    num_batches: the number of image batches

  Returns:
    (num_batches, height, width, depth) tensor
  """

  shapeT = tf.shape(tensor)
  shapeL = tensor.get_shape().as_list()
  # Calculate the output size of the upsampled tensor
  height = tf.to_int32(shapeT[1] / num_batches)
  n_shape = tf.stack([
      shapeT[0],
      num_batches,
      height,
      shapeL[2]
  ])

  reshaped = tf.reshape(tensor, n_shape)
  return tf.transpose(reshaped, [1, 2, 0, 3]) 
Example #4
Source File: layers.py    From ARU-Net with GNU General Public License v2.0
def images_to_sequence(tensor):
  """Convert a batch of images into a batch of sequences.

  Args:
    tensor: a (num_images, height, width, depth) tensor

  Returns:
    (width, num_images*height, depth) sequence tensor
  """
  transposed = tf.transpose(tensor, [2, 0, 1, 3])

  shapeT = tf.shape(transposed)
  shapeL = transposed.get_shape().as_list()
  # Calculate the output size of the upsampled tensor
  n_shape = tf.stack([
      shapeT[0],
      shapeT[1]*shapeT[2],
      shapeL[3]
  ])
  reshaped = tf.reshape(transposed, n_shape)
  return reshaped 
Example #5
Source File: expert_utils.py    From fine-lm with MIT License
def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, multiplied by the corresponding gates.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean.

    Returns:
      a list of num_datashards `Tensor`s with shapes
        `[batch_size[d], <extra_output_dims>]`.
    """
    expert_part_sizes = tf.unstack(
        tf.stack([d.part_sizes for d in self._dispatchers]),
        num=self._ep.n,
        axis=1)
    # list of lists of shape [num_experts][num_datashards]
    expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes)
    expert_output_parts_t = transpose_list_of_lists(expert_output_parts)
    def my_combine(dispatcher, parts):
      return dispatcher.combine(
          common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)),
          multiply_by_gates=multiply_by_gates)
    return self._dp(my_combine, self._dispatchers, expert_output_parts_t) 
Example #6
Source File: ops.py    From DOTA_models with Apache License 2.0
def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for name_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.name_scope(scope, 'OneHotEncoding', [labels]):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels 
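As an aside, the same encoding can be produced directly with tf.one_hot; this one-liner is a minimal sketch, not a drop-in patch for the snippet above:

onehot_labels = tf.one_hot(labels, depth=num_classes, on_value=1.0, off_value=0.0)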
Example #7
Source File: network_units.py    From DOTA_models with Apache License 2.0
def convert_network_state_tensorarray(tensorarray):
  """Converts a source TensorArray to a source Tensor.

  Performs a permutation between the steps * [stride, D] shape of a
  source TensorArray and the (flattened) [stride * steps, D] shape of
  a source Tensor.

  The TensorArrays used during recurrence have an additional zeroth step that
  needs to be removed.

  Args:
    tensorarray: TensorArray object to be converted.

  Returns:
    Tensor object after conversion.
  """
  tensor = tensorarray.stack()  # Results in a [steps, stride, D] tensor.
  tensor = tf.slice(tensor, [1, 0, 0], [-1, -1, -1])  # Lop off the 0th step.
  tensor = tf.transpose(tensor, [1, 0, 2])  # Switch steps and stride.
  return tf.reshape(tensor, [-1, tf.shape(tensor)[2]]) 
Example #8
Source File: decoder.py    From neural-combinatorial-optimization-rl-tensorflow with MIT License
def loop_decode(self):
        # decoder_initial_state: Tuple Tensor (c,h) of size [batch_size x cell.state_size]
        # decoder_first_input: Tensor [batch_size x cell.state_size]

        # Loop the decoding process and collect results
        s, i = self.decoder_initial_state, tf.cast(self.decoder_first_input, tf.float32)
        for step in range(self.seq_length):
            s, i = self.decode(s, i, step)

        # Return to start
        self.positions.append(self.first_city)

        # Stack visited indices
        self.positions = tf.stack(self.positions, axis=1)  # [Batch, seq_length+1]

        # Sum log_softmax over output steps
        self.log_softmax = tf.add_n(self.log_softmax)  # [Batch, seq_length]

        # Stack attending & pointing distribution
        self.attending = tf.stack(self.attending, axis=1)  # [Batch, seq_length, seq_length]
        self.pointing = tf.stack(self.pointing, axis=1)  # [Batch, seq_length, seq_length]
        
        # Return stacked lists of visited_indices and log_softmax for backprop
        return self.positions,self.log_softmax 
Example #9
Source File: vision_baseline_lstm.py    From DOTA_models with Apache License 2.0
def lstm_online(cell_fn, num_steps, inputs, state, varscope):
  # inputs is B x num_steps x C, C channels.
  # state is a 2-tuple with B x 1 x C1, B x 1 x C2.
  # Output state is always B x 1 x C.
  inputs = tf.unstack(inputs, axis=1, num=num_steps)
  state = tf.unstack(state, axis=1, num=1)[0]
  outputs = [] 
  
  if num_steps > 1: 
    varscope.reuse_variables()
  
  for s in range(num_steps):
    output, state = cell_fn(inputs[s], state)
    outputs.append(output)
  outputs = tf.stack(outputs, axis=1)
  state = tf.stack([state], axis=1)
  return outputs, state 
Example #10
Source File: box_coder_test.py    From object_detector_app with MIT License
def test_batch_decode(self):
    mock_anchor_corners = tf.constant(
        [[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
    mock_anchors = box_list.BoxList(mock_anchor_corners)
    mock_box_coder = MockBoxCoder()

    expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]],
                      [[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]

    encoded_boxes_list = [mock_box_coder.encode(
        box_list.BoxList(tf.constant(boxes)), mock_anchors)
                          for boxes in expected_boxes]
    encoded_boxes = tf.stack(encoded_boxes_list)
    decoded_boxes = box_coder.batch_decode(
        encoded_boxes, mock_box_coder, mock_anchors)

    with self.test_session() as sess:
      decoded_boxes_result = sess.run(decoded_boxes)
      self.assertAllClose(expected_boxes, decoded_boxes_result) 
Example #11
Source File: box_coder_test.py    From DOTA_models with Apache License 2.0
def test_batch_decode(self):
    mock_anchor_corners = tf.constant(
        [[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
    mock_anchors = box_list.BoxList(mock_anchor_corners)
    mock_box_coder = MockBoxCoder()

    expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]],
                      [[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]

    encoded_boxes_list = [mock_box_coder.encode(
        box_list.BoxList(tf.constant(boxes)), mock_anchors)
                          for boxes in expected_boxes]
    encoded_boxes = tf.stack(encoded_boxes_list)
    decoded_boxes = box_coder.batch_decode(
        encoded_boxes, mock_box_coder, mock_anchors)

    with self.test_session() as sess:
      decoded_boxes_result = sess.run(decoded_boxes)
      self.assertAllClose(expected_boxes, decoded_boxes_result) 
Example #12
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims == 0:
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2)) # [numerator, denominator]
    update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op

#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call. 
Example #13
Source File: distributions.py    From DOTA_models with Apache License 2.0
def __init__(self, x_bxu, z_size, name, var_min=0.0):
    """Create an input dependent diagonal Gaussian distribution.

    Args:
      x_bxu: The input tensor from which the mean and variance are computed,
        via a linear transformation of x.  I.e.
          mu = Wx + b, log(var) = Mx + c
      z_size: The size of the distribution.
      name:  The name to prefix to learned variables.
      var_min (optional): Minimal variance allowed.  This is an additional
        way to control the amount of information getting through the stochastic
        layer.
    """
    size_bxn = tf.stack([tf.shape(x_bxu)[0], z_size])
    self.mean_bxn = mean_bxn = linear(x_bxu, z_size, name=(name+"/mean"))
    logvar_bxn = linear(x_bxu, z_size, name=(name+"/logvar"))
    if var_min > 0.0:
      logvar_bxn = tf.log(tf.exp(logvar_bxn) + var_min)
    self.logvar_bxn = logvar_bxn

    self.noise_bxn = noise_bxn = tf.random_normal(size_bxn)
    self.noise_bxn.set_shape([None, z_size])
    self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn 
Example #14
Source File: target_assigner.py    From object_detector_app with MIT License
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
    """Returns a regression target for each anchor.

    Args:
      anchors: a BoxList representing N anchors
      groundtruth_boxes: a BoxList representing M groundtruth_boxes
      match: a matcher.Match object

    Returns:
      reg_targets: a float32 tensor with shape [N, box_code_dimension]
    """
    matched_anchor_indices = match.matched_column_indices()
    unmatched_ignored_anchor_indices = (match.
                                        unmatched_or_ignored_column_indices())
    matched_gt_indices = match.matched_row_indices()
    matched_anchors = box_list_ops.gather(anchors,
                                          matched_anchor_indices)
    matched_gt_boxes = box_list_ops.gather(groundtruth_boxes,
                                           matched_gt_indices)
    matched_reg_targets = self._box_coder.encode(matched_gt_boxes,
                                                 matched_anchors)
    unmatched_ignored_reg_targets = tf.tile(
        self._default_regression_target(),
        tf.stack([tf.size(unmatched_ignored_anchor_indices), 1]))
    reg_targets = tf.dynamic_stitch(
        [matched_anchor_indices, unmatched_ignored_anchor_indices],
        [matched_reg_targets, unmatched_ignored_reg_targets])
    # TODO: summarize the number of matches on average.
    return reg_targets 
Example #15
Source File: inception_v3.py    From DeepLab_v3 with MIT License
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time, this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                         tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out 
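A hypothetical call (not in the original file) illustrates the clipping: with statically known 5x5 spatial dimensions, a requested 7x7 kernel is reduced to fit, while unknown dimensions leave it unchanged.

# Hypothetical usage, assuming the TF 1.x placeholder API:
# images = tf.placeholder(tf.float32, [None, 5, 5, 128])
# _reduced_kernel_size_for_small_input(images, [7, 7])  # ==> [5, 5]
# images = tf.placeholder(tf.float32, [None, None, None, 128])
# _reduced_kernel_size_for_small_input(images, [7, 7])  # ==> [7, 7] (unchanged)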
Example #16
Source File: batcher_test.py    From object_detector_app with MIT License
def test_batcher_when_batch_size_is_one(self):
    with self.test_session() as sess:
      batch_size = 1
      num_batches = 2
      examples = tf.Variable(tf.constant(2, dtype=tf.int32))
      counter = examples.count_up_to(num_batches * batch_size + 2)
      image = tf.reshape(
          tf.range(counter * counter), tf.stack([counter, counter]))
      batch_queue = batcher.BatchQueue(
          tensor_dict={'image': image},
          batch_size=batch_size,
          batch_queue_capacity=100,
          num_batch_queue_threads=1,
          prefetch_queue_capacity=100)
      batch = batch_queue.dequeue()

      for tensor_dict in batch:
        for tensor in tensor_dict.values():
          self.assertAllEqual([None, None], tensor.get_shape().as_list())

      tf.initialize_all_variables().run()
      with slim.queues.QueueRunners(sess):
        i = 2
        for _ in range(num_batches):
          batch_np = sess.run(batch)
          for tensor_dict in batch_np:
            for tensor in tensor_dict.values():
              self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
              i += 1
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(batch) 
Example #17
Source File: target_assigner.py    From object_detector_app with MIT License
def _create_classification_targets(self, groundtruth_labels, match):
    """Create classification targets for each anchor.

    Assign a classification target for each anchor to the matching
    groundtruth label that is provided by match.  Anchors that are not matched
    to anything are given the target self._unmatched_cls_target.

    Args:
      groundtruth_labels:  a tensor of shape [num_gt_boxes, d_1, ... d_k]
        with labels for each of the ground_truth boxes. The subshape
        [d_1, ... d_k] can be empty (corresponding to scalar labels).
      match: a matcher.Match object that provides a matching between anchors
        and groundtruth boxes.

    Returns:
      cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
        where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
        which has shape [num_gt_boxes, d_1, d_2, ... d_k].
    """
    matched_anchor_indices = match.matched_column_indices()
    unmatched_ignored_anchor_indices = (match.
                                        unmatched_or_ignored_column_indices())
    matched_gt_indices = match.matched_row_indices()
    matched_cls_targets = tf.gather(groundtruth_labels, matched_gt_indices)

    ones = self._unmatched_cls_target.shape.ndims * [1]
    unmatched_ignored_cls_targets = tf.tile(
        tf.expand_dims(self._unmatched_cls_target, 0),
        tf.stack([tf.size(unmatched_ignored_anchor_indices)] + ones))

    cls_targets = tf.dynamic_stitch(
        [matched_anchor_indices, unmatched_ignored_anchor_indices],
        [matched_cls_targets, unmatched_ignored_cls_targets])
    return cls_targets 
Example #18
Source File: batcher_test.py    From object_detector_app with MIT License
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(self):
    with self.test_session() as sess:
      batch_size = 3
      num_batches = 2
      examples = tf.Variable(tf.constant(2, dtype=tf.int32))
      counter = examples.count_up_to(num_batches * batch_size + 2)
      boxes = tf.tile(
          tf.reshape(tf.range(4), [1, 4]), tf.stack([counter, tf.constant(1)]))
      batch_queue = batcher.BatchQueue(
          tensor_dict={'boxes': boxes},
          batch_size=batch_size,
          batch_queue_capacity=100,
          num_batch_queue_threads=1,
          prefetch_queue_capacity=100)
      batch = batch_queue.dequeue()

      for tensor_dict in batch:
        for tensor in tensor_dict.values():
          self.assertAllEqual([None, 4], tensor.get_shape().as_list())

      tf.initialize_all_variables().run()
      with slim.queues.QueueRunners(sess):
        i = 2
        for _ in range(num_batches):
          batch_np = sess.run(batch)
          for tensor_dict in batch_np:
            for tensor in tensor_dict.values():
              self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1)))
              i += 1
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(batch) 
Example #19
Source File: inception_v2.py    From DeepLab_v3 with MIT License
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time, this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                         tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out 
Example #20
Source File: common_layers.py    From fine-lm with MIT License
def index_last_dim_with_indices(x, indices):
  """Use indices to index into the last axis of x.

  This can be useful for recovering the actual probabilities of a sample from a
  probability distribution.

  Args:
    x: Tensor, n-d.
    indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
      dimensions of x. The values of indices will be used to index into the last
      axis of x.

  Returns:
    Tensor, (n-1)-d.
  """
  assert len(x.shape) == len(indices.shape) + 1

  x_shape = shape_list(x)
  vocab_size = x_shape[-1]

  flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])
  flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])

  idx = tf.stack(
      [
          tf.range(tf.to_int64(shape_list(flat_indices)[0])),
          tf.to_int64(flat_indices)
      ],
      axis=1)
  flat_x_idx = tf.gather_nd(flat_x, idx)

  x_idx = tf.reshape(flat_x_idx, x_shape[:-1])

  return x_idx 
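A hypothetical call (not from the original file), analogous to the select_dim_value example earlier:

# x = tf.constant([[0.1, 0.9], [0.8, 0.2]])   # shape [2, 2]
# indices = tf.constant([1, 0])               # shape [2]
# index_last_dim_with_indices(x, indices)     # ==> [0.9, 0.8]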
Example #21
Source File: inputpipeline.py    From imgcomp-cvpr with GNU General Public License v3.0
def _preprocess(frames, crop_size, num_crops_per_frame, seed=None):
    """
    :param frames: HW3
    :param crop_size: tuple (height, width)
    :param num_crops_per_frame: number of random crops to take from the frame
    :return: R3HW, where R == num_crops_per_frame.
    """
    with tf.name_scope('preprocess'):
        crop_height, crop_width = crop_size
        frames = tf.stack([
            tf.random_crop(frames, [crop_height, crop_width, 3], seed=seed)
            for _ in range(num_crops_per_frame)])  # RHW3
        frames = tf_helpers.random_flip(frames, axis=2, seed=seed)  # RHW3, flip W
        frames = tf.transpose(frames, (0, 3, 1, 2), name='NCHW')  # R3HW
        return frames 
Example #22
Source File: modalities.py    From fine-lm with MIT License
def top(self, body_output, _):
    frames = tf.stack(body_output, axis=1)
    rgb_frames = common_layers.convert_real_to_rgb(frames)
    common_layers.summarize_video(rgb_frames, "body_output")
    return tf.expand_dims(rgb_frames, axis=-1) 
Example #23
Source File: expert_utils.py    From fine-lm with MIT License
def _my_top_k(x, k):
  """GPU-compatible version of top-k that works for very small constant k.

  Calls argmax repeatedly.

  tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,
  seems not to be, so if we use tf.nn.top_k, then both the top_k and its
  gradient go on cpu.  Once this is not an issue, this function becomes
  obsolete and should be replaced by tf.nn.top_k.

  Args:
    x: a 2d Tensor.
    k: a small integer.

  Returns:
    values: a Tensor of shape [batch_size, k]
    indices: an int32 Tensor of shape [batch_size, k]
  """
  if k > 10:
    return tf.nn.top_k(x, k)
  values = []
  indices = []
  depth = tf.shape(x)[1]
  for i in range(k):
    values.append(tf.reduce_max(x, 1))
    argmax = tf.argmax(x, 1)
    indices.append(argmax)
    if i + 1 < k:
      x += tf.one_hot(argmax, depth, -1e9)
  return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1)) 
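A hypothetical call (not in the original file) showing the equivalence for small k:

# logits = tf.constant([[1.0, 3.0, 2.0]])
# values, indices = _my_top_k(logits, 2)  # values ==> [[3.0, 2.0]], indices ==> [[1, 2]]
# This matches tf.nn.top_k(logits, 2), but is built from repeated argmax calls.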
Example #24
Source File: tf_reader.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def decode_jpg(filename, directory, center=False, crop=None, flip=False, resize=None, ratio=False, filename_offset='',
               normalize='imagenet'):
    # Read image
    im_content = tf.read_file(directory + filename_offset + filename)
    example = tf.image.decode_jpeg(im_content, channels=3)
    # preprocessing
    example = tf.cast(example[:, :, ::-1], tf.float32)
    if normalize == 'imagenet':
        example = example - IMAGENET_MEAN
    elif normalize:
        example = example / 127.5 - 1.
    # cropping
    if crop:
        shape = tf.shape(example)
        if ratio:
            assert isinstance(crop, list)
            crop_h = crop[0]
            crop_w = crop[1]
        else:
            assert isinstance(crop, int)
            shortest = tf.cond(tf.less(shape[0], shape[1]), lambda: shape[0], lambda: shape[1])
            crop_h = tf.cond(tf.less_equal(shortest, tf.constant(crop)), lambda: shortest, lambda: tf.constant(crop))
            crop_w = crop_h
        if center:
            example = tf.image.resize_image_with_crop_or_pad(example, crop_h, crop_w)
        else:
            example = tf.random_crop(example, [crop_h, crop_w, 3])
    # resize
    if resize:
        assert isinstance(resize, (int, float))
        new_size = tf.stack([resize, resize])
        example = tf.image.resize_images(example, new_size)
    # random horizontal flip
    if flip:
        example = tf.image.random_flip_left_right(example)
    return tf.transpose(example, [2, 0, 1]) 
Example #25
Source File: tf_reader.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def read_csv(filename, directory, num_epoch=None, records=None):
    if records is None:
        records = records_default
    # Read examples and labels from file
    filename_queue = tf.train.string_input_producer([os.path.join(directory, filename)],
                                                    num_epochs=num_epoch, shuffle=True)
    reader = tf.TextLineReader()
    _, value = reader.read(filename_queue)
    decoded = tf.decode_csv(value, record_defaults=records)
    im_path = tf.stack(decoded[0])
    if len(decoded) > 1:
        label = tf.stack(decoded[1:])
    else:
        label = [0]
    return im_path, label 
Example #26
Source File: utility.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def lambda_return(reward, value, length, discount, lambda_):
  """TD-lambda returns."""
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  sequence = mask * reward + discount * value * (1 - lambda_)
  discount = mask * discount * lambda_
  sequence = tf.stack([sequence, discount], 2)
  return_ = tf.reverse(tf.transpose(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * agg,
      tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
      tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
  return tf.check_numerics(tf.stop_gradient(return_), 'return') 
Example #27
Source File: memory.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
      transitions: Tuple of transition quantities with batch dimension.
      rows: Episodes to append to, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity,
        message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less(
          tf.gather(self._length, rows), self._max_length,
          message='max length exceeded')
    append_ops = []
    with tf.control_dependencies([assert_max_length]):
      for buffer_, elements in zip(self._buffers, transitions):
        timestep = tf.gather(self._length, rows)
        indices = tf.stack([rows, timestep], 1)
        append_ops.append(tf.scatter_nd_update(buffer_, indices, elements))
    with tf.control_dependencies(append_ops):
      episode_mask = tf.reduce_sum(tf.one_hot(
          rows, self._capacity, dtype=tf.int32), 0)
      return self._length.assign_add(episode_mask) 
Example #28
Source File: box_coder.py    From DOTA_models with Apache License 2.0
def batch_decode(encoded_boxes, box_coder, anchors):
  """Decode a batch of encoded boxes.

  This op takes a batch of encoded bounding boxes and transforms
  them to a batch of bounding boxes specified by their corners in
  the order of [y_min, x_min, y_max, x_max].

  Args:
    encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
      code_size] representing the location of the objects.
    box_coder: a BoxCoder object.
    anchors: a BoxList of anchors used to encode `encoded_boxes`.

  Returns:
    decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
      code_size] representing the corners of the objects in the order
      of [y_min, x_min, y_max, x_max].

  Raises:
    ValueError: if batch sizes of the inputs are inconsistent, or if
    the numbers of anchors inferred from encoded_boxes and anchors are
    inconsistent.
  """
  encoded_boxes.get_shape().assert_has_rank(3)
  if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static():
    raise ValueError('The number of anchors inferred from encoded_boxes'
                     ' and anchors are inconsistent: shape[1] of encoded_boxes'
                     ' %s should be equal to the number of anchors: %s.' %
                     (encoded_boxes.get_shape()[1].value,
                      anchors.num_boxes_static()))

  decoded_boxes = tf.stack([
      box_coder.decode(boxes, anchors).get()
      for boxes in tf.unstack(encoded_boxes)
  ])
  return decoded_boxes