Python tensorflow.slice() Examples

The following are 30 code examples of tensorflow.slice(), collected from open-source projects. The project and source file for each example are listed above its code. You may also want to check out the other available functions and classes of the tensorflow module.
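Before the examples, a minimal sketch of the API itself: tf.slice(input_, begin, size) extracts a contiguous region that starts at the per-dimension offsets in begin and spans the per-dimension extents in size, where a size of -1 means "all remaining elements in that dimension".

import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])

# One row and two columns, starting at row 0, column 1 -> [[2, 3]]
a = tf.slice(t, begin=[0, 1], size=[1, 2])

# size=-1 keeps everything remaining in that dimension -> [[5, 6]]
b = tf.slice(t, begin=[1, 1], size=[-1, -1])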
Example #1
Source File: ops.py    From object_detector_app with MIT License
def expanded_shape(orig_shape, start_dim, num_dims):
  """Inserts multiple ones into a shape vector.

  Inserts an all-1 vector of length num_dims at position start_dim into a shape.
  Can be combined with tf.reshape to generalize tf.expand_dims.

  Args:
    orig_shape: the shape into which the all-1 vector is added (int32 vector)
    start_dim: insertion position (int scalar)
    num_dims: length of the inserted all-1 vector (int scalar)
  Returns:
    An int32 vector of length tf.size(orig_shape) + num_dims.
  """
  with tf.name_scope('ExpandedShape'):
    start_dim = tf.expand_dims(start_dim, 0)  # scalar to rank-1
    before = tf.slice(orig_shape, [0], start_dim)
    add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
    after = tf.slice(orig_shape, start_dim, [-1])
    new_shape = tf.concat([before, add_shape, after], 0)
    return new_shape 
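A quick sketch of how expanded_shape might be used (hypothetical values; under TF1 the result would be evaluated in a session):

orig_shape = tf.constant([2, 3], dtype=tf.int32)
new_shape = expanded_shape(orig_shape, start_dim=1, num_dims=2)
# new_shape evaluates to [2, 1, 1, 3]; reshaping a [2, 3] tensor with it
# is equivalent to applying tf.expand_dims twice at axis 1.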
Example #2
Source File: ipcr_model.py    From pointnet-registration-framework with MIT License
def get_loss(predicted_transformation, batch_size, template_pointclouds_pl, source_pointclouds_pl):
	with tf.variable_scope('loss') as LossEvaluation:
		predicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])
		predicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])

		# with tf.variable_scope('quat_normalization') as norm:
		norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)
		norm_predicted_quat = tf.sqrt(norm_predicted_quat)
		norm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))
		const = tf.constant(0.0000001,shape=(batch_size,1),dtype=tf.float32)
		norm_predicted_quat = tf.add(norm_predicted_quat,const)
		predicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)

		transformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat,predicted_position)

		#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
		loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
	return loss 
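The two slices above split a [batch_size, 7] pose prediction into its translation and quaternion parts. A standalone TF1-style sketch of that pattern (the [tx, ty, tz, qw, qx, qy, qz] layout is an assumption):

pose = tf.random_normal([32, 7])            # hypothetical pose predictions
position = tf.slice(pose, [0, 0], [-1, 3])  # columns 0..2
quat = tf.slice(pose, [0, 3], [-1, 4])      # columns 3..6
# Using size=-1 for the batch dimension avoids hard-coding batch_size.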
Example #3
Source File: TestUpd.py    From NTM-One-Shot-TF with MIT License
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print('Hello')"""

    def update_tensor(V, dim2, val):  # Update tensor V, with index(:,dim2[:]) by val[:]
        val = tf.cast(val, V.dtype)
        def body(_, elems):
            # Python 3 removed tuple parameter unpacking, so unpack explicitly.
            v, d2, chg = elems
            d2_int = tf.cast(d2, tf.int32)
            # Rebuild the row with element d2 replaced; tf.concat_v2 was renamed
            # to tf.concat in TensorFlow 1.0.
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val), initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32), name="Scan_Update")
        return Z 
Example #4
Source File: ops.py    From DOTA_models with Apache License 2.0
def expanded_shape(orig_shape, start_dim, num_dims):
  """Inserts multiple ones into a shape vector.

  Inserts an all-1 vector of length num_dims at position start_dim into a shape.
  Can be combined with tf.reshape to generalize tf.expand_dims.

  Args:
    orig_shape: the shape into which the all-1 vector is added (int32 vector)
    start_dim: insertion position (int scalar)
    num_dims: length of the inserted all-1 vector (int scalar)
  Returns:
    An int32 vector of length tf.size(orig_shape) + num_dims.
  """
  with tf.name_scope('ExpandedShape'):
    start_dim = tf.expand_dims(start_dim, 0)  # scalar to rank-1
    before = tf.slice(orig_shape, [0], start_dim)
    add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
    after = tf.slice(orig_shape, start_dim, [-1])
    new_shape = tf.concat([before, add_shape, after], 0)
    return new_shape 
Example #5
Source File: vgsl_model.py    From DOTA_models with Apache License 2.0
def _PadLabels2d(logits_size, labels):
  """Pads or slices the 2nd dimension of 2-d labels to match logits_size.

  Covers the case of 1-d softmax output, when labels is [batch, seq] and
  logits is [batch, seq, onehot]
  Args:
    logits_size: Tensor returned from tf.shape giving the target size.
    labels:      2-d, but not necessarily matching in size.

  Returns:
    labels: Resized by padding or clipping the last dimension to logits_size.
  """
  pad = logits_size - tf.shape(labels)[1]

  def _PadFn():
    return tf.pad(labels, [[0, 0], [0, pad]])

  def _SliceFn():
    return tf.slice(labels, [0, 0], [-1, logits_size])

  return tf.cond(tf.greater(pad, 0), _PadFn, _SliceFn) 
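A self-contained sketch of the same pad-or-slice pattern with concrete (hypothetical) values:

labels = tf.constant([[1, 2, 3],
                      [4, 5, 6]])
target = tf.constant(5)  # desired length of the last dimension
pad = target - tf.shape(labels)[1]
resized = tf.cond(tf.greater(pad, 0),
                  lambda: tf.pad(labels, [[0, 0], [0, pad]]),
                  lambda: tf.slice(labels, [0, 0], [-1, target]))
# pad == 2 here, so resized is [[1, 2, 3, 0, 0], [4, 5, 6, 0, 0]].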
Example #6
Source File: expert_utils.py    From fine-lm with MIT License
def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, weighted by the gates.

    The slice corresponding to a particular batch element `b` is computed
    as the sum over all experts `i` of the expert output, weighted by the
    corresponding gate values.  If `multiply_by_gates` is set to False, the
    gate values are ignored.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean

    Returns:
      a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
    """
    # see comments on convert_gradient_to_tensor
    stitched = common_layers.convert_gradient_to_tensor(
        tf.concat(expert_out, 0))
    if multiply_by_gates:
      stitched *= tf.expand_dims(self._nonzero_gates, 1)
    combined = tf.unsorted_segment_sum(stitched, self._batch_index,
                                       tf.shape(self._gates)[0])
    return combined 
Example #7
Source File: vgsl_model.py    From DOTA_models with Apache License 2.0
def _AddOutputs(self, prev_layer, out_dims, out_func, num_classes):
    """Adds the output layer and loss function.

    Args:
      prev_layer:  Output of last layer of main network.
      out_dims:    Number of output dimensions, 0, 1 or 2.
      out_func:    Output non-linearity. 's' or 'c'=softmax, 'l'=logistic.
      num_classes: Number of outputs/size of last output dimension.
    """
    height_in = shapes.tensor_dim(prev_layer, dim=1)
    logits, outputs = self._AddOutputLayer(prev_layer, out_dims, out_func,
                                           num_classes)
    if self.mode == 'train':
      # Setup loss for training.
      self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func)
      tf.summary.scalar('loss', self.loss)
    elif out_dims == 0:
      # Be sure the labels match the output, even in eval mode.
      self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
      self.labels = tf.reshape(self.labels, [-1])

    logging.info('Final output=%s', outputs)
    logging.info('Labels tensor=%s', self.labels)
    self.output = outputs 
Example #8
Source File: pcr_model.py    From pointnet-registration-framework with MIT License
def get_loss_b(self,predicted_transformation,batch_size,template_pointclouds_pl,source_pointclouds_pl):	
		with tf.variable_scope('loss') as LossEvaluation:
			predicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])
			predicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])

			# with tf.variable_scope('quat_normalization') as norm:
			norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)
			norm_predicted_quat = tf.sqrt(norm_predicted_quat)
			norm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))
			const = tf.constant(0.0000001,shape=(batch_size,1),dtype=tf.float32)
			norm_predicted_quat = tf.add(norm_predicted_quat,const)
			predicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)
	
			transformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat, predicted_position)

			# Use 1024 Points to find loss.
			#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
			loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
			# loss = 0
		return loss 
Example #9
Source File: ops.py    From vehicle_counting_tensorflow with MIT License
def expanded_shape(orig_shape, start_dim, num_dims):
  """Inserts multiple ones into a shape vector.

  Inserts an all-1 vector of length num_dims at position start_dim into a shape.
  Can be combined with tf.reshape to generalize tf.expand_dims.

  Args:
    orig_shape: the shape into which the all-1 vector is added (int32 vector)
    start_dim: insertion position (int scalar)
    num_dims: length of the inserted all-1 vector (int scalar)
  Returns:
    An int32 vector of length tf.size(orig_shape) + num_dims.
  """
  with tf.name_scope('ExpandedShape'):
    start_dim = tf.expand_dims(start_dim, 0)  # scalar to rank-1
    before = tf.slice(orig_shape, [0], start_dim)
    add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
    after = tf.slice(orig_shape, start_dim, [-1])
    new_shape = tf.concat([before, add_shape, after], 0)
    return new_shape 
Example #10
Source File: ssd_meta_arch.py    From vehicle_counting_tensorflow with MIT License
def _minibatch_subsample_fn(self, inputs):
    """Randomly samples anchors for one image.

    Args:
      inputs: a list of 2 inputs. First one is a tensor of shape [num_anchors,
        num_classes] indicating targets assigned to each anchor. Second one
        is a tensor of shape [num_anchors] indicating the class weight of each
        anchor.

    Returns:
      batch_sampled_indicator: bool tensor of shape [num_anchors] indicating
        whether the anchor should be selected for loss computation.
    """
    cls_targets, cls_weights = inputs
    if self._add_background_class:
      # Set background_class bits to 0 so that the positives_indicator
      # computation would not consider background class.
      background_class = tf.zeros_like(tf.slice(cls_targets, [0, 0], [-1, 1]))
      regular_class = tf.slice(cls_targets, [0, 1], [-1, -1])
      cls_targets = tf.concat([background_class, regular_class], 1)
    positives_indicator = tf.reduce_sum(cls_targets, axis=1)
    return self._random_example_sampler.subsample(
        tf.cast(cls_weights, tf.bool),
        batch_size=None,
        labels=tf.cast(positives_indicator, tf.bool)) 
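The slice-and-concat above zeroes the background column (column 0) while leaving the remaining class columns intact. A small sketch with hypothetical one-hot targets:

cls_targets = tf.constant([[1., 0., 0.],
                           [0., 1., 0.]])
background = tf.zeros_like(tf.slice(cls_targets, [0, 0], [-1, 1]))
regular = tf.slice(cls_targets, [0, 1], [-1, -1])
masked = tf.concat([background, regular], 1)
# masked == [[0., 0., 0.], [0., 1., 0.]], so a row sum now counts only
# non-background assignments.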
Example #11
Source File: network_units.py    From DOTA_models with Apache License 2.0
def convert_network_state_tensorarray(tensorarray):
  """Converts a source TensorArray to a source Tensor.

  Performs a permutation between the steps * [stride, D] shape of a
  source TensorArray and the (flattened) [stride * steps, D] shape of
  a source Tensor.

  The TensorArrays used during recurrence have an additional zeroth step that
  needs to be removed.

  Args:
    tensorarray: TensorArray object to be converted.

  Returns:
    Tensor object after conversion.
  """
  tensor = tensorarray.stack()  # Results in a [steps, stride, D] tensor.
  tensor = tf.slice(tensor, [1, 0, 0], [-1, -1, -1])  # Lop off the 0th step.
  tensor = tf.transpose(tensor, [1, 0, 2])  # Switch steps and stride.
  return tf.reshape(tensor, [-1, tf.shape(tensor)[2]]) 
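A tiny sketch of the "lop off the 0th step" slice: begin=[1, 0, 0] skips the first step and size=[-1, -1, -1] keeps everything else:

t = tf.reshape(tf.range(12), [3, 2, 2])         # [steps, stride, D]
trimmed = tf.slice(t, [1, 0, 0], [-1, -1, -1])  # shape [2, 2, 2]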
Example #12
Source File: common_attention.py    From fine-lm with MIT License
def add_positional_embedding(x, max_length, name, positions=None):
  """Add positional embedding.

  Args:
    x: a Tensor with shape [batch, length, depth]
    max_length: an integer.  static maximum size of any dimension.
    name: a name for this layer.
    positions: an optional tensor with shape [batch, length]

  Returns:
    a Tensor the same shape as x.
  """
  _, length, depth = common_layers.shape_list(x)
  var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
  if positions is None:
    sliced = tf.cond(
        tf.less(length, max_length),
        lambda: tf.slice(var, [0, 0], [length, -1]),
        lambda: tf.pad(var, [[0, length - max_length], [0, 0]]))
    return x + tf.expand_dims(sliced, 0)
  else:
    return x + tf.gather(var, tf.to_int32(positions)) 
Example #13
Source File: tf_io_pipline_tools.py    From lanenet-lane-detection with Apache License 2.0
def central_crop(image, crop_height, crop_width):
    """
    Performs central crops of the given image
    :param image:
    :param crop_height:
    :param crop_width:
    :return:
    """
    shape = tf.shape(input=image)
    height, width = shape[0], shape[1]

    amount_to_be_cropped_h = (height - crop_height)
    crop_top = amount_to_be_cropped_h // 2
    amount_to_be_cropped_w = (width - crop_width)
    crop_left = amount_to_be_cropped_w // 2

    return tf.slice(image, [crop_top, crop_left, 0], [crop_height, crop_width, -1]) 
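Hypothetical usage of central_crop, assuming an HWC image tensor:

image = tf.zeros([480, 640, 3], dtype=tf.uint8)
cropped = central_crop(image, crop_height=224, crop_width=224)
# cropped has shape [224, 224, 3]; the size of -1 keeps all channels.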
Example #14
Source File: model.py    From DOTA_models with Apache License 2.0
def compute_first_or_last(self, select, first=True):
    # Perform first or last operation on row select with probabilistic row selection.
    answer = tf.zeros_like(select)
    running_sum = tf.zeros([self.batch_size, 1], self.data_type)
    for i in range(self.max_elements):
      if (first):
        current = tf.slice(select, [0, i], [self.batch_size, 1])
      else:
        current = tf.slice(select, [0, self.max_elements - 1 - i],
                           [self.batch_size, 1])
      curr_prob = current * (1 - running_sum)
      curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
      running_sum += curr_prob
      temp_ans = []
      curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
      for i_ans in range(self.max_elements):
        if (not (first) and i_ans == self.max_elements - 1 - i):
          temp_ans.append(curr_prob)
        elif (first and i_ans == i):
          temp_ans.append(curr_prob)
        else:
          temp_ans.append(tf.zeros_like(curr_prob))
      temp_ans = tf.transpose(tf.concat(axis=0, values=temp_ans))
      answer += temp_ans
    return answer 
Example #15
Source File: test_attacks_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_fgm_gradient_max(self):
        input_dim = 2
        num_classes = 3
        batch_size = 4
        rng = np.random.RandomState([2017, 8, 23])
        x = tf.placeholder(tf.float32, [batch_size, input_dim])
        weights = tf.placeholder(tf.float32, [input_dim, num_classes])
        logits = tf.matmul(x, weights)
        probs = tf.nn.softmax(logits)
        adv_x = fgm(x, probs)
        random_example = rng.randint(batch_size)
        random_feature = rng.randint(input_dim)
        output = tf.slice(adv_x, [random_example, random_feature], [1, 1])
        dx, = tf.gradients(output, x)
        # The following line catches GitHub issue #243
        self.assertIsNotNone(dx)
        dx = self.sess.run(dx, feed_dict=random_feed_dict(rng, [x, weights]))
        ground_truth = np.zeros((batch_size, input_dim))
        ground_truth[random_example, random_feature] = 1.
        self.assertClose(dx, ground_truth) 
Example #16
Source File: common_attention.py    From fine-lm with MIT License
def _relative_position_to_absolute_position_masked(x):
  """Helper to dot_product_self_attention_relative_v2.

  Rearrange an attention logits or weights Tensor.

  The dimensions of the input represent:
  [batch, heads, query_position, memory_position - query_position + length - 1]

  The dimensions of the output represent:
  [batch, heads, query_position, memory_position]

  Only works with masked_attention.  Undefined behavior for regions of the
  input where memory_position > query_position.

  Args:
    x: a Tensor with shape [batch, heads, length, length]

  Returns:
    a Tensor with shape [batch, heads, length, length]
  """
  batch, heads, length, _ = common_layers.shape_list(x)
  x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
  x = tf.reshape(x, [batch, heads, 1 + length, length])
  x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1])
  return x 
Example #17
Source File: common_attention.py    From fine-lm with MIT License
def _absolute_position_to_relative_position_masked(x):
  """Helper to dot_product_self_attention_relative_v2.

  Rearrange an attention logits or weights Tensor.

  The dimensions of the input represent:
  [batch, heads, query_position, memory_position]

  The dimensions of the output represent:
  [batch, heads, query_position, memory_position - query_position + length - 1]

  Only works with masked_attention.  Undefined behavior for regions of the
  input where memory_position > query_position.

  Args:
    x: a Tensor with shape [batch, heads, length, length]

  Returns:
    a Tensor with shape [batch, heads, length, length]
  """
  batch, heads, length, _ = common_layers.shape_list(x)
  x = tf.pad(x, [[0, 0], [0, 0], [1, 0], [0, 0]])
  x = tf.reshape(x, [batch, heads, length, length + 1])
  x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length, length])
  return x 
Example #18
Source File: common_model.py    From mipsqa with Apache License 2.0
def glove_layer(features, scope=None):
  """GloVe embedding layer.

  The first two words of `features['emb_mat']` are <PAD> and <UNK>.
  The other words are actual words. So we learn the representations of the
  first two words but the representation of other words are fixed (GloVe).

  Args:
    features: `dict` of feature tensors.
    scope: `str` for scope name.
  Returns:
    A tuple of tensors, `(glove_emb_mat, context_emb, question_emb)`.
  """
  with tf.variable_scope(scope or 'glove_layer'):
    glove_emb_mat_const = tf.slice(features['emb_mat'], [2, 0], [-1, -1])
    glove_emb_mat_var = tf.get_variable('glove_emb_mat_var',
                                        [2,
                                         glove_emb_mat_const.get_shape()[1]])
    glove_emb_mat = tf.concat([glove_emb_mat_var, glove_emb_mat_const], 0)
    xv = tf.nn.embedding_lookup(glove_emb_mat,
                                features['glove_indexed_context_words'])
    qv = tf.nn.embedding_lookup(glove_emb_mat,
                                features['glove_indexed_question_words'])
    return glove_emb_mat, xv, qv 
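A compressed TF1-style sketch of the trainable-prefix trick above: rows 0-1 (<PAD>, <UNK>) come from a variable and are trained, while the remaining rows carry no trainable variables (the matrix values here are placeholders, not real GloVe vectors):

emb_mat = tf.random_normal([100, 50])              # stand-in for the GloVe matrix
frozen = tf.slice(emb_mat, [2, 0], [-1, -1])       # rows 2.. receive no gradients
learned = tf.get_variable('pad_unk_emb', [2, 50])  # rows 0-1 are trainable
full = tf.concat([learned, frozen], 0)             # original vocabulary order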
Example #19
Source File: model.py    From tensorflow-wavenet with MIT License
def predict_proba(self, waveform, global_condition=None, name='wavenet'):
        '''Computes the probability distribution of the next sample based on
        all samples in the input waveform.
        If you want to generate audio by feeding the output of the network back
        as an input, see predict_proba_incremental for a faster alternative.'''
        with tf.name_scope(name):
            if self.scalar_input:
                encoded = tf.cast(waveform, tf.float32)
                encoded = tf.reshape(encoded, [-1, 1])
            else:
                encoded = self._one_hot(waveform)

            gc_embedding = self._embed_gc(global_condition)
            raw_output = self._create_network(encoded, gc_embedding)
            out = tf.reshape(raw_output, [-1, self.quantization_channels])
            # Cast to float64 to avoid bug in TensorFlow
            proba = tf.cast(
                tf.nn.softmax(tf.cast(out, tf.float64)), tf.float32)
            last = tf.slice(
                proba,
                [tf.shape(proba)[0] - 1, 0],
                [1, self.quantization_channels])
            return tf.reshape(last, [-1]) 
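The final slice picks out the last row of proba. A minimal sketch (equivalent to proba[-1:] here):

proba = tf.constant([[0.1, 0.9],
                     [0.7, 0.3],
                     [0.4, 0.6]])
last = tf.slice(proba, [tf.shape(proba)[0] - 1, 0], [1, 2])
# last == [[0.4, 0.6]]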
Example #20
Source File: tf_io_pipline_tools.py    From lanenet-lane-detection with Apache License 2.0
def random_horizon_flip_batch_images(gt_image, gt_binary_image, gt_instance_image):
    """
    Random horizon flip image batch data for training
    :param gt_image:
    :param gt_binary_image:
    :param gt_instance_image:
    :return:
    """
    concat_images = tf.concat([gt_image, gt_binary_image, gt_instance_image], axis=-1)

    [image_height, image_width, _] = gt_image.get_shape().as_list()

    concat_flipped_images = tf.image.random_flip_left_right(
        image=concat_images,
        seed=1  # tf.random.set_random_seed() returns None; pass the integer seed directly
    )

    flipped_gt_image = tf.slice(
        concat_flipped_images,
        begin=[0, 0, 0],
        size=[image_height, image_width, 3]
    )
    flipped_gt_binary_image = tf.slice(
        concat_flipped_images,
        begin=[0, 0, 3],
        size=[image_height, image_width, 1]
    )
    flipped_gt_instance_image = tf.slice(
        concat_flipped_images,
        begin=[0, 0, 4],
        size=[image_height, image_width, 1]
    )

    return flipped_gt_image, flipped_gt_binary_image, flipped_gt_instance_image 
Example #21
Source File: batcher.py    From vehicle_counting_tensorflow with MIT License
def dequeue(self):
    """Dequeues a batch of tensor_dict from the BatchQueue.

    TODO: use allow_smaller_final_batch to allow running over the whole eval set

    Returns:
      A list of tensor_dicts of the requested batch_size.
    """
    batched_tensors = self._queue.dequeue()
    # Separate input tensors from tensors containing their runtime shapes.
    tensors = {}
    shapes = {}
    for key, batched_tensor in batched_tensors.items():
      unbatched_tensor_list = tf.unstack(batched_tensor)
      for i, unbatched_tensor in enumerate(unbatched_tensor_list):
        if rt_shape_str in key:  # rt_shape_str: module-level suffix marking runtime-shape keys
          shapes[(key[:-len(rt_shape_str)], i)] = unbatched_tensor
        else:
          tensors[(key, i)] = unbatched_tensor

    # Undo that padding using shapes and create a list of size `batch_size` that
    # contains tensor dictionaries.
    tensor_dict_list = []
    batch_size = self._batch_size
    for batch_id in range(batch_size):
      tensor_dict = {}
      for key in self._static_shapes:
        tensor_dict[key] = tf.slice(tensors[(key, batch_id)],
                                    tf.zeros_like(shapes[(key, batch_id)]),
                                    shapes[(key, batch_id)])
        tensor_dict[key].set_shape(self._static_shapes[key])
      tensor_dict_list.append(tensor_dict)

    return tensor_dict_list 
Example #22
Source File: tf_io_pipline_tools.py    From lanenet-lane-detection with Apache License 2.0
def random_crop_batch_images(gt_image, gt_binary_image, gt_instance_image, cropped_size):
    """
    Random crop image batch data for training
    :param gt_image:
    :param gt_binary_image:
    :param gt_instance_image:
    :param cropped_size:
    :return:
    """
    concat_images = tf.concat([gt_image, gt_binary_image, gt_instance_image], axis=-1)

    concat_cropped_images = tf.image.random_crop(
        concat_images,
        [cropped_size[1], cropped_size[0], tf.shape(concat_images)[-1]],
        seed=1234  # tf.random.set_random_seed() returns None; pass the integer seed directly
    )

    cropped_gt_image = tf.slice(
        concat_cropped_images,
        begin=[0, 0, 0],
        size=[cropped_size[1], cropped_size[0], 3]
    )
    cropped_gt_binary_image = tf.slice(
        concat_cropped_images,
        begin=[0, 0, 3],
        size=[cropped_size[1], cropped_size[0], 1]
    )
    cropped_gt_instance_image = tf.slice(
        concat_cropped_images,
        begin=[0, 0, 4],
        size=[cropped_size[1], cropped_size[0], 1]
    )

    return cropped_gt_image, cropped_gt_binary_image, cropped_gt_instance_image 
Example #23
Source File: symbolic_imagenet.py    From benchmarks with The Unlicense
def center_crop(image, size):
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]

    offset_height = (image_height - size) // 2
    offset_width = (image_width - size) // 2
    image = tf.slice(image, [offset_height, offset_width, 0], [size, size, -1])
    return image 
Example #24
Source File: common_attention.py    From fine-lm with MIT License
def _make_local_block(x, depth, batch, heads, num_blocks, block_length):
  """Helper function to create a local version of the keys or values for 1d."""
  prev_block = tf.slice(x, [0, 0, 0, 0, 0],
                        [-1, -1, num_blocks - 1, -1, -1])
  cur_block = tf.slice(x, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1])
  local_block = tf.concat([prev_block, cur_block], 3)
  return tf.reshape(local_block,
                    [batch, heads, num_blocks - 1, block_length * 2, depth]) 
Example #25
Source File: pcr_model.py    From pointnet-registration-framework with MIT License
def get_model(self, source_point_cloud, template_point_cloud, feature_size, is_training, bn_decay=None):
		point_cloud = tf.concat([source_point_cloud, template_point_cloud], 0)
		batch_size = point_cloud.get_shape()[0].value
		num_point = point_cloud.get_shape()[1].value
		end_points = {}

		input_image = tf.expand_dims(point_cloud, -1)

		net = tf_util.conv2d(input_image, 64, [1,3],
							 padding='VALID', stride=[1,1],
							 bn=False, is_training=is_training,
							 scope='conv1', bn_decay=bn_decay)
		net = tf_util.conv2d(net, 64, [1,1],
							 padding='VALID', stride=[1,1],
							 bn=False, is_training=is_training,
							 scope='conv2', bn_decay=bn_decay)

		net = tf_util.conv2d(net, 64, [1,1],
							 padding='VALID', stride=[1,1],
							 bn=False, is_training=is_training,
							 scope='conv3', bn_decay=bn_decay)
		net = tf_util.conv2d(net, 128, [1,1],
							 padding='VALID', stride=[1,1],
							 bn=False, is_training=is_training,
							 scope='conv4', bn_decay=bn_decay)
		net = tf_util.conv2d(net, feature_size, [1,1],
							 padding='VALID', stride=[1,1],
							 bn=False, is_training=is_training,
							 scope='conv5', bn_decay=bn_decay)

		# Symmetric function: max pooling
		net = tf_util.max_pool2d(net, [num_point,1],
								 padding='VALID', scope='maxpool')
		net = tf.reshape(net, [batch_size, -1])
		 
		# Extract the features from the network.
		source_global_feature = tf.slice(net, [0,0], [int(batch_size/2),feature_size])
		template_global_feature = tf.slice(net, [int(batch_size/2),0], [int(batch_size/2),feature_size])
		return source_global_feature, template_global_feature 
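Since the two slices split the batch dimension exactly in half, tf.split is an equivalent and arguably clearer alternative; a sketch with hypothetical shapes:

net = tf.random_normal([8, 1024])  # source and template halves stacked on axis 0
source_feat, template_feat = tf.split(net, num_or_size_splits=2, axis=0)
# Same result as tf.slice(net, [0, 0], [4, 1024]) and tf.slice(net, [4, 0], [4, 1024]).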
Example #26
Source File: ipcr_model.py    From pointnet-registration-framework with MIT License
def get_model(source_point_cloud, template_point_cloud, is_training, bn_decay=None):
	point_cloud = tf.concat([source_point_cloud, template_point_cloud],0)
	batch_size = point_cloud.get_shape()[0].value
	num_point = point_cloud.get_shape()[1].value
	end_points = {}

	input_image = tf.expand_dims(point_cloud, -1)

	net = tf_util.conv2d(input_image, 64, [1,3],
						 padding='VALID', stride=[1,1],
						 bn=False, is_training=is_training,
						 scope='conv1', bn_decay=bn_decay)
	net = tf_util.conv2d(net, 64, [1,1],
						 padding='VALID', stride=[1,1],
						 bn=False, is_training=is_training,
						 scope='conv2', bn_decay=bn_decay)
	net = tf_util.conv2d(net, 64, [1,1],
						 padding='VALID', stride=[1,1],
						 bn=False, is_training=is_training,
						 scope='conv3', bn_decay=bn_decay)
	net = tf_util.conv2d(net, 128, [1,1],
						 padding='VALID', stride=[1,1],
						 bn=False, is_training=is_training,
						 scope='conv4', bn_decay=bn_decay)
	net = tf_util.conv2d(net, 1024, [1,1],
						 padding='VALID', stride=[1,1],
						 bn=False, is_training=is_training,
						 scope='conv5', bn_decay=bn_decay)

	# Symmetric function: max pooling
	net = tf_util.max_pool2d(net, [num_point,1],
							 padding='VALID', scope='maxpool')
	net = tf.reshape(net, [batch_size, -1])
	source_global_feature = tf.slice(net, [0,0], [int(batch_size/2),1024])
	template_global_feature = tf.slice(net, [int(batch_size/2),0], [int(batch_size/2),1024])
	return source_global_feature, template_global_feature 
Example #27
Source File: batcher.py    From object_detector_app with MIT License
def dequeue(self):
    """Dequeues a batch of tensor_dict from the BatchQueue.

    TODO: use allow_smaller_final_batch to allow running over the whole eval set

    Returns:
      A list of tensor_dicts of the requested batch_size.
    """
    batched_tensors = self._queue.dequeue()
    # Separate input tensors from tensors containing their runtime shapes.
    tensors = {}
    shapes = {}
    for key, batched_tensor in batched_tensors.items():  # .iteritems() is Python 2 only
      unbatched_tensor_list = tf.unstack(batched_tensor)
      for i, unbatched_tensor in enumerate(unbatched_tensor_list):
        if isinstance(key, tuple) and key[1] == 'runtime_shapes':
          shapes[(key[0], i)] = unbatched_tensor
        else:
          tensors[(key, i)] = unbatched_tensor

    # Undo that padding using shapes and create a list of size `batch_size` that
    # contains tensor dictionaries.
    tensor_dict_list = []
    batch_size = self._batch_size
    for batch_id in range(batch_size):
      tensor_dict = {}
      for key in self._static_shapes:
        tensor_dict[key] = tf.slice(tensors[(key, batch_id)],
                                    tf.zeros_like(shapes[(key, batch_id)]),
                                    shapes[(key, batch_id)])
        tensor_dict[key].set_shape(self._static_shapes[key])
      tensor_dict_list.append(tensor_dict)

    return tensor_dict_list 
Example #28
Source File: ops.py    From object_detector_app with MIT License
def dense_to_sparse_boxes(dense_locations, dense_num_boxes, num_classes):
  """Converts bounding boxes from dense to sparse form.

  Args:
    dense_locations:  a [max_num_boxes, 4] tensor in which only the first k rows
      are valid bounding box location coordinates, where k is the sum of
      elements in dense_num_boxes.
    dense_num_boxes: a [max_num_classes] tensor indicating the counts of
       various bounding box classes e.g. [1, 0, 0, 2] means that the first
       bounding box is of class 0 and the second and third bounding boxes are
       of class 3. The sum of elements in this tensor is the number of valid
       bounding boxes.
    num_classes: number of classes

  Returns:
    box_locations: a [num_boxes, 4] tensor containing only valid bounding
       boxes (i.e. the first num_boxes rows of dense_locations)
    box_classes: a [num_boxes] tensor containing the classes of each bounding
       box (e.g. dense_num_boxes = [1, 0, 0, 2] => box_classes = [0, 3, 3])
  """

  num_valid_boxes = tf.reduce_sum(dense_num_boxes)
  box_locations = tf.slice(dense_locations,
                           tf.constant([0, 0]), tf.stack([num_valid_boxes, 4]))
  tiled_classes = [tf.tile([i], tf.expand_dims(dense_num_boxes[i], 0))
                   for i in range(num_classes)]
  box_classes = tf.concat(tiled_classes, 0)
  box_locations.set_shape([None, 4])
  return box_locations, box_classes 
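Hypothetical inputs mirroring the docstring example:

dense_locations = tf.constant([[0.0, 0.0, 1.0, 1.0],
                               [0.2, 0.2, 0.8, 0.8],
                               [0.1, 0.1, 0.5, 0.5],
                               [0.0, 0.0, 0.0, 0.0]])  # only the first 3 rows are valid
dense_num_boxes = tf.constant([1, 0, 0, 2])
box_locations, box_classes = dense_to_sparse_boxes(
    dense_locations, dense_num_boxes, num_classes=4)
# box_locations is the first 3 rows; box_classes == [0, 3, 3].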
Example #29
Source File: imagenet.py    From fine-lm with MIT License
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: `Tensor` image of shape [height, width, channels].
    offset_height: `Tensor` indicating the height offset.
    offset_width: `Tensor` indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3), ["Rank of image must be equal to 3."])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ["Crop size greater than the image size."])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape) 
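Hypothetical usage of _crop; the offsets may be tensors, which is exactly why tf.slice is used here instead of crop_to_bounding_box:

image = tf.zeros([300, 400, 3])
cropped = _crop(image,
                offset_height=tf.constant(10),
                offset_width=tf.constant(20),
                crop_height=256, crop_width=256)
# cropped has shape [256, 256, 3].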
Example #30
Source File: common_attention.py    From fine-lm with MIT License
def right_shift_blockwise(x, query_shape, name=None):
  """Right shifts once in every block.

  Args:
    x: a tensor of shape [batch, height, width, depth]
    query_shape: A 2d tuple of ints
    name: a string

  Returns:
    output: a tensor of the same shape as x
  """
  with tf.variable_scope(
      name, default_name="right_shift_blockwise", values=[x]):
    x_list_shape = x.get_shape().as_list()
    x_shape = common_layers.shape_list(x)
    # Add a dummy dimension for heads
    x = tf.expand_dims(x, axis=1)
    x = pad_to_multiple_2d(x, query_shape)
    padded_x_shape = common_layers.shape_list(x)
    # Setting up q blocks
    x_indices = gather_indices_2d(x, query_shape, query_shape)
    x_new = get_shifted_center_blocks(x, x_indices)

    # putting the representations back in the right place
    output = scatter_blocks_2d(x_new, x_indices, padded_x_shape)
    # Removing the dummy head dimension
    output = tf.squeeze(output, axis=1)
    # Remove the padding if introduced
    output = tf.slice(output, [0, 0, 0, 0], [-1, x_shape[1], x_shape[2], -1])
    output.set_shape(x_list_shape)
    return output