Python tensorflow.scatter_nd() Examples

The following are 30 code examples of tensorflow.scatter_nd(), collected from open-source projects; the source file and project are noted above each example. tf.scatter_nd(indices, updates, shape) creates a tensor of the given shape, initialized to zeros, and writes each element of updates at the position given by the corresponding row of indices; updates that share an index are summed.
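As a quick orientation before the examples, here is a minimal sketch of the op, assuming TensorFlow 2.x eager execution (illustrative only, not taken from any of the projects below):

import tensorflow as tf

indices = tf.constant([[4], [1]])    # each row addresses one element
updates = tf.constant([9.0, 10.0])   # one update per index row
result = tf.scatter_nd(indices, updates, shape=[8])
print(result.numpy())                # [ 0. 10.  0.  0.  9.  0.  0.  0.]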
Example #1
Source File: batch_lbs.py    From tf_smpl with MIT License
def batch_skew(vec, batch_size=None):
    """
    vec is N x 3, batch_size is int

    returns N x 3 x 3, the skew-symmetric matrix of each row vector.
    """
    with tf.variable_scope("batch_skew", values=[vec]):
        if batch_size is None:
            batch_size = vec.shape.as_list()[0]
        col_inds = tf.constant([1, 2, 3, 5, 6, 7])
        indices = tf.reshape(
            tf.reshape(tf.range(0, batch_size) * 9, [-1, 1]) + col_inds,
            [-1, 1])
        updates = tf.reshape(
            tf.stack(
                [
                    -vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1],
                    vec[:, 0]
                ],
                axis=1), [-1])
        out_shape = [batch_size * 9]
        res = tf.scatter_nd(indices, updates, out_shape)
        res = tf.reshape(res, [batch_size, 3, 3])

        return res 
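The index arithmetic is easier to see on a concrete input. For a row vec = [x, y, z], col_inds = [1, 2, 3, 5, 6, 7] are the six off-diagonal positions of a row-major flattened 3 x 3 matrix, so the scatter produces the skew-symmetric matrix [[0, -z, y], [z, 0, -x], [-y, x, 0]]. A minimal eager-mode check with a single vector (hypothetical values, not from tf_smpl):

import tensorflow as tf

x, y, z = 1.0, 2.0, 3.0
col_inds = tf.constant([1, 2, 3, 5, 6, 7])      # off-diagonal slots
indices = tf.reshape(col_inds, [-1, 1])         # batch of size 1
updates = tf.constant([-z, y, z, -x, -y, x])
res = tf.reshape(tf.scatter_nd(indices, updates, [9]), [3, 3])
# res == [[ 0., -3.,  2.],
#         [ 3.,  0., -1.],
#         [-2.,  1.,  0.]]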
Example #2
Source File: expert_utils.py    From training_results_v0.5 with Apache License 2.0
def restore(self, x):
    """Add padding back to the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_compressed,...]

    Returns:
      a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
      dimension is restored from the original reference tensor.
    """
    with tf.name_scope("pad_reduce/restore"):
      x = tf.scatter_nd(
          indices=self.nonpad_ids,
          updates=x,
          shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
      )
    return x 
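The scatter here is the inverse of the gather that removed the padding: each kept row goes back to the original position recorded in nonpad_ids, and padded positions come back as zeros. A minimal sketch of that step with hypothetical ids (not the class's actual state):

import tensorflow as tf

nonpad_ids = tf.constant([[0], [2]])     # original positions of the kept rows
x = tf.constant([[1., 1.], [2., 2.]])    # the compressed tensor
restored = tf.scatter_nd(indices=nonpad_ids, updates=x, shape=[4, 2])
# restored == [[1., 1.], [0., 0.], [2., 2.], [0., 0.]]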
Example #3
Source File: misc.py    From model-optimization with Apache License 2.0
def decode(self,
             encoded_tensors,
             decode_params,
             num_summands=None,
             shape=None):
    """See base class."""
    del decode_params, num_summands  # Unused.

    indices = encoded_tensors[self.ENCODED_INDICES_KEY]
    non_zero_x = encoded_tensors[self.ENCODED_VALUES_KEY]

    indices = tf.expand_dims(indices, 1)
    shape = tf.cast(shape, indices.dtype)
    decoded_x = tf.scatter_nd(indices=indices, updates=non_zero_x, shape=shape)

    return decoded_x 
Example #4
Source File: metric_utils.py    From ULTRA with Apache License 2.0
def _to_nd_indices(indices):
    """Returns indices used for tf.gather_nd or tf.scatter_nd.

    Args:
      indices: A `Tensor` of shape [batch_size, size] with integer values. The
        values are the indices of another `Tensor`. For example, `indices` is the
        output of tf.argsort or tf.math.top_k.

    Returns:
      A `Tensor` with shape [batch_size, size, 2] that can be used by tf.gather_nd
      or tf.scatter_nd.

    """
    indices.get_shape().assert_has_rank(2)
    batch_ids = tf.ones_like(indices) * tf.expand_dims(
        tf.range(tf.shape(input=indices)[0]), 1)
    return tf.stack([batch_ids, indices], axis=-1) 
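For instance (an illustrative eager-mode run, assuming the function above is in scope), each batch row gets its batch id prepended to every index:

import tensorflow as tf

indices = tf.constant([[2, 0, 1],
                       [1, 2, 0]])    # e.g. output of tf.argsort
nd_indices = _to_nd_indices(indices)
# nd_indices[0] == [[0, 2], [0, 0], [0, 1]]
# nd_indices[1] == [[1, 1], [1, 2], [1, 0]]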
Example #5
Source File: transformer_utils.py    From Counterfactual-StoryRW with MIT License
def restore(self, x):
        """Add padding back to the given tensor.

        Args:
            x: A Tensor of shape [dim_compressed,...]

        Returns:
            A tensor of shape [dim_origin,...] with
            dim_origin >= dim_compressed. The
            dimension is restored from the original reference tensor.
        """
        with tf.name_scope("pad_reduce/restore"):
            x = tf.scatter_nd(
                indices=self.nonpad_ids,
                updates=x,
                shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
            )
        return x 
Example #6
Source File: scatter_nd_ops_test.py    From deep_image_model with Apache License 2.0
def _disabledTestScatterOutOfRangeGpu(self):
    if not tf.test.IsBuiltWithCuda():
      return
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    # tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (tf.scatter_nd_add, tf.scatter_nd_sub, tf.scatter_nd_update):
      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
      updates = np.array([-3, -4, -5]).astype(np.float32)
      # With GPU, the code ignores indices that are out of range.
      # We don't test the implementation; just check that there are no failures.
      with self.test_session(force_gpu=True):
        ref = tf.Variable(params)
        ref.initializer.run()

        # Indices all in range, no problem.
        indices = np.array([[2], [0], [5]])
        op(ref, indices, updates).eval()

        # Indices out of range should not fail.
        indices = np.array([[-1], [0], [5]])
        op(ref, indices, updates).eval()
        indices = np.array([[2], [0], [6]])
        op(ref, indices, updates).eval() 
Example #7
Source File: layers.py    From face_landmark_dnn with MIT License
def LandmarkImageLayer(Landmarks):
    # Offsets, HalfSize and IMGSIZE are module-level constants defined
    # elsewhere in layers.py: the patch offsets drawn around each landmark,
    # the patch half-size, and the output image size.
    def draw_landmarks(L):
        def draw_landmarks_helper(Point):
            intLandmark = tf.to_int32(Point)
            locations = Offsets + intLandmark
            dxdy = Point - tf.to_float(intLandmark)
            offsetsSubPix = tf.to_float(Offsets) - dxdy
            vals = 1 / (1 + tf.norm(offsetsSubPix, axis=2))
            img = tf.scatter_nd(locations, vals, shape=(IMGSIZE, IMGSIZE))
            return img
        # flatten to (x, y) pairs and reverse to (row, col) order
        Landmark = tf.reverse(tf.reshape(L, [-1, 2]), [-1])
        Landmark = tf.clip_by_value(Landmark, HalfSize, IMGSIZE - 1 - HalfSize)
        Ret = tf.map_fn(draw_landmarks_helper, Landmark)
        Ret = tf.reshape(tf.reduce_max(Ret, axis=0), [IMGSIZE, IMGSIZE, 1])
        return Ret
    return tf.map_fn(draw_landmarks, Landmarks) 
Example #8
Source File: network_utils.py    From TensorFlow_DCIGN with MIT License
def unpool(net, mask, stride=2):
  assert mask is not None
  with tf.name_scope('UnPool2D'):
    ksize = [1, stride, stride, 1]
    input_shape = net.get_shape().as_list()
    # compute the unpooled output shape
    output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])
    # compute indices for batch, height, width and feature maps
    one_like_mask = tf.ones_like(mask)
    batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int64), shape=[input_shape[0], 1, 1, 1])
    b = one_like_mask * batch_range
    y = mask // (output_shape[2] * output_shape[3])
    x = mask % (output_shape[2] * output_shape[3]) // output_shape[3]
    feature_range = tf.range(output_shape[3], dtype=tf.int64)
    f = one_like_mask * feature_range
    # transpose indices & reshape update values to one dimension
    updates_size = tf.size(net)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
    values = tf.reshape(net, [updates_size])
    ret = tf.scatter_nd(indices, values, output_shape)
    return ret 
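The y/x decoding above inverts the flattening used by tf.nn.max_pool_with_argmax, whose mask values are per-batch-element flattened positions (y * W + x) * C + f (with include_batch_in_index=False, the default). A worked example of the arithmetic, with hypothetical sizes:

# With W = 4 and C = 2, a mask value of 13 decodes as:
#   y = 13 // (4 * 2)       = 1
#   x = (13 % (4 * 2)) // 2 = 2
#   f = 13 % 2              = 1   (the channel, recomputed here via feature_range)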
Example #9
Source File: token_generator_gumbel.py    From BERT with Apache License 2.0
def reorder(updates, sd_indices, argsort_axis=1):
	"""
	updates: [N, M]
	"""
	def prepare_fd(fd_indices, sd_dims):
		fd_indices = tf.expand_dims(fd_indices, 1)
		fd_indices = tf.tile(fd_indices, [1, sd_dims])
		return fd_indices

	# define the updates
	sd_dims = tf.shape(updates)[1]
	fd_indices_range = tf.range(0, limit=tf.shape(updates)[0])

	# define the indices
	indices1 = tf.stack((prepare_fd(fd_indices_range, sd_dims), sd_indices), axis=2)
	shape = tf.shape(updates)
	scatter1 = tf.scatter_nd(indices1, updates, shape)
	return scatter1 
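In effect, reorder scatters each element of updates into the column named by sd_indices, undoing a row-wise permutation such as one produced by tf.argsort. An illustrative eager-mode run (assuming reorder is in scope):

import tensorflow as tf

updates = tf.constant([[10, 20, 30],
                       [40, 50, 60]])
sd_indices = tf.constant([[2, 0, 1],
                          [1, 2, 0]])   # target column of each element
out = reorder(updates, sd_indices)
# out == [[20, 30, 10],
#         [60, 40, 50]]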
Example #10
Source File: expert_utils.py    From BERT with Apache License 2.0
def restore(self, x):
    """Add padding back to the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_compressed,...]

    Returns:
      a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
      dimension is restored from the original reference tensor.
    """
    with tf.name_scope("pad_reduce/restore"):
      x = tf.scatter_nd(
          indices=self.nonpad_ids,
          updates=x,
          shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
      )
    return x 
Example #11
Source File: segnet_vgg.py    From MachineLearning with Apache License 2.0
def max_unpool_with_argmax(bottom, mask, output_shape=None):
    with tf.name_scope('max_unpool_with_argmax'):
        ksize = [1, 2, 2, 1]
        input_shape = bottom.get_shape().as_list()
        # compute the unpooled output shape
        if output_shape is None:
            output_shape = (input_shape[0],
                            input_shape[1] * ksize[1],
                            input_shape[2] * ksize[2],
                            input_shape[3])
        # compute indices for batch, height, width and feature maps
        one_like_mask = tf.ones_like(mask)
        batch_range = tf.reshape(tf.range(output_shape[0],
                                          dtype=tf.int64),
                                 shape=[input_shape[0], 1, 1, 1])
        b = one_like_mask * batch_range
        y = mask // (output_shape[2] * output_shape[3])
        x = mask % (output_shape[2] * output_shape[3]) // output_shape[3]
        feature_range = tf.range(output_shape[3], dtype=tf.int64)
        f = one_like_mask * feature_range
        # transpose indices & reshape update values to one dimension
        updates_size = tf.size(bottom)
        indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
        values = tf.reshape(bottom, [updates_size])
        return tf.scatter_nd(indices, values, output_shape) 
Example #12
Source File: ops.py    From strawberryfields with Apache License 2.0
def fock_state(n, cutoff, pure=True, batched=False):
    """creates a single mode input Fock state"""
    if not isinstance(n, (np.ndarray, int)):
        raise ValueError("'n' is expected to be either an int or a numpy array")
    if batched:
        batch_size = n.shape[0]
        idxs = [(b, f) for (b, f) in zip(range(batch_size), n)]
        values = [1.0] * batch_size
        shape = [batch_size, cutoff]
    else:
        idxs = [(n,)]
        values = [1.0]
        shape = [cutoff]
    fock_sparse = tf.scatter_nd(idxs, values, shape)
    fock = tf.cast(fock_sparse, def_type)
    if not pure:
        fock = mixed(fock, batched)
    return fock 
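In the unbatched branch the scatter reduces to a one-hot vector over the Fock basis. For example:

import tensorflow as tf

# fock_state(n=2, cutoff=5, pure=True) effectively computes:
fock = tf.scatter_nd(indices=[(2,)], updates=[1.0], shape=[5])
# fock == [0., 0., 1., 0., 0.]   (amplitude 1 at photon number n = 2)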
Example #13
Source File: scatter_nd_ops_test.py    From deep_image_model with Apache License 2.0
def testScatterOutOfRangeCpu(self):
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    #  tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (tf.scatter_nd_add, tf.scatter_nd_sub, tf.scatter_nd_update):
      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
      updates = np.array([-3, -4, -5]).astype(np.float32)
      with self.test_session(use_gpu=False):
        ref = tf.Variable(params)
        ref.initializer.run()

        # Indices all in range, no problem.
        indices = np.array([[2], [0], [5]])
        op(ref, indices, updates).eval()

        # Test some out of range errors.
        indices = np.array([[-1], [0], [5]])
        with self.assertRaisesOpError(
            r"Invalid indices: \[0,0\] = \[-1\] is not in \[0, 6\)"):
          op(ref, indices, updates).eval()

        indices = np.array([[2], [0], [6]])
        with self.assertRaisesOpError(
            r"Invalid indices: \[2,0\] = \[6\] is not in \[0, 6\)"):
          op(ref, indices, updates).eval() 
Example #14
Source File: utilities.py    From bgsCNN with GNU General Public License v3.0
def unpool(pool, ind, shape, ksize=[1, 2, 2, 1], scope=None):
    with tf.name_scope(scope):
        input_shape =  tf.shape(pool)
        output_shape = [input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3]]
        flat_input_size = tf.cumprod(input_shape)[-1]
        flat_output_shape = tf.stack([output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]])
        pool_ = tf.reshape(pool, tf.stack([flat_input_size]))
        batch_range = tf.reshape(tf.range(tf.cast(output_shape[0], tf.int64), dtype=ind.dtype),
                                shape=tf.stack([input_shape[0], 1, 1, 1]))
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, tf.stack([flat_input_size, 1]))
        ind_ = tf.reshape(ind, tf.stack([flat_input_size, 1]))
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=tf.cast(flat_output_shape, tf.int64))
        ret = tf.reshape(ret, tf.stack(output_shape))
        ret = tf.reshape(ret, shape=shape)
        return ret 
Example #15
Source File: labelprop.py    From gnn-benchmark with MIT License
def __init__(self, features, graph_adj, targets, nodes_to_consider, labelled_nodes, prop_type, return_prob):

        if prop_type not in ['vanilla', 'smoothed']:
            raise ValueError('Unsupported propagation type.')
        self.prop_type = prop_type

        # if running on Planetoid data these typecasts are necessary
        if isinstance(labelled_nodes, range):
            labelled_nodes = np.array(list(labelled_nodes), dtype=np.int64)
        if targets.dtype != np.float32:
            targets = targets.astype(np.float32)

        super().__init__(features, graph_adj, tf.gather(targets, nodes_to_consider))
        self.labelled_nodes = tf.constant(labelled_nodes, dtype=tf.int64)
        self.initial_predicted_labels = tf.scatter_nd(tf.expand_dims(self.labelled_nodes, -1),
                                                      targets[labelled_nodes], shape=targets.shape)
        self.predicted_labels = tf.Variable(self.initial_predicted_labels, dtype=tf.float32, name="predicted_labels")

        self.nodes_to_consider = nodes_to_consider
        self.num_nodes = int(self.graph_adj.get_shape()[0])
        self.num_classes = int(self.targets.get_shape()[1])

        self.return_prob = return_prob

        self._build_model_graphs() 
Example #16
Source File: util.py    From gnn-benchmark with MIT License
def scatter_add_tensor(tensor, indices, out_shape, name=None):
    """
    Code taken from https://github.com/tensorflow/tensorflow/issues/2358 and adapted.

    Adds up elements in tensor that have the same value in indices.

    Must have shape(tensor)[0] == shape(indices)[0].
    :param tensor: A Tensor. Must be one of the following types: float32, float64, int64, int32, uint8, uint16,
        int16, int8, complex64, complex128, qint8, quint8, qint32, half.
    :param indices: 1-D tensor of indices.
    :param out_shape: The shape of the output tensor. Must have out_shape[1] == shape(tensor)[1].
    :param name: A name for the operation (optional).
    :return: Tensor with same datatype as tensor and shape out_shape.
    """
    with tf.name_scope(name, 'scatter_add_tensor') as scope:
        indices = tf.expand_dims(indices, -1)
        # scatter_nd adds up values for duplicate indices, which is exactly what we want
        return tf.scatter_nd(indices, tensor, out_shape, name=scope) 
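The accumulation this function relies on is built into tf.scatter_nd itself: updates that land on the same index are summed rather than overwritten. A minimal eager-mode demonstration:

import tensorflow as tf

indices = tf.constant([[0], [1], [0]])               # rows 0 and 2 collide
tensor = tf.constant([[1., 1.], [2., 2.], [3., 3.]])
out = tf.scatter_nd(indices, tensor, shape=[2, 2])
# out == [[4., 4.],    # colliding rows are summed
#         [2., 2.]]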
Example #17
Source File: sampler.py    From addons with Apache License 2.0
def next_inputs(self, time, outputs, state, sample_ids):
        (finished, base_next_inputs, state) = super().next_inputs(
            time=time, outputs=outputs, state=state, sample_ids=sample_ids
        )

        def maybe_sample():
            """Perform scheduled sampling."""
            where_sampling = tf.cast(tf.where(sample_ids > -1), tf.int32)
            where_not_sampling = tf.cast(tf.where(sample_ids <= -1), tf.int32)
            sample_ids_sampling = tf.gather_nd(sample_ids, where_sampling)
            inputs_not_sampling = tf.gather_nd(base_next_inputs, where_not_sampling)
            sampled_next_inputs = self.embedding_fn(sample_ids_sampling)
            base_shape = tf.shape(base_next_inputs)
            return tf.scatter_nd(
                indices=where_sampling, updates=sampled_next_inputs, shape=base_shape
            ) + tf.scatter_nd(
                indices=where_not_sampling,
                updates=inputs_not_sampling,
                shape=base_shape,
            )

        all_finished = tf.reduce_all(finished)
        next_inputs = tf.cond(all_finished, lambda: base_next_inputs, maybe_sample)
        return (finished, next_inputs, state) 
Example #18
Source File: batch_lbs.py    From phd with BSD 2-Clause "Simplified" License
def batch_skew(vec, batch_size=None):
    """
    vec is N x 3, batch_size is int

    returns N x 3 x 3, the skew-symmetric matrix of each row vector.
    """
    with tf.name_scope("batch_skew", values=[vec]):
        if batch_size is None:
            batch_size = vec.shape.as_list()[0]
        col_inds = tf.constant([1, 2, 3, 5, 6, 7])
        indices = tf.reshape(
            tf.reshape(tf.range(0, batch_size) * 9, [-1, 1]) + col_inds,
            [-1, 1])
        updates = tf.reshape(
            tf.stack(
                [
                    -vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1],
                    vec[:, 0]
                ],
                axis=1), [-1])
        out_shape = [batch_size * 9]
        res = tf.scatter_nd(indices, updates, out_shape)
        res = tf.reshape(res, [batch_size, 3, 3])

        return res 
Example #19
Source File: expert_utils.py    From fine-lm with MIT License
def restore(self, x):
    """Add padding back to the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_compressed,...]

    Returns:
      a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
      dimension is restored from the original reference tensor.
    """
    with tf.name_scope("pad_reduce/restore"):
      x = tf.scatter_nd(
          indices=self.nonpad_ids,
          updates=x,
          shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
      )
    return x 
Example #20
Source File: expert_utils.py    From training_results_v0.5 with Apache License 2.0
def restore(self, x):
    """Add padding back to the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_compressed,...]

    Returns:
      a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
      dimension is restored from the original reference tensor.
    """
    with tf.name_scope("pad_reduce/restore"):
      x = tf.scatter_nd(
          indices=self.nonpad_ids,
          updates=x,
          shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
      )
    return x 
Example #21
Source File: ops.py    From tensorflow-object-contour-detection with MIT License
def unpool_with_argmax(pool, ind, name=None, ksize=[1, 2, 2, 1]):
    with tf.variable_scope(name):
        input_shape = pool.get_shape().as_list()
        output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])
        flat_input_size = np.prod(input_shape)
        flat_output_shape = [output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]]
        pool_ = tf.reshape(pool, [flat_input_size])
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=ind.dtype), shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret 
Example #22
Source File: ops.py    From segnet with MIT License
def maxunpool2d(incoming, mask, stride=2, name='unpool'):
    x = incoming

    input_shape = incoming.get_shape().as_list()
    strides = [1, stride, stride, 1]
    output_shape = (input_shape[0],
                    input_shape[1] * strides[1],
                    input_shape[2] * strides[2],
                    input_shape[3])

    flat_output_shape = [output_shape[0], np.prod(output_shape[1:])]
    with tf.name_scope(name):
        flat_input_size = tf.size(x)
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=mask.dtype),
                                 shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(mask) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])
        mask_ = tf.reshape(mask, [flat_input_size, 1])
        mask_ = tf.concat([b, mask_], 1)

        x_ = tf.reshape(x, [flat_input_size])
        ret = tf.scatter_nd(mask_, x_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret


# https://github.com/tflearn/tflearn/blob/master/tflearn/layers/normalization.py 
Example #23
Source File: utils.py    From Gun-Detector with Apache License 2.0
def update_slices(slices, indices, dense_tensor, head_dims):
  """Reconstitutes a tensor from slices and corresponding indices.

  Like _stack_tensor, but instead of setting missing slices to 0, sets them to
  what they were in the original tensor. The return value is reshaped to be
  the same as dense_tensor.

  Args:
    slices: a tensor. Shape [K, D_1, ...]
    indices: a 1-D integer tensor. Shape: [K]
    dense_tensor: the original tensor the slices were taken
      from. Shape: [D_0, D_1, ...]
    head_dims: True dimensions of the dense_tensor's first dimension.

  Returns:
    Reconstituted tensor. Shape: [D_0, D_1, ...]
  """
  # NOTE(siege): This cast shouldn't be necessary.
  indices = tf.cast(indices, tf.int32)

  tail_dims = tf.shape(dense_tensor)[1:]
  dense_shape = tf.concat([head_dims, tail_dims], 0)

  update_mask_vals = tf.fill(tf.shape(indices), 1)
  reshaped_indices = tf.expand_dims(indices, -1)
  update_mask = tf.equal(
      tf.scatter_nd(reshaped_indices, update_mask_vals, head_dims[:1]), 1)

  reshaped_dense_slices = tf.reshape(
      stack_tensor(slices, indices, dense_tensor, head_dims), dense_shape)
  reshaped_dense_tensor = tf.reshape(dense_tensor, dense_shape)

  return tf.reshape(
      tf.where(update_mask, reshaped_dense_slices, reshaped_dense_tensor),
      tf.shape(dense_tensor)) 
Example #24
Source File: tensorflow_backend.py    From kaggle-rsna18 with MIT License
def scatter_nd(*args, **kwargs):
    """ See https://www.tensorflow.org/versions/master/api_docs/python/tf/scatter_nd .
    """
    return tensorflow.scatter_nd(*args, **kwargs) 
Example #25
Source File: utils.py    From yolo_v2 with Apache License 2.0
def stack_tensor(slices, indices, dense_tensor, head_dims):
  """Reconsititutes a tensor from slices and corresponding indices.

  This is an inverse operation to slice_tensor. Missing slices are set to 0.

  Args:
    slices: a tensor. Shape [K, D_1, ...]
    indices: a 1-D integer tensor. Shape: [K]
    dense_tensor: the original tensor the slices were taken
      from. Shape: [D_0, D_1, ...]
    head_dims: True dimensions of the dense_tensor's first dimension.

  Returns:
    Reconstituted tensor. Shape: [D_0, D_1, ...]
  """
  # NOTE(siege): This cast shouldn't be necessary.
  indices = tf.cast(indices, tf.int32)

  tail_dims = tf.shape(dense_tensor)[1:]
  dense_shape = tf.concat([head_dims, tail_dims], 0)

  slices = tf.reshape(slices, tf.concat([[-1], dense_shape[1:]], 0))
  indices = tf.expand_dims(indices, -1)

  return tf.reshape(tf.scatter_nd(indices, slices, dense_shape),
                    tf.shape(dense_tensor)) 
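An illustrative eager-mode run (assuming stack_tensor is in scope): slices land at the rows named by indices, and every other row comes back as zero:

import tensorflow as tf

dense = tf.zeros([4, 2])                      # original tensor, D_0 = 4
slices = tf.constant([[1., 1.], [2., 2.]])    # K = 2 slices
indices = tf.constant([3, 0])
out = stack_tensor(slices, indices, dense, head_dims=tf.constant([4]))
# out == [[2., 2.], [0., 0.], [0., 0.], [1., 1.]]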
Example #26
Source File: utils.py    From Gun-Detector with Apache License 2.0
def stack_tensor(slices, indices, dense_tensor, head_dims):
  """Reconsititutes a tensor from slices and corresponding indices.

  This is an inverse operation to slice_tensor. Missing slices are set to 0.

  Args:
    slices: a tensor. Shape [K, D_1, ...]
    indices: a 1-D integer tensor. Shape: [K]
    dense_tensor: the original tensor the slices were taken
      from. Shape: [D_0, D_1, ...]
    head_dims: True dimensions of the dense_tensor's first dimension.

  Returns:
    Reconstituted tensor. Shape: [D_0, D_1, ...]
  """
  # NOTE(siege): This cast shouldn't be necessary.
  indices = tf.cast(indices, tf.int32)

  tail_dims = tf.shape(dense_tensor)[1:]
  dense_shape = tf.concat([head_dims, tail_dims], 0)

  slices = tf.reshape(slices, tf.concat([[-1], dense_shape[1:]], 0))
  indices = tf.expand_dims(indices, -1)

  return tf.reshape(tf.scatter_nd(indices, slices, dense_shape),
                    tf.shape(dense_tensor)) 
Example #27
Source File: metric_utils.py    From ULTRA with Apache License 2.0
def organize_valid_indices(is_valid, shuffle=True, seed=None):
    """Organizes indices in such a way that valid items appear first.

    Args:
      is_valid: A boolean `Tensor` for entry validity with shape [batch_size,
        list_size].
      shuffle: A boolean indicating whether valid items should be shuffled.
      seed: An int for the op-level random seed. It works together with the
        graph-level seed to determine random number generation. See
        `tf.set_random_seed`.

    Returns:
      A tensor of indices with shape [batch_size, list_size, 2]. The returned
      tensor can be used with `tf.gather_nd` and `tf.scatter_nd` to compose a new
      [batch_size, list_size] tensor. The values in the last dimension are the
      indices for an element in the input tensor.
    """
    with tf.compat.v1.name_scope(name='organize_valid_indices'):
        is_valid = tf.convert_to_tensor(value=is_valid)
        is_valid.get_shape().assert_has_rank(2)
        output_shape = tf.shape(input=is_valid)

        if shuffle:
            values = tf.random.uniform(output_shape, seed=seed)
        else:
            values = (
                tf.ones_like(is_valid, tf.float32) * tf.reverse(
                    tf.cast(tf.range(output_shape[1]), dtype=tf.float32), [-1]))

        rand = tf.compat.v1.where(
            is_valid, values, tf.ones(output_shape) * -1e-6)
        # shape(indices) = [batch_size, list_size]
        indices = tf.argsort(rand, direction='DESCENDING', stable=True)
        return _to_nd_indices(indices) 
Example #28
Source File: labelprop.py    From gnn-benchmark with MIT License
def _get_labelled_nodes_mask(self):
        inv_mask = tf.scatter_nd(
            tf.expand_dims(self.labelled_nodes, -1),
            tf.ones([self.labelled_nodes.get_shape()[0], self.num_classes], dtype=tf.float32),
            shape=[self.num_nodes, self.num_classes]
        )
        return 1 - inv_mask 
Example #29
Source File: dqn_dist.py    From sonic_contest with MIT License
def add_rewards(self, probs, rewards, discounts):
        """
        Compute new distributions after adding rewards to
        old distributions.

        Args:
          probs: a batch of probability vectors.
          rewards: a batch of rewards.
          discounts: the discount factors to apply to the
            distribution rewards.

        Returns:
          A new batch of probability vectors.
        """
        atom_rews = tf.tile(tf.constant([self.atom_values()], dtype=probs.dtype),
                            tf.stack([tf.shape(rewards)[0], 1]))

        fuzzy_idxs = tf.expand_dims(rewards, axis=1) + tf.expand_dims(discounts, axis=1) * atom_rews
        fuzzy_idxs = (fuzzy_idxs - self.min_val) / self._delta

        # If the position were exactly 0, rounding up
        # and subtracting 1 would cause problems.
        fuzzy_idxs = tf.clip_by_value(fuzzy_idxs, 1e-18, float(self.num_atoms - 1))

        indices_1 = tf.cast(tf.ceil(fuzzy_idxs) - 1, tf.int32)
        fracs_1 = tf.abs(tf.ceil(fuzzy_idxs) - fuzzy_idxs)
        indices_2 = indices_1 + 1
        fracs_2 = 1 - fracs_1

        res = tf.zeros_like(probs)
        for indices, fracs in [(indices_1, fracs_1), (indices_2, fracs_2)]:
            index_matrix = tf.expand_dims(tf.range(tf.shape(indices)[0], dtype=tf.int32), axis=1)
            index_matrix = tf.tile(index_matrix, (1, self.num_atoms))
            scatter_indices = tf.stack([index_matrix, indices], axis=-1)
            res = res + tf.scatter_nd(scatter_indices, probs * fracs, tf.shape(res))

        return res 
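The fractional-index arithmetic is the usual distributional-RL (C51) projection: each target value is split between its two neighbouring atoms in proportion to its distance from them. A worked example with hypothetical parameters:

# Assume min_val = 0, _delta = 1, num_atoms = 5, so atoms sit at 0..4.
# A target value of 2.3 gives fuzzy_idxs = 2.3, hence
#   indices_1 = ceil(2.3) - 1 = 2,  fracs_1 = |3 - 2.3| = 0.7
#   indices_2 = 3,                  fracs_2 = 1 - 0.7   = 0.3
# so probability mass p at that value contributes 0.7*p to atom 2
# and 0.3*p to atom 3.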
Example #30
Source File: more_local_weight_update.py    From Gun-Detector with Apache License 2.0
def get_shuffle_ind(self, size):
    if self.shuffle_ind is None:
      # put the shuffle in tf memory to make the eval jobs
      # re-entrant.
      shuffle_ind_val = np.random.permutation(size)
      shuffle_ind = tf.get_variable(
          name='shuffle_ind', dtype=tf.int64, initializer=shuffle_ind_val)
      unshuffle_ind = tf.scatter_nd(
          tf.reshape(shuffle_ind, [-1, 1]), tf.range(size), [size])

    return shuffle_ind, unshuffle_ind
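Here tf.scatter_nd builds the inverse permutation: scattering tf.range(size) to the positions given by shuffle_ind yields unshuffle_ind with unshuffle_ind[shuffle_ind[i]] = i. A minimal eager-mode sketch of that step:

import tensorflow as tf

shuffle_ind = tf.constant([2, 0, 1])
unshuffle_ind = tf.scatter_nd(tf.reshape(shuffle_ind, [-1, 1]),
                              tf.range(3), [3])
# unshuffle_ind == [1, 2, 0]; gathering with shuffle_ind and then with
# unshuffle_ind restores the original order.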