Python tensorflow.scatter_nd_update() Examples

The following are 12 code examples of tensorflow.scatter_nd_update(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
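For orientation, here is a minimal usage sketch, assuming the TensorFlow 1.x graph API: tf.scatter_nd_update takes a mutable tf.Variable, a set of indices, and updates, and overwrites the variable at those positions.

import tensorflow as tf

# tf.scatter_nd_update applies sparse in-place updates to a variable
# at the positions given by indices (TF 1.x API assumed).
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
update_op = tf.scatter_nd_update(ref, indices, updates)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(update_op))  # [ 1 11  3 10  9  6  7 12]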
Example #1
Source File: scatter_nd_ops_test.py    From deep_image_model with Apache License 2.0
def testScatterRepeatIndices(self):
    """This tests scatter_add using indices that repeat."""
    self._ScatterRepeatIndicesTest(_NumpyAdd, tf.scatter_nd_add)
    self._ScatterRepeatIndicesTest(_NumpySub, tf.scatter_nd_sub)
    # TODO(simister): Re-enable once binary size increase due to
    # extra templating is back under control.
    # self._ScatterRepeatIndicesTest(_NumpyMul, tf.scatter_nd_mul)
    # self._ScatterRepeatIndicesTest(_NumpyDiv, tf.scatter_nd_div)

  # TODO(simister): Re-enable once binary size increase due to
  # extra templating is back under control and this op is re-enabled
  # def testBooleanScatterUpdate(self):
  #   with self.test_session(use_gpu=False) as session:
  #     var = tf.Variable([True, False])
  #     update0 = tf.scatter_nd_update(var, [[1]], [True])
  #     update1 = tf.scatter_nd_update(
  #         var, tf.constant(
  #             [[0]], dtype=tf.int64), [False])
  #     var.initializer.run()
  #     session.run([update0, update1])
  #     self.assertAllEqual([False, True], var.eval()) 
Example #2
Source File: scatter_nd_ops_test.py    From deep_image_model with Apache License 2.0
def testScatterOutOfRangeCpu(self):
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    #  tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (tf.scatter_nd_add, tf.scatter_nd_sub, tf.scatter_nd_update):
      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
      updates = np.array([-3, -4, -5]).astype(np.float32)
      with self.test_session(use_gpu=False):
        ref = tf.Variable(params)
        ref.initializer.run()

        # Indices all in range, no problem.
        indices = np.array([[2], [0], [5]])
        op(ref, indices, updates).eval()

        # Test some out of range errors.
        indices = np.array([[-1], [0], [5]])
        with self.assertRaisesOpError(
            r"Invalid indices: \[0,0\] = \[-1\] is not in \[0, 6\)"):
          op(ref, indices, updates).eval()

        indices = np.array([[2], [0], [6]])
        with self.assertRaisesOpError(
            r"Invalid indices: \[2,0\] = \[6\] is not in \[0, 6\)"):
          op(ref, indices, updates).eval() 
Example #3
Source File: scatter_nd_ops_test.py    From deep_image_model with Apache License 2.0
def _disabledTestScatterOutOfRangeGpu(self):
    if not tf.test.IsBuiltWithCuda():
      return
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    # tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (tf.scatter_nd_add, tf.scatter_nd_sub, tf.scatter_nd_update):
      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
      updates = np.array([-3, -4, -5]).astype(np.float32)
      # With GPU, the code ignores indices that are out of range.
      # We don't test the implementation; we just test that there are no failures.
      with self.test_session(force_gpu=True):
        ref = tf.Variable(params)
        ref.initializer.run()

        # Indices all in range, no problem.
        indices = np.array([2, 0, 5])
        op(ref, indices, updates).eval()

        # Indices out of range should not fail.
        indices = np.array([-1, 0, 5])
        op(ref, indices, updates).eval()
        indices = np.array([2, 0, 6])
        op(ref, indices, updates).eval() 
Example #4
Source File: rnn_wrapper.py    From NQG_ASs2s with MIT License
def attention_vocab(self, attention_weight, sentence_index):
        ''' Scatter attention weights into vocabulary positions via tf.scatter_nd_update.

        Args:
            attention_weight : [batch, length]
            sentence_index : [batch, length]
        '''
        batch_size = attention_weight.get_shape()[0]
        sentence_length = attention_weight.get_shape()[-1]

        batch_index = tf.range(batch_size)
        batch_index = tf.expand_dims(batch_index, [1])
        batch_index = tf.tile(batch_index, [1, sentence_length])
        batch_index = tf.reshape(batch_index, [-1, 1]) # looks like [0,0,0,0,0,1,1,1,1,1,2,2,2,2,2,....]

        zeros = tf.zeros([batch_size, self._output_size])

        flat_index = tf.reshape(sentence_index, [-1, 1])
        indices = tf.concat([batch_index, flat_index], 1)

        updates = tf.reshape(attention_weight, [-1])

        p_attn = tf.scatter_nd_update(zeros, indices, updates)

        return p_attn 
Example #5
Source File: memory.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
      transitions: Tuple of transition quantities with batch dimension.
      rows: Episodes to append to, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity,
        message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less(
          tf.gather(self._length, rows), self._max_length,
          message='max length exceeded')
    append_ops = []
    with tf.control_dependencies([assert_max_length]):
      for buffer_, elements in zip(self._buffers, transitions):
        timestep = tf.gather(self._length, rows)
        indices = tf.stack([rows, timestep], 1)
        append_ops.append(tf.scatter_nd_update(buffer_, indices, elements))
    with tf.control_dependencies(append_ops):
      episode_mask = tf.reduce_sum(tf.one_hot(
          rows, self._capacity, dtype=tf.int32), 0)
      return self._length.assign_add(episode_mask) 
Example #6
Source File: memory.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
      transitions: Tuple of transition quantities with batch dimension.
      rows: Episodes to append to, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity,
        message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less(
          tf.gather(self._length, rows), self._max_length,
          message='max length exceeded')
    append_ops = []
    with tf.control_dependencies([assert_max_length]):
      for buffer_, elements in zip(self._buffers, transitions):
        timestep = tf.gather(self._length, rows)
        indices = tf.stack([rows, timestep], 1)
        append_ops.append(tf.scatter_nd_update(buffer_, indices, elements))
    with tf.control_dependencies(append_ops):
      episode_mask = tf.reduce_sum(tf.one_hot(
          rows, self._capacity, dtype=tf.int32), 0)
      return self._length.assign_add(episode_mask) 
Example #7
Source File: scatter_nd_ops_test.py    From deep_image_model with Apache License 2.0
def testVariableRankUpdate(self):
    self._VariableRankTests(_NumpyUpdate, tf.scatter_nd_update) 
Example #8
Source File: scatter_nd_ops_test.py    From deep_image_model with Apache License 2.0
def testRank3ValidShape(self):
    indices = tf.zeros([2, 2, 2], tf.int32)
    updates = tf.zeros([2, 2, 2], tf.int32)
    shape = np.array([2, 2, 2])
    self.assertAllEqual(
        tf.scatter_nd(indices, updates, shape).get_shape().as_list(), shape)

    ref = tf.Variable(tf.zeros(shape, tf.int32))
    self.assertAllEqual(
        tf.scatter_nd_update(ref, indices, updates).get_shape().as_list(),
        shape) 
Example #9
Source File: scatter_nd_ops_test.py    From deep_image_model with Apache License 2.0
def testRank3InvalidShape1(self):
    indices = tf.zeros([3, 2, 2], tf.int32)
    updates = tf.zeros([2, 2, 2], tf.int32)
    shape = np.array([2, 2, 2])
    with self.assertRaisesWithPredicateMatch(
        ValueError, "The outer \\d+ dimensions of indices\\.shape="):
      tf.scatter_nd(indices, updates, shape)

    ref = tf.Variable(tf.zeros(shape, tf.int32))
    with self.assertRaisesWithPredicateMatch(
        ValueError, "The outer \\d+ dimensions of indices\\.shape="):
      tf.scatter_nd_update(ref, indices, updates) 
Example #10
Source File: memory.py    From batch-ppo with Apache License 2.0
def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
      transitions: Tuple of transition quantities with batch dimension.
      rows: Episodes to append to, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity,
        message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less(
          tf.gather(self._length, rows), self._max_length,
          message='max length exceeded')
    with tf.control_dependencies([assert_max_length]):
      timestep = tf.gather(self._length, rows)
      indices = tf.stack([rows, timestep], 1)
      append_ops = tools.nested.map(
          lambda var, val: tf.scatter_nd_update(var, indices, val),
          self._buffers, transitions, flatten=True)
    with tf.control_dependencies(append_ops):
      episode_mask = tf.reduce_sum(tf.one_hot(
          rows, self._capacity, dtype=tf.int32), 0)
      return self._length.assign_add(episode_mask) 
Example #11
Source File: bubble_sort.py    From EsotericTensorFlow with MIT License
def inner_loop(self, i, j, _):
        body = tf.cond(tf.greater(self.array[j-1], self.array[j]),
                    lambda: tf.scatter_nd_update(self.array, [[j-1],[j]], [self.array[j],self.array[j-1]]),
                    lambda: self.array)
        return i, tf.subtract(j, 1), body 
Example #12
Source File: insertion_sort.py    From EsotericTensorFlow with MIT License
def inner_loop(self, i, j, _):
        return i, tf.subtract(j, 1), tf.scatter_nd_update(self.array, [[j-1],[j]], [self.array[j],self.array[j-1]])