Python tensorflow.foldl() Examples

The following are 16 code examples of tensorflow.foldl(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
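Before the project examples, a minimal sketch of what tf.foldl computes (TF 1.x-style, as used throughout this page; the signature is the same in TF 2.x):

import tensorflow as tf

# tf.foldl threads an accumulator through elems from left to right:
#   result = fn(...fn(fn(init, elems[0]), elems[1])..., elems[-1])
# With no initializer, elems[0] seeds the accumulator and the fold
# starts at elems[1].
elems = tf.constant([1, 2, 3, 4, 5, 6])
total = tf.foldl(lambda acc, x: acc + x, elems)  # 1+2+3+4+5+6 = 21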
Example #1
Source File: neural_gpu.py    From BERT with Apache License 2.0    6 votes
def neural_gpu_body(inputs, hparams, name=None):
  """The core Neural GPU."""
  with tf.variable_scope(name, "neural_gpu"):

    def step(state, inp):  # pylint: disable=missing-docstring
      x = tf.nn.dropout(state, 1.0 - hparams.dropout)
      for layer in range(hparams.num_hidden_layers):
        x = common_layers.conv_gru(
            x, (hparams.kernel_height, hparams.kernel_width),
            hparams.hidden_size,
            name="cgru_%d" % layer)
      # Padding input is zeroed out in the modality; we check this by summing.
      padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
      new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
      return new_state

    return tf.foldl(
        step,
        tf.transpose(inputs, [1, 0, 2, 3]),
        initializer=inputs,
        parallel_iterations=1,
        swap_memory=True) 
Example #2
Source File: neural_gpu.py    From fine-lm with MIT License    6 votes
def neural_gpu_body(inputs, hparams, name=None):
  """The core Neural GPU."""
  with tf.variable_scope(name, "neural_gpu"):

    def step(state, inp):  # pylint: disable=missing-docstring
      x = tf.nn.dropout(state, 1.0 - hparams.dropout)
      for layer in range(hparams.num_hidden_layers):
        x = common_layers.conv_gru(
            x, (hparams.kernel_height, hparams.kernel_width),
            hparams.hidden_size,
            name="cgru_%d" % layer)
      # Padding input is zeroed out in the modality; we check this by summing.
      padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
      new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
      return new_state

    return tf.foldl(
        step,
        tf.transpose(inputs, [1, 0, 2, 3]),
        initializer=inputs,
        parallel_iterations=1,
        swap_memory=True) 
Example #3
Source File: neural_gpu.py    From training_results_v0.5 with Apache License 2.0    6 votes
def neural_gpu_body(inputs, hparams, name=None):
  """The core Neural GPU."""
  with tf.variable_scope(name, "neural_gpu"):

    def step(state, inp):  # pylint: disable=missing-docstring
      x = tf.nn.dropout(state, 1.0 - hparams.dropout)
      for layer in range(hparams.num_hidden_layers):
        x = common_layers.conv_gru(
            x, (hparams.kernel_height, hparams.kernel_width),
            hparams.hidden_size,
            name="cgru_%d" % layer)
      # Padding input is zeroed out in the modality; we check this by summing.
      padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
      new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
      return new_state

    return tf.foldl(
        step,
        tf.transpose(inputs, [1, 0, 2, 3]),
        initializer=inputs,
        parallel_iterations=1,
        swap_memory=True) 
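Examples #1–#3 are the same Tensor2Tensor function vendored into three different projects. The shared pattern is tf.foldl acting as a recurrent loop: tf.transpose moves the time axis to the front so foldl can iterate over it, and step carries the state tensor from one time step to the next. A minimal sketch of that pattern with a toy step function (shapes and names here are illustrative, not from the original):

import tensorflow as tf

def toy_step(state, inp):
  # Toy recurrence standing in for the conv_gru stack: decay the carried
  # state and mix in the current input frame.
  return 0.9 * state + 0.1 * inp

batch, time, depth = 2, 5, 3
inputs = tf.ones([batch, time, depth])
final_state = tf.foldl(
    toy_step,
    tf.transpose(inputs, [1, 0, 2]),  # [time, batch, depth]: foldl walks axis 0
    initializer=tf.zeros([batch, depth]))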
Example #4
Source File: functional_ops_test.py    From deep_image_model with Apache License 2.0    6 votes
def testFoldl_Scoped(self):
    with self.test_session() as sess:
      with tf.variable_scope("root") as varscope:
        elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")

        r = tf.foldl(simple_scoped_fn, elems)
        # Check that we have the one variable we asked for here.
        self.assertEqual(len(tf.trainable_variables()), 1)
        self.assertEqual(tf.trainable_variables()[0].name, "root/body/two:0")
        sess.run([tf.global_variables_initializer()])
        self.assertAllEqual(208, r.eval())

        # Now let's reuse our single variable.
        varscope.reuse_variables()
        r = tf.foldl(simple_scoped_fn, elems, initializer=10)
        self.assertEqual(len(tf.trainable_variables()), 1)
        self.assertAllEqual(880, r.eval()) 
Example #5
Source File: functional_ops_test.py    From deep_image_model with Apache License 2.0    6 votes
def testScanFoldl_Nested(self):
    with self.test_session():
      elems = tf.constant([1.0, 2.0, 3.0, 4.0], name="data")
      inner_elems = tf.constant([0.5, 0.5], name="data")

      def r_inner(a, x):
        return tf.foldl(lambda b, y: b * y * x, inner_elems, initializer=a)

      r = tf.scan(r_inner, elems)

      # t == 0 (returns 1)
      # t == 1, a == 1, x == 2 (returns 1)
      #   t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1
      #   t_1 == 1, b == 1,      y == 0.5, returns b * y * x = 1
      # t == 2, a == 1, x == 3 (returns 1.5*1.5 == 2.25)
      #   t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1.5
      #   t_1 == 1, b == 1.5,    y == 0.5, returns b * y * x = 1.5*1.5
      # t == 3, a == 2.25, x == 4 (returns 9)
      #   t_0 == 0, b == a == 2.25, y == 0.5, returns b * y * x = 4.5
      #   t_1 == 1, b == 4.5,       y == 0.5, returns b * y * x = 9
      self.assertAllClose([1., 1., 2.25, 9.], r.eval()) 
Example #6
Source File: tensorflow_backend.py    From keras-lambda with MIT License    5 votes
def foldl(fn, elems, initializer=None, name=None):
    """Reduce elems using fn to combine them from left to right.

    # Arguments
        fn: Callable that will be called upon each element in elems and an
            accumulator, for instance `lambda acc, x: acc + x`
        elems: tensor
        initializer: The first value used (`elems[0]` in case of None)
        name: A string name for the foldl node in the graph

    # Returns
        Tensor with same type and shape as `initializer`.
    """
    return tf.foldl(fn, elems, initializer=initializer, name=name) 
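A hedged usage sketch of this backend wrapper (assuming the classic keras.backend import alias; the folds themselves are illustrative):

from keras import backend as K

x = K.constant([1., 2., 3., 4.])
product = K.foldl(lambda acc, v: acc * v, x)  # 24.0, seeded with elems[0]
total = K.foldl(lambda acc, v: acc + v, x, initializer=K.constant(10.))  # 20.0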
Example #7
Source File: native_module_test.py    From hub with Apache License 2.0    5 votes
def nested_control_flow_module_fn():
  """Compute the sum of elements greater than 'a' with nested control flow."""
  elems = tf_v1.placeholder(
      dtype=tf.float32, name="elems", shape=[None])
  a = tf_v1.placeholder(dtype=tf.float32, name="a")

  def sum_above_a(acc, x):
    return acc + tf.cond(x > a, lambda: x, lambda: 0.0)

  hub.add_signature(
      inputs={"elems": elems, "a": a},
      outputs=tf.foldl(sum_above_a, elems, initializer=tf.constant(0.0))) 
Example #8
Source File: tensorflow_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License    5 votes
def foldl(fn, elems, initializer=None, name=None):
    """Reduce elems using fn to combine them from left to right.

    # Arguments
        fn: Callable that will be called upon each element in elems and an
            accumulator, for instance `lambda acc, x: acc + x`
        elems: tensor
        initializer: The first value used (`elems[0]` in case of None)
        name: A string name for the foldl node in the graph

    # Returns
        Tensor with same type and shape as `initializer`.
    """
    return tf.foldl(fn, elems, initializer=initializer, name=name) 
Example #9
Source File: tf_wpe.py    From nara_wpe with MIT License    5 votes
def perform_filter_operation(Y, filter_matrix_conj, taps, delay):
    """

    >>> D, T, taps, delay = 1, 10, 2, 1
    >>> tf.enable_eager_execution()
    >>> Y = tf.ones([D, T])
    >>> filter_matrix_conj = tf.ones([taps, D, D])
    >>> X = perform_filter_operation(Y, filter_matrix_conj, taps, delay)
    >>> X.shape
    TensorShape([Dimension(1), Dimension(10)])
    >>> X.numpy()
    array([[ 1.,  0., -1., -1., -1., -1., -1., -1., -1., -1.]], dtype=float32)
    """
    dyn_shape = tf.shape(Y)
    T = dyn_shape[1]

    def add_tap(accumulated, tau_minus_delay):
        new = tf.einsum(
            'de,dt',
            filter_matrix_conj[tau_minus_delay, :, :],
            Y[:, :(T - delay - tau_minus_delay)]
        )
        paddings = tf.convert_to_tensor([[0, 0], [delay + tau_minus_delay, 0]])
        new = tf.pad(new, paddings, "CONSTANT")
        return accumulated + new

    reverb_tail = tf.foldl(
        add_tap, tf.range(0, taps),
        initializer=tf.zeros_like(Y)
    )
    return Y - reverb_tail 
Example #10
Source File: codec.py    From vae-seq with Apache License 2.0    5 votes
def _sample_n(self, n, seed=None):
        all_counts = tf.to_float(tf.range(self._total_count + 1))
        for batch_dim in range(self.batch_shape.ndims):
            all_counts = tf.expand_dims(all_counts, axis=-1)
        all_cdfs = tf.map_fn(self.cdf, all_counts)
        shape = tf.concat([[n], self.batch_shape_tensor()], 0)
        uniform = tf.random_uniform(shape, seed=seed)
        return tf.foldl(
            lambda acc, cdfs: tf.where(uniform > cdfs, acc + 1, acc),
            all_cdfs,
            initializer=tf.zeros(shape, dtype=tf.int32)) 
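The fold above implements inverse-CDF sampling by counting: for each candidate count, uniform > cdfs is true exactly when the sampled value must exceed that count, so adding 1 per exceeded threshold yields the sample. A standalone sketch of the same trick (the CDF rows below are made up for illustration):

import tensorflow as tf

uniform = tf.constant([0.05, 0.4, 0.9])
cdfs = tf.constant([[0.1, 0.1, 0.1],   # CDF at count 0
                    [0.5, 0.5, 0.5],   # CDF at count 1
                    [0.8, 0.8, 0.8],   # CDF at count 2
                    [1.0, 1.0, 1.0]])  # CDF at count 3
samples = tf.foldl(
    lambda acc, row: tf.where(uniform > row, acc + 1, acc),
    cdfs,
    initializer=tf.zeros([3], dtype=tf.int32))
# samples == [0, 1, 3]: each entry counts the thresholds its uniform draw exceeds.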
Example #11
Source File: tensorflow_backend.py    From DeepLearning_Wavelet-LSTM with MIT License    5 votes
def foldl(fn, elems, initializer=None, name=None):
    """Reduce elems using fn to combine them from left to right.

    # Arguments
        fn: Callable that will be called upon each element in elems and an
            accumulator, for instance `lambda acc, x: acc + x`
        elems: tensor
        initializer: The first value used (`elems[0]` in case of None)
        name: A string name for the foldl node in the graph

    # Returns
        Tensor with same type and shape as `initializer`.
    """
    return tf.foldl(fn, elems, initializer=initializer, name=name) 
Example #12
Source File: functional_ops_test.py    From deep_image_model with Apache License 2.0    5 votes
def testFoldShape(self):
    with self.test_session():
      x = tf.constant([[1, 2, 3], [4, 5, 6]])
      def fn(_, current_input):
        return current_input
      initializer = tf.constant([0, 0, 0])
      y = tf.foldl(fn, x, initializer=initializer)
      self.assertAllEqual(y.get_shape(), y.eval().shape) 
Example #13
Source File: functional_ops_test.py    From deep_image_model with Apache License 2.0    5 votes
def testFold_Grad(self):
    with self.test_session():
      elems = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
      v = tf.constant(2.0, name="v")

      r = tf.foldl(
          lambda a, x: tf.mul(a, x), elems, initializer=v)
      r = tf.gradients(r, v)[0]
      self.assertAllEqual(720.0, r.eval())

      r = tf.foldr(
          lambda a, x: tf.mul(a, x), elems, initializer=v)
      r = tf.gradients(r, v)[0]
      self.assertAllEqual(720.0, r.eval()) 
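Worked out: both folds compute r = v * 1 * 2 * 3 * 4 * 5 * 6 = 720 * v, so dr/dv = 720 regardless of fold direction; the test confirms gradients flow through both tf.foldl and tf.foldr. (tf.mul is the pre-1.0 name of tf.multiply.)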
Example #14
Source File: functional_ops_test.py    From deep_image_model with Apache License 2.0    5 votes
def testFoldl_Simple(self):
    with self.test_session():
      elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")

      r = tf.foldl(lambda a, x: tf.mul(tf.add(a, x), 2), elems)
      self.assertAllEqual(208, r.eval())

      r = tf.foldl(
          lambda a, x: tf.mul(tf.add(a, x), 2), elems, initializer=10)
      self.assertAllEqual(880, r.eval()) 
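Worked out: without an initializer the accumulator is seeded with elems[0] = 1, so the fold runs (1+2)*2 = 6, (6+3)*2 = 18, (18+4)*2 = 44, (44+5)*2 = 98, (98+6)*2 = 208. With initializer=10 it folds over all six elements: 22, 48, 102, 212, 434, 880.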
Example #15
Source File: tensorflow_backend.py    From GraphicDesignPatternByPython with MIT License    5 votes
def foldl(fn, elems, initializer=None, name=None):
    """Reduce elems using fn to combine them from left to right.

    # Arguments
        fn: Callable that will be called upon each element in elems and an
            accumulator, for instance `lambda acc, x: acc + x`
        elems: tensor
        initializer: The first value used (`elems[0]` in case of None)
        name: A string name for the foldl node in the graph

    # Returns
        Tensor with same type and shape as `initializer`.
    """
    return tf.foldl(fn, elems, initializer=initializer, name=name) 
Example #16
Source File: common_layers.py    From BERT with Apache License 2.0    5 votes
def top_kth_iterative(x, k):
  """Compute the k-th top element of x on the last axis iteratively.

  This assumes values in x are non-negative; rescale if needed.
  It is often faster than tf.nn.top_k for small k, especially if k < 30.
  Note: this does not support back-propagation; it stops gradients!

  Args:
    x: a Tensor of non-negative numbers of type float.
    k: a python integer.

  Returns:
    a float tensor of the same shape as x but with size 1 on the last axis,
    containing the k-th largest number in x.
  """
  # The iterative computation is as follows:
  #
  # cur_x = x
  # for _ in range(k):
  #   top_x = maximum of elements of cur_x on the last axis
  #   cur_x = cur_x where cur_x < top_x and 0 everywhere else (top elements)
  #
  # We encode this computation in a TF graph using tf.foldl, so the inner
  # part of the above loop is called "next_x" and tf.foldl does the loop.
  def next_x(cur_x, _):
    top_x = tf.reduce_max(cur_x, axis=-1, keep_dims=True)
    return cur_x * to_float(cur_x < top_x)
  # We only do k-1 steps of the loop and compute the final max separately.
  fin_x = tf.foldl(next_x, tf.range(k - 1), initializer=tf.stop_gradient(x),
                   parallel_iterations=2, back_prop=False)
  return tf.stop_gradient(tf.reduce_max(fin_x, axis=-1, keep_dims=True))
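A hedged usage sketch (TF 1.x graph mode and the module's own to_float helper assumed; the values are illustrative):

x = tf.constant([[0.1, 0.5, 0.3, 0.9]])
kth = top_kth_iterative(x, 2)   # [[0.5]]: the 2nd-largest value per row
mask = to_float(x >= kth)       # [[0., 1., 0., 1.]]: a top-2 indicator mask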