Python tensorflow.python.framework.ops.control_dependencies() Examples

The following are 30 code examples of tensorflow.python.framework.ops.control_dependencies(), collected from open-source projects. Each example lists the source file and project it was taken from.
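
As a quick orientation, here is a minimal sketch of the pattern every example below relies on: ops created inside a control_dependencies block only run after the listed ops have run. The sketch uses the public tf.compat.v1 aliases rather than the internal module, and the names are illustrative.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

counter = tf.Variable(0, name="counter")
increment = tf.assign_add(counter, 1)

# `value` is produced only after `increment` has run.
with tf.control_dependencies([increment]):
  value = tf.identity(counter)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(value))  # 1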
Example #1
Source File: backend.py    From lambda-packs with MIT License
def __init__(self, inputs, outputs, updates=None):
    updates = updates or []
    if not isinstance(inputs, (list, tuple)):
      raise TypeError('`inputs` to a TensorFlow backend function '
                      'should be a list or tuple.')
    if not isinstance(outputs, (list, tuple)):
      raise TypeError('`outputs` of a TensorFlow backend function '
                      'should be a list or tuple.')
    if not isinstance(updates, (list, tuple)):
      raise TypeError('`updates` in a TensorFlow backend function '
                      'should be a list or tuple.')
    self.inputs = list(inputs)
    self.outputs = list(outputs)
    with ops.control_dependencies(self.outputs):
      updates_ops = []
      for update in updates:
        if isinstance(update, tuple):
          p, new_p = update
          updates_ops.append(state_ops.assign(p, new_p))
        else:
          # assumed already an op
          updates_ops.append(update)
      self.updates_op = control_flow_ops.group(*updates_ops) 
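
The class above backs the Keras backend function() helper. The following is a hedged usage sketch (graph mode only, illustrative names) of how an updates list of (variable, new_value) tuples turns into assign ops that run after the outputs are computed.

import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.keras import backend as K
tf.disable_eager_execution()

x = K.placeholder(shape=(2,))
v = K.variable(1.0)
y = x * v

# Each call fetches `y` and, because the assigns are grouped behind the
# outputs, also applies the update v <- v + 1 afterwards.
f = K.function([x], [y], updates=[(v, v + 1.0)])
print(f([np.array([1.0, 2.0])]))  # computed with v == 1
print(f([np.array([1.0, 2.0])]))  # computed with v == 2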
Example #2
Source File: gmm_ops.py    From lambda-packs with MIT License
def _init_clusters_random(data, num_clusters, random_seed):
  """Does random initialization of clusters.

  Args:
    data: a list of Tensors, each a matrix of data in which every row is an
      example.
    num_clusters: an integer with the number of clusters.
    random_seed: seed for the PRNG used to pick the random rows of `data`.

  Returns:
    A Tensor with num_clusters random rows of data.
  """
  assert isinstance(data, list)
  num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
  with ops.control_dependencies(
      [check_ops.assert_less_equal(num_clusters, num_data)]):
    indices = random_ops.random_uniform(
        [num_clusters],
        minval=0,
        maxval=math_ops.cast(num_data, dtypes.int64),
        seed=random_seed,
        dtype=dtypes.int64)
  indices %= math_ops.cast(num_data, dtypes.int64)
  clusters_init = embedding_lookup(data, indices, partition_strategy='div')
  return clusters_init 
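
The same assertion-gating idea can be sketched with the public API: the random draw is only allowed to run once the runtime check has passed. The toy data below is made up.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

data = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
num_clusters = 2
num_data = tf.shape(data)[0]

# The uniform draw depends on the assertion, so an invalid num_clusters
# fails loudly instead of silently picking duplicate rows.
with tf.control_dependencies(
    [tf.debugging.assert_less_equal(num_clusters, num_data)]):
  indices = tf.random.uniform(
      [num_clusters], maxval=tf.cast(num_data, tf.int64), dtype=tf.int64)
clusters_init = tf.gather(data, indices)

with tf.Session() as sess:
  print(sess.run(clusters_init))  # two random rows of `data`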
Example #3
Source File: clustering_ops.py    From lambda-packs with MIT License
def _init_clusters_random(self):
    """Does random initialization of clusters.

    Returns:
      Tensor of randomly initialized clusters.
    """
    num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in self._inputs])
    # Note that for mini-batch k-means, we should ensure that the batch size of
    # data used during initialization is sufficiently large to avoid duplicated
    # clusters.
    with ops.control_dependencies(
        [check_ops.assert_less_equal(self._num_clusters, num_data)]):
      indices = random_ops.random_uniform(
          array_ops.reshape(self._num_clusters, [-1]),
          minval=0,
          maxval=math_ops.cast(num_data, dtypes.int64),
          seed=self._random_seed,
          dtype=dtypes.int64)
      clusters_init = embedding_lookup(
          self._inputs, indices, partition_strategy='div')
      return clusters_init 
Example #4
Source File: layers.py    From tensornets with MIT License
def _dense_inner_flatten(inputs, new_rank):
  """Helper function for `inner_flatten`."""
  rank_assertion = check_ops.assert_rank_at_least(
      inputs, new_rank, message='inputs has rank less than new_rank')
  with ops.control_dependencies([rank_assertion]):
    outer_dimensions = array_ops.strided_slice(
        array_ops.shape(inputs), [0], [new_rank - 1])
    new_shape = array_ops.concat((outer_dimensions, [-1]), 0)
    reshaped = array_ops.reshape(inputs, new_shape)

  # if `new_rank` is an integer, try to calculate new shape.
  if isinstance(new_rank, six.integer_types):
    static_shape = inputs.get_shape()
    if static_shape is not None and static_shape.dims is not None:
      static_shape = static_shape.as_list()
      static_outer_dims = static_shape[:new_rank - 1]
      static_inner_dims = static_shape[new_rank - 1:]
      flattened_dimension = 1
      for inner_dim in static_inner_dims:
        if inner_dim is None:
          flattened_dimension = None
          break
        flattened_dimension *= inner_dim
      reshaped.set_shape(static_outer_dims + [flattened_dimension])
  return reshaped 
Example #5
Source File: main_op_impl.py    From lambda-packs with MIT License
def main_op_with_restore(restore_op_name):
  """Returns a main op to init variables, tables and restore the graph.

  Returns the main op, including the group of ops that initializes all
  variables, initializes local variables, initializes all tables, and runs
  the restore op named by `restore_op_name`.

  Args:
    restore_op_name: Name of the op to use to restore the graph.

  Returns:
    The set of ops to be run as part of the main op upon the load operation.
  """
  with ops.control_dependencies([main_op()]):
    main_op_with_restore = control_flow_ops.group(restore_op_name)
  return main_op_with_restore 
Example #6
Source File: math_grad.py    From lambda-packs with MIT License
def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  # .op works with Tensors or IndexedSlices
  with ops.control_dependencies([grad.op]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # a Tensor (not a Python scalar like 2.0), which causes it to be converted
    # to a Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))


# Logical operations have no gradients. 
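
A quick sanity check of _SquaredDifferenceGrad above, assuming tf.math.squared_difference dispatches to this registered gradient: d/dx (x-y)^2 = 2(x-y) and d/dy (x-y)^2 = -2(x-y).

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.constant(3.0)
y = tf.constant(1.0)
z = tf.math.squared_difference(x, y)   # (x - y)^2
gx, gy = tf.gradients(z, [x, y])

with tf.Session() as sess:
  print(sess.run([gx, gy]))  # [4.0, -4.0]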
Example #7
Source File: variables.py    From lambda-packs with MIT License
def initialized_value(self):
    """Returns the value of the initialized variable.

    You should use this instead of the variable itself to initialize another
    variable with a value that depends on the value of this variable.

    ```python
    # Initialize 'v' with a random tensor.
    v = tf.Variable(tf.truncated_normal([10, 40]))
    # Use `initialized_value` to guarantee that `v` has been
    # initialized before its value is used to initialize `w`.
    # The random values are picked only once.
    w = tf.Variable(v.initialized_value() * 2.0)
    ```

    Returns:
      A `Tensor` holding the value of this variable after its initializer
      has run.
    """
    with ops.control_dependencies(None):
      return control_flow_ops.cond(is_variable_initialized(self),
                                   self.read_value,
                                   lambda: self.initial_value) 
Example #8
Source File: tensor_array_ops.py    From lambda-packs with MIT License
def grad(self, source, flow=None, name=None):
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized.  This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    if flow is None:
      flow = self.flow
    with ops.name_scope(name, "TensorArrayGrad", [self._handle]):
      with ops.colocate_with(self._handle):
        g_handle, unused_flow = gen_data_flow_ops._tensor_array_grad_v3(
            handle=self._handle, source=source, flow_in=flow, name=name)
        with ops.control_dependencies([g_handle]):
          flow = array_ops.identity(flow, name="gradient_flow")
        g = TensorArray(
            dtype=self._dtype,
            handle=g_handle,
            flow=flow,
            infer_shape=self._infer_shape,
            colocate_with_first_write_call=False)
        g._element_shape = self._element_shape
        return g 
Example #9
Source File: control_flow_ops.py    From lambda-packs with MIT License
def AddValue(self, val):
    """Add `val` to the current context and its outer context recursively."""
    if val.name in self._values:
      # Use the real value if it comes from outer context. This is needed in
      # particular for nested conds.
      result = self._external_values.get(val.name)
      result = val if result is None else result
    else:
      result = val
      self._values.add(val.name)
      if self._outer_context:
        result = self._outer_context.AddValue(val)
        self._values.add(result.name)
      with ops.control_dependencies(None):
        result = _SwitchRefOrTensor(result, self._pred)[self._branch]
      result.op.graph.prevent_fetching(result.op)
      # pylint: disable=protected-access
      result.op._set_control_flow_context(self)
      # pylint: enable=protected-access

      self._values.add(result.name)
      self._external_values[val.name] = result
    return result 
Example #10
Source File: math_grad.py    From lambda-packs with MIT License
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq)) 
Example #11
Source File: numerics.py    From lambda-packs with MIT License
def add_check_numerics_ops():
  """Connect a `check_numerics` to every floating point tensor.

  `check_numerics` operations themselves are added for each `half`, `float`,
  or `double` tensor in the graph. For all ops in the graph, the
  `check_numerics` op for all of its (`half`, `float`, or `double`) inputs
  is guaranteed to run before the `check_numerics` op on any of its outputs.

  Returns:
    A `group` op depending on all `check_numerics` ops added.
  """
  check_op = []
  # This code relies on the ordering of ops in get_operations().
  # The producer of a tensor always comes before that tensor's consumer in
  # this list. This is true because get_operations() returns ops in the order
  # added, and an op can only be added after its inputs are added.
  for op in ops.get_default_graph().get_operations():
    for output in op.outputs:
      if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
        message = op.name + ":" + str(output.value_index)
        with ops.control_dependencies(check_op):
          check_op = [array_ops.check_numerics(output, message=message)]
  return control_flow_ops.group(*check_op) 
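
A short usage sketch of the public alias tf.compat.v1.add_check_numerics_ops(): running the returned group op alongside the normal fetches turns a silent NaN into an InvalidArgumentError.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, [None])
y = tf.log(x)                         # NaN for negative inputs
check = tf.add_check_numerics_ops()   # group of check_numerics ops

with tf.Session() as sess:
  try:
    sess.run([y, check], feed_dict={x: [-1.0]})
  except tf.errors.InvalidArgumentError as e:
    print("caught:", e.message)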
Example #12
Source File: optimizer.py    From tensorflow-XNN with MIT License
def _finish(self, update_ops, name_scope):
        # Update the power accumulators.
        with ops.control_dependencies(update_ops):
            with ops.colocate_with(self._iterations):
                update_beta1 = self._beta1_power.assign(
                    self._beta1_power * self._beta1_t,
                    use_locking=self._use_locking)
                update_beta2 = self._beta2_power.assign(
                    self._beta2_power * self._beta2_t,
                    use_locking=self._use_locking)
                t = self._iterations + 1.
                update_iterations = self._iterations.assign(t, use_locking=self._use_locking)
                momentum_cache_power = self._get_momentum_cache(self._schedule_decay_t, t)
                momentum_cache_t = self._beta1_t * (1. - 0.5 * momentum_cache_power)
                update_m_schedule = self._m_schedule.assign(
                    self._m_schedule * momentum_cache_t,
                    use_locking=self._use_locking)
        return control_flow_ops.group(
            *update_ops + [update_beta1, update_beta2] + [update_iterations, update_m_schedule],
            name=name_scope) 
Example #13
Source File: linalg_grad.py    From lambda-packs with MIT License
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  v = op.outputs[1]
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,:,i]
  with ops.control_dependencies([grad_e.op, grad_v.op]):
    if grad_v is not None:
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only defined
      # up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.reciprocal(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e) + f * math_ops.matmul(
                  v, grad_v, adjoint_a=True),
              v,
              adjoint_b=True))
    else:
      grad_a = math_ops.matmul(
          v, math_ops.matmul(
              array_ops.matrix_diag(grad_e), v, adjoint_b=True))
    # The forward op only depends on the lower triangular part of a, so here we
    # symmetrize and take the lower triangle
    grad_a = array_ops.matrix_band_part(
        grad_a + array_ops.matrix_transpose(grad_a), -1, 0)
    grad_a = array_ops.matrix_set_diag(grad_a,
                                       0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a 
Example #14
Source File: nn_grad.py    From lambda-packs with MIT License
def _SoftplusGradGrad(op, grad):
  # Let:
  #   y = tf.nn.softplus(x)
  #   dx = gen_nn_ops._softplus_grad(dy, x) = dy / (1 + exp(-x))
  # This op computes (ddy, d2x) from op.inputs == [dy, x] and grad == ddx.
  dy, x = op.inputs
  with ops.control_dependencies([grad.op]):
    ddy = gen_nn_ops._softplus_grad(grad, x)  # pylint: disable=protected-access
    d2x = grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x))
    return (ddy, d2x) 
Example #15
Source File: adam.py    From lambda-packs with MIT License
def _resource_scatter_add(self, x, i, v):
    with ops.control_dependencies(
        [resource_variable_ops.resource_scatter_add(
            x.handle, i, v)]):
      return x.value() 
Example #16
Source File: math_grad.py    From lambda-packs with MIT License
def _SigmoidGrad(op, grad):
  """Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
  y = op.outputs[0]  # y = sigmoid(x)
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    # pylint: disable=protected-access
    return gen_math_ops._sigmoid_grad(y, grad) 
Example #17
Source File: clustering_ops.py    From lambda-packs with MIT License
def _mini_batch_sync_updates_op(self, update_in_steps,
                                  cluster_centers_var, cluster_centers_updated,
                                  total_counts):
    if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
      assert update_in_steps is not None
      with ops.colocate_with(update_in_steps):
        def _f():
          # Note that there is a race condition here, so we only do best-effort
          # updates here. We reset update_in_steps first so that other workers
          # don't duplicate the updates. Also we update cluster_center_vars
          # before resetting total_counts to avoid large updates to
          # cluster_centers_updated based on partially updated
          # cluster_center_vars.
          with ops.control_dependencies([state_ops.assign(
              update_in_steps,
              self._mini_batch_steps_per_iteration - 1)]):
            with ops.colocate_with(cluster_centers_updated):
              if self._distance_metric == COSINE_DISTANCE:
                cluster_centers = nn_impl.l2_normalize(cluster_centers_updated,
                                                       dim=1)
              else:
                cluster_centers = cluster_centers_updated
            with ops.colocate_with(cluster_centers_var):
              with ops.control_dependencies([state_ops.assign(
                  cluster_centers_var,
                  cluster_centers)]):
                with ops.colocate_with(cluster_centers_var):
                  with ops.control_dependencies([
                      state_ops.assign(total_counts,
                                       array_ops.zeros_like(total_counts))]):
                    return array_ops.identity(update_in_steps)
        return control_flow_ops.cond(
            update_in_steps <= 0,
            _f,
            lambda: state_ops.assign_sub(update_in_steps, 1))
    else:
      return control_flow_ops.no_op() 
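
Stripped of the k-means details, the nesting in _mini_batch_sync_updates_op above is just a chain of control dependencies that forces the assigns to happen in a fixed order. A minimal sketch of that ordering pattern, with made-up variables:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

a = tf.Variable(0)
b = tf.Variable(0)

# assign(b, 2) runs only after assign(a, 1); `done` runs only after both.
with tf.control_dependencies([tf.assign(a, 1)]):
  with tf.control_dependencies([tf.assign(b, 2)]):
    done = tf.no_op()

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(done)
  print(sess.run([a, b]))  # [1, 2]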
Example #18
Source File: math_grad.py    From lambda-packs with MIT License
def _DigammaGrad(op, grad):
  """Compute gradient of the digamma function with respect to its argument."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x) 
Example #19
Source File: math_grad.py    From lambda-packs with MIT License
def _LgammaGrad(op, grad):
  """Returns grad * digamma(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.digamma(x) 
Example #20
Source File: math_grad.py    From lambda-packs with MIT License
def _ErfcGrad(op, grad):
  """Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  minus_two_over_root_pi = constant_op.constant(
      -2 / np.sqrt(np.pi), dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x)) 
Example #21
Source File: math_grad.py    From lambda-packs with MIT License
def _ErfGrad(op, grad):
  """Returns grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x)) 
Example #22
Source File: math_grad.py    From lambda-packs with MIT License
def _TanhGrad(op, grad):
  """Returns grad * (1 - tanh(x) * tanh(x))."""
  y = op.outputs[0]  # y = tanh(x)
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    # pylint: disable=protected-access
    return gen_math_ops._tanh_grad(y, grad) 
Example #23
Source File: math_grad.py    From lambda-packs with MIT License
def _Log1pGrad(op, grad):
  """Returns grad * (1/(1 + x))."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(1 + x) 
Example #24
Source File: math_grad.py    From lambda-packs with MIT License
def _LogGrad(op, grad):
  """Returns grad * (1/x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(x) 
Example #25
Source File: math_grad.py    From lambda-packs with MIT License
def _Expm1Grad(op, grad):
  """Returns grad * exp(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    y = math_ops.exp(x)
    return grad * y 
Example #26
Source File: math_grad.py    From lambda-packs with MIT License
def _ExpGrad(op, grad):
  """Returns grad * exp(x)."""
  y = op.outputs[0]  # y = e^x
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    return grad * y 
Example #27
Source File: math_grad.py    From lambda-packs with MIT License
def _SqrtGradGrad(op, grad):
  a = op.inputs[0]
  y = op.outputs[0]  # y = 0.5 * b / conj(a)
  with ops.control_dependencies([grad.op]):
    ga = grad / a
    return -math_ops.conj(ga) * y, 0.5 * ga 
Example #28
Source File: math_grad.py    From lambda-packs with MIT License
def _InvGradGrad(op, grad):
  b = op.inputs[1]
  # op.outputs[0]: y = -b * conj(a)^2
  with ops.control_dependencies([grad.op]):
    ca = math_ops.conj(op.inputs[0])
    cg = math_ops.conj(grad)
    # pylint: disable=protected-access
    return cg * -2.0 * b * ca, gen_math_ops._reciprocal_grad(ca, grad) 
Example #29
Source File: math_grad.py    From lambda-packs with MIT License
def _SquareGrad(op, grad):
  x = op.inputs[0]
  # Added control dependencies to prevent 2*x from being computed too early.
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * (2.0 * x) 
Example #30
Source File: math_grad.py    From lambda-packs with MIT License
def _SigmoidGradGrad(op, grad):
  with ops.control_dependencies([grad.op]):
    a = math_ops.conj(op.inputs[0])
    b = math_ops.conj(op.inputs[1])
    gb = grad * b
    # pylint: disable=protected-access
    return gb - 2.0 * gb * a, gen_math_ops._sigmoid_grad(a, grad)