Python tensorflow.python.ops.control_flow_ops.no_op() Examples

The following are 30 code examples of tensorflow.python.ops.control_flow_ops.no_op(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.python.ops.control_flow_ops.
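Before the examples, a minimal sketch of what no_op() gives you: an operation with no inputs, no outputs, and no effect when run. It is most often used as the "do nothing" branch of control_flow_ops.cond, or as a placeholder where an op is required but there is no work to do, as several examples below show. This sketch assumes TF1-style graph execution via tf.compat.v1; the variable names are illustrative.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import control_flow_ops

tf.disable_v2_behavior()

counter = tf.Variable(0, name="counter")
flag = tf.placeholder(tf.bool, shape=[])

# Run the increment only when `flag` is True; otherwise run a NoOp.
maybe_increment = control_flow_ops.cond(
    flag,
    lambda: tf.assign_add(counter, 1).op,
    control_flow_ops.no_op)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(maybe_increment, feed_dict={flag: True})   # counter -> 1
  sess.run(maybe_increment, feed_dict={flag: False})  # no effect
  print(sess.run(counter))  # 1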
Example #1
Source File: tpu_estimator.py    From Chinese-XLNet with Apache License 2.0
def _sync_variables_ops(ctx):
  """Create varriables synchronization ops.

  Gets the variables back from TPU nodes. This means the variables updated
  by TPU will now be *synced* to host memory.
  In BROADCAST mode, we skip this sync since the variables are usually too
  big to transmit via RPC.

  Args:
    ctx: A `_InternalTPUContext` instance with mode.

  Returns:
    A list of sync ops.
  """

  if not ctx.is_input_broadcast_with_iterators():
    return [
        array_ops.check_numerics(v.read_value(),
                                 'Gradient for %s is NaN' % v.name).op
        for v in variables.trainable_variables()
    ]
  else:
    return [control_flow_ops.no_op()] 
Example #2
Source File: variables.py    From deep_image_model with Apache License 2.0
def variables_initializer(var_list, name="init"):
  """Returns an Op that initializes a list of variables.

  After you launch the graph in a session, you can run the returned Op to
  initialize all the variables in `var_list`. This Op runs all the
  initializers of the variables in `var_list` in parallel.

  Calling `initialize_variables()` is equivalent to passing the list of
  initializers to `Group()`.

  If `var_list` is empty, however, the function still returns an Op that can
  be run. That Op just has no effect.

  Args:
    var_list: List of `Variable` objects to initialize.
    name: Optional name for the returned operation.

  Returns:
    An Op that runs the initializers of all the specified variables.
  """
  if var_list:
    return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
  return control_flow_ops.no_op(name=name) 
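A quick usage sketch of the empty-list fallback above, through the public tf.variables_initializer wrapper (TF1 graph mode via tf.compat.v1 assumed): even with no variables, the caller gets a well-formed op it can run unconditionally.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

init = tf.variables_initializer([], name="init")  # nothing to initialize
print(init.type)  # "NoOp" -- still a runnable op

with tf.Session() as sess:
  sess.run(init)  # succeeds and has no effect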
Example #3
Source File: topn.py    From auto-alt-text-lambda-api with MIT License
def insert(self, ids, scores):
    """Insert the ids and scores into the TopN."""
    with ops.control_dependencies(self.last_ops):
      scatter_op = state_ops.scatter_update(self.id_to_score, ids, scores)
      larger_scores = math_ops.greater(scores, self.sl_scores[0])

      def shortlist_insert():
        larger_ids = array_ops.boolean_mask(
            math_ops.to_int64(ids), larger_scores)
        larger_score_values = array_ops.boolean_mask(scores, larger_scores)
        shortlist_ids, new_ids, new_scores = tensor_forest_ops.top_n_insert(
            self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
        u1 = state_ops.scatter_update(self.sl_ids, shortlist_ids, new_ids)
        u2 = state_ops.scatter_update(self.sl_scores, shortlist_ids, new_scores)
        return control_flow_ops.group(u1, u2)

      # We only need to insert into the shortlist if there are any
      # scores larger than the threshold.
      cond_op = control_flow_ops.cond(
          math_ops.reduce_any(larger_scores), shortlist_insert,
          control_flow_ops.no_op)
      with ops.control_dependencies([cond_op]):
        self.last_ops = [scatter_op, cond_op] 
Example #4
Source File: variables.py    From lambda-packs with MIT License
def variables_initializer(var_list, name="init"):
  """Returns an Op that initializes a list of variables.

  After you launch the graph in a session, you can run the returned Op to
  initialize all the variables in `var_list`. This Op runs all the
  initializers of the variables in `var_list` in parallel.

  Calling `initialize_variables()` is equivalent to passing the list of
  initializers to `Group()`.

  If `var_list` is empty, however, the function still returns an Op that can
  be run. That Op just has no effect.

  Args:
    var_list: List of `Variable` objects to initialize.
    name: Optional name for the returned operation.

  Returns:
    An Op that runs the initializers of all the specified variables.
  """
  if var_list:
    return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
  return control_flow_ops.no_op(name=name) 
Example #5
Source File: tpu_estimator.py    From embedding-as-service with MIT License
def _sync_variables_ops(ctx):
  """Create varriables synchronization ops.

  Gets the variables back from TPU nodes. This means the variables updated
  by TPU will now be *synced* to host memory.
  In BROADCAST mode, we skip this sync since the variables are usually too
  big to transmit via RPC.

  Args:
    ctx: A `_InternalTPUContext` instance with mode.

  Returns:
    A list of sync ops.
  """

  if not ctx.is_input_broadcast_with_iterators():
    return [
        array_ops.check_numerics(v.read_value(),
                                 'Gradient for %s is NaN' % v.name).op
        for v in variables.trainable_variables()
    ]
  else:
    return [control_flow_ops.no_op()] 
Example #6
Source File: check_ops.py    From deep_image_model with Apache License 2.0
def assert_type(tensor, tf_type, message=None, name=None):
  """Statically asserts that the given `Tensor` is of the specified type.

  Args:
    tensor: A tensorflow `Tensor`.
    tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
      etc).
    message: A string to prefix to the default message.
    name:  A name to give this `Op`.  Defaults to "assert_type".

  Raises:
    TypeError: If the tensor's data type doesn't match `tf_type`.

  Returns:
    A `no_op` that does nothing.  Type can be determined statically.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_type', [tensor]):
    tensor = ops.convert_to_tensor(tensor, name='tensor')
    if tensor.dtype != tf_type:
      raise TypeError(
          '%s  %s must be of type %s' % (message, tensor.op.name, tf_type))

    return control_flow_ops.no_op('statically_determined_correct_type') 
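Because the type check happens entirely at graph-construction time, a passing call costs nothing when the graph runs. A hedged sketch using the public tf.debugging.assert_type counterpart (TF1 graph mode assumed):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.constant([1.0, 2.0], dtype=tf.float32)
check = tf.debugging.assert_type(x, tf.float32)  # statically satisfied
print(check.type)  # "NoOp" -- nothing left to verify at run time

try:
  tf.debugging.assert_type(x, tf.int64)  # mismatch fails immediately
except TypeError as e:
  print("caught:", e)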
Example #7
Source File: linear_operator_util.py    From lambda-packs with MIT License
def assert_zero_imag_part(x, message=None, name="assert_zero_imag_part"):
  """Returns `Op` that asserts Tensor `x` has no non-zero imaginary parts.

  Args:
    x:  Numeric `Tensor`, real, integer, or complex.
    message:  A string message to prepend to failure message.
    name:  A name to give this `Op`.

  Returns:
    An `Op` that asserts `x` has no entries with a non-zero imaginary part.
  """
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype.base_dtype

    if dtype.is_floating:
      return control_flow_ops.no_op()

    zero = ops.convert_to_tensor(0, dtype=dtype.real_dtype)
    return check_ops.assert_equal(zero, math_ops.imag(x), message=message) 
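The dispatch above -- return a free no_op when the dtype makes the assertion trivially true, otherwise build a real runtime check -- is a recurring pattern in these examples. A re-implementation sketch of that pattern (assert_real is an illustrative name, not a TensorFlow API; TF1 graph mode assumed):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

def assert_real(x, message=None, name="assert_real"):
  """Return a NoOp for real dtypes, a runtime check for complex ones."""
  with tf.name_scope(name, values=[x]):
    x = tf.convert_to_tensor(x, name="x")
    if x.dtype.base_dtype.is_floating:
      return tf.no_op()  # statically real: nothing to check at run time
    zero = tf.convert_to_tensor(0, dtype=x.dtype.base_dtype.real_dtype)
    return tf.assert_equal(zero, tf.math.imag(x), message=message)

real_check = assert_real(tf.constant([1.0, 2.0]))
print(real_check.type)  # "NoOp": float input needs no runtime check

complex_in = tf.placeholder(tf.complex64, shape=[2])
complex_check = assert_real(complex_in)  # builds a runtime assertion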
Example #8
Source File: linear_operator_util.py    From auto-alt-text-lambda-api with MIT License
def assert_zero_imag_part(x, message=None, name="assert_zero_imag_part"):
  """Returns `Op` that asserts Tensor `x` has no non-zero imaginary parts.

  Args:
    x:  Numeric `Tensor`, real, integer, or complex.
    message:  A string message to prepend to failure message.
    name:  A name to give this `Op`.

  Returns:
    An `Op` that asserts `x` has no entries with a non-zero imaginary part.
  """
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype.base_dtype

    if dtype.is_floating:
      return control_flow_ops.no_op()

    zero = ops.convert_to_tensor(0, dtype=dtype.real_dtype)
    return check_ops.assert_equal(zero, math_ops.imag(x), message=message) 
Example #9
Source File: adamW.py    From Conditional_Density_Estimation with MIT License
def _decay_weights_op(self, var):
    if not self._decay_var_list or var in self._decay_var_list:
      return var.assign_sub(self._weight_decay * var, self._use_locking)
    return control_flow_ops.no_op() 
Example #10
Source File: adam_weight_decay_utils.py    From BERT with Apache License 2.0
def _decay_weights_sparse_op(self, var, indices, scatter_add):
    if not self._decay_var_list or var in self._decay_var_list:
      return scatter_add(var, indices, -self._weight_decay * var,
                         self._use_locking)
    return control_flow_ops.no_op()

  # Here, we overwrite the apply functions that the base optimizer calls.
  # super().apply_x resolves to the apply_x function of the BaseOptimizer. 
Example #11
Source File: sync_replicas_optimizer.py    From deep_image_model with Apache License 2.0
def get_init_tokens_op(self, num_tokens=-1):
    """Returns the op to fill the sync_token_queue with the tokens.

    This is supposed to be executed in the beginning of the chief/sync thread
    so that even if the total_num_replicas is less than replicas_to_aggregate,
    the model can still proceed as the replicas can compute multiple steps per
    variable update. Make sure:
    `num_tokens >= replicas_to_aggregate - total_num_replicas`.

    Args:
      num_tokens: Number of tokens to add to the queue.

    Returns:
      An op for the chief/sync replica to fill the token queue.

    Raises:
      ValueError: If this is called before apply_gradients().
      ValueError: If num_tokens is smaller than replicas_to_aggregate -
        total_num_replicas.
    """
    if self._gradients_applied is False:
      raise ValueError(
          "get_init_tokens_op() should be called after apply_gradients().")

    tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
    if num_tokens == -1:
      num_tokens = self._replicas_to_aggregate
    elif num_tokens < tokens_needed:
      raise ValueError(
          "Too few tokens to finish the first step: %d (given) vs %d (needed)" %
          (num_tokens, tokens_needed))

    if num_tokens > 0:
      with ops.device(self._global_step.device), ops.name_scope(""):
        tokens = array_ops.fill([num_tokens],
                                self._global_step.ref())
        init_tokens = self._sync_token_queue.enqueue_many((tokens,))
    else:
      init_tokens = control_flow_ops.no_op(name="no_init_tokens")

    return init_tokens 
Example #12
Source File: tensor_forest.py    From deep-learning with MIT License
def tree_initialization(self):
    def _init_tree():
      return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op

    def _nothing():
      return control_flow_ops.no_op()

    return control_flow_ops.cond(
        math_ops.equal(array_ops.squeeze(array_ops.slice(
            self.variables.tree, [0, 0], [1, 1])), -2),
        _init_tree, _nothing) 
Example #13
Source File: adamW.py    From Conditional_Density_Estimation with MIT License
def _decay_weights_sparse_op(self, var, indices, scatter_add):
    if not self._decay_var_list or var in self._decay_var_list:
      return scatter_add(var, indices, -self._weight_decay * var,
                         self._use_locking)
    return control_flow_ops.no_op()

  # Here, we overwrite the apply functions that the base optimizer calls.
  # super().apply_x resolves to the apply_x function of the BaseOptimizer. 
Example #14
Source File: adam_weight_decay_exclude_utils.py    From BERT with Apache License 2.0
def _decay_weights_op(self, var):
    def apply_decay_fn(v, decay):
      return state_ops.assign_sub(v, decay * v, self._use_locking)
    def merge_fn(strategy, v, value):
      value = strategy.reduce(
          variable_scope.VariableAggregation.MEAN, value, v)
      return strategy.update(v, apply_decay_fn, value)

    if not self._decay_var_list or var in self._decay_var_list:
      replica_context = distribute_lib.get_tower_context()
      return replica_context.merge_call(merge_fn, var, self._weight_decay)
    return control_flow_ops.no_op() 
Example #15
Source File: optimizer.py    From QANet_dureader with MIT License
def _decay_weights_sparse_op(self, var, indices, scatter_add):
    if not self._decay_var_list or var in self._decay_var_list:
      return scatter_add(var, indices, -self._weight_decay * var,
                         self._use_locking)
    return control_flow_ops.no_op()

  # Here, we overwrite the apply functions that the base optimizer calls.
  # super().apply_x resolves to the apply_x function of the BaseOptimizer. 
Example #16
Source File: optimizer.py    From QANet_dureader with MIT License
def _decay_weights_op(self, var):
    if not self._decay_var_list or var in self._decay_var_list:
      return var.assign_sub(self._weight_decay * var, self._use_locking)
    return control_flow_ops.no_op() 
Example #17
Source File: ops_test.py    From deep_image_model with Apache License 2.0
def _get_test_attrs(self):
    x = control_flow_ops.no_op()
    try:
      a = compat.as_text(x.get_attr("_A"))
    except ValueError:
      a = None
    try:
      b = compat.as_text(x.get_attr("_B"))
    except ValueError:
      b = None
    print(a, b)
    return (a, b) 
Example #18
Source File: ops_test.py    From deep_image_model with Apache License 2.0
def testNoConvert(self):
    # Operation cannot be converted to Tensor.
    op = control_flow_ops.no_op()
    with self.assertRaisesRegexp(TypeError,
                                 r"Can't convert Operation '.*' to Tensor"):
      ops.convert_to_tensor(op) 
Example #19
Source File: sync_replicas_optimizer.py    From auto-alt-text-lambda-api with MIT License
def get_init_tokens_op(self, num_tokens=-1):
    """Returns the op to fill the sync_token_queue with the tokens.

    This is supposed to be executed in the beginning of the chief/sync thread
    so that even if the total_num_replicas is less than replicas_to_aggregate,
    the model can still proceed as the replicas can compute multiple steps per
    variable update. Make sure:
    `num_tokens >= replicas_to_aggregate - total_num_replicas`.

    Args:
      num_tokens: Number of tokens to add to the queue.

    Returns:
      An op for the chief/sync replica to fill the token queue.

    Raises:
      ValueError: If this is called before apply_gradients().
      ValueError: If num_tokens is smaller than replicas_to_aggregate -
        total_num_replicas.
    """
    if self._gradients_applied is False:
      raise ValueError(
          "get_init_tokens_op() should be called after apply_gradients().")

    tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
    if num_tokens == -1:
      num_tokens = self._replicas_to_aggregate
    elif num_tokens < tokens_needed:
      raise ValueError(
          "Too few tokens to finish the first step: %d (given) vs %d (needed)" %
          (num_tokens, tokens_needed))

    if num_tokens > 0:
      with ops.device(self._global_step.device), ops.name_scope(""):
        tokens = array_ops.fill([num_tokens], self._global_step)
        init_tokens = self._sync_token_queue.enqueue_many((tokens,))
    else:
      init_tokens = control_flow_ops.no_op(name="no_init_tokens")

    return init_tokens 
Example #20
Source File: adam_weight_decay_utils.py    From BERT with Apache License 2.0
def _decay_weights_op(self, var):
    if not self._decay_var_list or var in self._decay_var_list:
      return var.assign_sub(self._weight_decay * var, self._use_locking)
    return control_flow_ops.no_op() 
Example #21
Source File: adam_weight_decay_exclude_utils.py    From BERT with Apache License 2.0
def _decay_weights_sparse_op(self, var, indices, scatter_add):
    if not self._decay_var_list or var in self._decay_var_list:
      update = -self._weight_decay * array_ops.gather(var, indices)
      return scatter_add(var, indices, update, self._use_locking)
    return control_flow_ops.no_op()

  # Here, we overwrite the apply functions that the base optimizer calls.
  # super().apply_x resolves to the apply_x function of the BaseOptimizer. 
Example #22
Source File: data_flow_ops.py    From deep_image_model with Apache License 2.0
def initialize_all_tables(name="init_all_tables"):
  """Returns an Op that initializes all tables of the default graph.

  Args:
    name: Optional name for the initialization op.

  Returns:
    An Op that initializes all tables.  Note that if there are
    no tables, the returned Op is a NoOp.
  """
  initializers = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS)
  if initializers:
    return control_flow_ops.group(*initializers, name=name)
  return control_flow_ops.no_op(name=name) 
Example #23
Source File: linear_operator_identity.py    From auto-alt-text-lambda-api with MIT License
def _assert_self_adjoint(self):
    return control_flow_ops.no_op("assert_self_adjoint") 
Example #24
Source File: linear_operator_identity.py    From auto-alt-text-lambda-api with MIT License
def _assert_positive_definite(self):
    return control_flow_ops.no_op("assert_positive_definite") 
Example #25
Source File: lookup_ops.py    From auto-alt-text-lambda-api with MIT License
def init(self):
    """The table initialization op."""
    if self._table:
      return self._table.init
    with ops.name_scope(None, "init"):
      return control_flow_ops.no_op() 
Example #26
Source File: topn.py    From auto-alt-text-lambda-api with MIT License
def get_best(self, n):
    """Return the indices and values of the n highest scores in the TopN."""

    def refresh_shortlist():
      """Update the shortlist with the highest scores in id_to_score."""
      new_scores, new_ids = nn_ops.top_k(self.id_to_score, self.shortlist_size)
      smallest_new_score = math_ops.reduce_min(new_scores)
      new_length = math_ops.reduce_sum(
          math_ops.to_int32(math_ops.greater(new_scores, dtypes.float32.min)))
      u1 = self.sl_ids.assign(
          math_ops.to_int64(array_ops.concat([[new_length], new_ids], 0)))
      u2 = self.sl_scores.assign(
          array_ops.concat([[smallest_new_score], new_scores], 0))
      self.last_ops = [u1, u2]
      return control_flow_ops.group(u1, u2)

    # We only need to refresh the shortlist if n is greater than the
    # current shortlist size (which is stored in sl_ids[0]).
    with ops.control_dependencies(self.last_ops):
      cond_op = control_flow_ops.cond(n > self.sl_ids[0], refresh_shortlist,
                                      control_flow_ops.no_op)
      with ops.control_dependencies([cond_op]):
        topk_values, topk_indices = nn_ops.top_k(
            self.sl_scores,
            math_ops.minimum(n, math_ops.to_int32(self.sl_ids[0])))
        # topk_indices are indices into the shortlist; we want to return
        # the indices into id_to_score.
        gathered_indices = array_ops.gather(self.sl_ids, topk_indices)
        return gathered_indices, topk_values 
Example #27
Source File: tensor_forest.py    From auto-alt-text-lambda-api with MIT License
def tree_initialization(self):
    def _init_tree():
      return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op

    def _nothing():
      return control_flow_ops.no_op()

    return control_flow_ops.cond(
        math_ops.equal(
            array_ops.squeeze(
                array_ops.strided_slice(self.variables.tree, [0, 0], [1, 1])),
            -2), _init_tree, _nothing) 
Example #28
Source File: egdd.py    From lingvo with Apache License 2.0
def _resource_apply_sparse(self, grad_values, var, grad_indices):
    return control_flow_ops.no_op() 
Example #29
Source File: input.py    From auto-alt-text-lambda-api with MIT License
def _enqueue(queue, tensor_list, threads, enqueue_many, keep_input):
  """Enqueue `tensor_list` in `queue`."""
  if enqueue_many:
    enqueue_fn = queue.enqueue_many
  else:
    enqueue_fn = queue.enqueue
  if keep_input is None:
    enqueue_ops = [enqueue_fn(tensor_list)] * threads
  else:
    enqueue_ops = [control_flow_ops.cond(
        keep_input,
        lambda: enqueue_fn(tensor_list),
        control_flow_ops.no_op)] * threads
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops)) 
Example #30
Source File: input.py    From auto-alt-text-lambda-api with MIT License
def _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input):
  """Enqueue `tensor_list_list` in `queue`."""
  if enqueue_many:
    enqueue_fn = queue.enqueue_many
  else:
    enqueue_fn = queue.enqueue
  if keep_input is None:
    enqueue_ops = [enqueue_fn(tl) for tl in tensor_list_list]
  else:
    enqueue_ops = [control_flow_ops.cond(
        keep_input,
        lambda: enqueue_fn(tl),
        control_flow_ops.no_op) for tl in tensor_list_list]
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))