Python tensorflow.timestamp() Examples

The following are 5 code examples of tensorflow.timestamp(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
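For orientation before the examples: tf.timestamp() returns a scalar float64 tensor holding seconds since the Unix epoch, and it is evaluated each time the op actually runs, not when the graph is built. A minimal sketch, assuming TensorFlow 2.x eager execution:

import tensorflow as tf

# Each call to tf.timestamp() is evaluated at run time, so the
# difference between two calls measures elapsed wall-clock seconds.
start = tf.timestamp()
_ = tf.linalg.matmul(tf.random.uniform([512, 512]), tf.random.uniform([512, 512]))
elapsed = tf.timestamp() - start
print(float(elapsed))  # elapsed seconds as a Python float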
Example #1
Source File: train_policy.py    From lm-human-preferences with MIT License
def tf_times():
    """Returns (time since start, time since last) as a tensorflow op."""
    # Keep track of start and last times
    with tf.init_scope():
        init = tf.timestamp()

    def make(name):
        return tf.Variable(init, name=name, trainable=False, use_resource=True)

    start = make('start_time')
    last = make('last_time')

    # Get new time and update last
    now = tf.timestamp()
    prev = last.read_value()
    with tf.control_dependencies([prev]):
        with tf.control_dependencies([last.assign(now)]):
            return tf.cast(now - start.read_value(), tf.float32), tf.cast(now - prev, tf.float32) 
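The snippet builds the timing ops once; each run of the returned tensors then yields fresh values, since tf.timestamp() executes again on every run. A hypothetical driver, assuming TF1-style graph mode (this setup is not part of train_policy.py):

import tensorflow as tf
tf.compat.v1.disable_eager_execution()  # the original file targets graph mode

# Hypothetical usage of tf_times() as defined above:
total, delta = tf_times()
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    for _ in range(3):
        print('since start / since last run:', sess.run([total, delta]))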
Example #2
Source File: training_time.py    From nasbench with Apache License 2.0
def begin(self):
    with tf.name_scope(_SCOPE_NAME):
      # See _get_or_create_timing_vars for the definitions of these variables.
      timing_vars = _get_or_create_timing_vars()

      # An op to produce a tensor with the latest timestamp.
      self._end_op = _seconds_to_internal_time(tf.timestamp(name='end'))

      # An op to update the timing_vars.start_timestamp variable.
      self._start_op = tf.cond(
          pred=tf.equal(timing_vars.steps, 0),
          true_fn=lambda: timing_vars.start_timestamp.assign(self._end_op),
          false_fn=lambda: timing_vars.start_timestamp)

      # An op to update the step.
      with tf.control_dependencies([self._start_op]):
        self._step_op = timing_vars.steps.assign_add(1)

      # An op to compute the timing_vars.total_time variable.
      self._total_op = timing_vars.total_time.assign(
          timing_vars.previous_time +
          _internal_time_to_seconds(self._end_op - self._start_op)) 
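The helpers _get_or_create_timing_vars, _seconds_to_internal_time, and _internal_time_to_seconds are defined elsewhere in training_time.py and are not shown here. Purely as a hedged sketch, assuming the internal representation is integer microseconds (an assumption, not necessarily the repo's actual choice), the two converters might look like:

# Assumed representation: int64 microseconds. The real helpers in
# training_time.py may use a different precision or dtype.
_INTERNAL_TIME_SCALE = 10**6  # microseconds per second (assumed)

def _seconds_to_internal_time(seconds):
    return tf.cast(seconds * _INTERNAL_TIME_SCALE, tf.int64)

def _internal_time_to_seconds(internal_time):
    return tf.cast(internal_time, tf.float64) / _INTERNAL_TIME_SCALE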
Example #3
Source File: tf_mlperf_log.py    From models with Apache License 2.0
def log_deferred(op, log_id, every_n=1, first_n=None):
  """Helper method inserting compliance logging ops.

  Note: This helper is not guaranteed to be efficient, as it will insert ops
        and control dependencies. If this proves to be a bottleneck, submitters
        may wish to consider other methods such as extracting values from an
        .events file.

  Args:
    op: A tf op to be printed.
    log_id: a uuid provided by the logger in mlperf_log.py
    every_n: With what frequency the input op should be logged; only every
             every_n-th evaluation is printed.
    first_n: Only log this many values. This arg does not interact with every_n.
             The first_n refers to the first n that would have been logged.
  """

  prefix = ":::MLPv0.5.0 [{}]".format(log_id)
  if first_n is not None and first_n == 1:
    return tf.compat.v1.Print(op, [tf.timestamp(), op], message=prefix, first_n=1)

  counter = tf.Variable(tf.zeros(shape=(), dtype=tf.int32) - 1,
                        aggregation=tf.VariableAggregation.MEAN)
  increment = tf.compat.v1.assign_add(counter, 1, use_locking=True)
  return tf.cond(
      pred=tf.equal(tf.math.mod(increment, every_n), 0),
      true_fn=lambda: tf.compat.v1.Print(op, [tf.timestamp(), op],
                                         message=prefix, first_n=first_n),
      false_fn=lambda: op)
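A hypothetical call site (the names are illustrative, not from the MLPerf code): wrap a tensor so that evaluating the wrapped version prints a timestamped compliance line on every 100th execution.

# Illustrative usage only:
loss = tf.reduce_mean(tf.random.uniform([8]))           # stand-in for a real loss op
logged_loss = log_deferred(loss, log_id='0123abcd', every_n=100)
# Use logged_loss wherever loss was used; the Print op and the
# counter increment then run each time the value is evaluated.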
Example #4
Source File: tf_mlperf_log.py    From training with Apache License 2.0
def log_deferred(op, log_id, every_n=1, first_n=None):
  """Helper method inserting compliance logging ops.

  Note: This helper is not guaranteed to be efficient, as it will insert ops
        and control dependencies. If this proves to be a bottleneck, submitters
        may wish to consider other methods such as extracting values from an
        .events file.

  Args:
    op: A tf op to be printed.
    log_id: a uuid provided by the logger in mlperf_log.py
    every_n: With what frequency the input op should be logged; only every
             every_n-th evaluation is printed.
    first_n: Only log this many values. This arg does not interact with every_n.
             The first_n refers to the first n that would have been logged.
  """

  prefix = ":::MLPv0.5.0 [{}]".format(log_id)
  if first_n is not None and first_n == 1:
    return tf.Print(op, [tf.timestamp(), op], message=prefix, first_n=1)

  counter = tf.Variable(tf.zeros(shape=(), dtype=tf.int32) - 1,
                        aggregation=tf.VariableAggregation.MEAN)
  increment = tf.assign_add(counter, 1, use_locking=True)
  return tf.cond(
      tf.equal(tf.mod(increment, every_n), 0),
      lambda: tf.Print(op, [tf.timestamp(), op],
                       message=prefix, first_n=first_n),
      lambda: op)
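This variant is the TF1 spelling of Example #3: tf.Print, tf.assign_add, and tf.mod instead of the tf.compat.v1 and tf.math forms. In eager TensorFlow 2.x the deferred-printing machinery is unnecessary; a rough equivalent, offered only as a sketch, is:

import tensorflow as tf

# Rough eager-mode analogue: print a timestamped value immediately.
def log_now(value, log_id):
    tf.print(':::MLPv0.5.0 [' + log_id + ']', tf.timestamp(), value)
    return value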
Example #5
Source File: test_utils.py    From federated with Apache License 2.0
def get_iterative_process_for_example_with_unused_tf_computation_arg():
  """Returns an iterative process with a @tf.function with an unused arg."""
  server_state_type = computation_types.NamedTupleType([('num_clients',
                                                         tf.int32)])

  def _bind_tf_function(unused_input, tf_func):
    tf_wrapper = tf.function(lambda _: tf_func())
    input_federated_type = unused_input.type_signature
    wrapper = computations.tf_computation(tf_wrapper,
                                          input_federated_type.member)
    return intrinsics.federated_map(wrapper, unused_input)

  def count_clients_federated(client_data):

    @tf.function
    def client_ones_fn():
      return tf.ones(shape=[], dtype=tf.int32)

    client_ones = _bind_tf_function(client_data, client_ones_fn)
    return intrinsics.federated_sum(client_ones)

  @computations.federated_computation
  def init_fn():
    return intrinsics.federated_value(
        collections.OrderedDict(num_clients=0), placements.SERVER)

  @computations.federated_computation([
      computation_types.FederatedType(server_state_type, placements.SERVER),
      computation_types.FederatedType(
          computation_types.SequenceType(tf.string), placements.CLIENTS)
  ])
  def next_fn(server_state, client_val):
    """`next` function for `tff.templates.IterativeProcess`."""
    server_update = intrinsics.federated_zip(
        collections.OrderedDict(
            num_clients=count_clients_federated(client_val)))

    server_output = intrinsics.federated_sum(
        _bind_tf_function(
            intrinsics.federated_broadcast(server_state), tf.timestamp))

    return server_update, server_output

  return iterative_process.IterativeProcess(init_fn, next_fn)
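tf.timestamp takes no arguments, while federated_map applies a one-argument function to each placed value; _bind_tf_function bridges the two by wrapping the zero-argument op in a tf.function that ignores its input. The same adapter pattern, stripped of the TFF machinery, looks like this (a standalone sketch, not part of the test file):

import tensorflow as tf

# Adapt a zero-argument op into a one-argument function whose
# input is ignored, mirroring _bind_tf_function's wrapper.
wrap = tf.function(lambda _: tf.timestamp())
print(wrap(tf.constant('ignored')))  # scalar float64 timestamp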