Python tensorflow.assert_less() Examples

The following are 17 code examples of tensorflow.assert_less(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.
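In TensorFlow 1.x, tf.assert_less(x, y) returns an Op that fails at run time with an InvalidArgumentError if any element of x is not strictly less than the corresponding element of y (the shapes must be broadcastable). Because the assertion is a graph op rather than an immediate Python check, it only runs when something depends on it, which is why the examples below chain it through tf.control_dependencies. A minimal sketch of the pattern, assuming TensorFlow 1.x (in 2.x the equivalent check is tf.debugging.assert_less):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
limit = tf.constant(4.0)

# The assertion is an op; it only executes when a downstream tensor depends on it.
assert_op = tf.assert_less(x, limit, message='x must be strictly below the limit')
with tf.control_dependencies([assert_op]):
  # tf.identity forces the assertion to run before y is produced.
  y = tf.identity(x)

with tf.Session() as sess:
  print(sess.run(y))  # [1. 2. 3.]; raises InvalidArgumentError if the check fails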
Example #1
Source File: memory.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def replace(self, episodes, length, rows=None):
    """Replace full episodes.

    Args:
      episodes: Tuple of transition quantities with batch and time dimensions.
      length: Batch of sequence lengths.
      rows: Episodes to replace, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less_equal(
          length, self._max_length, message='max length exceeded')
    replace_ops = []
    with tf.control_dependencies([assert_max_length]):
      for buffer_, elements in zip(self._buffers, episodes):
        replace_op = tf.scatter_update(buffer_, rows, elements)
        replace_ops.append(replace_op)
    with tf.control_dependencies(replace_ops):
      return tf.scatter_update(self._length, rows, length) 
Example #2
Source File: graph_search_test.py    From kfac with Apache License 2.0
def sparse_softmax_cross_entropy(labels,
                                 logits,
                                 num_classes,
                                 weights=1.0,
                                 label_smoothing=0.1):
  """Softmax cross entropy with example weights, label smoothing."""
  assert_valid_label = [
      tf.assert_greater_equal(labels, tf.cast(0, dtype=tf.int64)),
      tf.assert_less(labels, tf.cast(num_classes, dtype=tf.int64))
  ]
  with tf.control_dependencies(assert_valid_label):
    labels = tf.reshape(labels, [-1])
    dense_labels = tf.one_hot(labels, num_classes)
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=dense_labels,
        logits=logits,
        weights=weights,
        label_smoothing=label_smoothing)
  return loss 
Example #3
Source File: memory.py    From batch-ppo with Apache License 2.0
def replace(self, episodes, length, rows=None):
    """Replace full episodes.

    Args:
      episodes: Tuple of transition quantities with batch and time dimensions.
      length: Batch of sequence lengths.
      rows: Episodes to replace, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less_equal(
          length, self._max_length, message='max length exceeded')
    with tf.control_dependencies([assert_max_length]):
      replace_ops = tools.nested.map(
          lambda var, val: tf.scatter_update(var, rows, val),
          self._buffers, episodes, flatten=True)
    with tf.control_dependencies(replace_ops):
      return tf.scatter_update(self._length, rows, length) 
Example #4
Source File: util.py    From mac-graph with The Unlicense
def tf_assert_almost_equal(x, y, delta=0.001, **kwargs):
	return tf.assert_less(tf.abs(x-y), delta, **kwargs) 
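A minimal usage sketch for this helper; the probs tensor is a hypothetical batch of probability distributions, and message is forwarded to tf.assert_less through **kwargs:

check = tf_assert_almost_equal(tf.reduce_sum(probs, axis=-1), 1.0,
                               message='rows must sum to one')
with tf.control_dependencies([check]):
  # probs is only usable downstream once the near-equality check has passed.
  probs = tf.identity(probs)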
Example #5
Source File: memory.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
      transitions: Tuple of transition quantities with batch dimension.
      rows: Episodes to append to, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity,
        message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less(
          tf.gather(self._length, rows), self._max_length,
          message='max length exceeded')
    append_ops = []
    with tf.control_dependencies([assert_max_length]):
      for buffer_, elements in zip(self._buffers, transitions):
        timestep = tf.gather(self._length, rows)
        indices = tf.stack([rows, timestep], 1)
        append_ops.append(tf.scatter_nd_update(buffer_, indices, elements))
    with tf.control_dependencies(append_ops):
      episode_mask = tf.reduce_sum(tf.one_hot(
          rows, self._capacity, dtype=tf.int32), 0)
      return self._length.assign_add(episode_mask) 
Example #6
Source File: multi_tower_model.py    From revnet-public with MIT License
def __init__(self,
               config,
               tower_cls,
               is_training=True,
               inference_only=False,
               num_replica=2,
               inp=None,
               label=None,
               apply_grad=True,
               batch_size=None):
    self._config = config
    self._is_training = is_training
    self._inference_only = inference_only
    self._num_replica = num_replica
    self._apply_grad = apply_grad
    self._tower_cls = tower_cls
    self._batch_size = batch_size

    # Input.
    if inp is None:
      x = tf.placeholder(
          self.dtype,
          [batch_size, config.height, config.width, config.num_channel])
    else:
      x = inp
    if label is None:
      y = tf.placeholder(tf.int32, [batch_size])
    else:
      y = label
    self._bn_update_ops = None
    self._input = x
    # Make sure that the labels are in reasonable range.
    # with tf.control_dependencies(
    #     [tf.assert_greater_equal(y, 0), tf.assert_less(y, config.num_classes)]):
    #   self._label = tf.identity(y)
    self._label = y
    self._towers = []
    self._build_towers() 
Example #7
Source File: embedding_utils.py    From models with Apache License 2.0
def create_initial_softmax_from_labels(last_frame_labels, reference_labels,
                                       decoder_output_stride, reduce_labels):
  """Creates initial softmax predictions from last frame labels.

  Args:
    last_frame_labels: last frame labels of shape [1, height, width, 1].
    reference_labels: reference frame labels of shape [1, height, width, 1].
    decoder_output_stride: Integer, the stride of the decoder. Can be None, in
      this case it's assumed that the last_frame_labels and reference_labels
      are already scaled to the decoder output resolution.
    reduce_labels: Boolean, whether to reduce the depth of the softmax one_hot
      encoding to the actual number of labels present in the reference frame
      (otherwise the depth will be the highest label index + 1).

  Returns:
    init_softmax: the initial softmax predictions.
  """
  if decoder_output_stride is None:
    labels_output_size = last_frame_labels
    reference_labels_output_size = reference_labels
  else:
    h = tf.shape(last_frame_labels)[1]
    w = tf.shape(last_frame_labels)[2]
    h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
    w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
    labels_output_size = tf.image.resize_nearest_neighbor(
        last_frame_labels, [h_sub, w_sub], align_corners=True)
    reference_labels_output_size = tf.image.resize_nearest_neighbor(
        reference_labels, [h_sub, w_sub], align_corners=True)
  if reduce_labels:
    unique_labels, _ = tf.unique(tf.reshape(reference_labels_output_size, [-1]))
    depth = tf.size(unique_labels)
  else:
    depth = tf.reduce_max(reference_labels_output_size) + 1
  one_hot_assertion = tf.assert_less(tf.reduce_max(labels_output_size), depth)
  with tf.control_dependencies([one_hot_assertion]):
    init_softmax = tf.one_hot(tf.squeeze(labels_output_size,
                                         axis=-1),
                              depth=depth,
                              dtype=tf.float32)
  return init_softmax 
Example #8
Source File: batch_size_limited_classifier.py    From model-analysis with Apache License 2.0
def model_fn(features, labels, mode, config):
  """Model function for custom estimator."""
  del labels
  del config
  classes = features['classes']
  scores = features['scores']

  with tf.control_dependencies(
      [tf.assert_less(tf.shape(classes)[0], tf.constant(2))]):
    scores = tf.identity(scores)

  predictions = {
      prediction_keys.PredictionKeys.LOGITS: scores,
      prediction_keys.PredictionKeys.PROBABILITIES: scores,
      prediction_keys.PredictionKeys.PREDICTIONS: scores,
      prediction_keys.PredictionKeys.CLASSES: classes,
  }

  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        export_outputs={
            tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                tf.estimator.export.ClassificationOutput(
                    scores=scores, classes=classes),
        })

  loss = tf.constant(0.0)
  train_op = tf.compat.v1.assign_add(tf.compat.v1.train.get_global_step(), 1)
  eval_metric_ops = {
      metric_keys.MetricKeys.LOSS_MEAN: tf.compat.v1.metrics.mean(loss),
  }

  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      predictions=predictions,
      eval_metric_ops=eval_metric_ops) 
Example #9
Source File: modules.py    From tacotron2 with BSD 3-Clause "New" or "Revised" License
def call(self, inputs, **kwargs):
        with tf.control_dependencies([tf.assert_greater_equal(inputs, self.index_offset),
                                      tf.assert_less(inputs, self.index_offset + self._num_symbols)]):
            return tf.nn.embedding_lookup(self._embedding, inputs - self.index_offset) 
Example #10
Source File: ops.py    From in-silico-labeling with Apache License 2.0
def _distribution_statistics(distribution: tf.Tensor) -> tf.Tensor:
  """Implementation of `distribution_statisticsy`."""
  _, num_classes = distribution.shape.as_list()
  assert num_classes is not None

  # Each batch element is a probability distribution.
  max_discrepancy = tf.reduce_max(
      tf.abs(tf.reduce_sum(distribution, axis=1) - 1.0))
  with tf.control_dependencies([tf.assert_less(max_discrepancy, 0.0001)]):
    values = tf.reshape(tf.linspace(0.0, 1.0, num_classes), [1, num_classes])

    mode = tf.to_float(tf.argmax(distribution,
                                 axis=1)) / tf.constant(num_classes - 1.0)
    median = tf.reduce_sum(
        tf.to_float(tf.cumsum(distribution, axis=1) < 0.5),
        axis=1) / tf.constant(num_classes - 1.0)
    mean = tf.reduce_sum(distribution * values, axis=1)
    standard_deviation = tf.sqrt(
        tf.reduce_sum(
            ((values - tf.reshape(mean, [-1, 1]))**2) * distribution, axis=1))
    probability_nonzero = 1.0 - distribution[:, 0]
    entropy = tf.reduce_sum(
        -(distribution * tf.log(distribution + 0.0000001)), axis=1) / tf.log(
            float(num_classes))

    statistics = tf.stack(
        [mode, median, mean, standard_deviation, probability_nonzero, entropy],
        axis=1)

    return statistics 
Example #11
Source File: memory.py    From batch-ppo with Apache License 2.0
def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
      transitions: Tuple of transition quantities with batch dimension.
      rows: Episodes to append to, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity,
        message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less(
          tf.gather(self._length, rows), self._max_length,
          message='max length exceeded')
    with tf.control_dependencies([assert_max_length]):
      timestep = tf.gather(self._length, rows)
      indices = tf.stack([rows, timestep], 1)
      append_ops = tools.nested.map(
          lambda var, val: tf.scatter_nd_update(var, indices, val),
          self._buffers, transitions, flatten=True)
    with tf.control_dependencies(append_ops):
      episode_mask = tf.reduce_sum(tf.one_hot(
          rows, self._capacity, dtype=tf.int32), 0)
      return self._length.assign_add(episode_mask) 
Example #12
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_raises_when_less_but_non_broadcastable_shapes(self):
    with self.test_session():
      small = tf.constant([1, 1, 1], name="small")
      big = tf.constant([3, 2], name="big")
      with self.assertRaisesRegexp(ValueError, "must be"):
        with tf.control_dependencies([tf.assert_less(small, big)]):
          out = tf.identity(small)
        out.eval() 
Example #13
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
    with self.test_session():
      small = tf.constant([1], name="small")
      big = tf.constant([3, 2], name="big")
      with tf.control_dependencies([tf.assert_less(small, big)]):
        out = tf.identity(small)
      out.eval() 
Example #14
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_doesnt_raise_when_less(self):
    with self.test_session():
      small = tf.constant([3, 1], name="small")
      big = tf.constant([4, 2], name="big")
      with tf.control_dependencies([tf.assert_less(small, big)]):
        out = tf.identity(small)
      out.eval() 
Example #15
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_raises_when_greater(self):
    with self.test_session():
      small = tf.constant([1, 2], name="small")
      big = tf.constant([3, 4], name="big")
      with tf.control_dependencies([tf.assert_less(big, small)]):
        out = tf.identity(small)
      with self.assertRaisesOpError("big.*small"):
        out.eval() 
Example #16
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_raises_when_equal(self):
    with self.test_session():
      small = tf.constant([1, 2], name="small")
      with tf.control_dependencies(
          [tf.assert_less(small, small, message="fail")]):
        out = tf.identity(small)
      with self.assertRaisesOpError("fail.*small.*small"):
        out.eval() 
Example #17
Source File: util.py    From shortest-path with The Unlicense
def tf_assert_almost_equal(x, y, delta=0.001, **kwargs):
	return tf.assert_less(tf.abs(x-y), delta, **kwargs) 