Python tensorflow.compat.v1.round() Examples

The following are 12 code examples of tensorflow.compat.v1.round(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v1, or try the search function.
Example #1
Source File: base.py    From tensor2tensor with Apache License 2.0
def pixels_from_softmax(frame_logits, pure_sampling=False,
                        temperature=1.0, gumbel_noise_factor=0.2):
  """Given frame_logits from a per-pixel softmax, generate colors."""
  # If we're purely sampling, just sample each pixel.
  if pure_sampling or temperature == 0.0:
    return common_layers.sample_with_temperature(frame_logits, temperature)

  # Gumbel-sample from the pixel softmax and average over pixel values.
  pixel_range = tf.to_float(tf.range(256))
  for _ in range(len(frame_logits.get_shape().as_list()) - 1):
    pixel_range = tf.expand_dims(pixel_range, axis=0)

  frame_logits = tf.nn.log_softmax(frame_logits)
  gumbel_samples = discretization.gumbel_sample(
      common_layers.shape_list(frame_logits)) * gumbel_noise_factor

  frame = tf.nn.softmax((frame_logits + gumbel_samples) / temperature, axis=-1)
  result = tf.reduce_sum(frame * pixel_range, axis=-1)
  # Round on the forward pass, not on the backward one.
  return result + tf.stop_gradient(tf.round(result) - result) 
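The final line above is worth isolating: it is the straight-through estimator for rounding, matching the comment "Round on the forward pass, not on the backward one." A minimal sketch of just that trick, under the same tf import as the example:

import tensorflow.compat.v1 as tf

def straight_through_round(x):
  # Forward pass returns tf.round(x); the gradient treats the op as the
  # identity, because tf.stop_gradient blocks the (round(x) - x) term.
  return x + tf.stop_gradient(tf.round(x) - x)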
Example #2
Source File: classification_model.py    From tensor2robot with Apache License 2.0
def model_eval_fn(self,
                    features,
                    labels,
                    inference_outputs,
                    train_loss,
                    train_outputs,
                    mode,
                    config=None,
                    params=None):
    """See base class."""
    eval_mse = tf.metrics.mean_squared_error(
        labels=labels.classes,
        predictions=inference_outputs['a_predicted'],
        name='eval_mse')

    predictions_rounded = tf.round(inference_outputs['a_predicted'])

    eval_precision = tf.metrics.precision(
        labels=labels.classes,
        predictions=predictions_rounded,
        name='eval_precision')

    eval_accuracy = tf.metrics.accuracy(
        labels=labels.classes,
        predictions=predictions_rounded,
        name='eval_accuracy')

    eval_recall = tf.metrics.recall(
        labels=labels.classes,
        predictions=predictions_rounded,
        name='eval_recall')

    metric_fn = {
        'eval_mse': eval_mse,
        'eval_precision': eval_precision,
        'eval_accuracy': eval_accuracy,
        'eval_recall': eval_recall
    }

    return metric_fn 
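Here tf.round turns the model outputs into hard 0/1 labels with an implicit 0.5 threshold (assuming, as the precision/recall metrics suggest, that a_predicted holds probabilities in [0, 1]). Note that tf.round rounds halfway values to the nearest even integer, so exactly 0.5 maps to 0. A small illustrative sketch with made-up values:

import tensorflow.compat.v1 as tf

probs = tf.constant([0.1, 0.49, 0.5, 0.51, 0.9])
hard_labels = tf.round(probs)  # evaluates to [0., 0., 0., 1., 1.]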
Example #3
Source File: preprocessors.py    From text-to-text-transfer-transformer with Apache License 2.0
def regular_noise_mask(length,
                       noise_density,
                       min_span_length=1,
                       max_span_length=5):
  """Noise mask consisting of equally spaced spans of equal length.

  The span length and the offset are chosen randomly per-example.
  The beginning and end of the sequence may be part of shorter spans of noise.
  For example, if noise_density=0.25 and a span length of 2 is chosen,
  then the output might be:

  [T F F F F F F T T F F F F F F T T F F F F F F T T F F]

  Args:
    length: an int32 scalar
    noise_density: a float - approximate density of output mask
    min_span_length: an integer
    max_span_length: an integer

  Returns:
    a boolean tensor with shape [length]
  """
  span_length = tf.random.uniform([],
                                  minval=min_span_length,
                                  maxval=max_span_length + 1,
                                  dtype=tf.int32)
  period = tf.cast(
      tf.round(tf.cast(span_length, tf.float32) / noise_density), tf.int32)
  offset = tf.random.uniform([], maxval=period, dtype=tf.int32)
  return (tf.range(length, dtype=tf.int32) + offset) % period < span_length 
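The arithmetic is easy to check by hand. A NumPy re-implementation with fixed (illustrative) draws of span_length and offset reproduces the mask from the docstring:

import numpy as np

length, span_length, noise_density, offset = 27, 2, 0.25, 1
period = int(round(span_length / noise_density))  # 2 / 0.25 -> period of 8
mask = (np.arange(length) + offset) % period < span_length
# -> [T F F F F F F T T F F F F F F T T F F F F F F T T F F]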
Example #4
Source File: bls2017.py    From compression with Apache License 2.0
def quantize_image(image):
  image = tf.round(image * 255)
  image = tf.saturate_cast(image, tf.uint8)
  return image 
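Because tf.saturate_cast clamps before casting, any rounded values outside [0, 255] are pinned to the uint8 range instead of wrapping around. A quick sketch with made-up input values (note tf.round rounds the halfway value 127.5 to the even integer 128):

import tensorflow.compat.v1 as tf

image = tf.constant([[-0.1, 0.5, 1.2]])  # floats nominally in [0, 1]
quantized = quantize_image(image)        # evaluates to [[0, 128, 255]]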
Example #5
Source File: bmshj2018.py    From compression with Apache License 2.0
def quantize_image(image):
  image = tf.round(image * 255)
  image = tf.saturate_cast(image, tf.uint8)
  return image 
Example #6
Source File: tfci.py    From compression with Apache License 2.0
def write_png(filename, image):
  """Creates graph to write a PNG image file."""
  image = tf.squeeze(image, 0)
  if image.dtype.is_floating:
    image = tf.round(image)
  if image.dtype != tf.uint8:
    image = tf.saturate_cast(image, tf.uint8)
  string = tf.image.encode_png(image)
  return tf.io.write_file(filename, string) 
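A hypothetical end-to-end use of write_png in TF1 graph mode; the filename and random input below are made up for this sketch:

import tensorflow.compat.v1 as tf

image = tf.random_uniform([1, 64, 64, 3], maxval=255.0)  # one float image
write_op = write_png('/tmp/example.png', image)
with tf.Session() as sess:
  sess.run(write_op)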
Example #7
Source File: model.py    From ocrd_anybaseocr with Apache License 2.0
def denorm_boxes_graph(boxes, shape):
    """Converts boxes from normalized coordinates to pixel coordinates.
    boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [..., (y1, x1, y2, x2)] in pixel coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32) 
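A worked example makes the scale and shift concrete. For a 100x200 image, scale is [99, 199, 99, 199], and the shift of [0, 0, 1, 1] makes (y2, x2) exclusive, so the full-image box in normalized coordinates maps to [0, 0, 100, 200]. NumPy is used here only to show the arithmetic:

import numpy as np

boxes = np.array([0., 0., 1., 1.])               # full image, normalized
scale = np.array([100., 200., 100., 200.]) - 1.0  # [99, 199, 99, 199]
shift = np.array([0., 0., 1., 1.])
print(np.round(boxes * scale + shift).astype(np.int32))  # [  0   0 100 200]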
Example #8
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_forward_round():
    """test Round"""
    np_data = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(tf.float32, (5, 7), name="in_data")
        tf.round(in_data, name="round")
        compare_tf_with_tvm([np_data], ['in_data:0'], 'round:0') 
Example #9
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def sample_from_discretized_mix_logistic(pred, seed=None):
  """Sampling from a discretized mixture of logistics.

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    seed: Random seed.

  Returns:
    A tensor of shape [batch, height, width, 3] with real intensities scaled
    between -1 and 1.
  """

  logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
      pred)

  # Sample mixture indicator given logits using the gumbel max trick.
  num_mixtures = shape_list(logits)[-1]
  gumbel_noise = -tf.log(-tf.log(
      tf.random_uniform(
          tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))
  sel = tf.one_hot(
      tf.argmax(logits + gumbel_noise, -1),
      depth=num_mixtures,
      dtype=tf.float32)

  # Select mixture component's parameters.
  sel = tf.expand_dims(sel, -1)
  locs = tf.reduce_sum(locs * sel, 3)
  log_scales = tf.reduce_sum(log_scales * sel, 3)
  coeffs = tf.reduce_sum(coeffs * sel, 3)

  # Sample from 3-D logistic & clip to interval. Note we don't round to the
  # nearest 8-bit value when sampling.
  uniform_noise = tf.random_uniform(
      tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)
  logistic_noise = tf.log(uniform_noise) - tf.log1p(-uniform_noise)
  x = locs + tf.exp(log_scales) * logistic_noise
  x0 = x[..., 0]
  x1 = x[..., 1] + coeffs[..., 0] * x0
  x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1
  x = tf.stack([x0, x1, x2], axis=-1)
  x = tf.clip_by_value(x, -1., 1.)
  return x 
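The Gumbel-max trick used above to pick the mixture component also works in isolation: adding independent Gumbel noise to logits and taking the argmax draws an exact sample from the categorical distribution softmax(logits). A minimal sketch of just that step (names are illustrative):

import tensorflow.compat.v1 as tf

def gumbel_max_sample(logits, seed=None):
  u = tf.random_uniform(tf.shape(logits), minval=1e-5, maxval=1. - 1e-5,
                        seed=seed)
  gumbel_noise = -tf.log(-tf.log(u))
  return tf.argmax(logits + gumbel_noise, axis=-1)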
Example #10
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def targeted_dropout(inputs,
                     k,
                     keep_prob,
                     targeting_fn,
                     is_training,
                     do_prune=False):
  """Applies targeted dropout.

  Applies dropout at a rate of `1 - keep_prob` to only those elements of
  `inputs` marked by `targeting_fn`. See below and paper for more detail:

  "Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
    Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.

  Args:
    inputs: Tensor, inputs to apply targeted dropout to.
    k: Scalar Tensor or python scalar, sets the number of elements to target in
      `inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
      second argument of `targeting_fn`.
    keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
    targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
      boolean mask the same shape as `inputs` where True marks an element to
      be dropped and False marks one to be kept.
    is_training: bool, indicates whether currently training.
    do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
      elements of `inputs` expected to be dropped each forwards pass.

  Returns:
    Tensor, same shape and dtype as `inputs`.
  """
  if not is_training and do_prune:
    k = tf.round(to_float(k) * to_float(1. - keep_prob))

  mask = targeting_fn(inputs, k)
  mask = tf.cast(mask, inputs.dtype)

  if is_training:
    return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask
  elif do_prune:
    return inputs * (1 - mask)
  else:
    return inputs 
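The function leaves targeting_fn up to the caller. One hypothetical choice, consistent with the docstring's contract (True marks elements to drop) and with the paper's focus on low-magnitude weights, targets the k smallest-magnitude entries along the last axis:

import tensorflow.compat.v1 as tf

def smallest_magnitude_targeting_fn(inputs, k):
  # k is assumed to be a Python int or an int32 scalar tensor here.
  norm = tf.abs(inputs)
  # The k-th smallest magnitude is the negative of the k-th largest of -|x|.
  kth_smallest = -tf.nn.top_k(-norm, k=k).values[..., -1:]
  return norm <= kth_smallest  # True for the k smallest-magnitude entries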
Example #11
Source File: preprocessors.py    From text-to-text-transfer-transformer with Apache License 2.0
def stsb(dataset):
  """Convert STSB examples to text2text format.

  STSB maps two sentences to a floating point number between 0 and 5
  representing their semantic similarity. Since we are treating all tasks as
  text-to-text tasks we need to convert this floating point number to a string.
  The vast majority of the similarity score labels in STSB are in the set
  [0, 0.2, 0.4, ..., 4.8, 5.0]. So, we first round the number to the closest
  entry in this set, and then we convert the result to a string (literally e.g.
  "3.4"). This converts STSB roughly into a 26-class classification dataset.
  This function uses the feature names from the dataset to unpack examples into
  a format amenable for a text2text problem.

  For example, a typical example from STSB might look like
  {
      "sentence1": "Three more US soldiers killed in Afghanistan",
      "sentence2": "NATO Soldier Killed in Afghanistan",
      "label": 1.8,
  }

  This example would be transformed to
  {
       "inputs": (
           "stsb sentence1: Three more US soldiers killed in Afghanistan "
           "sentence2: NATO Soldier Killed in Afghanistan"
       ),
       "targets": "1.8",
  }

  Args:
    dataset: a tf.data.Dataset to process.
  Returns:
    a tf.data.Dataset
  """
  def my_fn(x):
    """Collapse an example into a text2text pair."""
    strs_to_join = [
        'stsb sentence1:', x['sentence1'], 'sentence2:', x['sentence2']
    ]
    label_string = tf.as_string(tf.round(x['label'] * 5) / 5, precision=1)
    joined = tf.strings.join(strs_to_join, separator=' ')
    return {'inputs': joined, 'targets': label_string, 'idx': x['idx']}
  return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) 
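The rounding formula is just "round to the nearest multiple of 0.2": multiply by 5, round, divide by 5. The same arithmetic in plain Python, with a made-up label:

label = 1.84
print('%.1f' % (round(label * 5) / 5))  # 9.2 -> 9 -> 1.8, prints "1.8"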
Example #12
Source File: preprocessors.py    From text-to-text-transfer-transformer with Apache License 2.0
def random_spans_helper(inputs_length=gin.REQUIRED,
                        noise_density=gin.REQUIRED,
                        mean_noise_span_length=gin.REQUIRED,
                        extra_tokens_per_span_inputs=gin.REQUIRED,
                        extra_tokens_per_span_targets=gin.REQUIRED):
  """Training parameters to avoid padding with random_spans_noise_mask.

  When training a model with random_spans_noise_mask, we would like to set the
  other training hyperparameters in a way that avoids padding.  This function
  helps us compute these hyperparameters.

  We assume that each noise span in the input is replaced by
  extra_tokens_per_span_inputs sentinel tokens, and each non-noise span in the
  targets is replaced by extra_tokens_per_span_targets sentinel tokens.

  This function tells us the required number of tokens in the raw example (for
  split_tokens()) as well as the length of the encoded targets.

  Args:
    inputs_length: an integer - desired length of the tokenized inputs sequence
    noise_density: a float
    mean_noise_span_length: a float
    extra_tokens_per_span_inputs: an integer
    extra_tokens_per_span_targets: an integer
  Returns:
    tokens_length: length of original text in tokens
    targets_length: an integer - length in tokens of encoded targets sequence
  """
  def _tokens_length_to_inputs_length_targets_length(tokens_length):
    num_noise_tokens = int(round(tokens_length * noise_density))
    num_nonnoise_tokens = tokens_length - num_noise_tokens
    num_noise_spans = int(round(num_noise_tokens / mean_noise_span_length))
    # inputs contain all nonnoise tokens, sentinels for all noise spans
    # and one EOS token.
    return (
        num_nonnoise_tokens +
        num_noise_spans * extra_tokens_per_span_inputs + 1,
        num_noise_tokens +
        num_noise_spans * extra_tokens_per_span_targets + 1)

  tokens_length = inputs_length
  while (_tokens_length_to_inputs_length_targets_length(tokens_length + 1)[0]
         <= inputs_length):
    tokens_length += 1
  inputs_length, targets_length = (
      _tokens_length_to_inputs_length_targets_length(tokens_length))
  # minor hack to get the targets length to be equal to inputs length
  # which is more likely to have been set to a nice round number.
  if noise_density == 0.5 and targets_length > inputs_length:
    tokens_length -= 1
    targets_length -= 1
  tf.logging.info(
      'tokens_length=%s inputs_length=%s targets_length=%s '
      'noise_density=%s mean_noise_span_length=%s ' %
      (tokens_length, inputs_length, targets_length,
       noise_density, mean_noise_span_length))
  return tokens_length, targets_length
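A hypothetical invocation with T5-style settings (the argument values below are illustrative, not taken from this file); tracing the search by hand with these settings, it should settle on tokens_length=568 and targets_length=114:

tokens_length, targets_length = random_spans_helper(
    inputs_length=512,
    noise_density=0.15,
    mean_noise_span_length=3.0,
    extra_tokens_per_span_inputs=1,
    extra_tokens_per_span_targets=1)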