Python tensorflow.compat.v1.cond() Examples

The following are 30 code examples of tensorflow.compat.v1.cond(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out the other available functions and classes of the tensorflow.compat.v1 module.
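Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic pattern: in graph mode, tf.compat.v1.cond() traces both branch callables into the graph, but only the branch selected by the predicate is executed when the session runs.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[])
# Both lambdas are traced into the graph; only one branch executes per run.
y = tf.cond(tf.greater(x, 0.0),
            true_fn=lambda: x * 2.0,
            false_fn=lambda: x - 1.0)

with tf.Session() as sess:
  print(sess.run(y, feed_dict={x: 3.0}))   # 6.0
  print(sess.run(y, feed_dict={x: -3.0}))  # -4.0

Note that only ops created inside true_fn/false_fn are guarded by the predicate; ops created outside the callables and merely referenced inside them run unconditionally, which is why the examples below wrap their branch logic in lambdas or named functions.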
Example #1
Source File: tf_example_decoder.py    From Object_Detection_Tracking with Apache License 2.0
def _decode_masks(self, parsed_tensors):
    """Decode a set of PNG masks to the tf.float32 tensors."""
    def _decode_png_mask(png_bytes):
      mask = tf.squeeze(
          tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
      mask = tf.cast(mask, dtype=tf.float32)
      mask.set_shape([None, None])
      return mask

    height = parsed_tensors['image/height']
    width = parsed_tensors['image/width']
    masks = parsed_tensors['image/object/mask']
    return tf.cond(
        tf.greater(tf.size(masks), 0),
        lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
        lambda: tf.zeros([0, height, width], dtype=tf.float32)) 
Example #2
Source File: data_aug_lib.py    From mesh with Apache License 2.0
def maybe_add_noise(image, noise_shape, scale0, scale1,
                    image_noise_probability, image_noise_ratio):
  """Add noise at two scales."""

  if image_noise_probability < 0.000001 or (
      image_noise_ratio < 0.000001):
    return image

  noise_list = []
  for scale in [scale0, scale1]:
    rand_image_noise_ratio = tf.random.uniform(
        shape=[], minval=0.0, maxval=image_noise_ratio)
    noise_list.append(
        _rand_noise(0.0, rand_image_noise_ratio, scale, noise_shape))

  skip_noise = tf.greater(tf.random.uniform([]), image_noise_probability)
  image = tf.cond(skip_noise,
                  lambda: image, lambda: image + noise_list[0])
  image = tf.cond(skip_noise,
                  lambda: image, lambda: image + noise_list[1])

  return image 
Example #3
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
  """Pooling (supports "LEFT")."""
  with tf.name_scope("pool", values=[inputs]):
    static_shape = inputs.get_shape()
    if not static_shape or len(static_shape) != 4:
      raise ValueError("Inputs to conv must have statically known rank 4.")
    # Add support for left padding.
    if padding == "LEFT":
      assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
      if len(static_shape) == 3:
        width_padding = 2 * (window_size[1] // 2)
        padding_ = [[0, 0], [width_padding, 0], [0, 0]]
      else:
        height_padding = 2 * (window_size[0] // 2)
        cond_padding = tf.cond(
            tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
            lambda: tf.constant(2 * (window_size[1] // 2)))
        width_padding = 0 if static_shape[2] == 1 else cond_padding
        padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
      inputs = tf.pad(inputs, padding_)
      inputs.set_shape([static_shape[0], None, None, static_shape[3]])
      padding = "VALID"

  return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides) 
Example #4
Source File: transformer_glow_layers_ops.py    From tensor2tensor with Apache License 2.0
def get_variable_ddi(
    name, shape, value, init, initializer=None, dtype=tf.float32,
    regularizer=None, trainable=True):
  """Wrapper for data-dependent initialization."""
  kwargs = {"trainable": trainable}
  if initializer:
    kwargs["initializer"] = initializer
  if regularizer:
    kwargs["regularizer"] = regularizer
  w = tf.get_variable(name, shape, dtype, **kwargs)
  if isinstance(init, bool):
    if init:
      return assign(w, value)
    return w
  else:
    return tf.cond(init, lambda: assign(w, value), lambda: w) 
Example #5
Source File: optimizers.py    From privacy with Apache License 2.0
def project_weights_to_r(self, force=False):
    """Normalize the weights to the R-ball.

    Args:
      force: True to normalize regardless of previous weight values.
        False to normalize only if the weights lie outside the R-ball.

    Raises:
      Exception: If not called from inside this optimizer context.
    """
    if not self._is_init:
      raise Exception('This method must be called from within the optimizer\'s '
                      'context.')
    radius = self.loss.radius()
    for layer in self.layers:
      weight_norm = tf.norm(layer.kernel, axis=0)
      if force:
        layer.kernel = layer.kernel / (weight_norm / radius)
      else:
        layer.kernel = tf.cond(
            tf.reduce_sum(tf.cast(weight_norm > radius, dtype=self.dtype)) > 0,
            lambda k=layer.kernel, w=weight_norm, r=radius: k / (w / r),  # pylint: disable=cell-var-from-loop
            lambda k=layer.kernel: k  # pylint: disable=cell-var-from-loop
        ) 
Example #6
Source File: trainer.py    From s4l with Apache License 2.0
def get_lr(global_step, base_lr,  # pylint: disable=missing-docstring
           decay_steps, lr_decay_factor, warmup_steps):

  warmup_lr = 0.0
  if warmup_steps > 0:
    warmup_lr = (tf.cast(global_step, tf.float32) * (base_lr / warmup_steps))

  if decay_steps:
    normal_lr = tf.train.piecewise_constant(
        global_step,
        [s for s in decay_steps],
        [base_lr * (lr_decay_factor ** i) for i in range(len(decay_steps) + 1)]
    )
  else:
    normal_lr = base_lr

  lr = tf.cond(
      tf.less(global_step, tf.cast(warmup_steps, dtype=tf.dtypes.int64)),
      lambda: warmup_lr, lambda: normal_lr)

  return lr


# TODO(akolesnikov): add more logging 
Example #7
Source File: datasets.py    From s4l with Apache License 2.0
def _decode_fn(self, example):
    image = tf.image.decode_jpeg(example[self.IMAGE_KEY], channels=3)
    # Subtract LABEL_OFFSET so that labels are in [0, 1000).
    label = tf.cast(example[self.LABEL_KEY], tf.int32) - self.LABEL_OFFSET
    if FLAGS.get_flag_value('pseudo_label_key', None):
        # Always use original label for val / test set.
        label_original = tf.cast(example[self.ORIGINAL_LABEL_KEY],
                                 tf.int32) - self.LABEL_OFFSET
        if self.split_name in ['val', 'test']:
          label = label_original
        elif self.split_name in ['train', 'trainval']:
          label_flag = tf.cast(example[self.FLAG_KEY], tf.int32)
          label = tf.cond(
              tf.math.equal(label_flag, tf.constant(1, dtype=tf.int32)),
              true_fn=lambda: label_original,
              false_fn=lambda: label)
        else:
          raise ValueError('Unknown split: {}'.format(self.split_name))

    return self.preprocess_fn({'image': image, 'label': label}) 
Example #8
Source File: multistep_optimizer.py    From tensor2tensor with Apache License 2.0
def _finish(self, update_ops, name_scope):
    """Updates beta_power variables every n batches and incrs counter."""
    iter_ = self._get_iter_variable()
    beta1_power, beta2_power = self._get_beta_accumulators()
    with tf.control_dependencies(update_ops):
      with tf.colocate_with(iter_):

        def update_beta_op():
          update_beta1 = beta1_power.assign(
              beta1_power * self._beta1_t,
              use_locking=self._use_locking)
          update_beta2 = beta2_power.assign(
              beta2_power * self._beta2_t,
              use_locking=self._use_locking)
          return tf.group(update_beta1, update_beta2)
        maybe_update_beta = tf.cond(
            tf.equal(iter_, 0), update_beta_op, tf.no_op)
        with tf.control_dependencies([maybe_update_beta]):
          update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                     use_locking=self._use_locking)
    return tf.group(
        *update_ops + [update_iter, maybe_update_beta], name=name_scope) 
Example #9
Source File: multistep_optimizer.py    From tensor2tensor with Apache License 2.0
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
                                              use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Example #10
Source File: imagenet.py    From tensor2tensor with Apache License 2.0
def _random_crop(image, size):
  """Make a random crop of (`size` x `size`)."""
  bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
  random_image, bbox = distorted_bounding_box_crop(
      image,
      bbox,
      min_object_covered=0.1,
      aspect_ratio_range=(3. / 4, 4. / 3.),
      area_range=(0.08, 1.0),
      max_attempts=1,
      scope=None)
  bad = _at_least_x_are_true(tf.shape(image), tf.shape(random_image), 3)

  image = tf.cond(
      bad, lambda: _center_crop(_do_scale(image, size), size),
      lambda: tf.image.resize_bicubic([random_image], [size, size])[0])
  return image 
Example #11
Source File: preprocessing.py    From EfficientNet-PyTorch with Apache License 2.0
def _decode_and_random_crop(image_bytes, image_size):
  """Make a random crop of image_size."""
  bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
  image = distorted_bounding_box_crop(
      image_bytes,
      bbox,
      min_object_covered=0.1,
      aspect_ratio_range=(3. / 4, 4. / 3.),
      area_range=(0.08, 1.0),
      max_attempts=10,
      scope=None)
  original_shape = tf.image.extract_jpeg_shape(image_bytes)
  bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)

  image = tf.cond(
      bad,
      lambda: _decode_and_center_crop(image_bytes, image_size),
      lambda: tf.image.resize_bicubic([image],  # pylint: disable=g-long-lambda
                                      [image_size, image_size])[0])

  return image 
Example #12
Source File: multistep_with_adamoptimizer.py    From tensor2tensor with Apache License 2.0
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(
            tf.zeros_like(grad_acc), use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Example #13
Source File: sample.py    From gpt2-estimator with MIT License
def top_k_logits(logits, k):
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )
    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    ) 
Example #14
Source File: utils.py    From magenta with Apache License 2.0
def softmax_summaries(loss, logits, one_hot_labels, name="softmax"):
  """Create the softmax summaries for this cross entropy loss.

  Args:
    loss: Cross-entropy loss.
    logits: The [batch_size, classes] float tensor representing the logits.
    one_hot_labels: The float tensor representing actual class ids. If this is
      [batch_size, classes], then we take the argmax of it first.
    name: Prepended to summary scope.
  """
  tf.summary.scalar(name + "_loss", loss)

  one_hot_labels = tf.cond(
      tf.equal(tf.rank(one_hot_labels),
               2), lambda: tf.to_int32(tf.argmax(one_hot_labels, 1)),
      lambda: tf.to_int32(one_hot_labels))

  in_top_1 = tf.nn.in_top_k(logits, one_hot_labels, 1)
  tf.summary.scalar(name + "_precision@1",
                    tf.reduce_mean(tf.to_float(in_top_1)))
  in_top_5 = tf.nn.in_top_k(logits, one_hot_labels, 5)
  tf.summary.scalar(name + "_precision@5",
                    tf.reduce_mean(tf.to_float(in_top_5))) 
Example #15
Source File: vgg_preprocessing.py    From models with Apache License 2.0
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(smallest_side)

  scale = tf.cond(tf.greater(height, width),
                  lambda: smallest_side / width,
                  lambda: smallest_side / height)
  new_height = tf.to_int32(tf.rint(height * scale))
  new_width = tf.to_int32(tf.rint(width * scale))
  return new_height, new_width 
Example #16
Source File: mlperf.py    From benchmarks with Apache License 2.0
def log_deferred_tensor_value(self, key, tensor_value, global_step,
                                stack_offset=2, every_n=1):
    """Logs the value of a tensor when the graph is run."""
    caller = '(%s)' % mlperf_log.get_caller(stack_offset, self._root_dir)
    def create_print_op():
      return tf.print(_MLPERF_LOG_PREFIX, self.mlperf_model_name,
                      tf.timestamp(), caller, key,
                      ': { "deferred": true, "value":', tensor_value, '}',
                      output_stream=sys.stdout)
    maybe_print = tf.cond(tf.equal(global_step % every_n, 0), create_print_op,
                          tf.no_op)
    with tf.control_dependencies([maybe_print]):
      return tf.identity(tensor_value) 
Example #17
Source File: autoaugment_utils.py    From models with Apache License 2.0
def _concat_bbox(bbox, bboxes):
  """Helper function that concates bbox to bboxes along the first dimension."""

  # Note if all elements in bboxes are -1 (_INVALID_BOX), then this means
  # we discard bboxes and start the bboxes Tensor with the current bbox.
  bboxes_sum_check = tf.reduce_sum(bboxes)
  bbox = tf.expand_dims(bbox, 0)
  # This check will be true when it is an _INVALID_BOX
  bboxes = tf.cond(tf.equal(bboxes_sum_check, -4.0),
                   lambda: bbox,
                   lambda: tf.concat([bboxes, bbox], 0))
  return bboxes 
Example #18
Source File: tf_example_decoder.py    From models with Apache License 2.0
def tensors_to_item(self, keys_to_tensors):
    item = self._handler.tensors_to_item(keys_to_tensors)
    return tf.cond(
        pred=tf.equal(tf.reduce_prod(tf.shape(item)), 0),
        true_fn=lambda: self._backup.tensors_to_item(keys_to_tensors),
        false_fn=lambda: item) 
Example #19
Source File: inputs.py    From models with Apache License 2.0
def _replace_empty_string_with_random_number(string_tensor):
  """Returns string unchanged if non-empty, and random string tensor otherwise.

  The random string is an integer between 0 and 2**63 - 1, cast as a string.

  Args:
    string_tensor: A tf.tensor of dtype string.

  Returns:
    out_string: A tf.tensor of dtype string. If string_tensor contains the empty
      string, out_string will contain a random integer casted to a string.
      Otherwise string_tensor is returned unchanged.

  """

  empty_string = tf.constant('', dtype=tf.string, name='EmptyString')

  random_source_id = tf.as_string(
      tf.random_uniform(shape=[], maxval=2**63 - 1, dtype=tf.int64))

  out_string = tf.cond(
      tf.equal(string_tensor, empty_string),
      true_fn=lambda: random_source_id,
      false_fn=lambda: string_tensor)

  return out_string 
Example #20
Source File: inputs.py    From models with Apache License 2.0
def _convert_labeled_classes_to_k_hot(groundtruth_labeled_classes, num_classes):
  """Returns k-hot encoding of the labeled classes."""

  # If the input labeled_classes is empty, it assumes all classes are
  # exhaustively labeled, thus returning an all-one encoding.
  def true_fn():
    return tf.sparse_to_dense(
        groundtruth_labeled_classes - _LABEL_OFFSET, [num_classes],
        tf.constant(1, dtype=tf.float32),
        validate_indices=False)

  def false_fn():
    return tf.ones(num_classes, dtype=tf.float32)

  return tf.cond(tf.size(groundtruth_labeled_classes) > 0, true_fn, false_fn) 
Example #21
Source File: inputs.py    From models with Apache License 2.0
def _multiclass_scores_or_one_hot_labels(multiclass_scores,
                                         groundtruth_boxes,
                                         groundtruth_classes, num_classes):
  """Returns one-hot encoding of classes when multiclass_scores is empty."""
  # Replace the groundtruth_classes tensor with the multiclass_scores tensor
  # when it is non-empty. If multiclass_scores is empty, fall back on the
  # groundtruth_classes tensor.
  def true_fn():
    return tf.reshape(multiclass_scores,
                      [tf.shape(groundtruth_boxes)[0], num_classes])
  def false_fn():
    return tf.one_hot(groundtruth_classes, num_classes)
  return tf.cond(tf.size(multiclass_scores) > 0, true_fn, false_fn) 
Example #22
Source File: tf_example_decoder.py    From models with Apache License 2.0
def _decode_png_instance_masks(self, keys_to_tensors):
    """Decode PNG instance segmentation masks and stack into dense tensor.

    The instance segmentation masks are reshaped to [num_instances, height,
    width].

    Args:
      keys_to_tensors: a dictionary from keys to tensors.

    Returns:
      A 3-D float tensor of shape [num_instances, height, width] with values
        in {0, 1}.
    """

    def decode_png_mask(image_buffer):
      image = tf.squeeze(
          tf.image.decode_image(image_buffer, channels=1), axis=2)
      image.set_shape([None, None])
      image = tf.cast(tf.greater(image, 0), dtype=tf.float32)
      return image

    png_masks = keys_to_tensors['image/object/mask']
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    if isinstance(png_masks, tf.SparseTensor):
      png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')
    return tf.cond(
        tf.greater(tf.size(png_masks), 0),
        lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
        lambda: tf.zeros(tf.cast(tf.stack([0, height, width]), dtype=tf.int32))) 
Example #23
Source File: helpers.py    From language with Apache License 2.0
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    del sample_ids  # Unused.
    with tf.name_scope(name, "TrainingHelperNextInputs", [time, outputs]):
      next_time = time + 1
      finished = (next_time >= self._sequence_length)
      all_finished = tf.reduce_all(finished)
      next_inputs = tf.cond(
          all_finished, lambda: self._zero_inputs, lambda: outputs)
      return finished, next_inputs, state 
Example #24
Source File: tf_example_decoder.py    From Object_Detection_Tracking with Apache License 2.0
def _decode_areas(self, parsed_tensors):
    xmin = parsed_tensors['image/object/bbox/xmin']
    xmax = parsed_tensors['image/object/bbox/xmax']
    ymin = parsed_tensors['image/object/bbox/ymin']
    ymax = parsed_tensors['image/object/bbox/ymax']
    return tf.cond(
        tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
        lambda: parsed_tensors['image/object/area'],
        lambda: (xmax - xmin) * (ymax - ymin)) 
Example #25
Source File: toy_model_tpu.py    From mesh with Apache License 2.0
def toy_model(features, mesh):
  """A toy model implemented by mesh tensorlfow."""
  batch_dim = mtf.Dimension('batch', FLAGS.batch_size)
  io_dim = mtf.Dimension('io', FLAGS.io_size)

  master_dtype = tf.as_dtype(FLAGS.master_dtype)
  slice_dtype = tf.as_dtype(FLAGS.slice_dtype)
  activation_dtype = tf.as_dtype(FLAGS.activation_dtype)

  x = mtf.import_tf_tensor(mesh, features, mtf.Shape([batch_dim, io_dim]))
  x = mtf.cast(x, activation_dtype)
  h = x
  for lnum in range(1, FLAGS.num_hidden_layers + 2):
    if lnum + 1 == FLAGS.num_hidden_layers + 2:
      # output layer
      dim = io_dim
    elif lnum % 2 == 0:
      dim = mtf.Dimension('hidden_even', FLAGS.hidden_size)
    else:
      dim = mtf.Dimension('hidden_odd', FLAGS.hidden_size)
    h = mtf.layers.dense(
        h, dim,
        use_bias=False,
        master_dtype=master_dtype,
        slice_dtype=slice_dtype,
        name='layer_%d' % lnum)
  y = h
  g = tf.train.get_global_step()
  if FLAGS.step_with_nan >= 0:
    # Trigger a NaN in the forward pass; this is used to test whether
    # Mesh TensorFlow can handle occasional NaN values.
    y += mtf.import_tf_tensor(
        mesh,
        tf.divide(
            0.0,
            tf.cond(tf.equal(g, FLAGS.step_with_nan), lambda: 0., lambda: 1.)),
        mtf.Shape([]))

  loss = mtf.reduce_mean(mtf.square(y - x))
  return y, loss 
Example #26
Source File: preprocessor.py    From Object_Detection_Tracking with Apache License 2.0
def _compute_new_dynamic_size(image, min_dimension, max_dimension):
  """Compute new dynamic shape for resize_to_range method."""
  image_shape = tf.shape(image)
  orig_height = tf.to_float(image_shape[0])
  orig_width = tf.to_float(image_shape[1])
  num_channels = image_shape[2]
  orig_min_dim = tf.minimum(orig_height, orig_width)
  # Calculates the larger of the possible sizes
  min_dimension = tf.constant(min_dimension, dtype=tf.float32)
  large_scale_factor = min_dimension / orig_min_dim
  # Scaling orig_(height|width) by large_scale_factor will make the smaller
  # dimension equal to min_dimension, save for floating point rounding errors.
  # For reasonably-sized images, taking the nearest integer will reliably
  # eliminate this error.
  large_height = tf.to_int32(tf.round(orig_height * large_scale_factor))
  large_width = tf.to_int32(tf.round(orig_width * large_scale_factor))
  large_size = tf.stack([large_height, large_width])
  if max_dimension:
    # Calculates the smaller of the possible sizes, use that if the larger
    # is too big.
    orig_max_dim = tf.maximum(orig_height, orig_width)
    max_dimension = tf.constant(max_dimension, dtype=tf.float32)
    small_scale_factor = max_dimension / orig_max_dim
    # Scaling orig_(height|width) by small_scale_factor will make the larger
    # dimension equal to max_dimension, save for floating point rounding
    # errors. For reasonably-sized images, taking the nearest integer will
    # reliably eliminate this error.
    small_height = tf.to_int32(tf.round(orig_height * small_scale_factor))
    small_width = tf.to_int32(tf.round(orig_width * small_scale_factor))
    small_size = tf.stack([small_height, small_width])
    new_size = tf.cond(
        tf.to_float(tf.reduce_max(large_size)) > max_dimension,
        lambda: small_size, lambda: large_size)
  else:
    new_size = large_size
  return tf.stack(tf.unstack(new_size) + [num_channels]) 
Example #27
Source File: mnist_benchmark.py    From autograph with Apache License 2.0
def benchmark_handwritten(self):
    with tf.Graph().as_default():
      ds, opt, hp, w, b = get_data_and_params()
      iterator = ds.make_one_shot_iterator()

      def loop_body(i, unused_previous_loss_t):
        """Manual implementation of training loop."""
        # Call get_next() inside body or else training happens repeatedly on
        # the first minibatch only.
        x, y = iterator.get_next()
        loss_t = loss_fn(x, y, w, b)
        train_op = opt.minimize(loss_t, var_list=(w, b))
        i = tf.cond(tf.equal(i % 100, 0),
                    lambda: tf.Print(i, [i, loss_t], message='Step, loss: '),
                    lambda: i)

        with tf.control_dependencies([train_op]):
          return i + 1, loss_t

      _, final_loss_t = tf.while_loop(
          lambda i, _: i < hp.train_steps,
          loop_body,
          [tf.constant(0), tf.constant(0.0)])

      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        def target():
          loss_val = sess.run(final_loss_t)
          assert 0.1 < loss_val < 1, loss_val

        self.time_execution(
            'Handwritten',
            target,
            iter_volume=hp.train_steps,
            iter_unit='training steps') 
Example #28
Source File: utils.py    From interval-bound-propagation with Apache License 2.0
def build_loss_schedule(step, warmup_steps, rampup_steps, init, final,
                        warmup=None):
  """Linear schedule builder.

  Args:
    step: Current step number.
    warmup_steps: When step < warmup_steps, set value to warmup.
    rampup_steps: Ramp up the schedule value from init to final over rampup_steps.
    init: Initial schedule value after warmup_steps.
    final: Final schedule value after warmup_steps + rampup_steps.
    warmup: Schedule value before warmup_steps. When set to None, the warmup
      period value is set to init.

  Returns:
    A schedule tensor.
  """
  if warmup is None and init == final:
    return init
  if rampup_steps < 0:
    if warmup is not None:
      return tf.cond(step < warmup_steps, lambda: tf.constant(warmup),
                     lambda: tf.constant(final))
    return final
  schedule = linear_schedule(
      step, warmup_steps, warmup_steps + rampup_steps, init, final)
  if warmup is not None:
    # Set the value to warmup during warmup process.
    return tf.cond(step < warmup_steps,
                   lambda: tf.constant(warmup), lambda: schedule)
  return schedule 
Example #29
Source File: crown.py    From interval-bound-propagation with Apache License 2.0
def _get_specification_bounds(self):
    """Get upper bounds on specification. Used for building verified loss."""
    ibp_bounds = self._specification(self._predictor.modules)
    # Compute verified accuracy using IBP bounds.
    v = tf.reduce_max(ibp_bounds, axis=1)
    self._interval_bounds_accuracy = tf.reduce_mean(
        tf.cast(v <= 0., tf.float32))
    # CROWN-IBP bounds.
    if self._use_crown_ibp:
      logging.info('CROWN-IBP active')
      def _build_crown_ibp_bounds():
        """Create the computationally expensive CROWN bounds for tf.cond."""
        predictor = self._predictor
        # CROWN is computed backwards, so we need to start with an
        # initial bound related to the specification.
        init_crown_bounds = create_initial_backward_bounds(self._specification,
                                                           predictor.modules)
        # Now propagate the specification matrix layer by layer;
        # we only need the CROWN upper bound, not the lower bound.
        crown_bound = predictor.propagate_bound_backward(init_crown_bounds,
                                                         compute_upper=True,
                                                         compute_lower=False)
        # A linear mixture of the two bounds with a schedule.
        return self._crown_bound_schedule * crown_bound.upper + \
               (1. - self._crown_bound_schedule) * ibp_bounds
      # If the coefficient for CROWN bound is close to 0, compute IBP only.
      mixture_bounds = tf.cond(self._crown_bound_schedule < 1e-6,
                               lambda: ibp_bounds, _build_crown_ibp_bounds)
      v = tf.reduce_max(mixture_bounds, axis=1)
      self._crown_ibp_accuracy = tf.reduce_mean(tf.cast(v <= 0., tf.float32))
    else:
      mixture_bounds = ibp_bounds
      self._crown_ibp_accuracy = tf.constant(0.)
    return mixture_bounds 
Example #30
Source File: utils.py    From EfficientNet-PyTorch with Apache License 2.0
def build_learning_rate(initial_lr,
                        global_step,
                        steps_per_epoch=None,
                        lr_decay_type='exponential',
                        decay_factor=0.97,
                        decay_epochs=2.4,
                        total_steps=None,
                        warmup_epochs=5):
  """Build learning rate."""
  if lr_decay_type == 'exponential':
    assert steps_per_epoch is not None
    decay_steps = steps_per_epoch * decay_epochs
    lr = tf.train.exponential_decay(
        initial_lr, global_step, decay_steps, decay_factor, staircase=True)
  elif lr_decay_type == 'cosine':
    assert total_steps is not None
    lr = 0.5 * initial_lr * (
        1 + tf.cos(np.pi * tf.cast(global_step, tf.float32) / total_steps))
  elif lr_decay_type == 'constant':
    lr = initial_lr
  else:
    assert False, 'Unknown lr_decay_type : %s' % lr_decay_type

  if warmup_epochs:
    logging.info('Learning rate warmup_epochs: %d', warmup_epochs)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    warmup_lr = (
        initial_lr * tf.cast(global_step, tf.float32) / tf.cast(
            warmup_steps, tf.float32))
    lr = tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)

  return lr