Python tensorflow.assert_greater_equal() Examples

The following are 30 code examples of tensorflow.assert_greater_equal(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out the other available functions and classes of the tensorflow module.
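Before the project examples, here is a minimal sketch (my own, not taken from any project below) of the typical usage pattern: tf.assert_greater_equal(x, y) returns an op that raises tf.errors.InvalidArgumentError at run time if x >= y does not hold element-wise, and the op is usually attached to the graph with tf.control_dependencies.

import tensorflow as tf  # TensorFlow 1.x API

x = tf.placeholder(tf.float32, [None])
# The assert op fires at session.run time if any element of x is negative.
assert_op = tf.assert_greater_equal(x, 0., message='x must be non-negative')
with tf.control_dependencies([assert_op]):
    y = tf.identity(x)

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [0., 1., 2.]}))  # passes
    # sess.run(y, feed_dict={x: [-1., 1.]})  # raises InvalidArgumentError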
Example #1
Source File: utils.py    From zhusuan with MIT License
def __call__(self, x, y):
        '''
        Return K(x, y), where x and y are possibly batched.
        :param x: shape [..., n_x, n_covariates]
        :param y: shape [..., n_y, n_covariates]
        :return: Tensor with shape [..., n_x, n_y]
        '''
        batch_shape = tf.shape(x)[:-2]
        rank = x.shape.ndims
        assert_ops = [
            tf.assert_greater_equal(
                rank, 2,
                message='RBFKernel: rank(x) should be static and >=2'),
            tf.assert_equal(
                rank, tf.rank(y),
                message='RBFKernel: x and y should have the same rank')]
        with tf.control_dependencies(assert_ops):
            # Broadcast to pairwise form:
            # x -> [..., n_x, 1, n_covariates], y -> [..., 1, n_y, n_covariates]
            x = tf.expand_dims(x, rank - 1)
            y = tf.expand_dims(y, rank - 2)
            k_scale = tf.reshape(self.k_scale, [1] * rank + [-1])
            ret = tf.exp(
                -tf.reduce_sum(tf.square(x - y) / k_scale, axis=-1) / 2)
        return ret 
Example #2
Source File: GAN.py    From VideoSuperResolution with MIT License
def _preprocess_for_inception(images):
  """Preprocess images for inception.

  Args:
    images: images minibatch. Shape [batch size, width, height,
      channels]. Values are in [0..255].

  Returns:
    preprocessed_images
  """

  images = tf.cast(images, tf.float32)

  # _TFGAN.preprocess_image expects pixel values in [0, 255]
  with tf.control_dependencies([tf.assert_greater_equal(images, 0.0),
                                tf.assert_less_equal(images, 255.0)]):
    images = tf.identity(images)

  preprocessed_images = tf.map_fn(
    fn=_TFGAN.preprocess_image,
    elems=images,
    back_prop=False)

  return preprocessed_images 
Example #3
Source File: graph_search_test.py    From kfac with Apache License 2.0
def sparse_softmax_cross_entropy(labels,
                                 logits,
                                 num_classes,
                                 weights=1.0,
                                 label_smoothing=0.1):
  """Softmax cross entropy with example weights, label smoothing."""
  assert_valid_label = [
      tf.assert_greater_equal(labels, tf.cast(0, dtype=tf.int64)),
      tf.assert_less(labels, tf.cast(num_classes, dtype=tf.int64))
  ]
  with tf.control_dependencies(assert_valid_label):
    labels = tf.reshape(labels, [-1])
    dense_labels = tf.one_hot(labels, num_classes)
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=dense_labels,
        logits=logits,
        weights=weights,
        label_smoothing=label_smoothing)
  return loss 
Example #4
Source File: utils.py    From g-tensorflow-models with Apache License 2.0
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #5
Source File: gcg_policy.py    From GtS with MIT License
def _graph_sub_cost(self, preds, pre_preds, use_pre, labels, mask, loss, scale, loss_weight, **kwargs):
        control_dependencies = []
        acc = None
        if loss == 'mse':
            assert(not use_pre)
            assert(len(preds.get_shape()) == len(labels.get_shape()))
            cost = 0.5 * tf.square(preds - labels) / scale
        elif loss == 'huber':
            # Huber loss; implementation mirrors the one in the TensorFlow repo to avoid gradient issues
            assert(not use_pre)
            assert(len(preds.get_shape()) == len(labels.get_shape()))
            delta = 1.0 * scale
            abs_diff = tf.abs(preds - labels)
            quadratic = tf.minimum(abs_diff, delta)
            linear = (abs_diff - quadratic)
            cost = (0.5 * quadratic**2 + delta * linear) / scale
        elif loss == 'xentropy':
            assert(use_pre)
            assert(len(pre_preds.get_shape()) == len(labels.get_shape()))
            labels /= scale
            preds /= scale
            control_dependencies += [tf.assert_greater_equal(labels, 0., name='cost_assert_2')]
            control_dependencies += [tf.assert_less_equal(labels, 1., name='cost_assert_3', summarize=1000)]
            control_dependencies += [tf.assert_greater_equal(preds, 0., name='cost_assert_4')]
            control_dependencies += [tf.assert_less_equal(preds, 1., name='cost_assert_5')]
            xentropy_posweight = kwargs.get('xentropy_posweight', 1)
            cost = tf.nn.weighted_cross_entropy_with_logits(logits=pre_preds, targets=labels,
                                                            pos_weight=xentropy_posweight)
            acc = tf.reduce_mean(tf.cast(tf.equal(preds > 0.5, labels > 0.5), tf.float32))
        else:
            raise NotImplementedError
        assert(len(cost.get_shape()) == len((mask * loss_weight).get_shape()))
        cost = tf.reduce_sum(cost * mask * loss_weight)
        return cost, acc, control_dependencies 
Example #6
Source File: modules.py    From tacotron2 with BSD 3-Clause "New" or "Revised" License
def call(self, inputs, **kwargs):
        # Valid symbol ids lie in [index_offset, index_offset + num_symbols).
        with tf.control_dependencies([tf.assert_greater_equal(inputs, self.index_offset),
                                      tf.assert_less(inputs, self.index_offset + self._num_symbols)]):
            return tf.nn.embedding_lookup(self._embedding, inputs - self.index_offset) 
Example #7
Source File: utils.py    From object_detection_kitti with Apache License 2.0
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #8
Source File: preprocessing.py    From object_detection_with_tensorflow with MIT License
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
Example #9
Source File: utils.py    From object_detection_with_tensorflow with MIT License
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #10
Source File: eval_utils.py    From compare_gan with Apache License 2.0
def inception_transform(inputs):
  with tf.control_dependencies([
      tf.assert_greater_equal(inputs, 0.0),
      tf.assert_less_equal(inputs, 255.0)]):
    inputs = tf.identity(inputs)
  preprocessed_inputs = tf.map_fn(
      fn=tfgan.eval.preprocess_image, elems=inputs, back_prop=False)
  return tfgan.eval.run_inception(
      preprocessed_inputs,
      graph_def=get_inception_graph_def(),
      output_tensor=["pool_3:0", "logits:0"]) 
Example #11
Source File: inception_network.py    From precision-recall-distributions with Apache License 2.0
def preprocess_for_inception(images):
  """Preprocess images for inception.

  Args:
    images: images minibatch. Shape [batch size, width, height,
      channels]. Values are in [0..255].

  Returns:
    preprocessed_images
  """

  # Images should have 3 channels.
  assert images.shape[3].value == 3

  # tf.contrib.gan.eval.preprocess_image function takes values in [0, 255]
  with tf.control_dependencies([tf.assert_greater_equal(images, 0.0),
                                tf.assert_less_equal(images, 255.0)]):
    images = tf.identity(images)

  preprocessed_images = tf.map_fn(
      fn=tf.contrib.gan.eval.preprocess_image,
      elems=images,
      back_prop=False
  )

  return preprocessed_images 
Example #12
Source File: preprocessing.py    From g-tensorflow-models with Apache License 2.0
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
Example #13
Source File: spectral_covariance.py    From VFF with Apache License 2.0
def make_Kuf(k, X, a, b, ms):
    omegas = 2. * np.pi * ms / (b-a)
    if float_type is tf.float32:
        omegas = omegas.astype(np.float32)
    Kuf_cos = tf.transpose(tf.cos(omegas * (X - a)))
    omegas_sin = omegas[omegas != 0]  # skip the zero frequency (its sin feature is identically zero)
    Kuf_sin = tf.transpose(tf.sin(omegas_sin * (X - a)))

    # correct Kfu outside [a, b]
    lt_a_sin = tf.tile(tf.transpose(X) < a, [len(ms)-1, 1])
    gt_b_sin = tf.tile(tf.transpose(X) > b, [len(ms)-1, 1])
    lt_a_cos = tf.tile(tf.transpose(X) < a, [len(ms), 1])
    gt_b_cos = tf.tile(tf.transpose(X) > b, [len(ms), 1])
    if isinstance(k, gpflow.kernels.Matern12):
        # Kuf_sin[:, np.logical_or(X.flatten() < a, X.flatten() > b)] = 0
        Kuf_sin = tf.where(tf.logical_or(lt_a_sin, gt_b_sin), tf.zeros(tf.shape(Kuf_sin), float_type), Kuf_sin)
        Kuf_cos = tf.where(lt_a_cos, tf.tile(tf.exp(-tf.abs(tf.transpose(X-a))/k.lengthscales), [len(ms), 1]), Kuf_cos)
        Kuf_cos = tf.where(gt_b_cos, tf.tile(tf.exp(-tf.abs(tf.transpose(X-b))/k.lengthscales), [len(ms), 1]), Kuf_cos)
    elif isinstance(k, gpflow.kernels.Matern32):
        arg = np.sqrt(3) * tf.abs(tf.transpose(X) - a) / k.lengthscales
        edge = tf.tile((1 + arg) * tf.exp(-arg), [len(ms), 1])
        Kuf_cos = tf.where(lt_a_cos, edge, Kuf_cos)
        arg = np.sqrt(3) * tf.abs(tf.transpose(X) - b) / k.lengthscales
        edge = tf.tile((1 + arg) * tf.exp(-arg), [len(ms), 1])
        Kuf_cos = tf.where(gt_b_cos, edge, Kuf_cos)

        arg = np.sqrt(3) * tf.abs(tf.transpose(X) - a) / k.lengthscales
        edge = (tf.transpose(X) - a) * tf.exp(-arg) * omegas_sin[:, None]
        Kuf_sin = tf.where(lt_a_sin, edge, Kuf_sin)
        arg = np.sqrt(3) * tf.abs(tf.transpose(X) - b) / k.lengthscales
        edge = (tf.transpose(X) - b) * tf.exp(-arg) * omegas_sin[:, None]
        Kuf_sin = tf.where(gt_b_sin, edge, Kuf_sin)
    elif isinstance(k, gpflow.kernels.Matern52):
        # Edges not implemented yet: assert instead that all X lie inside [a, b].
        Kuf_cos = tf.with_dependencies(
            [tf.assert_greater_equal(X, a, message='Edges not implemented for Matern52')],
            Kuf_cos, name='assert_left_edge')
        Kuf_sin = tf.with_dependencies(
            [tf.assert_less_equal(X, b, message='Edges not implemented for Matern52')],
            Kuf_sin, name='assert_right_edge')
    else:
        raise NotImplementedError
    return tf.concat([Kuf_cos, Kuf_sin], axis=0) 
Example #14
Source File: preprocessing.py    From models with Apache License 2.0
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
Example #15
Source File: utils.py    From models with Apache License 2.0
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #16
Source File: multi_tower_model.py    From revnet-public with MIT License
def __init__(self,
               config,
               tower_cls,
               is_training=True,
               inference_only=False,
               num_replica=2,
               inp=None,
               label=None,
               apply_grad=True,
               batch_size=None):
    self._config = config
    self._is_training = is_training
    self._inference_only = inference_only
    self._num_replica = num_replica
    self._apply_grad = apply_grad
    self._tower_cls = tower_cls
    self._batch_size = batch_size

    # Input.
    if inp is None:
      x = tf.placeholder(
          self.dtype,
          [batch_size, config.height, config.width, config.num_channel])
    else:
      x = inp
    if label is None:
      y = tf.placeholder(tf.int32, [batch_size])
    else:
      y = label
    self._bn_update_ops = None
    self._input = x
    # Make sure that the labels are in reasonable range.
    # with tf.control_dependencies(
    #     [tf.assert_greater_equal(y, 0), tf.assert_less(y, config.num_classes)]):
    #   self._label = tf.identity(y)
    self._label = y
    self._towers = []
    self._build_towers() 
Example #17
Source File: utils_tf.py    From cleverhans with MIT License
def assert_greater_equal(*args, **kwargs):
  """
  Wrapper for tf.assert_greater_equal.
  Overrides tf.device so that the assert always goes on CPU.
  The unwrapped version raises an exception if used with tf.device("/GPU:x").
  """
  with tf.device("/CPU:0"):
    return tf.assert_greater_equal(*args, **kwargs) 
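A hypothetical usage sketch (the tensors below are my own, not from the cleverhans sources): the wrapper is a drop-in replacement for tf.assert_greater_equal, so graphs built under a GPU device scope can still attach the assert without triggering the GPU placement error.

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
with tf.device("/GPU:0"):
  scaled = images / 255.
  # The assert op itself is pinned to the CPU by the wrapper above.
  with tf.control_dependencies([assert_greater_equal(scaled, 0.)]):
    scaled = tf.identity(scaled)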
Example #18
Source File: preprocessing.py    From multilabel-image-classification-tensorflow with MIT License
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
Example #19
Source File: utils.py    From multilabel-image-classification-tensorflow with MIT License
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #20
Source File: PlanarFlow.py    From Conditional_Density_Estimation with MIT License
def _inverse(self, z):
        """
        Runs a backward pass through the bijector.
        Also checks whether the flow is actually invertible.
        """
        z = InvertedPlanarFlow._handle_input_dimensionality(z)
        uw = tf.reduce_sum(self._w * self._u, 1)
        invertible = tf.assert_greater_equal(uw, -1., name='Invertibility_Constraint', data=[self._u, self._w, uw])
        with tf.control_dependencies([invertible]):
            return z + self._u * tf.tanh(self._wzb(z)) 
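For context (a hedged note paraphrasing the standard planar-flow result of Rezende & Mohamed, 2015, not text from this project): the planar flow f(z) = z + u * tanh(w^T z + b) is guaranteed to be invertible when w^T u >= -1, which is exactly the condition the assert above enforces before computing the inverse step.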
Example #21
Source File: attacks_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _project_perturbation(perturbation, epsilon, input_image):
    """Project `perturbation` onto L-infinity ball of radius `epsilon`."""
    # Ensure inputs are in the correct range
    with tf.control_dependencies([
        tf.assert_less_equal(input_image, 1.0),
        tf.assert_greater_equal(input_image, 0.0)
    ]):
        clipped_perturbation = tf.clip_by_value(
            perturbation, -epsilon, epsilon)
        new_image = tf.clip_by_value(
            input_image + clipped_perturbation, 0., 1.)
        return new_image - input_image 
Example #22
Source File: base_dataset.py    From video_prediction with MIT License
def slice_sequences(self, state_like_seqs, action_like_seqs, example_sequence_length):
        """
        Slices sequences of length `example_sequence_length` into subsequences
        of length `sequence_length`. The dicts of sequences are updated
        in-place and the same dicts are returned.
        """
        # handle random shifting and frame skip
        sequence_length = self.hparams.sequence_length  # desired sequence length
        frame_skip = self.hparams.frame_skip
        time_shift = self.hparams.time_shift
        if (time_shift and self.mode == 'train') or self.hparams.force_time_shift:
            assert time_shift > 0 and isinstance(time_shift, int)
            if isinstance(example_sequence_length, tf.Tensor):
                example_sequence_length = tf.cast(example_sequence_length, tf.int32)
            num_shifts = ((example_sequence_length - 1) - (sequence_length - 1) * (frame_skip + 1)) // time_shift
            assert_message = ('example_sequence_length has to be at least %d when '
                              'sequence_length=%d, frame_skip=%d.' %
                              ((sequence_length - 1) * (frame_skip + 1) + 1,
                               sequence_length, frame_skip))
            with tf.control_dependencies([tf.assert_greater_equal(num_shifts, 0,
                    data=[example_sequence_length, num_shifts], message=assert_message)]):
                t_start = tf.random_uniform([], 0, num_shifts + 1, dtype=tf.int32, seed=self.seed) * time_shift
        else:
            t_start = 0
        state_like_t_slice = slice(t_start, t_start + (sequence_length - 1) * (frame_skip + 1) + 1, frame_skip + 1)
        action_like_t_slice = slice(t_start, t_start + (sequence_length - 1) * (frame_skip + 1))

        for example_name, seq in state_like_seqs.items():
            seq = tf.convert_to_tensor(seq)[state_like_t_slice]
            seq.set_shape([sequence_length] + seq.shape.as_list()[1:])
            state_like_seqs[example_name] = seq
        for example_name, seq in action_like_seqs.items():
            seq = tf.convert_to_tensor(seq)[action_like_t_slice]
            seq.set_shape([(sequence_length - 1) * (frame_skip + 1)] + seq.shape.as_list()[1:])
            # concatenate actions of skipped frames into single macro actions
            seq = tf.reshape(seq, [sequence_length - 1, -1])
            action_like_seqs[example_name] = seq
        return state_like_seqs, action_like_seqs 
Example #23
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
    with self.test_session():
      small = tf.constant([1, 1, 1], name="big")
      big = tf.constant([3, 1], name="small")
      with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
        with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
          out = tf.identity(small)
        out.eval() 
Example #24
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
    with self.test_session():
      small = tf.constant([1], name="small")
      big = tf.constant([3, 1], name="big")
      with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
        out = tf.identity(small)
      out.eval() 
Example #25
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_doesnt_raise_when_greater_equal(self):
    with self.test_session():
      small = tf.constant([1, 2], name="small")
      big = tf.constant([3, 2], name="big")
      with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
        out = tf.identity(small)
      out.eval() 
Example #26
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_raises_when_less(self):
    with self.test_session():
      small = tf.constant([1, 2], name="small")
      big = tf.constant([3, 4], name="big")
      with tf.control_dependencies(
          [tf.assert_greater_equal(small, big, message="fail")]):
        out = tf.identity(small)
      with self.assertRaisesOpError("fail.*small.*big"):
        out.eval() 
Example #27
Source File: check_ops_test.py    From deep_image_model with Apache License 2.0
def test_doesnt_raise_when_equal(self):
    with self.test_session():
      small = tf.constant([1, 2], name="small")
      with tf.control_dependencies([tf.assert_greater_equal(small, small)]):
        out = tf.identity(small)
      out.eval() 
Example #28
Source File: utils.py    From Gun-Detector with Apache License 2.0
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms 
Example #29
Source File: preprocessing.py    From Gun-Detector with Apache License 2.0
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image 
Example #30
Source File: utils.py    From yolo_v2 with Apache License 2.0
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms