Python tensorflow.python.ops.math_ops.less() Examples

The following are 30 code examples of tensorflow.python.ops.math_ops.less(). Each example notes its original project, source file, and license. You may also want to check out all available functions/classes of the module tensorflow.python.ops.math_ops, or try the search function.
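A minimal usage sketch before the project examples (assuming TensorFlow 1.x graph mode; the import path is the same internal module the examples below use): math_ops.less(x, y) returns an elementwise boolean tensor that is True wherever x < y, following the usual broadcasting rules.

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1, 4, 5])
y = tf.constant(3)                  # broadcast against x
is_smaller = math_ops.less(x, y)    # [True, False, False]

with tf.Session() as sess:
    print(sess.run(is_smaller))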
Example #1
Source File: image_ops.py    From deep_image_model with Apache License 2.0
def random_flip_left_right(image, seed=None):
  """Randomly flip an image horizontally (left to right).

  With a 1 in 2 chance, outputs the contents of `image` flipped along the
  second dimension, which is `width`.  Otherwise output the image as-is.

  Args:
    image: A 3-D tensor of shape `[height, width, channels].`
    seed: A Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.

  Returns:
    A 3-D tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` is not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=False)
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror = math_ops.less(array_ops.pack([1.0, uniform_random, 1.0]), 0.5)
  return array_ops.reverse(image, mirror) 
Example #2
Source File: check_ops.py    From deep_image_model with Apache License 2.0
def is_strictly_increasing(x, name=None):
  """Returns `True` if `x` is strictly increasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.
  If `x` has less than two elements, it is trivially strictly increasing.

  See also:  `is_non_decreasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).
      Defaults to "is_strictly_increasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_strictly_increasing', [x]):
    diff = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, diff = [], less = [], and reduce_all([]) = True.
    zero = ops.convert_to_tensor(0, dtype=diff.dtype)
    return math_ops.reduce_all(math_ops.less(zero, diff)) 
Example #3
Source File: check_ops.py    From deep_image_model with Apache License 2.0
def is_non_decreasing(x, name=None):
  """Returns `True` if `x` is non-decreasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.
  If `x` has less than two elements, it is trivially non-decreasing.

  See also:  `is_strictly_increasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).  Defaults to "is_non_decreasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_non_decreasing', [x]):
    diff = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, diff = [], less_equal = [], and reduce_all([]) = True.
    zero = ops.convert_to_tensor(0, dtype=diff.dtype)
    return math_ops.reduce_all(math_ops.less_equal(zero, diff)) 
Example #4
Source File: frechet_kernel_Inception_distance.py    From GAN_Metrics-Tensorflow with MIT License
def _symmetric_matrix_square_root(mat, eps=1e-10):
  """Compute square root of a symmetric matrix.

  Note that this is different from an elementwise square root. We want to
  compute M' where M' = sqrt(mat) such that M' * M' = mat.

  Also note that this method **only** works for symmetric matrices.

  Args:
    mat: Matrix to take the square root of.
    eps: Small epsilon such that any element less than eps will not be square
      rooted to guard against numerical instability.

  Returns:
    Matrix square root of mat.
  """
  # Unlike numpy, tensorflow's return order is (s, u, v)
  s, u, v = linalg_ops.svd(mat)
  # sqrt is unstable around 0: where s < eps, keep the (near-zero) value s
  # itself rather than taking its square root
  si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
  # Note that the v returned by Tensorflow is v = V
  # (when referencing the equation A = U S V^T)
  # This is unlike Numpy which returns v = V^T
  return math_ops.matmul(
      math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True) 
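As a usage note, a quick way to sanity-check the example above is to verify that the returned matrix multiplied by itself reproduces the input. A sketch, assuming TF 1.x graph mode and that _symmetric_matrix_square_root and its internal imports (linalg_ops, array_ops, math_ops) are in scope as defined above:

import numpy as np
import tensorflow as tf

# Build a symmetric positive semi-definite matrix A @ A^T.
a = np.random.randn(4, 4).astype(np.float32)
mat = tf.constant(a @ a.T)

sqrt_mat = _symmetric_matrix_square_root(mat)
reconstructed = tf.matmul(sqrt_mat, sqrt_mat)

with tf.Session() as sess:
    m, r = sess.run([mat, reconstructed])
    print(np.allclose(m, r, atol=1e-3))  # expected: True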
Example #5
Source File: check_ops.py    From lambda-packs with MIT License
def is_strictly_increasing(x, name=None):
  """Returns `True` if `x` is strictly increasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.
  If `x` has less than two elements, it is trivially strictly increasing.

  See also:  `is_non_decreasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).
      Defaults to "is_strictly_increasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_strictly_increasing', [x]):
    diff = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, diff = [], less = [], and reduce_all([]) = True.
    zero = ops.convert_to_tensor(0, dtype=diff.dtype)
    return math_ops.reduce_all(math_ops.less(zero, diff)) 
Example #6
Source File: check_ops.py    From lambda-packs with MIT License
def is_non_decreasing(x, name=None):
  """Returns `True` if `x` is non-decreasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.
  If `x` has less than two elements, it is trivially non-decreasing.

  See also:  `is_strictly_increasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).  Defaults to "is_non_decreasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_non_decreasing', [x]):
    diff = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, diff = [], less_equal = [], and reduce_all([]) = True.
    zero = ops.convert_to_tensor(0, dtype=diff.dtype)
    return math_ops.reduce_all(math_ops.less_equal(zero, diff)) 
Example #7
Source File: tf_image.py    From MobileNet with Apache License 2.0
def random_flip_left_right(image, bboxes, seed=None):
    """Random flip left-right of an image and its bounding boxes.
    """
    def flip_bboxes(bboxes):
        """Flip bounding boxes coordinates.
        """
        bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
                           bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
        return bboxes

    # Random flip. Tensorflow implementation.
    with tf.name_scope('random_flip_left_right'):
        image = ops.convert_to_tensor(image, name='image')
        _Check3DImage(image, require_static=False)
        uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
        mirror_cond = math_ops.less(uniform_random, .5)
        # Flip image.
        result = control_flow_ops.cond(mirror_cond,
                                       lambda: array_ops.reverse_v2(image, [1]),
                                       lambda: image)
        # Flip bboxes.
        bboxes = control_flow_ops.cond(mirror_cond,
                                       lambda: flip_bboxes(bboxes),
                                       lambda: bboxes)
        return fix_image_flip_shape(image, result), bboxes 
Example #8
Source File: check_ops.py    From auto-alt-text-lambda-api with MIT License
def is_non_decreasing(x, name=None):
  """Returns `True` if `x` is non-decreasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.
  If `x` has less than two elements, it is trivially non-decreasing.

  See also:  `is_strictly_increasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).  Defaults to "is_non_decreasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_non_decreasing', [x]):
    diff = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, diff = [], less_equal = [], and reduce_all([]) = True.
    zero = ops.convert_to_tensor(0, dtype=diff.dtype)
    return math_ops.reduce_all(math_ops.less_equal(zero, diff)) 
Example #9
Source File: tf_image.py    From pixel_link with MIT License
def random_flip_left_right(image, bboxes, seed=None):
    """Random flip left-right of an image and its bounding boxes.
    """
    def flip_bboxes(bboxes):
        """Flip bounding boxes coordinates.
        """
        bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
                           bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
        return bboxes

    # Random flip. Tensorflow implementation.
    with tf.name_scope('random_flip_left_right'):
        image = ops.convert_to_tensor(image, name='image')
        _Check3DImage(image, require_static=False)
        uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
        mirror_cond = math_ops.less(uniform_random, .5)
        # Flip image.
        result = control_flow_ops.cond(mirror_cond,
                                       lambda: array_ops.reverse_v2(image, [1]),
                                       lambda: image)
        # Flip bboxes.
        bboxes = control_flow_ops.cond(mirror_cond,
                                       lambda: flip_bboxes(bboxes),
                                       lambda: bboxes)
        return fix_image_flip_shape(image, result), bboxes 
Example #10
Source File: session_debug_testlib.py    From lambda-packs with MIT License
def testDebugWhileLoopWatchingWholeGraphWorks(self):
    with session.Session() as sess:
      loop_body = lambda i: math_ops.add(i, 2)
      loop_cond = lambda i: math_ops.less(i, 16)

      i = constant_op.constant(10, name="i")
      loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(run_options,
                              sess.graph,
                              debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(
          16, sess.run(loop, options=run_options, run_metadata=run_metadata))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      self.assertEqual(
          [[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
      self.assertEqual(
          [[12], [14], [16]],
          dump.get_tensors("while/NextIteration", 0, "DebugIdentity")) 
Example #11
Source File: tf_image.py    From seglink with GNU General Public License v3.0
def random_flip_left_right(image, bboxes, seed=None):
    """Random flip left-right of an image and its bounding boxes.
    """
    def flip_bboxes(bboxes):
        """Flip bounding boxes coordinates.
        """
        bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
                           bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
        return bboxes

    # Random flip. Tensorflow implementation.
    with tf.name_scope('random_flip_left_right'):
        image = ops.convert_to_tensor(image, name='image')
        _Check3DImage(image, require_static=False)
        uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
        mirror_cond = math_ops.less(uniform_random, .5)
        # Flip image.
        result = control_flow_ops.cond(mirror_cond,
                                       lambda: array_ops.reverse_v2(image, [1]),
                                       lambda: image)
        # Flip bboxes.
        bboxes = control_flow_ops.cond(mirror_cond,
                                       lambda: flip_bboxes(bboxes),
                                       lambda: bboxes)
        return fix_image_flip_shape(image, result), bboxes 
Example #12
Source File: image_ops_impl.py    From auto-alt-text-lambda-api with MIT License
def random_flip_up_down(image, seed=None):
  """Randomly flips an image vertically (upside down).

  With a 1 in 2 chance, outputs the contents of `image` flipped along the first
  dimension, which is `height`.  Otherwise output the image as-is.

  Args:
    image: A 3-D tensor of shape `[height, width, channels].`
    seed: A Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.

  Returns:
    A 3-D tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` is not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=False)
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror_cond = math_ops.less(uniform_random, .5)
  stride = array_ops.where(mirror_cond, -1, 1)
  result = image[::stride, :, :]
  return fix_image_flip_shape(image, result) 
Example #13
Source File: image_ops_impl.py    From auto-alt-text-lambda-api with MIT License
def random_flip_left_right(image, seed=None):
  """Randomly flip an image horizontally (left to right).

  With a 1 in 2 chance, outputs the contents of `image` flipped along the
  second dimension, which is `width`.  Otherwise output the image as-is.

  Args:
    image: A 3-D tensor of shape `[height, width, channels].`
    seed: A Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.

  Returns:
    A 3-D tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` is not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=False)
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror_cond = math_ops.less(uniform_random, .5)
  stride = array_ops.where(mirror_cond, -1, 1)
  result = image[:, ::stride, :]
  return fix_image_flip_shape(image, result) 
Example #14
Source File: image_ops.py    From deep_image_model with Apache License 2.0
def random_flip_up_down(image, seed=None):
  """Randomly flips an image vertically (upside down).

  With a 1 in 2 chance, outputs the contents of `image` flipped along the first
  dimension, which is `height`.  Otherwise output the image as-is.

  Args:
    image: A 3-D tensor of shape `[height, width, channels].`
    seed: A Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.

  Returns:
    A 3-D tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` is not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=False)
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror = math_ops.less(array_ops.pack([uniform_random, 1.0, 1.0]), 0.5)
  return array_ops.reverse(image, mirror) 
Example #15
Source File: check_ops.py    From auto-alt-text-lambda-api with MIT License
def is_strictly_increasing(x, name=None):
  """Returns `True` if `x` is strictly increasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.
  If `x` has less than two elements, it is trivially strictly increasing.

  See also:  `is_non_decreasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).
      Defaults to "is_strictly_increasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_strictly_increasing', [x]):
    diff = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, diff = [], less = [], and reduce_all([]) = True.
    zero = ops.convert_to_tensor(0, dtype=diff.dtype)
    return math_ops.reduce_all(math_ops.less(zero, diff)) 
Example #16
Source File: dynamic_rnn_estimator.py    From deep_image_model with Apache License 2.0
def _padding_mask(sequence_lengths, padded_length):
  """Creates a mask used for calculating losses with padded input.

  Args:
    sequence_lengths: a `Tensor` of shape `[batch_size]` containing the unpadded
      length of each sequence.
    padded_length: a scalar `Tensor` indicating the length of the sequences
      after padding.
  Returns:
    A boolean `Tensor` M of shape `[batch_size, padded_length]` where
    `M[i, j] == True` when `sequence_lengths[i] > j`.

  """
  range_tensor = math_ops.range(padded_length)
  return math_ops.less(array_ops.expand_dims(range_tensor, 0),
                       array_ops.expand_dims(sequence_lengths, 1)) 
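The broadcasting trick in this example (comparing an expanded range against expanded lengths) builds the same mask that tf.sequence_mask produces. A quick equivalence check, as a sketch assuming TF 1.x graph mode:

import numpy as np
import tensorflow as tf

lengths = tf.constant([2, 0, 3])
padded_length = 4

range_tensor = tf.range(padded_length)
mask_manual = tf.less(tf.expand_dims(range_tensor, 0),
                      tf.expand_dims(lengths, 1))
mask_helper = tf.sequence_mask(lengths, padded_length)

with tf.Session() as sess:
    m1, m2 = sess.run([mask_manual, mask_helper])
    print(np.array_equal(m1, m2))  # expected: True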
Example #17
Source File: analyzer_cli_test.py    From deep_image_model with Apache License 2.0
def setUpClass(cls):
    cls._dump_root = tempfile.mkdtemp()

    with session.Session() as sess:
      loop_var = constant_op.constant(0, name="while_loop_test/loop_var")
      cond = lambda loop_var: math_ops.less(loop_var, 10)
      body = lambda loop_var: math_ops.add(loop_var, 1)
      while_loop = control_flow_ops.while_loop(
          cond, body, [loop_var], parallel_iterations=1)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_url = "file://%s" % cls._dump_root

      watch_opts = run_options.debug_tensor_watch_opts

      # Add debug tensor watch for "while/Identity".
      watch = watch_opts.add()
      watch.node_name = "while/Identity"
      watch.output_slot = 0
      watch.debug_ops.append("DebugIdentity")
      watch.debug_urls.append(debug_url)

      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run(while_loop, options=run_options, run_metadata=run_metadata)

    cls._debug_dump = debug_data.DebugDumpDir(
        cls._dump_root, partition_graphs=run_metadata.partition_graphs)

    cls._analyzer = analyzer_cli.DebugAnalyzer(cls._debug_dump)
    cls._registry = debugger_cli_common.CommandHandlerRegistry()
    cls._registry.register_command_handler(
        "list_tensors",
        cls._analyzer.list_tensors,
        cls._analyzer.get_help("list_tensors"),
        prefix_aliases=["lt"])
    cls._registry.register_command_handler(
        "print_tensor",
        cls._analyzer.print_tensor,
        cls._analyzer.get_help("print_tensor"),
        prefix_aliases=["pt"]) 
Example #18
Source File: customlayers.py    From AiGEM_TeamHeidelberg2017 with MIT License
def focal_loss(labels=[], logits=[], pos_weights=[], gamma=2., clips=[], name='focal_loss'):
    """
    Add focal loss weigths to the wigthted sigmoid cross entropy
    :return:
    """
    batchsize = labels.get_shape().as_list()[0]
    n_classes = labels.get_shape().as_list()[1]

    with tf.variable_scope(name) as vs:
        # first get a sigmoid to determine the focal loss weights:
        sigmoid_logits = tf.nn.sigmoid(logits)
        # determine the focal loss weights:
        labels = math_ops.to_float(labels)
        sigmoid_logits.get_shape().assert_is_compatible_with(labels.get_shape())
        preds = array_ops.where(math_ops.equal(labels, 1.), sigmoid_logits, 1. - sigmoid_logits)
        focal_weights = (math_ops.subtract(1., preds)) ** gamma
        print(focal_weights)

        # clip the weights to the range [clips[0], clips[1]]
        up_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[1])
        low_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[0])
        focal_weights = array_ops.where(math_ops.greater(focal_weights, clips[1]), up_clip, focal_weights)
        focal_weights = array_ops.where(math_ops.less(focal_weights, clips[0]), low_clip, focal_weights)
        log_weight = 1. + (pos_weights - 1.) * labels

        # now put them into a weighted sigmoid cross entropy:
        loss = math_ops.multiply(
            math_ops.add((1. - labels) * logits,
                         log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) +
                                       nn_ops.relu(-logits))),
            focal_weights, name='sc_entropy')
        return loss 
Example #19
Source File: distributions.py    From TensorSwarm with MIT License
def sample(self):
        u = tf.random_uniform(tf.shape(self.ps))
        return tf.to_float(math_ops.less(u, self.ps)) 
Example #20
Source File: distributions.py    From rl-teacher with MIT License
def sample(self):
        u = tf.random_uniform(tf.shape(self.ps))
        return tf.to_float(math_ops.less(u, self.ps)) 
Example #21
Source File: distributions.py    From gail-tf with MIT License
def sample(self):
        u = tf.random_uniform(tf.shape(self.ps))
        return tf.to_float(math_ops.less(u, self.ps)) 
Example #22
Source File: customlayers.py    From AiGEM_TeamHeidelberg2017 with MIT License
def focal_loss_alpha(labels=[], logits=[], pos_weights=[], gamma=2., clips=[], name='focal_loss'):
    """
    Add focal loss weigths to the wigthted sigmoid cross entropy
    :return:
    """
    batchsize = labels.get_shape().as_list()[0]
    n_classes = labels.get_shape().as_list()[1]

    with tf.variable_scope(name) as vs:
        # first get a sigmoid to determine the focal loss weights:
        sigmoid_logits = tf.nn.sigmoid(logits)
        # determine the focal loss weights:
        labels = math_ops.to_float(labels)
        sigmoid_logits.get_shape().assert_is_compatible_with(labels.get_shape())
        preds = array_ops.where(math_ops.equal(labels, 1.), sigmoid_logits, 1. - sigmoid_logits)
        focal_weights = (math_ops.subtract(1., preds)) ** gamma
        print(focal_weights)

        # clip the weights to the range [clips[0], clips[1]]
        up_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[1])
        low_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[0])
        focal_weights = array_ops.where(math_ops.greater(focal_weights, clips[1]), up_clip, focal_weights)
        focal_weights = array_ops.where(math_ops.less(focal_weights, clips[0]), low_clip, focal_weights)
        log_weight = 1. + (pos_weights - 1.) * labels

        # now put them into a weighted sigmoid cross entropy:
        loss = math_ops.multiply(
            math_ops.add((1. - labels) * logits,
                         log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) +
                                       nn_ops.relu(-logits))),
            focal_weights, name='sc_entropy')
        return loss 
Example #23
Source File: distributions.py    From ICML2019-TREX with MIT License
def sample(self):
        u = tf.random_uniform(tf.shape(self.ps))
        return tf.to_float(math_ops.less(u, self.ps)) 
Example #24
Source File: test_forward.py    From training_results_v0.6 with Apache License 2.0
def test_forward_rel_ops():
    t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
    _test_forward_rel_op([t1, t2], math_ops.less)
    _test_forward_rel_op([t1, t2], math_ops.greater)
    _test_forward_rel_op([t1, t2], math_ops.less_equal)
    _test_forward_rel_op([t1, t2], math_ops.greater_equal)
    _test_forward_rel_op([t1, t2], math_ops.equal)
    _test_forward_rel_op([t1, t2], math_ops.not_equal)


#######################################################################
# Main
# ---- 
Example #25
Source File: image_ops.py    From rec-attend-public with MIT License
def random_flip_up_down(image, seed=None):
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror = math_ops.less(tf.pack([1.0, uniform_random, 1.0, 1.0]), 0.5)
  return tf.reverse(image, mirror) 
Example #26
Source File: image_ops.py    From rec-attend-public with MIT License
def random_flip_left_right(image, seed=None):
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror = math_ops.less(tf.pack([1.0, 1.0, uniform_random, 1.0]), 0.5)
  return tf.reverse(image, mirror) 
Example #27
Source File: distributions.py    From m3ddpg with MIT License
def sample(self):
        p = tf.sigmoid(self.logits)
        u = tf.random_uniform(tf.shape(p))
        return tf.to_float(math_ops.less(u, p)) 
Example #28
Source File: distributions.py    From MOREL with MIT License
def sample(self):
        u = tf.random_uniform(tf.shape(self.ps))
        return tf.to_float(math_ops.less(u, self.ps)) 
Example #29
Source File: distributions.py    From ICML2019-TREX with MIT License
def sample(self):
        u = tf.random_uniform(tf.shape(self.ps))
        return tf.to_float(math_ops.less(u, self.ps)) 
Example #30
Source File: distributions.py    From stable-baselines with MIT License
def sample(self):
        samples_from_uniform = tf.random_uniform(tf.shape(self.probabilities))
        return tf.cast(math_ops.less(samples_from_uniform, self.probabilities), tf.float32)
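The sample() methods in Examples #19-#21, #23, and #27-#30 share one pattern: drawing Bernoulli samples by comparing uniform noise against the success probabilities. Since u ~ Uniform(0, 1) satisfies P(u < p) = p, the comparison is 1 with probability p after casting. A standalone sketch of that pattern (assuming TF 1.x graph mode; the probability values are illustrative):

import tensorflow as tf
from tensorflow.python.ops import math_ops

probabilities = tf.constant([0.1, 0.5, 0.9])
u = tf.random_uniform(tf.shape(probabilities))                  # u ~ Uniform(0, 1)
samples = tf.cast(math_ops.less(u, probabilities), tf.float32)  # Bernoulli(p) samples

with tf.Session() as sess:
    print(sess.run(samples))  # e.g. [0., 1., 1.]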