Python tensorflow.python.ops.check_ops.assert_equal() Examples

The following are 30 code examples of tensorflow.python.ops.check_ops.assert_equal(), drawn from open-source projects. The source file and project for each snippet are listed above it. You may also want to check out the other functions and classes available in the tensorflow.python.ops.check_ops module.
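Before the project examples, here is a minimal sketch (not taken from any project below) of what assert_equal does, using the public alias tf.debugging.assert_equal, which sits on top of the same check_ops implementation. In eager mode a failed check raises tf.errors.InvalidArgumentError immediately; in graph mode the function returns an Op that is usually attached to downstream tensors via control dependencies, as most of the examples below do.

import tensorflow as tf

logits = tf.zeros([4, 3])  # stand-in for a model output of shape [batch_size, 3]

# Passes: the second dimension is 3.
tf.debugging.assert_equal(
    tf.shape(logits)[1], 3,
    message="logits must have shape [batch_size, 3]")

# Fails: the second dimension is not 5.
try:
  tf.debugging.assert_equal(
      tf.shape(logits)[1], 5,
      message="logits must have shape [batch_size, 5]")
except tf.errors.InvalidArgumentError as e:
  print(e.message)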
Example #1
Source File: operator_pd_cholesky.py    From deep_image_model with Apache License 2.0
def _check_chol(self, chol):
    """Verify that `chol` is proper."""
    chol = ops.convert_to_tensor(chol, name="chol")
    if not self.verify_pd:
      return chol

    shape = array_ops.shape(chol)
    rank = array_ops.rank(chol)

    is_matrix = check_ops.assert_rank_at_least(chol, 2)
    is_square = check_ops.assert_equal(
        array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))

    deps = [is_matrix, is_square]
    diag = array_ops.matrix_diag_part(chol)
    deps.append(check_ops.assert_positive(diag))

    return control_flow_ops.with_dependencies(deps, chol) 
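The pattern above, build assert ops and then return the tensor gated on them, recurs in most of these examples. Below is a minimal standalone sketch of the same gating idea, assuming TF 2.x with the v1 compatibility API; since control_flow_ops.with_dependencies is internal, tf.control_dependencies plus tf.identity stands in for it here.

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

chol = tf.compat.v1.placeholder(tf.float32, shape=[None, None])
shape = tf.shape(chol)

# The assert must succeed before checked_chol can be evaluated.
is_square = tf.compat.v1.assert_equal(
    shape[0], shape[1], message="chol must be square")
with tf.control_dependencies([is_square]):
  checked_chol = tf.identity(chol)

with tf.compat.v1.Session() as sess:
  sess.run(checked_chol, feed_dict={chol: [[1., 0.], [2., 3.]]})  # passes
  # sess.run(checked_chol, feed_dict={chol: [[1., 2.]]})  # would raise InvalidArgumentError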
Example #2
Source File: operator_pd_identity.py    From keras-lambda with MIT License
def _check_shape(self, shape):
    """Check that the init arg `shape` defines a valid operator."""
    shape = ops.convert_to_tensor(shape, name="shape")
    if not self._verify_pd:
      return shape

    # Further checks are equivalent to verification that this is positive
    # definite.  Why?  Because the further checks simply check that this is a
    # square matrix, and combining the fact that this is square (and thus maps
    # a vector space R^k onto itself), with the behavior of .matmul(), this must
    # be the identity operator.
    rank = array_ops.size(shape)
    assert_matrix = check_ops.assert_less_equal(2, rank)
    with ops.control_dependencies([assert_matrix]):
      last_dim = array_ops.gather(shape, rank - 1)
      second_to_last_dim = array_ops.gather(shape, rank - 2)
      assert_square = check_ops.assert_equal(last_dim, second_to_last_dim)
      return control_flow_ops.with_dependencies([assert_matrix, assert_square],
                                                shape) 
Example #3
Source File: distribution_util.py    From keras-lambda with MIT License
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name) 
Example #4
Source File: util.py    From lambda-packs with MIT License
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Floating-point `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name) 
Example #5
Source File: operator_pd_identity.py    From lambda-packs with MIT License
def _check_shape(self, shape):
    """Check that the init arg `shape` defines a valid operator."""
    shape = ops.convert_to_tensor(shape, name="shape")
    if not self._verify_pd:
      return shape

    # Further checks are equivalent to verification that this is positive
    # definite.  Why?  Because the further checks simply check that this is a
    # square matrix, and combining the fact that this is square (and thus maps
    # a vector space R^k onto itself), with the behavior of .matmul(), this must
    # be the identity operator.
    rank = array_ops.size(shape)
    assert_matrix = check_ops.assert_less_equal(2, rank)
    with ops.control_dependencies([assert_matrix]):
      last_dim = array_ops.gather(shape, rank - 1)
      second_to_last_dim = array_ops.gather(shape, rank - 2)
      assert_square = check_ops.assert_equal(last_dim, second_to_last_dim)
      return control_flow_ops.with_dependencies([assert_matrix, assert_square],
                                                shape) 
Example #6
Source File: head.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _check_logits(logits, expected_logits_dimension):
  """Check logits type and shape."""
  with ops.name_scope(None, 'logits', (logits,)) as scope:
    logits = math_ops.to_float(logits)
    logits_shape = array_ops.shape(logits)
    assert_rank = check_ops.assert_rank(
        logits, 2, data=[logits_shape],
        message='logits shape must be [batch_size, logits_dimension]')
    with ops.control_dependencies([assert_rank]):
      static_shape = logits.shape
      if static_shape is not None:
        dim1 = static_shape[1]
        if (dim1 is not None) and (dim1 != expected_logits_dimension):
          raise ValueError(
              'logits shape must be [batch_size, logits_dimension], got %s.' %
              (static_shape,))
      assert_dimension = check_ops.assert_equal(
          expected_logits_dimension, logits_shape[1], data=[logits_shape],
          message='logits shape must be [batch_size, logits_dimension]')
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(logits, name=scope) 
Example #7
Source File: head.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _check_labels(labels, expected_labels_dimension):
  """Check labels type and shape."""
  with ops.name_scope(None, 'labels', (labels,)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      raise ValueError('SparseTensor labels are not supported.')
    labels_shape = array_ops.shape(labels)
    err_msg = 'labels shape must be [batch_size, {}]'.format(
        expected_labels_dimension)
    assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)
    with ops.control_dependencies([assert_rank]):
      static_shape = labels.shape
      if static_shape is not None:
        dim1 = static_shape[1]
        if (dim1 is not None) and (dim1 != expected_labels_dimension):
          raise ValueError(
              'Mismatched label shape. '
              'Classifier configured with n_classes=%s.  Received %s. '
              'Suggested Fix: check your n_classes argument to the estimator '
              'and/or the shape of your label.' %
              (expected_labels_dimension, dim1))
      assert_dimension = check_ops.assert_equal(
          expected_labels_dimension, labels_shape[1], message=err_msg)
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(labels, name=scope) 
Example #8
Source File: linear_operator_util.py    From lambda-packs with MIT License
def assert_zero_imag_part(x, message=None, name="assert_zero_imag_part"):
  """Returns `Op` that asserts Tensor `x` has no non-zero imaginary parts.

  Args:
    x:  Numeric `Tensor`, real, integer, or complex.
    message:  A string message to prepend to failure message.
    name:  A name to give this `Op`.

  Returns:
    An `Op` that asserts `x` has no entries with a nonzero imaginary part.
  """
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype.base_dtype

    if dtype.is_floating:
      return control_flow_ops.no_op()

    zero = ops.convert_to_tensor(0, dtype=dtype.real_dtype)
    return check_ops.assert_equal(zero, math_ops.imag(x), message=message) 
Example #9
Source File: distribution_util.py    From auto-alt-text-lambda-api with MIT License
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name) 
Example #10
Source File: operator_pd_cholesky.py    From auto-alt-text-lambda-api with MIT License
def _check_chol(self, chol):
    """Verify that `chol` is proper."""
    chol = ops.convert_to_tensor(chol, name="chol")
    if not self.verify_pd:
      return chol

    shape = array_ops.shape(chol)
    rank = array_ops.rank(chol)

    is_matrix = check_ops.assert_rank_at_least(chol, 2)
    is_square = check_ops.assert_equal(
        array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))

    deps = [is_matrix, is_square]
    diag = array_ops.matrix_diag_part(chol)
    deps.append(check_ops.assert_positive(diag))

    return control_flow_ops.with_dependencies(deps, chol) 
Example #11
Source File: linear_operator_util.py    From lambda-packs with MIT License
def assert_compatible_matrix_dimensions(operator, x):
  """Assert that an argument to solve/matmul has proper domain dimension.

  If `operator.shape[-2:] = [M, N]`, and `x.shape[-2:] = [Q, R]`, then
  `operator.matmul(x)` is defined only if `N = Q`.  This `Op` returns an
  `Assert` that "fires" if this is not the case.  Static checks are already
  done by the base class `LinearOperator`.

  Args:
    operator:  `LinearOperator`.
    x:  `Tensor`.

  Returns:
    `Assert` `Op`.
  """
  # Static checks are done in the base class.  Only tensor asserts here.
  assert_same_dd = check_ops.assert_equal(
      array_ops.shape(x)[-2],
      operator.domain_dimension_tensor(),
      message=("Incompatible matrix dimensions.  "
               "shape[-2] of argument to be the same as this operator"))

  return assert_same_dd 
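A small illustrative sketch of the N = Q condition from the docstring above, with plain tensors standing in for the operator (the shapes here are made up):

import tensorflow as tf

operator = tf.random.normal([5, 4])  # shape [M, N] with N = 4
x = tf.random.normal([4, 3])         # shape [Q, R] with Q = 4

# Mirrors the dynamic check built above: shape(x)[-2] must equal N.
tf.debugging.assert_equal(
    tf.shape(x)[-2], tf.shape(operator)[-1],
    message="Incompatible matrix dimensions.")

print(tf.matmul(operator, x).shape)  # (5, 3)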
Example #12
Source File: linear_operator_util.py    From auto-alt-text-lambda-api with MIT License
def assert_zero_imag_part(x, message=None, name="assert_zero_imag_part"):
  """Returns `Op` that asserts Tensor `x` has no non-zero imaginary parts.

  Args:
    x:  Numeric `Tensor`, real, integer, or complex.
    message:  A string message to prepend to failure message.
    name:  A name to give this `Op`.

  Returns:
    An `Op` that asserts `x` has no entries with a nonzero imaginary part.
  """
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype.base_dtype

    if dtype.is_floating:
      return control_flow_ops.no_op()

    zero = ops.convert_to_tensor(0, dtype=dtype.real_dtype)
    return check_ops.assert_equal(zero, math_ops.imag(x), message=message) 
Example #13
Source File: operator_pd_identity.py    From deep_image_model with Apache License 2.0
def _check_shape(self, shape):
    """Check that the init arg `shape` defines a valid operator."""
    shape = ops.convert_to_tensor(shape, name="shape")
    if not self._verify_pd:
      return shape

    # Further checks are equivalent to verification that this is positive
    # definite.  Why?  Because the further checks simply check that this is a
    # square matrix, and combining the fact that this is square (and thus maps
    # a vector space R^k onto itself), with the behavior of .matmul(), this must
    # be the identity operator.
    rank = array_ops.size(shape)
    assert_matrix = check_ops.assert_less_equal(2, rank)
    with ops.control_dependencies([assert_matrix]):
      last_dim = array_ops.gather(shape, rank - 1)
      second_to_last_dim = array_ops.gather(shape, rank - 2)
      assert_square = check_ops.assert_equal(last_dim, second_to_last_dim)
      return control_flow_ops.with_dependencies([assert_matrix, assert_square],
                                                shape) 
Example #14
Source File: distribution_util.py    From deep_image_model with Apache License 2.0
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """

  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  casted_x = math_ops.to_int64(x)
  return check_ops.assert_equal(
      x, math_ops.cast(math_ops.round(casted_x), x.dtype),
      data=data, summarize=summarize, message=message, name=name) 
Example #15
Source File: operator_pd_identity.py    From auto-alt-text-lambda-api with MIT License
def _check_shape(self, shape):
    """Check that the init arg `shape` defines a valid operator."""
    shape = ops.convert_to_tensor(shape, name="shape")
    if not self._verify_pd:
      return shape

    # Further checks are equivalent to verification that this is positive
    # definite.  Why?  Because the further checks simply check that this is a
    # square matrix, and combining the fact that this is square (and thus maps
    # a vector space R^k onto itself), with the behavior of .matmul(), this must
    # be the identity operator.
    rank = array_ops.size(shape)
    assert_matrix = check_ops.assert_less_equal(2, rank)
    with ops.control_dependencies([assert_matrix]):
      last_dim = array_ops.gather(shape, rank - 1)
      second_to_last_dim = array_ops.gather(shape, rank - 2)
      assert_square = check_ops.assert_equal(last_dim, second_to_last_dim)
      return control_flow_ops.with_dependencies([assert_matrix, assert_square],
                                                shape) 
Example #16
Source File: pointer_wrapper.py    From CommonSenseMultiHopQA with MIT License
def _batch_size_checks(self, batch_size, error_message):
    return [check_ops.assert_equal(batch_size,
                                   attention_mechanism.batch_size,
                                   message=error_message)
            for attention_mechanism in self._attention_mechanisms]
Example #17
Source File: util.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def assert_integer_form(
    x, data=None, summarize=None, message=None,
    int_dtype=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Floating-point `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
      implies the smallest possible signed int will be used for casting.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
  """
  with ops.name_scope(name, values=[x, data]):
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_integer:
      return control_flow_ops.no_op()
    message = message or "{} has non-integer components".format(x.op.name)
    if int_dtype is None:
      try:
        int_dtype = {
            dtypes.float16: dtypes.int16,
            dtypes.float32: dtypes.int32,
            dtypes.float64: dtypes.int64,
        }[x.dtype.base_dtype]
      except KeyError:
        raise TypeError("Unrecognized type {}".format(x.dtype.name))
    return check_ops.assert_equal(
        x, math_ops.cast(math_ops.cast(x, int_dtype), x.dtype),
        data=data, summarize=summarize, message=message, name=name) 
Example #18
Source File: dirichlet_multinomial.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _maybe_assert_valid_sample(self, counts):
    """Check counts for proper shape, values, then return tensor version."""
    if not self.validate_args:
      return counts
    counts = distribution_util.embed_check_nonnegative_integer_form(counts)
    return control_flow_ops.with_dependencies([
        check_ops.assert_equal(
            self.total_count, math_ops.reduce_sum(counts, -1),
            message="counts last-dimension must sum to `self.total_count`"),
    ], counts) 
Example #19
Source File: Architecture_wrappers.py    From gmvae_tacotron with MIT License
def _batch_size_checks(self, batch_size, error_message):
    return [check_ops.assert_equal(batch_size,
      self._attention_mechanism.batch_size,
      message=error_message)]
Example #20
Source File: attention_wrapper_mod.py    From NQG_ASs2s with MIT License
def _batch_size_checks(self, batch_size, error_message):
    return [check_ops.assert_equal(batch_size,
                                   attention_mechanism.batch_size,
                                   message=error_message)
            for attention_mechanism in self._attention_mechanisms] 
Example #21
Source File: Architecture_wrappers.py    From tacotron2-mandarin-griffin-lim with MIT License
def _batch_size_checks(self, batch_size, error_message):
    return [check_ops.assert_equal(batch_size,
      self._attention_mechanism.batch_size,
      message=error_message)]
Example #22
Source File: Architecture_wrappers.py    From Tacotron-2 with MIT License
def _batch_size_checks(self, batch_size, error_message):
    return [check_ops.assert_equal(batch_size,
      self._attention_mechanism.batch_size,
      message=error_message)]
Example #23
Source File: distribution_util.py    From keras-lambda with MIT License
def assert_close(
    x, y, data=None, summarize=None, message=None, name="assert_close"):
  """Assert that that x and y are within machine epsilon of each other.

  Args:
    x: Numeric `Tensor`
    y: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
  message = message or ""
  x = ops.convert_to_tensor(x, name="x")
  y = ops.convert_to_tensor(y, name="y")

  if data is None:
    data = [
        message,
        "Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ",
        y.name, y
    ]

  if x.dtype.is_integer:
    return check_ops.assert_equal(
        x, y, data=data, summarize=summarize, message=message, name=name)

  with ops.name_scope(name, "assert_close", [x, y, data]):
    tol = np.finfo(x.dtype.as_numpy_dtype).eps
    condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x-y), tol))
    return control_flow_ops.Assert(
        condition, data, summarize=summarize) 
Example #24
Source File: von_mises_fisher.py    From s-vae-tf with MIT License
def _kl_vmf_uniform(vmf, hyu, name=None):
    with ops.control_dependencies([check_ops.assert_equal(vmf.loc.shape[-1] - 1, hyu.dim)]):
        with ops.name_scope(name, "_kl_vmf_uniform", [vmf.scale]):
            return - vmf.entropy() + hyu.entropy() 
Example #25
Source File: attention_wrapper.py    From tf-var-attention with MIT License
def _batch_size_checks(self, batch_size, error_message):
    return [check_ops.assert_equal(batch_size,
                                   attention_mechanism.batch_size,
                                   message=error_message)
            for attention_mechanism in self._attention_mechanisms]
Example #26
Source File: rnn_wrappers.py    From arabic-tacotron-tts with MIT License
def _batch_size_checks(self, batch_size, error_message):
    return [check_ops.assert_equal(batch_size,
      self._attention_mechanism.batch_size,
      message=error_message)] 
Example #27
Source File: bijector.py    From deep_image_model with Apache License 2.0
def _forward(self, x):
    if self._static_event_ndims == 0:
      return math_ops.square(x)
    if self.validate_args:
      is_matrix = check_ops.assert_rank_at_least(x, 2)
      shape = array_ops.shape(x)
      is_square = check_ops.assert_equal(shape[-2], shape[-1])
      x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
    # For safety, explicitly zero-out the upper triangular part.
    x = array_ops.matrix_band_part(x, -1, 0)
    return math_ops.batch_matmul(x, x, adj_y=True) 
Example #28
Source File: mvn.py    From deep_image_model with Apache License 2.0
def _assert_valid_mu(self, mu):
    """Return `mu` after validity checks and possibly with assertations."""
    cov = self._cov
    if mu.dtype != cov.dtype:
      raise TypeError(
          "mu and cov must have the same dtype.  Found mu.dtype = %s, "
          "cov.dtype = %s" % (mu.dtype, cov.dtype))

    # Try to validate with static checks.
    mu_shape = mu.get_shape()
    cov_shape = cov.get_shape()
    if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():
      if mu_shape != cov_shape[:-1]:
        raise ValueError(
            "mu.shape and cov.shape[:-1] should match.  Found: mu.shape=%s, "
            "cov.shape=%s" % (mu_shape, cov_shape))
      else:
        return mu

    # Static checks could not be run, so possibly do dynamic checks.
    if not self.validate_args:
      return mu
    else:
      assert_same_rank = check_ops.assert_equal(
          array_ops.rank(mu) + 1,
          cov.rank(),
          data=["mu should have rank 1 less than cov.  Found: rank(mu) = ",
                array_ops.rank(mu), " rank(cov) = ", cov.rank()],
      )
      with ops.control_dependencies([assert_same_rank]):
        assert_same_shape = check_ops.assert_equal(
            array_ops.shape(mu),
            cov.vector_shape(),
            data=["mu.shape and cov.shape[:-1] should match.  "
                  "Found: shape(mu) = "
                  , array_ops.shape(mu), " shape(cov) = ", cov.shape()],
        )
        return control_flow_ops.with_dependencies([assert_same_shape], mu) 
Example #29
Source File: multinomial.py    From lambda-packs with MIT License
def _maybe_assert_valid_sample(self, counts):
    """Check counts for proper shape, values, then return tensor version."""
    if not self.validate_args:
      return counts

    counts = distribution_util.embed_check_nonnegative_discrete(
        counts, check_integer=True)
    return control_flow_ops.with_dependencies([
        check_ops.assert_equal(
            self.total_count, math_ops.reduce_sum(counts, -1),
            message="counts must sum to `self.total_count`"),
    ], counts) 
Example #30
Source File: attention_wrapper.py    From QGforQA with MIT License
def _batch_size_checks(self, batch_size, error_message):
    return [check_ops.assert_equal(batch_size,
                                   attention_mechanism.batch_size,
                                   message=error_message)
            for attention_mechanism in self._attention_mechanisms]