Python tensorflow.python.ops.check_ops.assert_rank_at_least() Examples

The following are 9 code examples of tensorflow.python.ops.check_ops.assert_rank_at_least(), collected from open-source projects. The source file, project, and license are noted above each example; where the same code appears verbatim in several projects, every source is listed. You may also want to check out all available functions/classes of the module tensorflow.python.ops.check_ops.
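As a quick orientation before the examples: assert_rank_at_least(x, rank) returns an Op that raises InvalidArgumentError at run time if x has rank lower than rank (if the static shape already proves a violation, a ValueError is raised at graph-construction time instead). It is typically wired into the graph through control dependencies. A minimal sketch, assuming TensorFlow 1.x graph mode:

import tensorflow as tf
from tensorflow.python.ops import check_ops

x = tf.placeholder(tf.float32, shape=None)  # rank unknown until run time
assertion = check_ops.assert_rank_at_least(
    x, 2, message='x must be at least a matrix')
with tf.control_dependencies([assertion]):
  y = tf.identity(x)  # evaluates only if the rank check passes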
Example #1
Source File: layers.py    From tensornets with MIT License; the same code appears verbatim in lambda-packs, keras-lambda, and auto-alt-text-lambda-api (MIT License) and in tf-slim (Apache License 2.0)
def _dense_inner_flatten(inputs, new_rank):
  """Helper function for `inner_flatten`."""
  rank_assertion = check_ops.assert_rank_at_least(
      inputs, new_rank, message='inputs has rank less than new_rank')
  with ops.control_dependencies([rank_assertion]):
    outer_dimensions = array_ops.strided_slice(
        array_ops.shape(inputs), [0], [new_rank - 1])
    new_shape = array_ops.concat((outer_dimensions, [-1]), 0)
    reshaped = array_ops.reshape(inputs, new_shape)

  # if `new_rank` is an integer, try to calculate new shape.
  if isinstance(new_rank, six.integer_types):
    static_shape = inputs.get_shape()
    if static_shape is not None and static_shape.dims is not None:
      static_shape = static_shape.as_list()
      static_outer_dims = static_shape[:new_rank - 1]
      static_inner_dims = static_shape[new_rank - 1:]
      flattened_dimension = 1
      for inner_dim in static_inner_dims:
        if inner_dim is None:
          flattened_dimension = None
          break
        flattened_dimension *= inner_dim
      reshaped.set_shape(static_outer_dims + [flattened_dimension])
  return reshaped 
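Here assert_rank_at_least guards a reshape: everything from dimension new_rank - 1 onward is collapsed into a single trailing dimension, so an input of shape [2, 3, 4, 5] flattened to new_rank=3 becomes [2, 3, 20]. A minimal sketch of the same pattern using only public TF 1.x ops (the helper above is private to the layers module):

import tensorflow as tf

inputs = tf.ones([2, 3, 4, 5])
new_rank = 3
assertion = tf.assert_rank_at_least(
    inputs, new_rank, message='inputs has rank less than new_rank')
with tf.control_dependencies([assertion]):
  outer = tf.shape(inputs)[:new_rank - 1]                 # [2, 3]
  flat = tf.reshape(inputs, tf.concat([outer, [-1]], 0))  # shape [2, 3, 20]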
Example #2
Source File: operator_pd_cholesky.py    From lambda-packs with MIT License; the same code appears verbatim in keras-lambda and auto-alt-text-lambda-api (MIT License) and in deep_image_model (Apache License 2.0)
def _check_chol(self, chol):
    """Verify that `chol` is proper."""
    chol = ops.convert_to_tensor(chol, name="chol")
    if not self.verify_pd:
      return chol

    shape = array_ops.shape(chol)
    rank = array_ops.rank(chol)

    is_matrix = check_ops.assert_rank_at_least(chol, 2)
    is_square = check_ops.assert_equal(
        array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))

    deps = [is_matrix, is_square]
    diag = array_ops.matrix_diag_part(chol)
    deps.append(check_ops.assert_positive(diag))

    return control_flow_ops.with_dependencies(deps, chol) 
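Taken together, the three assertions say that chol is (a batch of) square matrices with a strictly positive diagonal, which is exactly what a Cholesky factor of a positive-definite matrix looks like. A minimal sketch of the same checks guarding the reconstruction chol @ chol^T, assuming a TF 1.x session:

import tensorflow as tf

chol = tf.constant([[2.0, 0.0],
                    [1.0, 3.0]])  # lower triangular, positive diagonal
checks = [
    tf.assert_rank_at_least(chol, 2),
    tf.assert_equal(tf.shape(chol)[-2], tf.shape(chol)[-1]),
    tf.assert_positive(tf.matrix_diag_part(chol)),
]
with tf.control_dependencies(checks):
  pd = tf.matmul(chol, chol, adjoint_b=True)

with tf.Session() as sess:
  print(sess.run(pd))  # [[4. 2.] [2. 10.]]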
Example #3
Source File: layers.py    From deep_image_model with Apache License 2.0
def _dense_inner_flatten(inputs, new_rank):
  """Helper function for `inner_flatten`."""
  rank_assertion = check_ops.assert_rank_at_least(
      inputs, new_rank, message='inputs has rank less than new_rank')
  with ops.control_dependencies([rank_assertion]):
    outer_dimensions = array_ops.slice(
        array_ops.shape(inputs), [0], [new_rank - 1])
    new_shape = array_ops.concat(0, (outer_dimensions, [-1]))
    reshaped = array_ops.reshape(inputs, new_shape)

  # if `new_rank` is an integer, try to calculate new shape.
  if isinstance(new_rank, six.integer_types):
    static_shape = inputs.get_shape()
    if static_shape is not None and static_shape.dims is not None:
      static_shape = static_shape.as_list()
      static_outer_dims = static_shape[:new_rank - 1]
      static_inner_dims = static_shape[new_rank - 1:]
      flattened_dimension = 1
      for inner_dim in static_inner_dims:
        if inner_dim is None:
          flattened_dimension = None
          break
        flattened_dimension *= inner_dim
      reshaped.set_shape(static_outer_dims + [flattened_dimension])
  return reshaped 
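This variant targets the pre-1.0 TensorFlow API: array_ops.concat took the axis as its first argument (concat(0, values)), and array_ops.slice is used where later versions use strided_slice. The logic is otherwise identical to Example #1.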
Example #4
Source File: bijector.py    From keras-lambda with MIT License; the same code appears verbatim in auto-alt-text-lambda-api (MIT License)
def _forward(self, x):
    if self._static_event_ndims == 0:
      return math_ops.square(x)
    if self.validate_args:
      is_matrix = check_ops.assert_rank_at_least(x, 2)
      shape = array_ops.shape(x)
      is_square = check_ops.assert_equal(shape[-2], shape[-1])
      x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
    # For safety, explicitly zero-out the upper triangular part.
    x = array_ops.matrix_band_part(x, -1, 0)
    return math_ops.matmul(x, x, adjoint_b=True) 
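This is the matrix branch of a square/Cholesky-outer-product style bijector: for nonzero event_ndims the input must be at least a square matrix, its strictly upper triangle is zeroed so only a lower-triangular factor remains, and the forward value is x @ x^T. A small sketch of the two matrix ops involved:

import tensorflow as tf

x = tf.constant([[2.0, 7.0],   # the 7.0 above the diagonal is discarded
                 [1.0, 3.0]])
lower = tf.matrix_band_part(x, -1, 0)        # [[2. 0.] [1. 3.]]
y = tf.matmul(lower, lower, adjoint_b=True)  # [[4. 2.] [2. 10.]]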
Example #5
Source File: dirichlet_multinomial.py    From keras-lambda with MIT License; the same code appears verbatim in deep_image_model (Apache License 2.0) and auto-alt-text-lambda-api (MIT License)
def _assert_valid_alpha(self, alpha, validate_args):
    alpha = ops.convert_to_tensor(alpha, name="alpha")
    if not validate_args:
      return alpha
    return control_flow_ops.with_dependencies(
        [check_ops.assert_rank_at_least(alpha, 1),
         check_ops.assert_positive(alpha)], alpha) 
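When validate_args is False the checks are skipped entirely and an invalid alpha only surfaces later (typically as NaNs); when it is True, the returned alpha carries control dependencies on both assertions, so no op that consumes it can run before the checks pass.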
Example #6
Source File: dirichlet.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License; the same code appears verbatim in lambda-packs (MIT License), in both dirichlet.py and dirichlet_multinomial.py
def _maybe_assert_valid_concentration(self, concentration, validate_args):
    """Checks the validity of the concentration parameter."""
    if not validate_args:
      return concentration
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(
            concentration,
            message="Concentration parameter must be positive."),
        check_ops.assert_rank_at_least(
            concentration, 1,
            message="Concentration parameter must have >=1 dimensions."),
        check_ops.assert_less(
            1, array_ops.shape(concentration)[-1],
            message="Concentration parameter must have event_size >= 2."),
    ], concentration) 
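Compared with Example #5, this adds a third check: assert_less(1, shape[-1]) requires at least two categories, since a Dirichlet over a single category is degenerate (its one component is always 1).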
Example #7
Source File: bijector.py    From deep_image_model with Apache License 2.0
def _forward(self, x):
    if self._static_event_ndims == 0:
      return math_ops.square(x)
    if self.validate_args:
      is_matrix = check_ops.assert_rank_at_least(x, 2)
      shape = array_ops.shape(x)
      is_square = check_ops.assert_equal(shape[-2], shape[-1])
      x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
    # For safety, explicitly zero-out the upper triangular part.
    x = array_ops.matrix_band_part(x, -1, 0)
    return math_ops.batch_matmul(x, x, adj_y=True) 
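Functionally this matches Example #4; math_ops.batch_matmul with adj_y=True is the pre-1.0 spelling, which TensorFlow 1.0 folded into math_ops.matmul(..., adjoint_b=True).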
Example #8
Source File: deterministic.py    From lambda-packs with MIT License
def _prob(self, x):
    if self.validate_args:
      is_vector_check = check_ops.assert_rank_at_least(x, 1)
      right_vec_space_check = check_ops.assert_equal(
          self.event_shape_tensor(),
          array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1),
          message=
          "Argument 'x' not defined in the same space R^k as this distribution")
      with ops.control_dependencies([is_vector_check]):
        with ops.control_dependencies([right_vec_space_check]):
          x = array_ops.identity(x)
    return math_ops.cast(
        math_ops.reduce_all(math_ops.abs(x - self.loc) <= self._slack, axis=-1),
        dtype=self.dtype) 
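The rank assertion guarantees x is at least a vector before the event-shape comparison is attempted; the probability itself is just an indicator of whether every component of x lies within _slack of loc. A minimal sketch of that membership test, with hypothetical loc and slack values:

import tensorflow as tf

loc = tf.constant([1.0, 2.0])
slack = 1e-6
x = tf.constant([[1.0, 2.0],
                 [1.0, 2.5]])
prob = tf.cast(
    tf.reduce_all(tf.abs(x - loc) <= slack, axis=-1),
    tf.float32)  # [1.0, 0.0]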
Example #9
Source File: special_math_ops.py    From deep_image_model with Apache License 2.0; the same code appears verbatim in keras-lambda and auto-alt-text-lambda-api (MIT License)
def lbeta(x, name='lbeta'):
  r"""Computes `ln(|Beta(x)|)`, reducing along the last dimension.

  Given one-dimensional `z = [z_0,...,z_{K-1}]`, we define

  ```Beta(z) = \prod_j Gamma(z_j) / Gamma(\sum_j z_j)```

  And for `n + 1` dimensional `x` with shape `[N1, ..., Nn, K]`, we define
  `lbeta(x)[i1, ..., in] = Log(|Beta(x[i1, ..., in, :])|)`.  In other words,
  the last dimension is treated as the `z` vector.

  Note that if `z = [u, v]`, then
  `Beta(z) = int_0^1 t^{u-1} (1 - t)^{v-1} dt`, which defines the traditional
  bivariate beta function.

  Args:
    x: A rank `n + 1` `Tensor` with type `float`, or `double`.
    name: A name for the operation (optional).

  Returns:
    The logarithm of `|Beta(x)|` reducing along the last dimension.

  Raises:
    ValueError:  If `x` is empty with rank one or less.
  """
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name='x')
    x = control_flow_ops.with_dependencies(
        [check_ops.assert_rank_at_least(x, 1)], x)

    is_empty = math_ops.equal(0, array_ops.size(x))

    def nonempty_lbeta():
      log_prod_gamma_x = math_ops.reduce_sum(
          math_ops.lgamma(x), reduction_indices=[-1])
      sum_x = math_ops.reduce_sum(x, reduction_indices=[-1])
      log_gamma_sum_x = math_ops.lgamma(sum_x)
      result = log_prod_gamma_x - log_gamma_sum_x
      return result

    def empty_lbeta():
      # If x is empty, return version with one less dimension.
      # Can only do this if rank >= 2.
      assertion = check_ops.assert_rank_at_least(x, 2)
      with ops.control_dependencies([assertion]):
        return array_ops.squeeze(x, squeeze_dims=[0])

    static_size = x.get_shape().num_elements()
    if static_size is not None:
      if static_size > 0:
        return nonempty_lbeta()
      else:
        return empty_lbeta()
    else:
      return control_flow_ops.cond(is_empty, empty_lbeta, nonempty_lbeta) 
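As a numeric sanity check: for z = [u, v] the docstring's definition reduces to the classical beta function, so lbeta([1., 2.]) should equal log(Gamma(1)Gamma(2)/Gamma(3)) = log(1/2) ≈ -0.6931. A minimal sketch, assuming a TF 1.x session where this function is exposed as tf.lbeta:

import tensorflow as tf

x = tf.constant([1.0, 2.0])
with tf.Session() as sess:
  print(sess.run(tf.lbeta(x)))  # ≈ -0.6931472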