Python tensorflow.python.framework.tensor_util.constant_value_as_shape() Examples

The following are 17 code examples of tensorflow.python.framework.tensor_util.constant_value_as_shape(), drawn from open-source projects; the source file and originating project are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.python.framework.tensor_util.
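At a glance, constant_value_as_shape() statically evaluates a tensor that describes a shape, returning a TensorShape in which every element it cannot determine becomes None. A minimal sketch, assuming a TF 1.x graph context to match the examples below:

import tensorflow as tf
from tensorflow.python.framework import tensor_util

# A constant vector yields a fully defined TensorShape.
print(tensor_util.constant_value_as_shape(tf.constant([2, 3, 4])))  # (2, 3, 4)

# Elements that are not statically known come back as None (printed as "?").
partial = tf.stack([tf.constant(16), tf.placeholder(tf.int32)])
print(tensor_util.constant_value_as_shape(partial))  # (16, ?)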
Example #1
Source File: array_ops.py    From keras-lambda with MIT License (identical code also appears in auto-alt-text-lambda-api, Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, deep_image_model, and lambda-packs)
def _TileGradShape(op):
  """Shape function for the TileGrad op."""
  multiples_shape = op.inputs[1].get_shape().with_rank(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
  # it is a vector of non-negative integers, and (ii) doing so allows
  # us to handle partially-known multiples.
  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
      input_shape.ndims)
  if multiples.ndims is None:
    return [tensor_shape.unknown_shape()]
  else:
    output_dims = []
    for dim, multiple in zip(input_shape.dims, multiples.dims):
      output_dims.append(dim // multiple)
    return [tensor_shape.TensorShape(output_dims)] 
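TileGrad inverts Tile, so each output dimension is the input dimension floor-divided by the corresponding multiple, and unknown values simply propagate. A small illustration of that Dimension arithmetic (not the TileGrad op itself), assuming TF 1.x:

from tensorflow.python.framework import tensor_shape

# Known // known yields a known Dimension; anything involving an unknown
# Dimension stays unknown, which is how partially-known multiples are handled.
print(tensor_shape.Dimension(8) // tensor_shape.Dimension(2))     # 4
print(tensor_shape.Dimension(None) // tensor_shape.Dimension(2))  # ?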
Example #2
Source File: array_ops.py    From deep_image_model with Apache License 2.0
def _FillShape(op):
  """Shape function for the Fill op.

  This op takes a vector of dimensions and a scalar, and produces a
  tensor with the given dimensions.

  Args:
    op: A Fill Operation.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the shapes or arguments are known to be invalid.
  """
  op.inputs[0].get_shape().assert_has_rank(1)
  op.inputs[1].get_shape().assert_has_rank(0)
  fill_dims = tensor_util.constant_value(op.inputs[0])
  if fill_dims is not None and any(d < 0 for d in fill_dims):
    raise ValueError("Fill dimensions must be >= 0")
  return [tensor_util.constant_value_as_shape(op.inputs[0])] 
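In effect, Fill's output shape is the dimensions vector reinterpreted as a shape. A quick sketch of the behavior this shape function implements, assuming TF 1.x:

import tensorflow as tf

print(tf.fill([2, 3], 7.0).get_shape())  # (2, 3): constant dims, fully known

dims = tf.placeholder(tf.int32, shape=[2])
print(tf.fill(dims, 7.0).get_shape())    # (?, ?): rank known, sizes unknown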
Example #3
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testConstant(self):
    np_val = np.random.rand(3).astype(np.int32)
    tf_val = tf.constant(np_val)
    self.assertEqual(tf.TensorShape(np_val),
                     tensor_util.constant_value_as_shape(tf_val))

    tf_val = tf.constant([], dtype=tf.int32)
    self.assertEqual(tf.TensorShape([]),
                     tensor_util.constant_value_as_shape(tf_val)) 
Example #4
Source File: sparse_tensor.py    From keras-lambda with MIT License (identical code also appears in auto-alt-text-lambda-api and lambda-packs)
def get_shape(self):
    """Get the `TensorShape` representing the shape of the dense tensor.

    Returns:
      A `TensorShape` object.
    """
    return tensor_util.constant_value_as_shape(self._dense_shape) 
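A minimal usage sketch, assuming TF 1.x: when dense_shape is a known constant, get_shape() recovers it as a static TensorShape.

import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0],
                     dense_shape=[3, 4])
print(sp.get_shape())  # (3, 4)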
Example #5
Source File: transformed_distribution.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _batch_shape(self):
    # If there's a chance that the batch_shape has been overridden, we return
    # what we statically know about the `batch_shape_override`. This works
    # because: `_is_maybe_batch_override` means `static_override` is `None` or a
    # non-empty list, i.e., we don't statically know the `batch_shape` or we do.
    #
    # Notice that this implementation parallels the `_event_shape` except that
    # the `bijector` doesn't get to alter the `batch_shape`. Recall that
    # `batch_shape` is a property of a distribution while `event_shape` is
    # shared between both the `distribution` instance and the `bijector`.
    static_override = tensor_util.constant_value_as_shape(
        self._override_batch_shape)
    return (static_override
            if self._is_maybe_batch_override
            else self.distribution.batch_shape) 
Example #6
Source File: transformed_distribution.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _event_shape(self):
    # If there's a chance that the event_shape has been overridden, we return
    # what we statically know about the `event_shape_override`. This works
    # because: `_is_maybe_event_override` means `static_override` is `None` or a
    # non-empty list, i.e., we don't statically know the `event_shape` or we do.
    #
    # Since the `bijector` may change the `event_shape`, we then forward what we
    # know to the bijector. This allows the `bijector` to have final say in the
    # `event_shape`.
    static_override = tensor_util.constant_value_as_shape(
        self._override_event_shape)
    return self.bijector.forward_event_shape(
        static_override
        if self._is_maybe_event_override
        else self.distribution.event_shape) 
Example #7
Source File: dataset_ops.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License (the same code also appears in lambda-packs)
def output_shapes(self):

    def _padded_shape_to_batch_shape(s):
      return tensor_shape.vector(None).concatenate(
          tensor_util.constant_value_as_shape(s))

    return nest.map_structure(_padded_shape_to_batch_shape, self._padded_shapes) 
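Sketched directly with hypothetical values (assuming TF 1.x), _padded_shape_to_batch_shape prepends an unknown batch dimension, and constant_value_as_shape maps the -1 "pad to longest" sentinel to an unknown dimension:

import tensorflow as tf
from tensorflow.python.framework import tensor_shape, tensor_util

padded = tf.constant([32, -1])  # -1: pad this dim to the longest element
batched = tensor_shape.vector(None).concatenate(
    tensor_util.constant_value_as_shape(padded))
print(batched)  # (?, 32, ?)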
Example #8
Source File: sparse_tensor.py    From deep_image_model with Apache License 2.0
def get_shape(self):
    """Get the `TensorShape` that represents the shape of the dense tensor.

    Returns:
      A `TensorShape` object.
    """
    return tensor_util.constant_value_as_shape(self._shape) 
Example #9
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testConcat(self):
    tf_val = tf.concat(0, [[16, 37], tf.placeholder(tf.int32, shape=(2,))])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, None], c_val.as_list())

    tf_val = tf.concat(0,
                       [[16, 37], tf.placeholder(tf.int32, shape=(1,)), [48]])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, 48], c_val.as_list()) 
Example #10
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testPack(self):
    tf_val = tf.stack([tf.constant(16), 37, tf.placeholder(tf.int32)])
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None], c_val.as_list()) 
Example #11
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testShape(self):
    tf_val = tf.shape(tf.constant(0.0, shape=[1, 2, 3]))
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual(tf.TensorShape([1, 2, 3]), c_val) 
Example #12
Source File: array_ops.py    From deep_image_model with Apache License 2.0
def _TileShape(op):
  """Shape function for the Tile op.

  This op has two inputs:

  * input: A rank-N tensor.
  * multiples: A length-N vector, in which the i^th element contains
    the factor by which `input` will be tiled in the i^th dimension.

  It has one output, which has the same rank as input, and additional
  elements according to the values in `multiples`.

  Args:
    op: A Tile Operation.

  Returns:
    A single-element list containing the shape of the output.
  """
  multiples_shape = op.inputs[1].get_shape().with_rank(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0].value)
  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
  # it is a vector of non-negative integers, and (ii) doing so allows
  # us to handle partially-known multiples.
  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
      input_shape.ndims)
  if multiples.ndims is None:
    return [tensor_shape.unknown_shape()]
  else:
    output_dims = []
    for dim, multiple in zip(input_shape.dims, multiples.dims):
      output_dims.append(dim * multiple)
    return [tensor_shape.TensorShape(output_dims)] 
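The dim * multiple arithmetic surfaces directly through tf.tile's static shape, assuming TF 1.x:

import tensorflow as tf

x = tf.zeros([2, 3])
print(tf.tile(x, [3, 2]).get_shape())  # (6, 6): each dim scaled by its multiple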
Example #13
Source File: array_ops.py    From deep_image_model with Apache License 2.0
def _ReshapeShape(op):
  """Shape function for Reshape op."""
  input_shape = op.inputs[0].get_shape()
  if input_shape.ndims is not None:
    num_elements = tensor_shape.Dimension(1)
    for dim in input_shape.dims:
      num_elements *= dim
  else:
    num_elements = tensor_shape.Dimension(None)
  new_shape = tensor_util.constant_value_as_shape(op.inputs[1])
  if new_shape.ndims is None:
    # We have no information about the shape of the output.
    return [new_shape]
  if None not in new_shape.as_list():
    # The new shape is fully defined.
    if (num_elements.value is not None
        and num_elements.value != np.prod(new_shape)):
      raise ValueError(
          "Cannot reshape a tensor with %d elements to shape %s (%d elements)"
          % (num_elements.value, new_shape, np.prod(new_shape)))
  elif num_elements.value is not None:
    # We know the number of elements, so we can calculate the missing
    # dimension in the new_shape.
    known_elements = 1
    unknown_indices = []
    for i, dim in enumerate(new_shape):
      if dim.value is None:
        unknown_indices.append(i)
      else:
        known_elements *= dim.value
    if known_elements != 0:
      if num_elements % known_elements != 0:
        raise ValueError("input has %s elements, which isn't divisible by %d" %
                         (num_elements, known_elements))
      if len(unknown_indices) == 1:
        unknown_index = unknown_indices[0]
        new_shape = new_shape.merge_with(
            new_shape[:unknown_index].concatenate(
                [num_elements // known_elements]).concatenate(
                    new_shape[unknown_index+1:]))
  return [new_shape] 
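The missing-dimension inference above is what lets a reshape target contain a single -1; a worked sketch, assuming TF 1.x:

import tensorflow as tf

x = tf.zeros([2, 3, 4])     # 24 elements in total
y = tf.reshape(x, [4, -1])  # the -1 dimension is inferred as 24 // 4 = 6
print(y.get_shape())        # (4, 6)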
Example #14
Source File: array_ops.py    From deep_image_model with Apache License 2.0
def _SliceShape(op):
  """Shape function for array_ops.slice."""
  input_shape = op.inputs[0].get_shape()
  begin_shape = op.inputs[1].get_shape().with_rank(1)
  sizes_shape = op.inputs[2].get_shape().with_rank(1)
  ndims = begin_shape.merge_with(sizes_shape)[0].value
  if ndims is not None:
    input_shape.assert_has_rank(ndims)
  # NOTE(mrry): Use `constant_value_as_shape()` to handle
  # partially-known values.
  begin_value = tensor_util.constant_value_as_shape(
      op.inputs[1]).with_rank(ndims)
  # NOTE(mrry): We can't use `constant_value_as_shape()` for `sizes`
  # because it might contain -1, which can't be represented as a
  # `TensorShape`.
  sizes_value = tensor_util.constant_value(op.inputs[2])
  if sizes_value is not None:
    returned_dims = []
    for i, (slice_size, begin_dim) in enumerate(zip(sizes_value.ravel(),
                                                    begin_value.dims)):
      if slice_size != -1:
        returned_dims.append(slice_size)
      else:
        returned_dims.append(input_shape[i] - begin_dim)
    return [tensor_shape.TensorShape(returned_dims)]
  else:
    if input_shape.ndims is not None:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    elif ndims is not None:
      return [tensor_shape.unknown_shape(ndims=ndims)]
    else:
      return [tensor_shape.unknown_shape()] 
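Both branches of the sizes handling show up through tf.slice, assuming TF 1.x: an explicit size is taken as-is, while -1 resolves to input_shape[i] - begin[i].

import tensorflow as tf

x = tf.zeros([5, 6])
print(tf.slice(x, [1, 2], [2, 3]).get_shape())   # (2, 3): explicit sizes
print(tf.slice(x, [1, 2], [2, -1]).get_shape())  # (2, 4): -1 runs to the end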
Example #15
Source File: strcuture.py    From BERT with Apache License 2.0
def from_value(value):
    sparse_tensor = sparse_tensor_lib.SparseTensor.from_value(value)
    return SparseTensorStructure(
        sparse_tensor.dtype,
        tensor_util.constant_value_as_shape(sparse_tensor.dense_shape)) 
Example #16
Source File: strcuture.py    From BERT with Apache License 2.0
def _to_batched_tensor_list(self, value):
    if self._dense_shape.merge_with(
        tensor_util.constant_value_as_shape(value.dense_shape)).ndims == 0:
      raise ValueError(
          "Unbatching a sparse tensor is only supported for rank >= 1")
    return [sparse_ops.serialize_many_sparse(value, out_type=dtypes.variant)] 
Example #17
Source File: spectral_ops.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):
  """Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims."""
  fft_shape = _tensor_util.constant_value_as_shape(fft_length)

  # Edge case: skip padding empty tensors.
  if (input_tensor.shape.ndims is not None and
      any(dim.value == 0 for dim in input_tensor.shape)):
    return input_tensor

  # If we know the shapes ahead of time, we can either skip or pre-compute the
  # appropriate paddings. Otherwise, fall back to computing paddings in
  # TensorFlow.
  if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:
    # Slice the last FFT-rank dimensions from input_tensor's shape.
    input_fft_shape = input_tensor.shape[-fft_shape.ndims:]

    if input_fft_shape.is_fully_defined():
      # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
      if is_reverse:
        fft_shape = fft_shape[:-1].concatenate(fft_shape[-1].value // 2 + 1)

      paddings = [[0, max(fft_dim.value - input_dim.value, 0)]
                  for fft_dim, input_dim in zip(fft_shape, input_fft_shape)]
      if any(pad > 0 for _, pad in paddings):
        outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims -
                                         fft_shape.ndims), 0)
        return _array_ops.pad(input_tensor, outer_paddings + paddings)
      return input_tensor

  # If we can't determine the paddings ahead of time, then we have to pad. If
  # the paddings end up as zero, tf.pad has a special-case that does no work.
  input_rank = _array_ops.rank(input_tensor)
  input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]
  outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
  outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)
  # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
  if is_reverse:
    fft_length = _array_ops.concat([fft_length[:-1],
                                    fft_length[-1:] // 2 + 1], 0)
  fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)
  paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)
  paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings],
                              axis=1)
  return _array_ops.pad(input_tensor, paddings)
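When fft_length and the input shape are both static, the function above reduces to constant paddings. A hedged sketch of that fast path with hypothetical values, assuming TF 1.x:

import tensorflow as tf
from tensorflow.python.framework import tensor_util

signal = tf.zeros([3, 5])  # inner dim 5, to be padded up to fft_length 8
fft_shape = tensor_util.constant_value_as_shape(tf.constant([8]))
pad = max(fft_shape[0].value - signal.shape[-1].value, 0)
print(tf.pad(signal, [[0, 0], [0, pad]]).get_shape())  # (3, 8)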