Python tensorflow.python.framework.dtypes.int32() Examples

The following are 30 code examples of tensorflow.python.framework.dtypes.int32(). Follow the links above each example to go to the original project or source file, or browse the other available functions and classes of the module tensorflow.python.framework.dtypes.
Example #1
Source File: data_flow_grad.py    From lambda-packs with MIT License
def _DynamicStitchGrads(op, grad):
  """Gradients for DynamicStitch."""

  num_values = len(op.inputs) // 2
  indices_grad = [None] * num_values

  def AsInt32(x):
    return (x if op.inputs[0].dtype == dtypes.int32 else
            math_ops.cast(x, dtypes.int32))
  inputs = [AsInt32(op.inputs[i]) for i in range(num_values)]
  if isinstance(grad, ops.IndexedSlices):
    output_shape = array_ops.shape(op.outputs[0])
    output_rows = output_shape[0]
    grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
  values_grad = [array_ops.gather(grad, inp) for inp in inputs]
  return indices_grad + values_grad 
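As a quick illustration of the op whose gradient is registered above, here is a minimal sketch (assuming the TensorFlow v1 compatibility API is available) that stitches two value lists and differentiates through the result:

import tensorflow.compat.v1 as tf  # assumption: TF 2.x with the v1 compat module
tf.disable_eager_execution()

indices = [tf.constant([0, 2]), tf.constant([1, 3])]
data = [tf.constant([10., 20.]), tf.constant([30., 40.])]
stitched = tf.dynamic_stitch(indices, data)          # [10., 30., 20., 40.]
# The registered gradient routes d(sum)/d(data) back through a gather.
grads = tf.gradients(tf.reduce_sum(stitched), data)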
Example #2
Source File: conv_batchnorm.py    From model-optimization with Apache License 2.0
def _build_for_quantization(self):
    """All Keras build() logic for quantization for fused layers."""
    if not self.is_quantized:
      return

    self._weight_quantizer_vars = self.weight_quantizer.build(
        self.weights[0].shape, 'weight', self)

    self.optimizer_step = self.add_weight(
        'optimizer_step',
        initializer=initializers.Constant(-1),
        dtype=dtypes.int32,
        trainable=False)

    # TODO(alanchiao): re-explore if we can handle this with
    # QuantizeAwareActivation.
    self._activation_min_var = self.add_variable(  # pylint: disable=protected-access
        'activation_min',
        initializer=initializers.Constant(-6.0),
        trainable=False)
    self._activation_max_var = self.add_variable(  # pylint: disable=protected-access
        'activation_max',
        initializer=initializers.Constant(6.0),
        trainable=False) 
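The `optimizer_step` pattern above, a non-trainable `int32` counter created with `add_weight`, can be reproduced with the public Keras API. A minimal sketch (the layer name is hypothetical):

import tensorflow as tf

class StepCounterLayer(tf.keras.layers.Layer):  # hypothetical layer for illustration
    def build(self, input_shape):
        # Non-trainable int32 scalar, mirroring the fused layer above.
        self.optimizer_step = self.add_weight(
            name='optimizer_step',
            initializer=tf.keras.initializers.Constant(-1),
            dtype=tf.int32,
            trainable=False)
        super().build(input_shape)

    def call(self, inputs):
        return inputs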
Example #3
Source File: beam_search_decoder_from_tensorflow.py    From tensorflow_end2end_speech_recognition with MIT License
def finalize(self, outputs, final_state, sequence_lengths):
        """Finalize and return the predicted_ids.
        Args:
          outputs: An instance of BeamSearchDecoderOutput.
          final_state: An instance of BeamSearchDecoderState. Passed through to the
            output.
          sequence_lengths: An `int32` tensor shaped `[batch_size, beam_width]`.
            The sequence lengths determined for each beam during decode.
        Returns:
          outputs: An instance of FinalBeamSearchDecoderOutput where the
            predicted_ids are the result of calling _gather_tree.
          final_state: The same input instance of BeamSearchDecoderState.
        """
        predicted_ids = gather_tree(
            outputs.predicted_ids, outputs.parent_ids,
            sequence_length=sequence_lengths)

        outputs = FinalBeamSearchDecoderOutput(
            beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
        return outputs, final_state 
Example #4
Source File: math_ops.py    From lambda-packs with MIT License
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)


# pylint: enable=g-docstring-has-escape 
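A minimal usage sketch of the public wrapper, `tf.negative`, on an `int32` tensor:

import tensorflow as tf

x = tf.constant([1, -2, 3], dtype=tf.int32)
y = tf.negative(x)  # [-1, 2, -3], dtype int32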
Example #5
Source File: math_ops.py    From lambda-packs with MIT License
def square(x, name=None):
  r"""Computes square of x element-wise.

  I.e., \\(y = x * x = x^2\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Square", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      x_square = gen_math_ops.square(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=x_square, dense_shape=x.dense_shape)
    else:
      return gen_math_ops.square(x, name=name) 
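For instance, `tf.square` preserves the input dtype, so squaring an `int32` tensor yields `int32` (a minimal sketch):

import tensorflow as tf

x = tf.constant([2, 3], dtype=tf.int32)
tf.square(x)  # [4, 9], dtype int32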
Example #6
Source File: sets_impl.py    From lambda-packs with MIT License
def set_size(a, validate_indices=True):
  """Compute number of unique elements along last dimension of `a`.

  Args:
    a: `SparseTensor`, with indices sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse indices
       in `a`.

  Returns:
    `int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with
    rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the
    number of unique elements in the corresponding `[0...n-1]` dimension of `a`.

  Raises:
    TypeError: If `a` is an invalid type.
  """
  a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
  if not isinstance(a, sparse_tensor.SparseTensor):
    raise TypeError("Expected `SparseTensor`, got %s." % a)
  if a.values.dtype.base_dtype not in _VALID_DTYPES:
    raise TypeError("Invalid dtype %s." % a.values.dtype)
  # pylint: disable=protected-access
  return gen_set_ops.set_size(
      a.indices, a.values, a.dense_shape, validate_indices) 
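A short sketch of the public wrapper `tf.sets.size`; note that `tf.sparse.from_dense` drops zeros, so only the nonzero entries count toward each row's set:

import tensorflow as tf

dense = tf.constant([[1, 2, 2], [3, 0, 0]], dtype=tf.int32)
a = tf.sparse.from_dense(dense)  # zeros are treated as missing
tf.sets.size(a)                  # [2, 1]: the sets {1, 2} and {3}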
Example #7
Source File: array_ops.py    From lambda-packs with MIT License
def shape(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  This operation returns a 1-D integer tensor representing the shape of `input`.

  For example:

  ```python
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  shape(t) ==> [2, 2, 3]
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`.
  """
  return shape_internal(input, name, optimize=True, out_type=out_type) 
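Continuing the docstring example, a minimal sketch showing the default `int32` output and the `out_type` override:

import tensorflow as tf

t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t)                      # [2, 2, 3], dtype int32
tf.shape(t, out_type=tf.int64)   # same values, dtype int64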
Example #8
Source File: array_ops.py    From lambda-packs with MIT License
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the shape as a constant when possible.
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`.

  """
  with ops.name_scope(name, "Shape", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_math_ops.cast(input.dense_shape, out_type)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.is_fully_defined():
        return constant(input_shape.as_list(), out_type, name=name)
      return gen_array_ops.shape(input, name=name, out_type=out_type) 
Example #9
Source File: math_ops.py    From lambda-packs with MIT License
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`
      otherwise the return type is `quint8`.

  @compatibility(numpy)
  Equivalent to scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(x, name=name) 
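A minimal sketch of `tf.sigmoid` on a float tensor (the quantized return type mentioned in the docstring applies only to `qint32` inputs):

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
tf.sigmoid(x)  # approximately [0.269, 0.5, 0.731]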
Example #10
Source File: math_ops.py    From lambda-packs with MIT License
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
      `complex64`, `int64`, or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor or SparseTensor respectively with the same type as `x` if
    `x.dtype != qint32` otherwise the return type is `quint8`.
  """
  with ops.name_scope(name, "Tanh", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      x_tanh = gen_math_ops._tanh(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=x_tanh, dense_shape=x.dense_shape)
    else:
      return gen_math_ops._tanh(x, name=name) 
Example #11
Source File: array_ops.py    From lambda-packs with MIT License
def size(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the size of a tensor.

  This operation returns an integer representing the number of elements in
  `input`.

  For example:

  ```python
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  size(t) ==> 12
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`.
  """
  return size_internal(input, name, optimize=True, out_type=out_type) 
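The corresponding one-liner with the public API, as a quick check of the docstring example:

import tensorflow as tf

t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t)  # 12, dtype int32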
Example #12
Source File: tf_helpers.py    From Counterfactual-StoryRW with MIT License
def __init__(self, sample_fn, sample_shape, sample_dtype,
                 start_inputs, end_fn, next_inputs_fn=None):
        """Initializer.

        Args:
          sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.
          sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,
            the shape of each sample in the batch returned by `sample_fn`.
          sample_dtype: the dtype of the sample returned by `sample_fn`.
          start_inputs: The initial batch of inputs.
          end_fn: A callable that takes `sample_ids` and emits a `bool` vector
            shaped `[batch_size]` indicating whether each sample is an end token.
          next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns
            the next batch of inputs. If not provided, `sample_ids` is used as the
            next batch of inputs.
        """
        self._sample_fn = sample_fn
        self._end_fn = end_fn
        self._sample_shape = tensor_shape.TensorShape(sample_shape)
        self._sample_dtype = sample_dtype
        self._next_inputs_fn = next_inputs_fn
        self._batch_size = array_ops.shape(start_inputs)[0]
        self._start_inputs = ops.convert_to_tensor(
            start_inputs, name="start_inputs") 
Example #13
Source File: array_ops.py    From lambda-packs with MIT License
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin,protected-access
  """Returns the size of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the size as a constant when possible.
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`.
  """
  with ops.name_scope(name, "Size", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_math_ops._prod(
          gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.is_fully_defined():
        return constant(input_shape.num_elements(), out_type, name=name)
      return gen_array_ops.size(input, name=name, out_type=out_type) 
Example #14
Source File: nn_ops.py    From lambda-packs with MIT License
def crelu(features, name=None):
  """Computes Concatenated ReLU.

  Concatenates a ReLU which selects only the positive part of the activation
  with a ReLU which selects only the *negative* part of the activation.
  Note that as a result this non-linearity doubles the depth of the activations.
  Source: [Understanding and Improving Convolutional Neural Networks via Concatenated Rectified Linear Units. W. Shang, et al.](https://arxiv.org/abs/1603.05201) 

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
      `int16`, or `int8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.name_scope(name, "CRelu", [features]) as name:
    features = ops.convert_to_tensor(features, name="features")
    c = array_ops.concat([features, -features], -1, name=name)
    return gen_nn_ops.relu(c) 
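A minimal sketch of `tf.nn.crelu`, showing how the output depth doubles: the positive and negated-input ReLUs are concatenated along the last axis:

import tensorflow as tf

x = tf.constant([-1.0, 2.0])
tf.nn.crelu(x)  # [0., 2., 1., 0.]: relu(x) followed by relu(-x)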
Example #15
Source File: multinomial.py    From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
    n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
    if self.total_count.get_shape().ndims is not None:
      if self.total_count.get_shape().ndims != 0:
        raise NotImplementedError(
            "Sample only supported for scalar number of draws.")
    elif self.validate_args:
      is_scalar = check_ops.assert_rank(
          n_draws, 0,
          message="Sample only supported for scalar number of draws.")
      n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
    k = self.event_shape_tensor()[0]
    # Flatten batch dims so logits has shape [B, k],
    # where B = reduce_prod(self.batch_shape_tensor()).
    draws = random_ops.multinomial(
        logits=array_ops.reshape(self.logits, [-1, k]),
        num_samples=n * n_draws,
        seed=seed)
    draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
    x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                            axis=-2)  # shape: [B, n, k]
    x = array_ops.transpose(x, perm=[1, 0, 2])
    final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
    return array_ops.reshape(x, final_shape) 
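The sampling core above is `random_ops.multinomial`; its modern public counterpart is `tf.random.categorical`, which this sketch uses to draw class ids from unnormalized log-probabilities:

import tensorflow as tf

logits = tf.math.log([[0.2, 0.8]])                     # one batch row, two classes
draws = tf.random.categorical(logits, num_samples=5)   # shape [1, 5], int64 class ids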
Example #16
Source File: nn_ops.py    From lambda-packs with MIT License
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value`.

  This is a deprecated version of bias_add and will soon be removed.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
  case where both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops._bias_add_v1(value, bias, name=name) 
Example #17
Source File: dirichlet_multinomial.py    From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
    n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
    k = self.event_shape_tensor()[0]
    unnormalized_logits = array_ops.reshape(
        math_ops.log(random_ops.random_gamma(
            shape=[n],
            alpha=self.concentration,
            dtype=self.dtype,
            seed=seed)),
        shape=[-1, k])
    draws = random_ops.multinomial(
        logits=unnormalized_logits,
        num_samples=n_draws,
        seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
    x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)
    final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
    return array_ops.reshape(x, final_shape) 
Example #18
Source File: distribution.py    From lambda-packs with MIT License
def batch_shape_tensor(self, name="batch_shape_tensor"):
    """Shape of a single sample from a single event index as a 1-D `Tensor`.

    The batch dimensions are indexes into independent, non-identical
    parameterizations of this distribution.

    Args:
      name: name to give to the op

    Returns:
      batch_shape: `Tensor`.
    """
    with self._name_scope(name):
      if self.batch_shape.is_fully_defined():
        return ops.convert_to_tensor(self.batch_shape.as_list(),
                                     dtype=dtypes.int32,
                                     name="batch_shape")
      return self._batch_shape_tensor() 
Example #19
Source File: tf_helpers.py    From Counterfactual-StoryRW with MIT License
def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
                 sample_ids_shape=None, sample_ids_dtype=None):
        """Initializer.

        Args:
          initialize_fn: callable that returns `(finished, next_inputs)`
            for the first iteration.
          sample_fn: callable that takes `(time, outputs, state)`
            and emits tensor `sample_ids`.
          next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
            and emits `(finished, next_inputs, next_state)`.
          sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
            `int32`, the shape of each value in the `sample_ids` batch. Defaults to
            a scalar.
          sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
        """
        self._initialize_fn = initialize_fn
        self._sample_fn = sample_fn
        self._next_inputs_fn = next_inputs_fn
        self._batch_size = None
        self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
        self._sample_ids_dtype = sample_ids_dtype or dtypes.int32 
Example #20
Source File: image_ops_impl.py    From lambda-packs with MIT License
def grayscale_to_rgb(images, name=None):
  """Converts one or more images from Grayscale to RGB.

  Outputs a tensor of the same `DType` and rank as `images`.  The size of the
  last dimension of the output is 3, containing the RGB value of the pixels.

  Args:
    images: The Grayscale tensor to convert. Last dimension must be size 1.
    name: A name for the operation (optional).

  Returns:
    The converted grayscale image(s).
  """
  with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name:
    images = ops.convert_to_tensor(images, name='images')
    rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0)
    shape_list = (
        [array_ops.ones(rank_1,
                        dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)])
    multiples = array_ops.concat(shape_list, 0)
    rgb = array_ops.tile(images, multiples, name=name)
    rgb.set_shape(images.get_shape()[:-1].concatenate([3]))
    return rgb


# pylint: disable=invalid-name 
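A minimal sketch with the public wrapper `tf.image.grayscale_to_rgb`, which tiles the size-1 channel dimension to 3:

import tensorflow as tf

gray = tf.zeros([2, 4, 4, 1])           # batch of grayscale images
rgb = tf.image.grayscale_to_rgb(gray)   # shape [2, 4, 4, 3]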
Example #21
Source File: exponential.py    From lambda-packs with MIT License
def _param_shapes(sample_shape):
    return {"rate": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)} 
Example #22
Source File: gamma.py    From lambda-packs with MIT License
def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32) 
Example #23
Source File: control_flow_ops.py    From lambda-packs with MIT License
def Assert(condition, data, summarize=None, name=None):
  """Asserts that the given condition is true.

  If `condition` evaluates to false, print the list of tensors in `data`.
  `summarize` determines how many entries of the tensors to print.

  NOTE: To ensure that Assert executes, one usually attaches a dependency:

  ```python
  # Ensure maximum element of x is smaller or equal to 1
  assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x])
  with tf.control_dependencies([assert_op]):
    ... code using x ...
  ```

  Args:
    condition: The condition to evaluate.
    data: The tensors to print out when condition is false.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional).

  Returns:
    assert_op: An `Operation` that, when executed, raises a
    `tf.errors.InvalidArgumentError` if `condition` is not true.
  """
  with ops.name_scope(name, "Assert", [condition, data]) as name:
    xs = ops.convert_n_to_tensor(data)
    if all([x.dtype in {dtypes.string, dtypes.int32} for x in xs]):
      # As a simple heuristic, we assume that string and int32 are
      # on host to avoid the need to use cond. If it is not case,
      # we will pay the price copying the tensor to host memory.
      return gen_logging_ops._assert(
          condition, data, summarize, name="Assert")
    else:
      condition = ops.convert_to_tensor(condition, name="Condition")
      def true_assert():
        return gen_logging_ops._assert(
            condition, data, summarize, name="Assert")
      guarded_assert = cond(
          condition, no_op, true_assert, name="AssertGuard")
      return guarded_assert.op 
Example #24
Source File: math_ops.py    From lambda-packs with MIT License
def pow(x, y, name=None):
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
     or `complex128`.
    y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
     or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    return gen_math_ops._pow(x, y, name=name)


# pylint: disable=redefined-builtin,redefined-outer-name 
Example #25
Source File: spectral_ops.py    From lambda-packs with MIT License
def _irfft_wrapper(ifft_fn, fft_rank, default_name):
  """Wrapper around gen_spectral_ops.irfft* that infers fft_length argument."""

  def _irfft(input_tensor, fft_length=None, name=None):
    with _ops.name_scope(name, default_name,
                         [input_tensor, fft_length]) as name:
      input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.complex64)
      if fft_length is None:
        fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)
      else:
        fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
      return ifft_fn(input_tensor, fft_length, name)
  _irfft.__doc__ = ifft_fn.__doc__
  return _irfft 
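In current TensorFlow the generated wrappers surface as `tf.signal.irfft*`. A minimal sketch of the inference behavior: with no explicit `fft_length`, a length-5 spectrum implies a length-8 signal, i.e. 2 * (5 - 1):

import tensorflow as tf

spectrum = tf.complex(tf.random.normal([5]), tf.random.normal([5]))
signal = tf.signal.irfft(spectrum)  # shape [8]; fft_length inferred as 2 * (5 - 1)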
Example #26
Source File: nn_ops.py    From lambda-packs with MIT License
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
  """Helper function for verifying strides and dilation_rate arguments.

  This is used by `convolution` and `pool`.

  Args:
    num_spatial_dims: int
    strides: Optional.  List of N ints >= 1.  Defaults to [1]*N.  If any value
      of strides is > 1, then all values of dilation_rate must be 1.
    dilation_rate: Optional.  List of N ints >= 1.  Defaults to [1]*N.  If any
      value of dilation_rate is > 1, then all values of strides must be 1.

  Returns:
    Normalized (strides, dilation_rate) as int32 numpy arrays of shape
    [num_spatial_dims].

  Raises:
    ValueError: if the parameters are invalid.
  """
  if dilation_rate is None:
    dilation_rate = [1] * num_spatial_dims
  elif len(dilation_rate) != num_spatial_dims:
    raise ValueError("len(dilation_rate)=%d but should be %d" %
                     (len(dilation_rate), num_spatial_dims))
  dilation_rate = np.array(dilation_rate, dtype=np.int32)
  if np.any(dilation_rate < 1):
    raise ValueError("all values of dilation_rate must be positive")

  if strides is None:
    strides = [1] * num_spatial_dims
  elif len(strides) != num_spatial_dims:
    raise ValueError("len(strides)=%d but should be %d" %
                     (len(strides), num_spatial_dims))
  strides = np.array(strides, dtype=np.int32)
  if np.any(strides < 1):
    raise ValueError("all values of strides must be positive")

  if np.any(strides > 1) and np.any(dilation_rate > 1):
    raise ValueError(
        "strides > 1 not supported in conjunction with dilation_rate > 1")
  return strides, dilation_rate 
Example #27
Source File: spectral_ops.py    From lambda-packs with MIT License
def _rfft_wrapper(fft_fn, fft_rank, default_name):
  """Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""

  def _rfft(input_tensor, fft_length=None, name=None):
    with _ops.name_scope(name, default_name,
                         [input_tensor, fft_length]) as name:
      input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.float32)
      if fft_length is None:
        fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)
      else:
        fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
      return fft_fn(input_tensor, fft_length, name)
  _rfft.__doc__ = fft_fn.__doc__
  return _rfft 
Example #28
Source File: spectral_ops.py    From lambda-packs with MIT License
def _infer_fft_length_for_rfft(input_tensor, fft_rank):
  """Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`."""
  # A TensorShape for the inner fft_rank dimensions.
  fft_shape = input_tensor.get_shape()[-fft_rank:]

  # If any dim is unknown, fall back to tensor-based math.
  if not fft_shape.is_fully_defined():
    return _array_ops.shape(input_tensor)[-fft_rank:]

  # Otherwise, return a constant.
  return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32) 
Example #29
Source File: nn_ops.py    From lambda-packs with MIT License
def bias_add(value, bias, data_format=None, name=None):
  """Adds `bias` to `value`.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
  case where both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops._bias_add(value, bias, data_format=data_format, name=name)


# pylint: disable=protected-access 
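A minimal usage sketch of `tf.nn.bias_add`, broadcasting a 1-D bias across the rows of a 2-D tensor:

import tensorflow as tf

value = tf.constant([[1.0, 2.0], [3.0, 4.0]])
bias = tf.constant([0.5, -0.5])
tf.nn.bias_add(value, bias)  # [[1.5, 1.5], [3.5, 3.5]]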
Example #30
Source File: nn_ops.py    From lambda-packs with MIT License
def top_k(input, k=1, sorted=True, name=None):
  """Finds values and indices of the `k` largest entries for the last dimension.

  If the input is a vector (rank-1), finds the `k` largest entries in the vector
  and outputs their values and indices as vectors.  Thus `values[j]` is the
  `j`-th largest entry in `input`, and its index is `indices[j]`.

  For matrices (resp. higher rank input), computes the top `k` entries in each
  row (resp. vector along the last dimension).  Thus,

      values.shape = indices.shape = input.shape[:-1] + [k]

  If two elements are equal, the lower-index element appears first.

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `k`.
    k: 0-D `int32` `Tensor`.  Number of top elements to look for along the last
      dimension (along each row for matrices).
    sorted: If true the resulting `k` elements will be sorted by the values in
      descending order.
    name: Optional name for the operation.

  Returns:
    values: The `k` largest elements along each last dimensional slice.
    indices: The indices of `values` within the last dimension of `input`.
  """
  return gen_nn_ops._top_kv2(input, k=k, sorted=sorted, name=name)
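A minimal sketch of the public wrapper `tf.math.top_k`; the returned `indices` are `int32`:

import tensorflow as tf

values, indices = tf.math.top_k(tf.constant([1, 5, 3, 4]), k=2)
# values  -> [5, 4]
# indices -> [1, 3], dtype int32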