Python tensorflow.python.framework.ops.name_scope() Examples

The following are 30 code examples of tensorflow.python.framework.ops.name_scope(), drawn from open-source projects. Each example notes the source file and project it was taken from. You may also want to check out the other available functions and classes of the module tensorflow.python.framework.ops.
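Nearly every example below follows the same calling convention: ops.name_scope(name, default_name, values) returns a context manager that yields the resolved scope string, which is then passed as the name of the op created inside the block. A minimal sketch of that pattern (the helper scaled_add and its contents are illustrative, not part of TensorFlow):

```python
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops


def scaled_add(x, y, name=None):
  # If `name` is None, the default name "ScaledAdd" is used; listing [x, y]
  # lets name_scope validate that the values belong to the current graph.
  with ops.name_scope(name, "ScaledAdd", [x, y]) as scope:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    # The yielded `scope` string names the op that produces the result.
    return math_ops.add(2.0 * x, y, name=scope)
```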
Example #1
Source File: variable_scope.py    From lambda-packs with MIT License
def __init__(self,
               reuse,
               name="",
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               name_scope="",
               dtype=dtypes.float32,
               use_resource=None):
    """Creates a new VariableScope with the given properties."""
    self._name = name
    self._initializer = initializer
    self._regularizer = regularizer
    self._reuse = reuse
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._name_scope = name_scope
    self._dtype = dtype
    self._use_resource = use_resource 
Example #2
Source File: layers.py    From tensornets with MIT License
def _lower_bound(inputs, bound, name=None):
    """Same as tf.maximum, but with helpful gradient for inputs < bound.

    The gradient is overwritten so that it is passed through if the input is not
    hitting the bound. If it is, only gradients that push `inputs` higher than
    the bound are passed through. No gradients are passed through to the bound.

    Args:
      inputs: input tensor
      bound: lower bound for the input tensor
      name: name for this op

    Returns:
      tf.maximum(inputs, bound)
    """
    with ops.name_scope(name, 'GDNLowerBound', [inputs, bound]) as scope:
      inputs = ops.convert_to_tensor(inputs, name='inputs')
      bound = ops.convert_to_tensor(bound, name='bound')
      with ops.get_default_graph().gradient_override_map(
          {'Maximum': 'GDNLowerBound'}):
        return math_ops.maximum(inputs, bound, name=scope) 
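The gradient_override_map call above only remaps the gradient lookup name; it relies on a gradient function registered under 'GDNLowerBound' elsewhere in the source file. A hedged sketch of what such a registration could look like, following the behavior described in the docstring (not quoted verbatim from tensornets):

```python
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops


@ops.RegisterGradient("GDNLowerBound")
def _lower_bound_grad(op, grad):
  """Passes gradients through unless the input is stuck at the bound."""
  inputs, bound = op.inputs[0], op.inputs[1]
  # Pass the gradient through when the input is above the bound, or when the
  # gradient would push the input upward; block everything else, and return
  # no gradient for the bound itself.
  pass_through_if = math_ops.logical_or(inputs >= bound, grad < 0)
  return [math_ops.cast(pass_through_if, grad.dtype) * grad, None]
```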
Example #3
Source File: layers.py    From tensornets with MIT License
def flatten(inputs, outputs_collections=None, scope=None):
  """Flattens the input while maintaining the batch_size.

    Assumes that the first dimension represents the batch.

  Args:
    inputs: A tensor of size [batch_size, ...].
    outputs_collections: Collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    A flattened tensor with shape [batch_size, k].
  Raises:
    ValueError: If inputs rank is unknown or less than 2.
  """
  with ops.name_scope(scope, 'Flatten', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    outputs = core_layers.flatten(inputs)
    return utils.collect_named_outputs(outputs_collections, sc, outputs) 
Example #4
Source File: array_ops.py    From lambda-packs with MIT License
def rank_internal(input, name=None, optimize=True):
  # pylint: disable=redefined-builtin
  """Returns the rank of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the rank as a constant when possible.

  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, "Rank", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_array_ops.size(input.dense_shape, name=name)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.ndims is not None:
        return constant(input_shape.ndims, dtypes.int32, name=name)
      return gen_array_ops.rank(input, name=name) 
Example #5
Source File: layers.py    From tensornets with MIT License
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
     tensor: An `int` `Tensor` to be converted to a `Sparse`.
     eos_token: An integer. It is part of the target label that signifies the
       end of a sentence.
     outputs_collections: Collection to add the outputs.
     scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs) 
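A short usage sketch, assuming the TF 1.x contrib wrapper tf.contrib.layers.dense_to_sparse exposes this function: padded integer labels become a SparseTensor whose values skip the eos_token, ready to be fed to tf.nn.ctc_loss.

```python
import tensorflow as tf  # TF 1.x

labels = tf.constant([[1, 2, 0], [3, 0, 0]])  # 0 is the eos/padding token
sparse_labels = tf.contrib.layers.dense_to_sparse(labels, eos_token=0)
with tf.Session() as sess:
  print(sess.run(sparse_labels.indices))  # [[0 0] [0 1] [1 0]]
  print(sess.run(sparse_labels.values))   # [1 2 3]
```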
Example #6
Source File: array_ops.py    From lambda-packs with MIT License
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin,protected-access
  """Returns the size of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the size as a constant when possible.
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`.
  """
  with ops.name_scope(name, "Size", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_math_ops._prod(
          gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.is_fully_defined():
        return constant(input_shape.num_elements(), out_type, name=name)
      return gen_array_ops.size(input, name=name, out_type=out_type) 
Example #7
Source File: recurrent_modules.py    From ludwig with Apache License 2.0
def step(self, time, inputs, state, name=None):
        with ops.name_scope(name, 'BasicDecoderStep', (time, inputs, state)):
            cell_outputs, cell_state = self._cell(inputs, state)
            projection_inputs = cell_outputs  # get projection_inputs to compute sampled_softmax_cross_entropy_loss
            if self._output_layer is not None:
                cell_outputs = self._output_layer(cell_outputs)
            sample_ids = self._helper.sample(
                time=time, outputs=cell_outputs, state=cell_state)
            (finished, next_inputs, next_state) = self._helper.next_inputs(
                time=time,
                outputs=cell_outputs,
                state=cell_state,
                sample_ids=sample_ids)
        outputs = BasicDecoderOutput(cell_outputs, sample_ids,
                                     projection_inputs)
        return (outputs, next_state, next_inputs, finished) 
Example #8
Source File: nn_ops.py    From lambda-packs with MIT License
def crelu(features, name=None):
  """Computes Concatenated ReLU.

  Concatenates a ReLU which selects only the positive part of the activation
  with a ReLU which selects only the *negative* part of the activation.
  Note that as a result this non-linearity doubles the depth of the activations.
  Source: [Understanding and Improving Convolutional Neural Networks via Concatenated Rectified Linear Units. W. Shang, et al.](https://arxiv.org/abs/1603.05201) 

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
      `int16`, or `int8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.name_scope(name, "CRelu", [features]) as name:
    features = ops.convert_to_tensor(features, name="features")
    c = array_ops.concat([features, -features], -1, name=name)
    return gen_nn_ops.relu(c) 
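For reference, a small usage sketch of the public TF 1.x wrapper tf.nn.crelu, illustrating that the channel (last) dimension doubles:

```python
import tensorflow as tf  # TF 1.x

x = tf.constant([[1.0, -2.0, 3.0]])  # shape [1, 3]
y = tf.nn.crelu(x)                   # shape [1, 6]: concat of relu(x) and relu(-x)
with tf.Session() as sess:
  print(sess.run(y))  # [[1. 0. 3. 0. 2. 0.]]
```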
Example #9
Source File: nn_ops.py    From lambda-packs with MIT License
def xw_plus_b(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes matmul(x, weights) + biases.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "xw_plus_b" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    mm = math_ops.matmul(x, weights)
    return bias_add(mm, biases, name=name) 
Example #10
Source File: nn_ops.py    From lambda-packs with MIT License
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the max pooling on the input.

  Args:
    value: A 4-D `Tensor` with shape `[batch, height, width, channels]` and
      type `tf.float32`.
    ksize: A list of ints that has length >= 4.  The size of the window for
      each dimension of the input tensor.
    strides: A list of ints that has length >= 4.  The stride of the sliding
      window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the comment in `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with type `tf.float32`.  The max pooled output tensor.
  """
  with ops.name_scope(name, "MaxPool", [value]) as name:
    value = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops._max_pool(value,
                                ksize=ksize,
                                strides=strides,
                                padding=padding,
                                data_format=data_format,
                                name=name) 
Example #11
Source File: clip_ops.py    From lambda-packs with MIT License
def clip_by_value(t, clip_value_min, clip_value_max,
                  name=None):
  """Clips tensor values to a specified min and max.

  Given a tensor `t`, this operation returns a tensor of the same type and
  shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
  Any values less than `clip_value_min` are set to `clip_value_min`. Any values
  greater than `clip_value_max` are set to `clip_value_max`.

  Args:
    t: A `Tensor`.
    clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
      as `t`. The minimum value to clip by.
    clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
      as `t`. The maximum value to clip by.
    name: A name for the operation (optional).

  Returns:
    A clipped `Tensor`.

  Raises:
    ValueError: if the clip tensors would trigger array broadcasting
      that would make the returned tensor larger than the input.
  """
  with ops.name_scope(name, "clip_by_value",
                      [t, clip_value_min, clip_value_max]) as name:
    t = ops.convert_to_tensor(t, name="t")

    # Go through list of tensors, for each value in each tensor clip
    t_min = math_ops.minimum(t, clip_value_max)
    # Assert that the shape is compatible with the initial shape,
    # to prevent unintentional broadcasting.
    _ = t.shape.merge_with(t_min.shape)

    t_max = math_ops.maximum(t_min, clip_value_min, name=name)
    _ = t.shape.merge_with(t_max.shape)

  return t_max 
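A brief usage sketch of the public tf.clip_by_value (TF 1.x), reflecting the order applied above (minimum against clip_value_max first, then maximum against clip_value_min):

```python
import tensorflow as tf  # TF 1.x

t = tf.constant([-3.0, 0.5, 7.0])
clipped = tf.clip_by_value(t, clip_value_min=0.0, clip_value_max=1.0)
with tf.Session() as sess:
  print(sess.run(clipped))  # [0.  0.5 1. ]
```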
Example #12
Source File: array_ops.py    From lambda-packs with MIT License
def zeros_like(tensor, dtype=None, name=None, optimize=True):
  """Creates a tensor with all elements set to zero.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to zero. Optionally,
  you can use `dtype` to specify a new type for the returned tensor.

  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
    `int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
    name: A name for the operation (optional).
    optimize: if true, attempt to statically determine the shape of 'tensor'
    and encode it as a constant.

  Returns:
    A `Tensor` with all elements set to zero.
  """
  with ops.name_scope(name, "zeros_like", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")

    if tensor.shape.is_fully_defined():
      # We can produce a zeros tensor independent of the value of 'tensor',
      # since the shape is known statically.
      return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)

    if dtype is not None and dtype != tensor.dtype:
      return zeros(shape_internal(tensor, optimize=optimize), dtype=dtype,
                   name=name)
    else:
      return gen_array_ops._zeros_like(tensor, name=name) 
Example #13
Source File: sparse_ops.py    From lambda-packs with MIT License
def sparse_maximum(sp_a, sp_b, name=None):
  """Returns the element-wise max of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
  Example:

  ```python
  sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
  sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
  res = tf.sparse_maximum(sp_zero, sp_one).eval()
  # "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
  ```

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and the
      same shape).
    name: optional name of the operation.
  Returns:
    output: the output SparseTensor.
  """
  with ops.name_scope(name, "SparseSparseMaximum", [sp_a.indices, sp_a.values,
                                                    sp_b.indices,
                                                    sp_b.values]) as name:
    out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum(
        sp_a.indices,
        sp_a.values,
        sp_a.dense_shape,
        sp_b.indices,
        sp_b.values,
        sp_b.dense_shape,
        name=name)
  return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape) 
Example #14
Source File: nn_ops.py    From lambda-packs with MIT License
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: A list of ints that has length >= 4.
      The size of the window for each dimension of the input tensor.
    strides: A list of ints that has length >= 4.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the comment in `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`.  The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool", [value]) as name:
    value = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops._avg_pool(value,
                                ksize=ksize,
                                strides=strides,
                                padding=padding,
                                data_format=data_format,
                                name=name) 
Example #15
Source File: sparse_ops.py    From lambda-packs with MIT License
def sparse_minimum(sp_a, sp_b, name=None):
  """Returns the element-wise min of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
  Example:

  ```python
  sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
  sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
  res = tf.sparse_minimum(sp_zero, sp_one).eval()
  # "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
  ```

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and the
      same shape).
    name: optional name of the operation.
  Returns:
    output: the output SparseTensor.
  """
  with ops.name_scope(name, "SparseSparseMinimum", [sp_a.indices, sp_a.values,
                                                    sp_b.indices,
                                                    sp_b.values]) as name:
    out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum(
        sp_a.indices,
        sp_a.values,
        sp_a.dense_shape,
        sp_b.indices,
        sp_b.values,
        sp_b.dense_shape,
        name=name)
  return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape) 
Example #16
Source File: nn_ops.py    From lambda-packs with MIT License
def bias_add(value, bias, data_format=None, name=None):
  """Adds `bias` to `value`.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
  case where both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops._bias_add(value, bias, data_format=data_format, name=name)


# pylint: disable=protected-access 
Example #17
Source File: nn_ops.py    From lambda-packs with MIT License
def relu6(features, name=None):
  """Computes Rectified Linear 6: `min(max(features, 0), 6)`.
  Source: [Convolutional Deep Belief Networks on CIFAR-10. A. Krizhevsky](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf)

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
      `int16`, or `int8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.name_scope(name, "Relu6", [features]) as name:
    features = ops.convert_to_tensor(features, name="features")
    return gen_nn_ops._relu6(features, name=name) 
Example #18
Source File: array_ops.py    From lambda-packs with MIT License
def ones_like(tensor, dtype=None, name=None, optimize=True):
  """Creates a tensor with all elements set to 1.

  Given a single tensor (`tensor`), this operation returns a tensor of the same
  type and shape as `tensor` with all elements set to 1. Optionally, you can
  specify a new type (`dtype`) for the returned tensor.

  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, `complex128` or
      `bool`.
    name: A name for the operation (optional).
    optimize: if true, attempt to statically determine the shape of 'tensor'
    and encode it as a constant.

  Returns:
    A `Tensor` with all elements set to 1.
  """
  with ops.name_scope(name, "ones_like", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    ones_shape = shape_internal(tensor, optimize=optimize)
    if dtype is None:
      dtype = tensor.dtype
    ret = ones(ones_shape, dtype=dtype, name=name)
    ret.set_shape(tensor.get_shape())
    return ret 
Example #19
Source File: array_ops.py    From lambda-packs with MIT License
def zeros(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to zero.

  This operation returns a tensor of type `dtype` with shape `shape` and
  all elements set to zero.

  For example:

  ```python
  tf.zeros([3, 4], tf.int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "zeros", [shape]) as name:
    if dtype == dtypes.bool:
      zero = False
    elif dtype == dtypes.string:
      zero = ""
    else:
      zero = 0
    try:
      shape = tensor_shape.as_shape(shape)
      output = constant(zero, shape=shape, dtype=dtype, name=name)
    except (TypeError, ValueError):
      shape = ops.convert_to_tensor(shape, dtype=dtypes.int32, name="shape")
      output = fill(shape, constant(zero, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output 
Example #20
Source File: data_flow_ops.py    From lambda-packs with MIT License
def enqueue(self, vals, name=None):
    """Enqueues one element to this queue.

    If the queue is full when this operation executes, it will block
    until the element has been enqueued.

    At runtime, this operation may raise an error if the queue is
    closed (via `tf.QueueBase.close`) before or during its execution. If the
    queue is closed before this operation runs,
    `tf.errors.CancelledError` will be raised. If this operation is
    blocked, and either (i) the queue is closed by a close operation
    with `cancel_pending_enqueues=True`, or (ii) the session is
    closed (via `tf.Session.close`),
    `tf.errors.CancelledError` will be raised.

    Args:
      vals: A tensor, a list or tuple of tensors, or a dictionary containing
        the values to enqueue.
      name: A name for the operation (optional).

    Returns:
      The operation that enqueues a new tuple of tensors to the queue.
    """
    with ops.name_scope(name, "%s_enqueue" % self._name,
                        self._scope_vals(vals)) as scope:
      vals = self._check_enqueue_dtypes(vals)

      # NOTE(mrry): Not using a shape function because we need access to
      # the `QueueBase` object.
      for val, shape in zip(vals, self._shapes):
        val.get_shape().assert_is_compatible_with(shape)

      if self._queue_ref.dtype == _dtypes.resource:
        return gen_data_flow_ops._queue_enqueue_v2(
            self._queue_ref, vals, name=scope)
      else:
        return gen_data_flow_ops._queue_enqueue(
            self._queue_ref, vals, name=scope) 
Example #21
Source File: gradients_impl.py    From lambda-packs with MIT License
def _MaybeCompile(scope, op, func, grad_fn):
  """Compile the calculation in grad_fn if op was marked as compiled."""
  scope = scope.rstrip("/").replace("/", "_")
  if func is not None:
    xla_compile = func.definition.attr["_XlaCompile"].b
    xla_separate_compiled_gradients = func.definition.attr[
        "_XlaSeparateCompiledGradients"].b
    xla_scope = func.definition.attr["_XlaScope"].s.decode()
  else:
    try:
      xla_compile = op.get_attr("_XlaCompile")
      xla_separate_compiled_gradients = op.get_attr(
          "_XlaSeparateCompiledGradients")
      xla_scope = op.get_attr("_XlaScope").decode()
    except ValueError:
      return grad_fn()  # Exit early

  if not xla_compile:
    return grad_fn()  # Exit early

  # If the gradients are supposed to be compiled separately, we give them a
  # _XlaScope name that is based on the name_scope of the gradients.  Otherwise
  # they just inherit the existing _XlaScope name, which lets them be merged
  # together with the non-gradient computation.
  if xla_separate_compiled_gradients:
    xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
  else:
    xla_grad_scope = xla_scope

  attrs = {
      "_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
      "_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
  }
  with ops.get_default_graph()._attr_scope(attrs):  # pylint: disable=protected-access
    return grad_fn() 
Example #22
Source File: metrics_impl.py    From lambda-packs with MIT License
def _num_relevant(labels, k):
  """Computes number of relevant values for each row in labels.

  For labels with shape [D1, ... DN, num_labels], this is the minimum of
  `num_labels` and `k`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels].
    k: Integer, k for @k metric.

  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.

  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    # For SparseTensor, calculate separate count for each row.
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      return math_ops.minimum(sets.set_size(labels), k, name=scope)

    # For dense Tensor, calculate scalar count based on last dimension, and
    # tile across labels shape.
    labels_shape = array_ops.shape(labels)
    labels_size = labels_shape[-1]
    num_relevant_scalar = math_ops.minimum(labels_size, k)
    return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope) 
Example #23
Source File: rnn_cell_impl.py    From lambda-packs with MIT License
def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      if self._state_is_tuple:
        return tuple(cell.zero_state(batch_size, dtype) for cell in self._cells)
      else:
        # We know here that state_size of each cell is not a tuple and
        # presumably does not contain TensorArrays or anything else fancy
        return super(MultiRNNCell, self).zero_state(batch_size, dtype) 
Example #24
Source File: rnn_cell_impl.py    From lambda-packs with MIT License
def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return self._cell.zero_state(batch_size, dtype) 
Example #25
Source File: rnn_cell_impl.py    From lambda-packs with MIT License
def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return self._cell.zero_state(batch_size, dtype) 
Example #26
Source File: rnn_cell_impl.py    From lambda-packs with MIT License
def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int or TensorShape, then the return value is a
      `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size x s]` for each s in `state_size`.
    """
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      state_size = self.state_size
      return _zero_state_tensors(state_size, batch_size, dtype) 
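A minimal usage sketch, assuming TF 1.x's tf.nn.rnn_cell.LSTMCell; the returned state mirrors the structure of state_size (here an LSTMStateTuple):

```python
import tensorflow as tf  # TF 1.x

cell = tf.nn.rnn_cell.LSTMCell(num_units=8)
state = cell.zero_state(batch_size=4, dtype=tf.float32)
# LSTMCell's state_size is a pair, so `state` is an LSTMStateTuple of two
# zero-filled [4, 8] tensors (c and h).
print(state.c.shape, state.h.shape)  # (4, 8) (4, 8)
```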
Example #27
Source File: data_flow_ops.py    From lambda-packs with MIT License
def put(self, values, name=None):
    """Create an op that places a value into the staging area.

    Args:
      values: Tensor (or a tuple of Tensors) to place into the staging area.
      name: A name for the operation (optional).

    Returns:
        The created op.

    Raises:
      ValueError: If the number or type of inputs don't match the staging area.
    """
    with ops.name_scope(name, "%s_put" % self._name,
                        self._scope_vals(values)) as scope:
      vals = self._check_put_dtypes(values)
      if len(values) != len(self._dtypes):
        raise ValueError("Unexpected number of inputs " + str(len(values)) +
                         "vs " + str(len(self._dtypes)))
      for val, dtype in zip(vals, self._dtypes):
        if val.dtype != dtype:
          raise ValueError("Datatypes do not match. " + str(val.dtype) + " != "
                           + str(dtype))

      for val, shape in zip(vals, self._shapes):
        val.get_shape().assert_is_compatible_with(shape)

      with ops.colocate_with(self._coloc_op):
        op = gen_data_flow_ops.stage(values=vals, shared_name=self._name,
                                     name=scope)

      return op 
Example #28
Source File: data_flow_ops.py    From lambda-packs with MIT License
def _scope_vals(self, vals):
    """Return a list of values to pass to `name_scope()`.

    Args:
      vals: A tensor, a list or tuple of tensors, or a dictionary.

    Returns:
      The values in vals as a list.
    """
    if isinstance(vals, (list, tuple)):
      return vals
    elif isinstance(vals, dict):
      return vals.values()
    else:
      return [vals] 
Example #29
Source File: data_flow_ops.py    From lambda-packs with MIT License
def enqueue_many(self, vals, name=None):
    """Enqueues zero or more elements to this queue.

    This operation slices each component tensor along the 0th dimension to
    make multiple queue elements. All of the tensors in `vals` must have the
    same size in the 0th dimension.

    If the queue is full when this operation executes, it will block
    until all of the elements have been enqueued.

    At runtime, this operation may raise an error if the queue is
    closed (via `tf.QueueBase.close`) before or during its execution. If the
    queue is closed before this operation runs,
    `tf.errors.CancelledError` will be raised. If this operation is
    blocked, and either (i) the queue is closed by a close operation
    with `cancel_pending_enqueues=True`, or (ii) the session is
    closed (via `tf.Session.close`),
    `tf.errors.CancelledError` will be raised.

    Args:
      vals: A tensor, a list or tuple of tensors, or a dictionary
        from which the queue elements are taken.
      name: A name for the operation (optional).

    Returns:
      The operation that enqueues a batch of tuples of tensors to the queue.
    """
    with ops.name_scope(name, "%s_EnqueueMany" % self._name,
                        self._scope_vals(vals)) as scope:
      vals = self._check_enqueue_dtypes(vals)

      # NOTE(mrry): Not using a shape function because we need access to
      # the `QueueBase` object.
      batch_dim = vals[0].get_shape().with_rank_at_least(1)[0]
      for val, shape in zip(vals, self._shapes):
        batch_dim = batch_dim.merge_with(
            val.get_shape().with_rank_at_least(1)[0])
        val.get_shape()[1:].assert_is_compatible_with(shape)

      return gen_data_flow_ops._queue_enqueue_many_v2(
          self._queue_ref, vals, name=scope) 
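A hedged usage sketch with tf.FIFOQueue (TF 1.x), where the 0th dimension of the enqueued tensor is sliced into individual queue elements:

```python
import tensorflow as tf  # TF 1.x

q = tf.FIFOQueue(capacity=10, dtypes=[tf.float32], shapes=[[2]])
batch = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # shape [2, 2]
enqueue_op = q.enqueue_many([batch])           # enqueues two elements of shape [2]
dequeue_op = q.dequeue()
with tf.Session() as sess:
  sess.run(enqueue_op)
  print(sess.run(dequeue_op))  # [1. 2.]
```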
Example #30
Source File: tf_helpers.py    From Counterfactual-StoryRW with MIT License
def initialize(self, name=None):
        with ops.name_scope(name, "%sInitialize" % type(self).__name__):
            (finished, next_inputs) = self._initialize_fn()
            if self._batch_size is None:
                self._batch_size = array_ops.size(finished)
        return (finished, next_inputs)