Python tensorflow.python.framework.ops.IndexedSlices() Examples

The following are 30 code examples of tensorflow.python.framework.ops.IndexedSlices(), extracted from open source projects. Each example notes the source file and project it comes from. You may also want to check out all available functions/classes of the module tensorflow.python.framework.ops.
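For orientation, here is a minimal sketch (assuming a TensorFlow 1.x graph-mode session; all values are illustrative) of constructing an IndexedSlices by hand and materializing it as a dense tensor:

import tensorflow as tf
from tensorflow.python.framework import ops

# Two rows destined for rows 0 and 3 of a [5, 2] tensor.
values = tf.constant([[1.0, 2.0], [3.0, 4.0]])
indices = tf.constant([0, 3])
dense_shape = tf.constant([5, 2])

sparse_grad = ops.IndexedSlices(values, indices, dense_shape)

# Converting to a dense Tensor scatters the rows; duplicate indices are summed.
dense = tf.convert_to_tensor(sparse_grad)

with tf.Session() as sess:
  print(sess.run(dense))  # rows 0 and 3 populated, everything else zero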
Example #1
Source File: optimizers.py    From lambda-packs with MIT License
def _multiply_gradients(grads_and_vars, gradient_multipliers):
  """Multiply specified gradients."""
  multiplied_grads_and_vars = []
  for grad, var in grads_and_vars:
    if (grad is not None and
        (var in gradient_multipliers or var.name in gradient_multipliers)):
      key = var if var in gradient_multipliers else var.name
      multiplier = constant_op.constant(
          gradient_multipliers[key], dtype=dtypes.float32)
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values * multiplier
        grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
      else:
        grad *= multiplier
    multiplied_grads_and_vars.append((grad, var))
  return multiplied_grads_and_vars 
Example #2
Source File: data_flow_grad.py    From lambda-packs with MIT License
def _DynamicStitchGrads(op, grad):
  """Gradients for DynamicStitch."""

  num_values = len(op.inputs) // 2
  indices_grad = [None] * num_values

  def AsInt32(x):
    return (x if op.inputs[0].dtype == dtypes.int32 else
            math_ops.cast(x, dtypes.int32))
  inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
  if isinstance(grad, ops.IndexedSlices):
    output_shape = array_ops.shape(op.outputs[0])
    output_rows = output_shape[0]
    grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
  values_grad = [array_ops.gather(grad, inp) for inp in inputs]
  return indices_grad + values_grad 
Example #3
Source File: learning.py    From ctw-baseline with MIT License
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '/gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '/gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
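A hedged usage sketch (TF 1.x): gradients that flow through tf.gather arrive as IndexedSlices, and add_gradients_summaries records them via grad.values. The toy model below is illustrative, and the module-level imports of learning.py are assumed to be in scope.

import tensorflow as tf

emb = tf.Variable(tf.ones([10, 4]))
rows = tf.gather(emb, [0, 3, 3])   # the gradient w.r.t. emb is an IndexedSlices
loss = tf.reduce_sum(rows)

optimizer = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = optimizer.compute_gradients(loss)

summaries = add_gradients_summaries(grads_and_vars)  # function shown above
train_op = optimizer.apply_gradients(grads_and_vars)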
Example #4
Source File: learning.py    From auto-alt-text-lambda-api with MIT License
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '/gradient', grad_values))
      summaries.append(
          summary.histogram(var.op.name + '/gradient_norm',
                            clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
Example #5
Source File: optimizer.py    From auto-alt-text-lambda-api with MIT License
def _apply_sparse(self, grad, var):
    """Add ops to apply sparse gradients to `var`.

    The IndexedSlices object passed to `grad` in this function is by default
    pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
    indices (see its docstring for details). Optimizers which can tolerate or
    have correct special cases for duplicate sparse indices may override
    `_apply_sparse_duplicate_indices` instead of this function, avoiding that
    overhead.

    Args:
      grad: `IndexedSlices`, with no repeated indices.
      var: A `Variable` object.

    Returns:
      An `Operation`.
    """
    raise NotImplementedError() 
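As a hedged illustration only (not copied from any shipped optimizer), a subclass could implement _apply_sparse as plain SGD on the touched rows. self._learning_rate and self._use_locking are hypothetical attributes of the enclosing subclass; the imports are the standard internal modules.

from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops

def _apply_sparse(self, grad, var):
  # Relies on the no-repeated-indices guarantee described in the docstring.
  lr = math_ops.cast(self._learning_rate, var.dtype.base_dtype)
  # Subtract lr * grad.values from only the rows named by grad.indices.
  return state_ops.scatter_sub(var, grad.indices, grad.values * lr,
                               use_locking=self._use_locking)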
Example #6
Source File: control_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def _InitializeValues(self, values):
    """Makes the values known to this context."""
    self._values = set()
    for x in values:
      if isinstance(x, ops.Tensor):
        self._values.add(x.name)
      else:
        self._values.add(x.values.name)
        self._values.add(x.indices.name)
        if isinstance(x, ops.IndexedSlices):
          dense_shape = x.dense_shape
        elif isinstance(x, sparse_tensor.SparseTensor):
          dense_shape = x.dense_shape
        else:
          raise TypeError("Type %s not supported" % type(x))
        if dense_shape is not None:
          self._values.add(dense_shape.name) 
Example #7
Source File: control_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def _FixControlInputsAndContext(self, enters):
    graph = ops.get_default_graph()
    # pylint: disable=protected-access
    for e in enters:
      if isinstance(e, ops.Tensor):
        xs = [e]
      else:
        if not isinstance(e, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
          raise TypeError("Type %s not supported" % type(e))
        xs = [e.values, e.indices]
        shape = e.dense_shape
        if shape is not None:
          xs.append(shape)
      for x in xs:
        inp_op = x.op.inputs[0]
        control_inputs = graph._control_dependencies_for_inputs([inp_op])
        outer_control_inputs = [op for op in control_inputs
                                if self._IsInOuterContext(op)]
        x.op._set_control_flow_context(self)
        x.op._add_control_inputs(outer_control_inputs)
        graph._record_op_seen_by_control_dependencies(x.op)
    # pylint: enable=protected-access 
Example #8
Source File: data_flow_ops.py    From lambda-packs with MIT License
def apply_indexed_slices_grad(self, grad, local_step=0, name=None):
    """Attempts to apply a gradient to the accumulator.

    The attempt is silently dropped if the gradient is stale, i.e., local_step
    is less than the accumulator's global time step.

    Args:
      grad: The gradient IndexedSlices to be applied.
      local_step: Time step at which the gradient was computed.
      name: Optional name for the operation.

    Returns:
      The operation that (conditionally) applies a gradient to the accumulator.

    Raises:
      InvalidArgumentError: If grad is of the wrong shape
    """
    return self.apply_grad(
        grad_indices=grad.indices,
        grad_values=grad.values,
        grad_shape=grad.dense_shape,
        local_step=local_step,
        name=name) 
Example #9
Source File: learning_test.py    From auto-alt-text-lambda-api with MIT License
def testIndexedSlicesGradIsClippedCorrectly(self):
    sparse_grad_indices = np.array([0, 1, 4])
    sparse_grad_dense_shape = [self._grad_vec.size]

    values = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
    indices = constant_op.constant(sparse_grad_indices, dtype=dtypes.int32)
    dense_shape = constant_op.constant(
        sparse_grad_dense_shape, dtype=dtypes.int32)

    gradient = ops.IndexedSlices(values, indices, dense_shape)
    variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)

    gradients_to_variables = (gradient, variable)
    gradients_to_variables = learning.clip_gradient_norms(
        [gradients_to_variables], self._max_norm)[0]

    # Ensure the built IndexedSlice has the right form.
    self.assertEqual(gradients_to_variables[1], variable)
    self.assertEqual(gradients_to_variables[0].indices, indices)
    self.assertEqual(gradients_to_variables[0].dense_shape, dense_shape)

    with session.Session() as sess:
      actual_gradient = sess.run(gradients_to_variables[0].values)
    np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec) 
Example #10
Source File: control_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def _NextIteration(data, name=None):
  data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
  if isinstance(data, ops.Tensor):
    if data.dtype._is_ref_dtype:   # pylint: disable=protected-access
      return ref_next_iteration(data, name=name)
    else:
      return next_iteration(data, name=name)
  else:
    if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(data))
    values = _NextIteration(data.values, name=name)
    indices = next_iteration(data.indices, name="indices")
    if isinstance(data, ops.IndexedSlices):
      dense_shape = data.dense_shape
      if dense_shape is not None:
        dense_shape = next_iteration(dense_shape, name="dense_shape")
      return ops.IndexedSlices(values, indices, dense_shape)
    else:
      dense_shape = next_iteration(data.dense_shape, name="dense_shape")
      return sparse_tensor.SparseTensor(indices, values, dense_shape) 
Example #11
Source File: variables.py    From lambda-packs with MIT License
def scatter_sub(self, sparse_delta, use_locking=False):
    """Subtracts `IndexedSlices` from this variable.

    This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
    sparse_delta.values)`.

    Args:
      sparse_delta: `IndexedSlices` to be subtracted from this variable.
      use_locking: If `True`, use locking during the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered subtraction has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    return state_ops.scatter_sub(
        self._variable,
        sparse_delta.indices,
        sparse_delta.values,
        use_locking=use_locking) 
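A hedged usage sketch (TF 1.x ref variables; numbers are illustrative): subtract from rows 0 and 2 only.

import tensorflow as tf
from tensorflow.python.framework import ops

v = tf.Variable([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
delta = ops.IndexedSlices(values=tf.constant([[0.5, 0.5], [1.0, 1.0]]),
                          indices=tf.constant([0, 2]))
update = v.scatter_sub(delta)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(update))  # [[0.5 0.5] [2. 2.] [2. 2.]]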
Example #12
Source File: variables.py    From auto-alt-text-lambda-api with MIT License
def scatter_sub(self, sparse_delta, use_locking=False):
    """Subtracts `IndexedSlices` from this variable.

    This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
    sparse_delta.values)`.

    Args:
      sparse_delta: `IndexedSlices` to be subtracted from this variable.
      use_locking: If `True`, use locking during the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered subtraction has completed.

    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    return state_ops.scatter_sub(
        self._variable,
        sparse_delta.indices,
        sparse_delta.values,
        use_locking=use_locking) 
Example #13
Source File: data_flow_grad.py    From auto-alt-text-lambda-api with MIT License
def _DynamicStitchGrads(op, grad):
  """Gradients for DynamicStitch."""

  num_values = len(op.inputs) // 2
  indices_grad = [None] * num_values

  def AsInt32(x):
    return (x if op.inputs[0].dtype == dtypes.int32 else
            math_ops.cast(x, dtypes.int32))
  inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
  if isinstance(grad, ops.IndexedSlices):
    output_shape = array_ops.shape(op.outputs[0])
    output_rows = output_shape[0]
    grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
  values_grad = [array_ops.gather(grad, inp) for inp in inputs]
  return indices_grad + values_grad 
Example #14
Source File: control_flow_ops.py    From lambda-packs with MIT License
def _InitializeValues(self, values):
    """Makes the values known to this context."""
    self._values = set()
    for x in values:
      if isinstance(x, ops.Tensor):
        self._values.add(x.name)
      else:
        self._values.add(x.values.name)
        self._values.add(x.indices.name)
        if isinstance(x, ops.IndexedSlices):
          dense_shape = x.dense_shape
        elif isinstance(x, sparse_tensor.SparseTensor):
          dense_shape = x.dense_shape
        else:
          raise TypeError("Type %s not supported" % type(x))
        if dense_shape is not None:
          self._values.add(dense_shape.name) 
Example #15
Source File: control_flow_ops.py    From lambda-packs with MIT License
def _NextIteration(data, name=None):
  data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
  if isinstance(data, ops.Tensor):
    if data.dtype._is_ref_dtype:   # pylint: disable=protected-access
      return ref_next_iteration(data, name=name)
    else:
      return next_iteration(data, name=name)
  else:
    if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(data))
    values = _NextIteration(data.values, name=name)
    indices = next_iteration(data.indices, name="indices")
    if isinstance(data, ops.IndexedSlices):
      dense_shape = data.dense_shape
      if dense_shape is not None:
        dense_shape = next_iteration(dense_shape, name="dense_shape")
      return ops.IndexedSlices(values, indices, dense_shape)
    else:
      dense_shape = next_iteration(data.dense_shape, name="dense_shape")
      return sparse_tensor.SparseTensor(indices, values, dense_shape) 
Example #16
Source File: training.py    From lambda-packs with MIT License
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '_gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '_gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
Example #17
Source File: learning.py    From lambda-packs with MIT License
def add_gradients_summaries(grads_and_vars):
  """Add summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(
          summary.histogram(var.op.name + '/gradient', grad_values))
      summaries.append(
          summary.scalar(var.op.name + '/gradient_norm',
                         clip_ops.global_norm([grad_values])))
    else:
      logging.info('Var %s has no gradient', var.op.name)

  return summaries 
Example #18
Source File: learning.py    From ctw-baseline with MIT License
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars 
Example #19
Source File: learning.py    From lambda-packs with MIT License
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, ops.IndexedSlices):
        tmp = clip_ops.clip_by_norm(grad.values, max_norm)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = clip_ops.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars 
Example #20
Source File: variable_clipping_optimizer.py    From lambda-packs with MIT License
def _clip_sparse(self, grad, var):
    assert isinstance(grad, ops.IndexedSlices)
    clip_dims = self._vars_to_clip_dims[var]
    if 0 in clip_dims:
      logging.warning("Clipping norm across dims %s for %s is inefficient "
                      "when including sparse dimension 0.", clip_dims,
                      var.op.name)
      return self._clip_dense(var)

    with ops.colocate_with(var):
      var_subset = array_ops.gather(var, grad.indices)
    with self._maybe_colocate_with(var):
      normalized_var_subset = clip_ops.clip_by_norm(
          var_subset, self._max_norm, clip_dims)
      delta = ops.IndexedSlices(
          var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
    with ops.colocate_with(var):
      return var.scatter_sub(delta, use_locking=self._use_locking) 
Example #21
Source File: data_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def apply_indexed_slices_grad(self, grad, local_step=0, name=None):
    """Attempts to apply a gradient to the accumulator.

    The attempt is silently dropped if the gradient is stale, i.e., local_step
    is less than the accumulator's global time step.

    Args:
      grad: The gradient IndexedSlices to be applied.
      local_step: Time step at which the gradient was computed.
      name: Optional name for the operation.

    Returns:
      The operation that (conditionally) applies a gradient to the accumulator.

    Raises:
      InvalidArgumentError: If grad is of the wrong shape
    """
    return self.apply_grad(
        grad_indices=grad.indices,
        grad_values=grad.values,
        grad_shape=grad.dense_shape,
        local_step=local_step,
        name=name) 
Example #22
Source File: optimizer.py    From lambda-packs with MIT License
def _deduplicate_indexed_slices(values, indices):
  """Sums `values` associated with any non-unique `indices`.

  Args:
    values: A `Tensor` with rank >= 1.
    indices: A one-dimensional integer `Tensor`, indexing into the first
      dimension of `values` (as in an IndexedSlices object).
  Returns:
    A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
    de-duplicated version of `indices` and `summed_values` contains the sum of
    `values` slices associated with each unique index.
  """
  unique_indices, new_index_positions = array_ops.unique(indices)
  summed_values = math_ops.unsorted_segment_sum(
      values, new_index_positions,
      array_ops.shape(unique_indices)[0])
  return (summed_values, unique_indices) 
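A hedged worked example: with the duplicate index 1 below, the two matching rows are summed into one. It assumes _deduplicate_indexed_slices and the module imports above are in scope (TF 1.x graph mode).

import tensorflow as tf

values = tf.constant([[1.0], [2.0], [4.0]])
indices = tf.constant([1, 1, 3])

summed_values, unique_indices = _deduplicate_indexed_slices(values, indices)

with tf.Session() as sess:
  print(sess.run(unique_indices))  # [1 3]
  print(sess.run(summed_values))   # [[3.] [4.]]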
Example #23
Source File: resource_variable_ops.py    From lambda-packs with MIT License
def _GatherGrad(op, grad):
  """Gradient for gather op."""
  # Build appropriately shaped IndexedSlices
  # Walk graph back until the original handle is found.
  # TODO(apassos): more robust way of getting the shape.
  handle = op.inputs[0]
  while handle.op.type != "VarHandleOp":
    handle = handle.op.inputs[0]
  params_shape = ops.convert_to_tensor(
      tensor_shape.TensorShape(handle.op.get_attr("shape")))
  indices = op.inputs[1]
  size = array_ops.expand_dims(array_ops.size(indices), 0)
  values_shape = array_ops.concat([size, params_shape[1:]], 0)
  values = array_ops.reshape(grad, values_shape)
  indices = array_ops.reshape(indices, size)
  return [ops.IndexedSlices(values, indices, params_shape), None] 
Example #24
Source File: control_flow_ops.py    From lambda-packs with MIT License
def _FixControlInputsAndContext(self, enters):
    graph = ops.get_default_graph()
    # pylint: disable=protected-access
    for e in enters:
      if isinstance(e, ops.Tensor):
        xs = [e]
      else:
        if not isinstance(e, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
          raise TypeError("Type %s not supported" % type(e))
        xs = [e.values, e.indices]
        shape = e.dense_shape
        if shape is not None:
          xs.append(shape)
      for x in xs:
        inp_op = x.op.inputs[0]
        control_inputs = graph._control_dependencies_for_inputs([inp_op])
        outer_control_inputs = [op for op in control_inputs
                                if self._IsInOuterContext(op)]
        x.op._set_control_flow_context(self)
        x.op._add_control_inputs(outer_control_inputs)
        graph._record_op_seen_by_control_dependencies(x.op)
    # pylint: enable=protected-access 
Example #25
Source File: control_flow_ops.py    From lambda-packs with MIT License
def _BuildCondTensor(self, v):
    if isinstance(v, ops.Operation):
      # Use pivot as the proxy for this op.
      return with_dependencies([v], self._pivot)
    elif isinstance(v, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      values = self._ProcessOutputTensor(v.values)
      indices = self._ProcessOutputTensor(v.indices)
      if isinstance(v, ops.IndexedSlices):
        dense_shape = v.dense_shape
        if dense_shape is not None:
          dense_shape = self._ProcessOutputTensor(dense_shape)
        return ops.IndexedSlices(values, indices, dense_shape)
      else:
        dense_shape = self._ProcessOutputTensor(v.dense_shape)
        return sparse_tensor.SparseTensor(indices, values, dense_shape)
    else:
      v = nest.map_structure(_convert_tensorarray_to_flow, v)
      return self._ProcessOutputTensor(ops.convert_to_tensor(v)) 
Example #26
Source File: data_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def take_indexed_slices_grad(self, num_required, name=None):
    """Attempts to extract the average gradient from the accumulator.

    The operation blocks until sufficient number of gradients have been
    successfully applied to the accumulator.

    Once successful, the following actions are also triggered:
    - Counter of accumulated gradients is reset to 0.
    - Aggregated gradient is reset to 0 tensor.
    - Accumulator's internal time step is incremented by 1.

    Args:
      num_required: Number of gradients that needs to have been aggregated
      name: Optional name for the operation

    Returns:
      An IndexedSlices holding the value of the average gradient.

    Raises:
      InvalidArgumentError: If num_required < 1
    """
    return_val = gen_data_flow_ops.sparse_accumulator_take_gradient(
        self._accumulator_ref, num_required, dtype=self._dtype, name=name)
    return ops.IndexedSlices(
        indices=return_val.indices,
        values=return_val.values,
        dense_shape=return_val.shape) 
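A hedged end-to-end sketch (TF 1.x) that pairs this with apply_indexed_slices_grad from Examples #8 and #21: one sparse gradient goes into a tf.SparseConditionalAccumulator and comes back as an IndexedSlices average. Shapes and values are illustrative.

import tensorflow as tf
from tensorflow.python.framework import ops

acc = tf.SparseConditionalAccumulator(tf.float32, shape=[4, 2])

grad = ops.IndexedSlices(values=tf.constant([[1.0, 1.0]]),
                         indices=tf.constant([2]),
                         dense_shape=tf.constant([4, 2]))

apply_op = acc.apply_indexed_slices_grad(grad)
avg = acc.take_indexed_slices_grad(num_required=1)

with tf.Session() as sess:
  sess.run(apply_op)
  # Fetch values and indices together so the take op runs exactly once.
  vals, idx = sess.run([avg.values, avg.indices])
  print(idx, vals)  # [2] [[1. 1.]]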
Example #27
Source File: optimizer.py    From auto-alt-text-lambda-api with MIT License
def update_op(self, optimizer, g):
    if isinstance(g, ops.Tensor):
      return optimizer._apply_dense(g, self._v)  # pylint: disable=protected-access
    else:
      assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
                                                "tensor nor IndexedSlices.")
      # pylint: disable=protected-access
      return optimizer._apply_sparse_duplicate_indices(g, self._v) 
Example #28
Source File: control_flow_ops.py    From auto-alt-text-lambda-api with MIT License
def with_dependencies(dependencies, output_tensor, name=None):
  """Produces the content of `output_tensor` only after `dependencies`.

  In some cases, a user may want the output of an operation to be
  consumed externally only after some other dependencies have run
  first. This function ensures returns `output_tensor`, but only after all
  operations in `dependencies` have run. Note that this means that there is
  no guarantee that `output_tensor` will be evaluated after any `dependencies`
  have run.

  See also `tuple` and `group`.

  Args:
    dependencies: Iterable of operations to run before this op finishes.
    output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
    name: (Optional) A name for this operation.

  Returns:
    Same as `output_tensor`.

  Raises:
    TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
  """
  with ops.name_scope(name, "control_dependency",
                      list(dependencies) + [output_tensor]) as name:
    with ops.colocate_with(output_tensor):
      with ops.control_dependencies(dependencies):
        output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)
        if isinstance(output_tensor, ops.Tensor):
          return _Identity(output_tensor, name=name)
        else:
          return ops.IndexedSlices(_Identity(output_tensor.values, name=name),
                                   output_tensor.indices,
                                   output_tensor.dense_shape) 
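A hedged usage sketch (TF 1.x): fetching the returned tensor forces the assign to run first, even though the returned value itself does not depend on the variable. with_dependencies is the function shown above; everything else is standard API.

import tensorflow as tf

a = tf.Variable(1.0)
update = tf.assign(a, 3.0)

# b evaluates to 7.0, but only after `update` has run.
b = with_dependencies([update], tf.constant(7.0))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(b))  # 7.0
  print(sess.run(a))  # 3.0 -- the assign ran as a side effect of fetching b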
Example #29
Source File: control_flow_grad.py    From auto-alt-text-lambda-api with MIT License
def _ExitGrad(op, grad):
  """Gradients for an exit op are calculated using an Enter op."""
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if not grad_ctxt.back_prop:
    # The flag `back_prop` is set by users to suppress gradient
    # computation for this loop. If the attribute `back_prop` is false,
    # no gradient computation.
    return None

  # pylint: disable=protected-access
  if op._get_control_flow_context().grad_state:
    raise TypeError("Second-order gradient for while loops not supported.")
  # pylint: enable=protected-access

  if isinstance(grad, ops.Tensor):
    grad_ctxt.AddName(grad.name)
  else:
    if not isinstance(grad, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(grad))
    grad_ctxt.AddName(grad.values.name)
    grad_ctxt.AddName(grad.indices.name)
    dense_shape = grad.dense_shape
    if dense_shape is not None:
      grad_ctxt.AddName(dense_shape.name)
  enter_fn = control_flow_ops._Enter  # pylint: disable=protected-access
  grad_ctxt.Enter()
  result = enter_fn(grad, grad_ctxt.name, is_constant=False,
                    parallel_iterations=grad_ctxt.parallel_iterations,
                    name="b_exit")
  grad_ctxt.Exit()
  return result 
Example #30
Source File: learning.py    From auto-alt-text-lambda-api with MIT License
def multiply_gradients(grads_and_vars, gradient_multipliers):
  """Multiply specified gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).
    gradient_multipliers: A map from either `Variables` or `Variable` op names
      to the coefficient by which the associated gradient should be scaled.

  Returns:
    The updated list of gradient to variable pairs.

  Raises:
    ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
    is empty or None or if `gradient_multipliers` is not a dictionary.
  """
  if not isinstance(grads_and_vars, list):
    raise ValueError('`grads_and_vars` must be a list.')
  if not gradient_multipliers:
    raise ValueError('`gradient_multipliers` is empty.')
  if not isinstance(gradient_multipliers, dict):
    raise ValueError('`gradient_multipliers` must be a dict.')

  multiplied_grads_and_vars = []
  for grad, var in grads_and_vars:
    if var in gradient_multipliers or var.op.name in gradient_multipliers:
      key = var if var in gradient_multipliers else var.op.name
      if grad is None:
        raise ValueError('Requested multiple of `None` gradient.')

      if isinstance(grad, ops.IndexedSlices):
        tmp = grad.values * constant_op.constant(
            gradient_multipliers[key], dtype=grad.dtype)
        grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad *= constant_op.constant(
            gradient_multipliers[key], dtype=grad.dtype)
    multiplied_grads_and_vars.append((grad, var))
  return multiplied_grads_and_vars
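A hedged usage sketch (slim-style, TF 1.x): scale one variable's gradient by 0.1 before applying it. The toy model is illustrative and the module imports of learning.py are assumed to be in scope.

import tensorflow as tf

weights = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(tf.square(weights))

optimizer = tf.train.GradientDescentOptimizer(0.5)
grads_and_vars = optimizer.compute_gradients(loss)

# Keyed by the variable's op name, as the docstring above allows.
grads_and_vars = multiply_gradients(grads_and_vars, {weights.op.name: 0.1})
train_op = optimizer.apply_gradients(grads_and_vars)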