Python tensorflow.python.ops.array_ops.constant() Examples

The following are 30 code examples of tensorflow.python.ops.array_ops.constant(), drawn from open-source projects. The originating project and source file are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.python.ops.array_ops.
Example #1
Source File: metrics_impl.py    From lambda-packs with MIT License
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.constant(0.0, dtype=dtypes.float64), denominator),
      lambda: array_ops.constant(0.0, dtype=dtypes.float64),
      lambda: math_ops.div(numerator, denominator),
      name=name) 
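For context, the same zero-denominator guard can be sketched with the public TF 1.x API. This is an illustration only, not part of the original file; tf.compat.v1 usage and the chosen constants are assumptions.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

num = tf.constant(6.0, dtype=tf.float64)
den = tf.constant(0.0, dtype=tf.float64)
# cond picks the zero branch when the denominator is exactly 0.
safe = tf.cond(tf.equal(tf.constant(0.0, dtype=tf.float64), den),
               lambda: tf.constant(0.0, dtype=tf.float64),
               lambda: tf.divide(num, den),
               name='safe_scalar_div')
with tf.Session() as sess:
    print(sess.run(safe))  # 0.0 instead of inf/nan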
Example #2
Source File: feeder.py    From lambda-packs with MIT License
def __init__(
      self, dtypes, shapes=None, capacity=10, shared_name='feeding_queue'):
    self._dtypes = dtypes
    self._shapes = shapes
    self._shared_name = shared_name
    self._capacity = capacity
    self._local_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
                                            dtypes=self._dtypes,
                                            shapes=self._shapes,
                                            name=self._shared_name,
                                            shared_name=self._shared_name)
    self._num_remote_feeds = 0

    # Fake do-nothing operation that's used to prevent remote queues
    # from being closed, and as a workaround for b/32749157
    self._fake_op = array_ops.constant('dummy close', name='feeder_fake_op').op
    self._feeding_event = threading.Event() 
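A minimal sketch of driving a FIFOQueue like the one built above, using the public tf.queue API in graph mode; the queue spec here is an assumption for illustration.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

q = tf.queue.FIFOQueue(capacity=10, dtypes=[tf.string],
                       shapes=[[]], shared_name='feeding_queue')
enqueue = q.enqueue(['one item'])
dequeue = q.dequeue()
with tf.Session() as sess:
    sess.run(enqueue)
    print(sess.run(dequeue))  # b'one item'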
Example #3
Source File: layers.py    From tensornets with MIT License
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
     tensor: An `int` `Tensor` to be converted to a `SparseTensor`.
     eos_token: An integer. It is part of the target label that signifies the
       end of a sentence.
     outputs_collections: Collection to add the outputs.
     scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs) 
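The same conversion expressed with public ops; this sketch assumes eager TF 2.x and uses 0 as the eos_token.

import tensorflow as tf

dense = tf.constant([[1, 2, 0], [3, 0, 0]])
# Coordinates of every entry that is not the eos_token.
indices = tf.where(tf.not_equal(dense, 0))
sparse = tf.SparseTensor(indices=indices,
                         values=tf.gather_nd(dense, indices),
                         dense_shape=tf.shape(dense, out_type=tf.int64))
print(tf.sparse.to_dense(sparse))  # round-trips the dense input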
Example #4
Source File: math_ops_test.py    From deep_image_model with Apache License 2.0
def testOverflow(self):
    x = [1000, 1001, 1002, 1003]
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.array(x, dtype=dtype)
      max_np = np.max(x_np)
      with self.assertRaisesRegexp(RuntimeWarning,
                                   "overflow encountered in exp"):
        out = log(np.sum(exp(x_np)))
        if out == np.inf:
          raise RuntimeWarning("overflow encountered in exp")

      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
        y_np = log(np.sum(exp(x_np - max_np))) + max_np
        self.assertAllClose(y_tf_np, y_np) 
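The stabilization being tested is the classic max-shift: log(sum(exp(x))) = max(x) + log(sum(exp(x - max(x)))). The bare log/exp in the snippet appear to be numpy functions aliased at module level in the original test file. A numpy sketch of the difference:

import numpy as np

x = np.array([1000., 1001., 1002., 1003.])
naive = np.log(np.sum(np.exp(x)))            # exp overflows -> inf
m = np.max(x)
stable = np.log(np.sum(np.exp(x - m))) + m   # finite, ~1003.44
print(naive, stable)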
Example #5
Source File: metric_ops.py    From auto-alt-text-lambda-api with MIT License
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.constant(0.0, dtype=dtypes.float64), denominator),
      lambda: array_ops.constant(0.0, dtype=dtypes.float64),
      lambda: math_ops.div(numerator, denominator),
      name=name) 
Example #6
Source File: math_ops_test.py    From deep_image_model with Apache License 2.0
def testConsistent(self):
    nums, divs = self.intTestData()
    with self.test_session():
      tf_result = (
          math_ops.floor_div(nums, divs) * divs + math_ops.floor_mod(nums, divs)
      ).eval()
      tf_nums = array_ops.constant(nums)
      tf_divs = array_ops.constant(divs)
      tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
      np_result = (nums // divs) * divs + (nums % divs)
      # consistency with numpy
      self.assertAllEqual(tf_result, np_result)
      # consistency with two forms of divide
      self.assertAllEqual(tf_result, tf2_result)
      # consistency for truncation form
      tf3_result = (
          math_ops.truncatediv(nums, divs) * divs
          + math_ops.truncatemod(nums, divs)
      ).eval()
      expanded_nums = np.reshape(np.tile(nums, divs.shape[1]),
                                 (nums.shape[0], divs.shape[1]))
      # Consistent with desire to get numerator
      self.assertAllEqual(tf3_result, expanded_nums)
      # Consistent with desire to get numerator
      self.assertAllEqual(tf_result, expanded_nums) 
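The invariant the test checks is the division algorithm under floor semantics: (a // b) * b + (a % b) == a, including negative operands. A quick numpy check:

import numpy as np

a = np.array([7, -7, 7, -7])
b = np.array([3, 3, -3, -3])
assert np.array_equal((a // b) * b + (a % b), a)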
Example #7
Source File: math_ops_test.py    From deep_image_model with Apache License 2.0
def testUnderflow(self):
    x = [-1000, -1001, -1002, -1003]
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.array(x, dtype=dtype)
      max_np = np.max(x_np)
      with self.assertRaisesRegexp(RuntimeWarning,
                                   "divide by zero encountered in log"):
        out = log(np.sum(exp(x_np)))
        if out == -np.inf:
          raise RuntimeWarning("divide by zero encountered in log")

      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
        y_np = log(np.sum(exp(x_np - max_np))) + max_np
        self.assertAllClose(y_tf_np, y_np) 
Example #8
Source File: layers.py    From tf-slim with Apache License 2.0
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
     tensor: An `int` `Tensor` to be converted to a `SparseTensor`.
     eos_token: An integer. It is part of the target label that signifies the
       end of a sentence.
     outputs_collections: Collection to add the outputs.
     scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs) 
Example #9
Source File: feeder.py    From auto-alt-text-lambda-api with MIT License
def __init__(
      self, dtypes, shapes=None, capacity=10, shared_name='feeding_queue'):
    self._dtypes = dtypes
    self._shapes = shapes
    self._shared_name = shared_name
    self._capacity = capacity
    self._local_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
                                            dtypes=self._dtypes,
                                            shapes=self._shapes,
                                            name=self._shared_name,
                                            shared_name=self._shared_name)
    self._num_remote_feeds = 0

    # Fake do-nothing operation that's used to prevent remote queues
    # from being closed, and as a workaround for b/32749157
    self._fake_op = array_ops.constant('dummy close', name='feeder_fake_op').op
    self._feeding_event = threading.Event() 
Example #10
Source File: metrics_impl.py    From auto-alt-text-lambda-api with MIT License
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.constant(0.0, dtype=dtypes.float64), denominator),
      lambda: array_ops.constant(0.0, dtype=dtypes.float64),
      lambda: math_ops.div(numerator, denominator),
      name=name) 
Example #11
Source File: math_grad.py    From deep_image_model with Apache License 2.0
def _ErfGrad(op, grad):
  """Returns grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x)) 
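A quick numeric sanity check of the formula d/dx erf(x) = 2/sqrt(pi) * exp(-x^2); scipy is used here purely for illustration and is not part of the original source.

import numpy as np
from scipy.special import erf

x, eps = 0.5, 1e-6
numeric = (erf(x + eps) - erf(x - eps)) / (2 * eps)
analytic = 2 / np.sqrt(np.pi) * np.exp(-x ** 2)
assert abs(numeric - analytic) < 1e-8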
Example #12
Source File: math_ops_test.py    From deep_image_model with Apache License 2.0
def testFloorModFloat(self):
    nums, divs = self.floatTestData()
    with self.test_session():
      tf_result = math_ops.floormod(nums, divs).eval()
      np_result = nums % divs
      self.assertAllEqual(tf_result, np_result)
      # TODO(aselle): put this test in once % switched to floormod
      # tf2_result = (array_ops.constant(nums)
      #               % array_ops.constant(divs)).eval()
      # self.assertAllEqual(tf2_result, tf_result) 
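floormod follows Python/numpy % semantics for floats: the result takes the sign of the divisor. For example:

import numpy as np

print(np.array([5.5, -5.5]) % 2.0)    # [1.5 0.5]
print(np.array([5.5, -5.5]) % -2.0)   # [-0.5 -1.5]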
Example #13
Source File: math_grad.py    From deep_image_model with Apache License 2.0
def _DigammaGrad(op, grad):
  """Compute gradient of the digamma function with respect to its argument."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x) 
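The gradient used above is the trigamma function, polygamma(1, x). A numeric check with scipy (scipy is an illustration-only assumption):

from scipy.special import digamma, polygamma

x, eps = 1.7, 1e-6
numeric = (digamma(x + eps) - digamma(x - eps)) / (2 * eps)
assert abs(numeric - polygamma(1, x)) < 1e-6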
Example #14
Source File: math_grad.py    From deep_image_model with Apache License 2.0
def _ErfcGrad(op, grad):
  """Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  minus_two_over_root_pi = constant_op.constant(-2 / np.sqrt(np.pi),
                                                dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x)) 
Example #15
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _AtanGrad(op, grad):
  """Returns grad * 1/ (1 + x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    inv = math_ops.reciprocal(math_ops.add(one, x2))
    return grad * inv 
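And the corresponding numeric check for d/dx atan(x) = 1 / (1 + x^2):

import numpy as np

x, eps = 0.8, 1e-6
numeric = (np.arctan(x + eps) - np.arctan(x - eps)) / (2 * eps)
assert abs(numeric - 1 / (1 + x ** 2)) < 1e-8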
Example #16
Source File: math_grad.py    From deep_image_model with Apache License 2.0
def _SegmentMeanGrad(op, grad):
  """Gradient for SegmentMean."""
  input_rank = array_ops.rank(op.inputs[0])
  ones_shape = array_ops.concat(
      0, [array_ops.shape(op.inputs[1]),
          array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)])
  ones = array_ops.fill(ones_shape,
                        constant_op.constant(1, dtype=grad.dtype))
  scaled_grad = math_ops.div(grad, math_ops.segment_sum(ones, op.inputs[1]))
  return array_ops.gather(scaled_grad, op.inputs[1]), None 
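The division step relies on per-segment counts, which segment_sum over a tensor of ones produces. A public-API sketch (eager TF 2.x assumed):

import tensorflow as tf

segment_ids = tf.constant([0, 0, 1])
ones = tf.ones_like(segment_ids, dtype=tf.float32)
counts = tf.math.segment_sum(ones, segment_ids)
print(counts)  # [2. 1.]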
Example #17
Source File: control_flow_ops.py    From deep_image_model with Apache License 2.0
def PostProcessing(self):
    """Perform postprocessing at the end of gradients().

    We have created the gradient graph at this point. So this function
    can be used to perform any postprocessing on the gradient graph.
    We currently perform the following postprocessing:
      1. Patch the gradient graph if the output of a loop variable
         doesn't depend on its input.
    """
    for _, grad_state in self._map.items():
      for _, b_merge in grad_state.switch_map.items():
        if b_merge.op.inputs[0] == b_merge.op.inputs[1]:
          # The value of this loop variable at iteration i+1 doesn't
          # depend on its value at iteration i. So use zeros as the
          # gradients for all iterations > 0.
          dtype = b_merge.op.inputs[0].dtype
          shape = b_merge.op.inputs[0].get_shape()
          # pylint: disable=protected-access
          if shape.is_fully_defined():
            grad_state.grad_context.Enter()
            # Create a zeros tensor and use it for iterations > 0.
            grad_val = constant_op.constant(0, dtype=dtype, shape=shape)
            next_grad_val = _NextIteration(grad_val)
            grad_state.grad_context.Exit()
          else:
            # Create a zeros tensor in the outer grad context.
            outer_grad_ctxt = grad_state.grad_context.outer_context
            if outer_grad_ctxt: outer_grad_ctxt.Enter()
            enter_grad_op = b_merge.op.inputs[0].op
            enter_grad = enter_grad_op.inputs[0]
            grad_shape = array_ops.shape_internal(enter_grad, optimize=False)
            grad_val = array_ops.zeros(grad_shape)
            if outer_grad_ctxt: outer_grad_ctxt.Exit()
            # Use the zeros for iterations > 0.
            grad_state.grad_context.Enter()
            next_grad_val = _NextIteration(grad_val)
            grad_state.grad_context.Exit()
          b_merge.op._update_input(1, next_grad_val)
          # pylint: enable=protected-access 
Example #18
Source File: resources.py    From deep_image_model with Apache License 2.0
def report_uninitialized_resources(resource_list=None,
                                   name="report_uninitialized_resources"):
  """Returns the names of all uninitialized resources in resource_list.

  If the returned tensor is empty then all resources have been initialized.

  Args:
   resource_list: resources to check. If None, will use shared_resources() +
    local_resources().
   name: name for the resource-checking op.

  Returns:
   Tensor containing names of the handles of all resources which have not
   yet been initialized.

  """
  if resource_list is None:
    resource_list = shared_resources() + local_resources()
  with ops.name_scope(name):
    if not resource_list:
      # Return an empty tensor so we only need to check for returned tensor
      # size being 0 as an indication of model readiness.
      return array_ops.constant([], dtype=dtypes.string)
    # Get a 1-D boolean tensor listing whether each resource is initialized.
    variables_mask = math_ops.logical_not(array_ops.pack(
        [r.is_initialized for r in resource_list]))
    # Get a 1-D string tensor containing all the resource names.
    variable_names_tensor = array_ops.constant(
        [s.handle.name for s in resource_list])
    # Return a 1-D tensor containing all the names of uninitialized resources.
    return array_ops.boolean_mask(variable_names_tensor, variables_mask) 
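Readiness then reduces to checking that the returned name tensor is empty. A sketch with the analogous public variable op (tf.compat.v1 usage is an assumption):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

uninitialized = tf.report_uninitialized_variables()
ready = tf.equal(tf.size(uninitialized), 0)
with tf.Session() as sess:
    print(sess.run(ready))  # True: no variables defined, so none are uninitialized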
Example #19
Source File: math_ops_test.py    From deep_image_model with Apache License 2.0
def testDivideInt(self):
    nums, divs = self.intTestData()
    with self.test_session():
      tf_result = math_ops.floor_div(nums, divs).eval()
      np_result = nums // divs
      self.assertAllEqual(tf_result, np_result)
      # TODO(aselle): Put this test in once // is switched to floordiv
      # tf2_result = (array_ops.constant(nums)
      #               // array_ops.constant(divs)).eval()
      # self.assertAllEqual(tf2_result, tf_result) 
Example #20
Source File: metric_loss_ops.py    From cluster-loss-tensorflow with BSD 2-Clause "Simplified" License
def get_cluster_assignment(pairwise_distances, centroid_ids):
  """Assign data points to the neareset centroids.

  Tensorflow has numerical instability and doesn't always choose
    the data point with theoretically zero distance as it's nearest neighbor.
    Thus, for each centroid in centroid_ids, explicitly assign
    the centroid itself as the nearest centroid.
    This is done through the mask tensor and the constraint_vect tensor.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of centroid indices.

  Returns:
    y_fixed: 1-D tensor of cluster assignment.
  """
  predictions = math_ops.argmin(
      array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
  batch_size = array_ops.shape(pairwise_distances)[0]

  # Deal with numerical instability
  mask = math_ops.reduce_any(array_ops.one_hot(
      centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
                             axis=0)
  constraint_one_hot = math_ops.multiply(
      array_ops.one_hot(centroid_ids,
                        batch_size,
                        array_ops.constant(1, dtype=dtypes.int64),
                        array_ops.constant(0, dtype=dtypes.int64),
                        axis=0,
                        dtype=dtypes.int64),
      math_ops.to_int64(math_ops.range(array_ops.shape(centroid_ids)[0])))
  constraint_vect = math_ops.reduce_sum(
      array_ops.transpose(constraint_one_hot), axis=0)

  y_fixed = array_ops.where(mask, constraint_vect, predictions)
  return y_fixed 
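The core of the fix, sketched in numpy on a toy 3-point problem (the distance matrix is made up for illustration): pin each centroid to itself via the constraint vector, then overwrite the argmin predictions at those positions.

import numpy as np

pairwise = np.array([[0., 2., 5.],
                     [2., 0., 3.],
                     [5., 3., 0.]])
centroid_ids = np.array([0, 2])
# Nearest centroid per point, by row of the gathered distance matrix.
predictions = np.argmin(pairwise[centroid_ids], axis=0)
mask = np.zeros(3, dtype=bool)
mask[centroid_ids] = True
constraint = np.zeros(3, dtype=np.int64)
constraint[centroid_ids] = np.arange(len(centroid_ids))
y_fixed = np.where(mask, constraint, predictions)
print(y_fixed)  # [0 0 1]: each centroid is assigned to itself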
Example #21
Source File: metric_learning.py    From tf-slim with Apache License 2.0
def compute_augmented_facility_locations_pam(pairwise_distances,
                                             labels,
                                             margin_multiplier,
                                             margin_type,
                                             chosen_ids,
                                             pam_max_iter=5):
  """Refine the cluster centroids with PAM local search.

  For fixed iterations, alternate between updating the cluster assignment
    and updating cluster medoids.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    labels: 1-D Tensor of ground truth cluster assignment.
    margin_multiplier: multiplication constant.
    margin_type: Type of structured margin to use. Default is nmi.
    chosen_ids: 1-D Tensor of initial estimate of cluster centroids.
    pam_max_iter: Number of refinement iterations.

  Returns:
    chosen_ids: Updated 1-D Tensor of cluster centroid indices.
  """
  for _ in range(pam_max_iter):
    # update the cluster assignment given the chosen_ids (S_pred)
    predictions = get_cluster_assignment(pairwise_distances, chosen_ids)

    # update the medoids per each cluster
    chosen_ids = update_all_medoids(pairwise_distances, predictions, labels,
                                    chosen_ids, margin_multiplier, margin_type)

  return chosen_ids 
Example #22
Source File: metric_learning.py    From tf-slim with Apache License 2.0
def get_cluster_assignment(pairwise_distances, centroid_ids):
  """Assign data points to the neareset centroids.

  Tensorflow has numerical instability and doesn't always choose
    the data point with theoretically zero distance as it's nearest neighbor.
    Thus, for each centroid in centroid_ids, explicitly assign
    the centroid itself as the nearest centroid.
    This is done through the mask tensor and the constraint_vect tensor.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of centroid indices.

  Returns:
    y_fixed: 1-D tensor of cluster assignment.
  """
  predictions = math_ops.argmin(
      array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
  batch_size = array_ops.shape(pairwise_distances)[0]

  # Deal with numerical instability
  mask = math_ops.reduce_any(array_ops.one_hot(
      centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
                             axis=0)
  constraint_one_hot = math_ops.multiply(
      array_ops.one_hot(centroid_ids,
                        batch_size,
                        array_ops.constant(1, dtype=dtypes.int64),
                        array_ops.constant(0, dtype=dtypes.int64),
                        axis=0,
                        dtype=dtypes.int64),
      math_ops.cast(math_ops.range(array_ops.shape(centroid_ids)[0]),
                    dtypes.int64))
  constraint_vect = math_ops.reduce_sum(
      array_ops.transpose(constraint_one_hot), axis=0)

  y_fixed = array_ops.where(mask, constraint_vect, predictions)
  return y_fixed 
Example #23
Source File: metric_learning.py    From tf-slim with Apache License 2.0
def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
                     margin=1.0):
  """Computes the contrastive loss.

  This loss encourages the embeddings to be close to each other for
    samples with the same label, and to be at least the margin constant
    apart for samples with different labels.
  See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

  Args:
    labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
      binary labels indicating positive vs negative pair.
    embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
      images. Embeddings should be l2 normalized.
    embeddings_positive: 2-D float `Tensor` of embedding vectors for the
      positive images. Embeddings should be l2 normalized.
    margin: margin term in the loss definition.

  Returns:
    contrastive_loss: tf.float32 scalar.
  """
  # Get per pair distances
  distances = math_ops.sqrt(
      math_ops.reduce_sum(
          math_ops.squared_difference(embeddings_anchor, embeddings_positive),
          1))

  # Add contrastive loss for the siamese network.
  #   label here is {0,1} for neg, pos.
  return math_ops.reduce_mean(
      math_ops.cast(labels, distances.dtype) * math_ops.square(distances) +
      (1. - math_ops.cast(labels, distances.dtype)) *
      math_ops.square(math_ops.maximum(margin - distances, 0.)),
      name='contrastive_loss') 
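The per-pair formula is label * d^2 + (1 - label) * max(margin - d, 0)^2, averaged over the batch. A numpy sketch with toy embeddings (values are illustrative only):

import numpy as np

anchor = np.array([[1., 0.], [0., 1.]])
positive = np.array([[1., 0.], [1., 0.]])
labels = np.array([1., 0.])  # 1 = same class, 0 = different
d = np.sqrt(np.sum((anchor - positive) ** 2, axis=1))
margin = 1.0
loss = np.mean(labels * d ** 2 +
               (1. - labels) * np.maximum(margin - d, 0.) ** 2)
print(loss)  # 0.0: the positive pair coincides, the negative pair exceeds the margin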
Example #24
Source File: bernoulli.py    From auto-alt-text-lambda-api with MIT License
def _event_shape(self):
    return array_ops.constant([], dtype=dtypes.int32) 
Example #25
Source File: sparsify.py    From auto-alt-text-lambda-api with MIT License
def _apply_transform(self, input_tensors, **kwargs):
    """Applies the transformation to the `transform_input`.

    Args:
      input_tensors: a list of Tensors representing the input to
        the Transform.
      **kwargs: Additional keyword arguments, unused here.

    Returns:
        A namedtuple of Tensors representing the transformed output.
    """
    d = input_tensors[0]

    if self.strip_value is np.nan:
      strip_hot = math_ops.is_nan(d)
    else:
      strip_hot = math_ops.equal(d,
                                 array_ops.constant([self.strip_value],
                                                    dtype=d.dtype))
    keep_hot = math_ops.logical_not(strip_hot)

    length = array_ops.reshape(array_ops.shape(d), [])
    indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
    values = array_ops.boolean_mask(d, keep_hot)

    sparse_indices = array_ops.reshape(
        math_ops.cast(indices, dtypes.int64), [-1, 1])
    shape = math_ops.cast(array_ops.shape(d), dtypes.int64)

    # pylint: disable=not-callable
    return self.return_type(
        sparse_tensor.SparseTensor(sparse_indices, values, shape)) 
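The same stripping idea with public ops, here dropping NaNs from a 1-D vector (eager TF 2.x assumed):

import tensorflow as tf

d = tf.constant([1.0, float('nan'), 3.0])
keep = tf.logical_not(tf.math.is_nan(d))
indices = tf.boolean_mask(tf.range(tf.size(d)), keep)
values = tf.boolean_mask(d, keep)
print(indices.numpy(), values.numpy())  # [0 2] [1. 3.]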
Example #26
Source File: cudnn_rnn_ops.py    From auto-alt-text-lambda-api with MIT License
def __call__(self, input_data, input_h, input_c, params, is_training=True):
    """Runs the forward step for the RNN model.

    Args:
      input_data: the input sequence to the RNN model.
      input_h: the initial hidden state for h.
      input_c: the initial hidden state for c. This is only relevant for LSTM.
      params: the parameter buffer created for this model.
      is_training: whether this operation will be used in training or inference.

    Returns:
      output: the output sequence.
      output_h: the final state for h.
      output_c: the final state for c. This is only relevant for LSTM.
    """
    if self._rnn_mode != "lstm":
      # For models that don't take input_c, replace it with a dummy tensor.
      input_c = array_ops.constant([], dtype=dtypes.float32)
    output, output_h, output_c, _ = gen_cudnn_rnn_ops.cudnn_rnn(
        input=input_data,
        input_h=input_h,
        input_c=input_c,
        params=params,
        rnn_mode=self._rnn_mode,
        input_mode=self._input_mode,
        direction=self._direction,
        dropout=self._dropout,
        seed=self._seed,
        seed2=self._seed2,
        is_training=is_training)
    return (output, output_h, output_c) 
Example #27
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _RealGrad(_, grad):
  """Returns 'grad' as the real part and set the imaginary part 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(grad, zero) 
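The upstream gradient is lifted back into complex space with a zero imaginary part; a one-line eager check (TF 2.x assumed):

import tensorflow as tf

grad = tf.constant([1.0, 2.0])
print(tf.complex(grad, tf.zeros_like(grad)))  # [1+0j 2+0j]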
Example #28
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _AcosGrad(op, grad):
  """Returns grad * -1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.subtract(one, x2))
    inv = math_ops.reciprocal(den)
    return -grad * inv 
Example #29
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _AsinGrad(op, grad):
  """Returns grad * 1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.subtract(one, x2))
    inv = math_ops.reciprocal(den)
    return grad * inv 
Example #30
Source File: math_grad.py    From auto-alt-text-lambda-api with MIT License
def _DigammaGrad(op, grad):
  """Compute gradient of the digamma function with respect to its argument."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)