Python tensorflow.python.ops.array_ops.expand_dims() Examples

The following are 30 code examples of tensorflow.python.ops.array_ops.expand_dims(), collected from open-source projects. Each example notes the source file and project it was taken from. You may also want to check out all available functions and classes of the module tensorflow.python.ops.array_ops.
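Before the project examples, here is a minimal sketch of the operation itself. It uses the public tf.expand_dims alias and assumes TensorFlow 2.x eager execution purely for readability; the internal array_ops.expand_dims used in the examples below behaves the same way.

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])     # shape (3,)
col = tf.expand_dims(x, axis=-1)     # shape (3, 1): a column vector
row = tf.expand_dims(x, axis=0)      # shape (1, 3): a row vector
print(col.shape, row.shape)          # (3, 1) (1, 3)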
Example #1
Source File: sdca_ops.py    From lambda-packs with MIT License
def _linear_predictions(self, examples):
    """Returns predictions of the form w*x."""
    with name_scope('sdca/prediction'):
      sparse_variables = self._convert_n_to_tensor(self._variables[
          'sparse_features_weights'])
      result = 0.0
      for sfc, sv in zip(examples['sparse_features'], sparse_variables):
        # TODO(sibyl-Aix6ihai): following does not take care of missing features.
        result += math_ops.segment_sum(
            math_ops.multiply(
                array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
            sfc.example_indices)
      dense_features = self._convert_n_to_tensor(examples['dense_features'])
      dense_variables = self._convert_n_to_tensor(self._variables[
          'dense_features_weights'])

      for i in range(len(dense_variables)):
        result += math_ops.matmul(dense_features[i],
                                  array_ops.expand_dims(dense_variables[i], -1))

    # Reshaping to allow shape inference at graph construction time.
    return array_ops.reshape(result, [-1]) 
Example #2
Source File: linear_operator_identity.py    From lambda-packs with MIT License
def add_to_tensor(self, mat, name="add_to_tensor"):
    """Add matrix represented by this operator to `mat`.  Equiv to `I + mat`.

    Args:
      mat:  `Tensor` with same `dtype` and shape broadcastable to `self`.
      name:  A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[mat]):
      # Shape [B1,...,Bb, 1]
      multiplier_vector = array_ops.expand_dims(self.multiplier, -1)

      # Shape [C1,...,Cc, M, M]
      mat = ops.convert_to_tensor(mat, name="mat")

      # Shape [C1,...,Cc, M]
      mat_diag = array_ops.matrix_diag_part(mat)

      # multiplier_vector broadcasts here.
      new_diag = multiplier_vector + mat_diag

      return array_ops.matrix_set_diag(mat, new_diag) 
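A small sketch of the same diagonal update with public ops, assuming TensorFlow 2.x: the multiplier gets a trailing singleton axis via expand_dims so it broadcasts against the matrix diagonal.

import tensorflow as tf

mat = tf.eye(3) * 2.0                                   # a 3x3 matrix
multiplier = tf.constant(0.5)
new_diag = tf.expand_dims(multiplier, -1) + tf.linalg.diag_part(mat)
result = tf.linalg.set_diag(mat, new_diag)              # adds 0.5 to every diagonal entry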
Example #3
Source File: resource_variable_ops.py    From lambda-packs with MIT License
def _GatherGrad(op, grad):
  """Gradient for gather op."""
  # Build appropriately shaped IndexedSlices
  # Walk graph back until the original handle is found.
  # TODO(apassos): more robust way of getting the shape.
  handle = op.inputs[0]
  while handle.op.type != "VarHandleOp":
    handle = handle.op.inputs[0]
  params_shape = ops.convert_to_tensor(
      tensor_shape.TensorShape(handle.op.get_attr("shape")))
  indices = op.inputs[1]
  size = array_ops.expand_dims(array_ops.size(indices), 0)
  values_shape = array_ops.concat([size, params_shape[1:]], 0)
  values = array_ops.reshape(grad, values_shape)
  indices = array_ops.reshape(indices, size)
  return [ops.IndexedSlices(values, indices, params_shape), None] 
Example #4
Source File: pooling.py    From lambda-packs with MIT License
def call(self, inputs):
    # There is no TF op for 1D pooling, hence we make the inputs 4D.
    if self.data_format == 'channels_last':
      inputs = array_ops.expand_dims(inputs, 2)
      pool_shape = (1,) + self.pool_size + (1, 1)
      strides = (1,) + self.strides + (1, 1)
      data_format = 'NHWC'
    else:
      inputs = array_ops.expand_dims(inputs, 1)
      pool_shape = (1, 1) + self.pool_size + (1,)
      strides = (1, 1) + self.strides + (1,)
      data_format = 'NCHW'

    outputs = self.pool_function(
        inputs,
        ksize=pool_shape,
        strides=strides,
        padding=self.padding.upper(),
        data_format=data_format)

    if self.data_format == 'channels_last':
      return array_ops.squeeze(outputs, 2)
    else:
      return array_ops.squeeze(outputs, 1) 
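A sketch of the same trick with public ops, assuming TensorFlow 2.x: 1-D pooling is expressed by inserting a dummy spatial dimension with expand_dims, pooling in 2-D, and squeezing the dummy axis back out.

import tensorflow as tf

x = tf.random.normal([8, 100, 16])                   # (batch, steps, channels)
x4 = tf.expand_dims(x, 2)                            # (batch, steps, 1, channels)
pooled = tf.nn.max_pool2d(x4, ksize=[1, 2, 1, 1],
                          strides=[1, 2, 1, 1], padding='VALID')
y = tf.squeeze(pooled, 2)                            # (batch, 50, channels)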
Example #5
Source File: tfexample_decoder.py    From lambda-packs with MIT License
def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to a contatenated list of bboxes.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      [num_boxes, 4] tensor of bounding box coordinates,
        i.e. 1 bounding box per row, in order [y_min, x_min, y_max, x_max].
    """
    sides = []
    for key in self._full_keys:
      side = array_ops.expand_dims(keys_to_tensors[key].values, 0)
      sides.append(side)

    bounding_box = array_ops.concat(sides, 0)
    return array_ops.transpose(bounding_box) 
Example #6
Source File: gmm_ops.py    From lambda-packs with MIT License
def _define_partial_maximization_operation(self, shard_id, shard):
    """Computes the partial statistics of the means and covariances.

    Args:
      shard_id: current shard id.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    # Soft assignment of each data point to each of the two clusters.
    self._points_in_k[shard_id] = math_ops.reduce_sum(
        self._w[shard_id], 0, keep_dims=True)
    # Partial means.
    w_mul_x = array_ops.expand_dims(
        math_ops.matmul(
            self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
        1)
    self._w_mul_x.append(w_mul_x)
    # Partial covariances.
    x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
    x_trans = array_ops.transpose(x, perm=[0, 2, 1])
    x_mul_w = array_ops.concat([
        array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
        for k in range(self._num_classes)
    ], 0)
    self._w_mul_x2.append(math_ops.matmul(x_mul_w, x)) 
Example #7
Source File: mixture.py    From lambda-packs with MIT License
def _mean(self):
    with ops.control_dependencies(self._assertions):
      distribution_means = [d.mean() for d in self.components]
      cat_probs = self._cat_probs(log_probs=False)
      # This was checked to not be None at construction time.
      static_event_rank = self.event_shape.ndims
      # Expand the rank of x up to static_event_rank times so that
      # broadcasting works correctly.
      def expand(x):
        expanded_x = x
        for _ in range(static_event_rank):
          expanded_x = array_ops.expand_dims(expanded_x, -1)
        return expanded_x
      cat_probs = [expand(c_p) for c_p in cat_probs]
      partial_means = [
          c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
      ]
      # These should all be the same shape by virtue of matching
      # batch_shape and event_shape.
      return math_ops.add_n(partial_means) 
Example #8
Source File: crf.py    From lambda-packs with MIT License
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks 
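A sketch of the masking idea with public ops, assuming TensorFlow 2.x; the built-in tf.sequence_mask produces the same matrix as the tile/expand_dims/compare construction above.

import tensorflow as tf

lengths = tf.constant([2, 3, 1])
max_length = 4
tiled = tf.tile(tf.expand_dims(tf.range(max_length), 0),
                [tf.shape(lengths)[0], 1])
masks = tf.cast(tiled < tf.expand_dims(lengths, 1), tf.float32)
# masks == [[1, 1, 0, 0], [1, 1, 1, 0], [1, 0, 0, 0]]
# same as tf.sequence_mask(lengths, max_length, dtype=tf.float32)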
Example #9
Source File: head.py    From lambda-packs with MIT License
def _softmax_cross_entropy_loss(labels, logits, weights=None):
  with ops.name_scope(
      None, "softmax_cross_entropy_loss", (logits, labels,)) as name:
    labels = ops.convert_to_tensor(labels)
    # Check that we got integer for classification.
    if not labels.dtype.is_integer:
      raise ValueError("Labels dtype should be integer "
                       "Instead got %s." % labels.dtype)

    # sparse_softmax_cross_entropy_with_logits requires [batch_size] labels.
    is_squeezed_labels = False
    # TODO(ptucker): This will break for dynamic shapes.
    if len(labels.get_shape()) == 2:
      labels = array_ops.squeeze(labels, squeeze_dims=(1,))
      is_squeezed_labels = True

    loss = nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name=name)

    # Restore squeezed dimension, if necessary, so loss matches weights shape.
    if is_squeezed_labels:
      loss = array_ops.expand_dims(loss, axis=(1,))

    return _compute_weighted_loss(loss, weights) 
Example #10
Source File: backend.py    From lambda-packs with MIT License
def repeat(x, n):
  """Repeats a 2D tensor.

  if `x` has shape (samples, dim) and `n` is `2`,
  the output will have shape `(samples, 2, dim)`.

  Arguments:
      x: Tensor or variable.
      n: Python integer, number of times to repeat.

  Returns:
      A tensor.
  """
  assert ndim(x) == 2
  x = array_ops.expand_dims(x, 1)
  pattern = array_ops.stack([1, n, 1])
  return array_ops.tile(x, pattern) 
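A sketch of the same expand_dims + tile pattern, assuming TensorFlow 2.x.

import tensorflow as tf

x = tf.reshape(tf.range(6.0), [3, 2])     # shape (3, 2)
expanded = tf.expand_dims(x, 1)           # shape (3, 1, 2)
repeated = tf.tile(expanded, [1, 4, 1])   # shape (3, 4, 2): each row repeated 4 times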
Example #11
Source File: gmm_ops.py    From lambda-packs with MIT License
def _define_diag_covariance_probs(self, shard_id, shard):
    """Defines the diagonal covariance probabilities per example in a class.

    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.

    Returns a matrix num_examples * num_classes.
    """
    # num_classes X 1
    # TODO(xavigonzalvo): look into alternatives to log for
    # reparametrization of variance parameters.
    det_expanded = math_ops.reduce_sum(
        math_ops.log(self._covs + 1e-3), 1, keep_dims=True)
    diff = shard - self._means
    x2 = math_ops.square(diff)
    cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
    # num_classes X num_examples
    x2_cov = math_ops.matmul(x2, cov_expanded)
    x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
    self._probs[shard_id] = -0.5 * (
        math_ops.to_float(self._dimensions) * math_ops.log(2.0 * np.pi) +
        array_ops.transpose(det_expanded) + x2_cov) 
Example #12
Source File: beam_search_decoder.py    From lambda-packs with MIT License
def _tile_batch(t, multiplier):
  """Core single-tensor implementation of tile_batch."""
  t = ops.convert_to_tensor(t, name="t")
  shape_t = array_ops.shape(t)
  if t.shape.ndims is None or t.shape.ndims < 1:
    raise ValueError("t must have statically known rank")
  tiling = [1] * (t.shape.ndims + 1)
  tiling[1] = multiplier
  tiled_static_batch_size = (
      t.shape[0].value * multiplier if t.shape[0].value is not None else None)
  tiled = array_ops.tile(array_ops.expand_dims(t, 1), tiling)
  tiled = array_ops.reshape(
      tiled, array_ops.concat(([shape_t[0] * multiplier], shape_t[1:]), 0))
  tiled.set_shape(
      tensor_shape.TensorShape(
          [tiled_static_batch_size]).concatenate(t.shape[1:]))
  return tiled 
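A sketch of what _tile_batch achieves, assuming TensorFlow 2.x: each batch entry is repeated multiplier times along a new axis 1 and then folded back into the batch axis.

import tensorflow as tf

t = tf.constant([[1, 2], [3, 4]])                   # shape (2, 2)
tiled = tf.tile(tf.expand_dims(t, 1), [1, 3, 1])    # shape (2, 3, 2)
flat = tf.reshape(tiled, [-1, 2])                   # shape (6, 2)
# flat == [[1, 2], [1, 2], [1, 2], [3, 4], [3, 4], [3, 4]]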
Example #13
Source File: wishart.py    From lambda-packs with MIT License
def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
    """Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
    with self._name_scope(name, values=[a, p]):
      # Linspace only takes scalars, so we'll add in the offset afterwards.
      seq = math_ops.linspace(
          constant_op.constant(0., dtype=self.dtype),
          0.5 - 0.5 * p,
          math_ops.cast(p, dtypes.int32))
      return seq + array_ops.expand_dims(a, [-1]) 
Example #14
Source File: wishart.py    From lambda-packs with MIT License
def _variance(self):
    x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
    d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
    v = math_ops.square(x) + math_ops.matmul(d, d, adjoint_b=True)
    if self.cholesky_input_output_matrices:
      return linalg_ops.cholesky(v)
    return v 
Example #15
Source File: sparse_grad.py    From auto-alt-text-lambda-api with MIT License
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(
      [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat([[0], num_added_dims], 0),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy) 
Example #16
Source File: operator_pd_diag.py    From lambda-packs with MIT License
def _batch_matmul(self, x, transpose_x=False):
    if transpose_x:
      x = array_ops.matrix_transpose(x)
    diag_mat = array_ops.expand_dims(self._diag, -1)
    return diag_mat * x 
Example #17
Source File: sample_stats.py    From lambda-packs with MIT License
def _insert_back_keep_dims(x, axis):
  """Insert the dims in `axis` back as singletons after being removed.

  Args:
    x:  `Tensor`.
    axis:  Python list of integers.

  Returns:
    `Tensor` with same values as `x`, but additional singleton dimensions.
  """
  for i in sorted(axis):
    x = array_ops.expand_dims(x, axis=i)
  return x 
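A sketch of the idea, assuming TensorFlow 2.x: re-inserting the reduced axes as singletons lets the result broadcast against the original tensor, mimicking keepdims=True.

import tensorflow as tf

x = tf.random.normal([4, 5, 6])
axis = [0, 2]
m = tf.reduce_mean(x, axis=axis)          # shape (5,)
for i in sorted(axis):
    m = tf.expand_dims(m, axis=i)         # shape (1, 5, 1)
centered = x - m                          # broadcasts over axes 0 and 2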
Example #18
Source File: image_ops_impl.py    From auto-alt-text-lambda-api with MIT License
def grayscale_to_rgb(images, name=None):
  """Converts one or more images from Grayscale to RGB.

  Outputs a tensor of the same `DType` and rank as `images`.  The size of the
  last dimension of the output is 3, containing the RGB value of the pixels.

  Args:
    images: The Grayscale tensor to convert. Last dimension must be size 1.
    name: A name for the operation (optional).

  Returns:
    The converted grayscale image(s).
  """
  with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name:
    images = ops.convert_to_tensor(images, name='images')
    rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0)
    shape_list = (
        [array_ops.ones(rank_1,
                        dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)])
    multiples = array_ops.concat(shape_list, 0)
    rgb = array_ops.tile(images, multiples, name=name)
    rgb.set_shape(images.get_shape()[:-1].concatenate([3]))
    return rgb


# pylint: disable=invalid-name 
Example #19
Source File: nn_grad.py    From auto-alt-text-lambda-api with MIT License
def _BroadcastMul(vec, mat):
  """Multiply after broadcasting vec to match dimensions of mat.

  Args:
    vec: A 1-D tensor of dimension [D0]
    mat: A 2-D tensor of dimension [D0, D1]

  Returns:
    A tensor of dimension [D0, D1], the result of vec * mat
  """
  # Reshape vec to [D0, 1]
  vec = array_ops.expand_dims(vec, -1)
  return vec * mat 
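A sketch of the broadcast, assuming TensorFlow 2.x: expanding the vector to a [D0, 1] column lets it scale every row of the [D0, D1] matrix.

import tensorflow as tf

vec = tf.constant([1.0, 2.0, 3.0])        # shape (3,)
mat = tf.ones([3, 4])                     # shape (3, 4)
scaled = tf.expand_dims(vec, -1) * mat    # shape (3, 4); row i is scaled by vec[i]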
Example #20
Source File: operator_pd.py    From lambda-packs with MIT License
def _flip_vector_to_matrix_static(vec, batch_shape):
  """flip_vector_to_matrix with static shapes."""
  # Shapes associated with batch_shape
  batch_rank = batch_shape.ndims

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = vec.get_shape()
  vec_rank = len(vec_shape)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = vec_shape[:m]
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [np.prod(vec_shape_left)]
  k = vec_shape[-1]
  new_shape = batch_shape.concatenate(k).concatenate(condensed_shape)

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat((math_ops.range(m, vec_rank), math_ops.range(0, m)),
                            0)
    return array_ops.transpose(vec, perm=perm)

  if 0 < m:
    x_flipped = _flip_front_dims_to_back()
  else:
    x_flipped = array_ops.expand_dims(vec, -1)

  return array_ops.reshape(x_flipped, new_shape) 
Example #21
Source File: operator_pd.py    From lambda-packs with MIT License
def _flip_vector_to_matrix_dynamic(vec, batch_shape):
  """flip_vector_to_matrix with dynamic shapes."""
  # Shapes associated with batch_shape
  batch_rank = array_ops.size(batch_shape)

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = array_ops.shape(vec)
  vec_rank = array_ops.rank(vec)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = array_ops.strided_slice(vec_shape, [0], [m])
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
  k = array_ops.gather(vec_shape, vec_rank - 1)
  new_shape = array_ops.concat((batch_shape, [k], condensed_shape), 0)

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat((math_ops.range(m, vec_rank), math_ops.range(0, m)),
                            0)
    return array_ops.transpose(vec, perm=perm)

  x_flipped = control_flow_ops.cond(
      math_ops.less(0, m),
      _flip_front_dims_to_back,
      lambda: array_ops.expand_dims(vec, -1))

  return array_ops.reshape(x_flipped, new_shape) 
Example #22
Source File: target_column.py    From lambda-packs with MIT License
def _log_loss_with_two_classes(logits, target):
  # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])
  loss_vec = nn.sigmoid_cross_entropy_with_logits(
      labels=math_ops.to_float(target), logits=logits)
  return loss_vec 
Example #23
Source File: target_column.py    From lambda-packs with MIT License
def _mean_squared_loss(logits, target):
  # To prevent broadcasting inside "-".
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])

  logits.get_shape().assert_is_compatible_with(target.get_shape())
  return math_ops.square(logits - math_ops.to_float(target)) 
Example #24
Source File: nn_grad.py    From auto-alt-text-lambda-api with MIT License
def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors, the first being the gradient w.r.t. the input and
    TopK, and the second being the gradient w.r.t. the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])

  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))

  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D).
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])

  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  return [array_ops.reshape(
      sparse_ops.sparse_to_dense(ind,
                                 array_ops.reshape(
                                     math_ops.reduce_prod(in_shape), [1]),
                                 array_ops.reshape(grad, [-1]),
                                 validate_indices=False),
      in_shape), array_ops.zeros(
          [], dtype=dtypes.int32)] 
Example #25
Source File: head.py    From lambda-packs with MIT License
def _log_loss_with_two_classes(labels, logits, weights=None):
  with ops.name_scope(None, "log_loss_with_two_classes",
                      (logits, labels)) as name:
    logits = ops.convert_to_tensor(logits)
    labels = math_ops.to_float(labels)
    # TODO(ptucker): This will break for dynamic shapes.
    # sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
    if len(labels.get_shape()) == 1:
      labels = array_ops.expand_dims(labels, dim=(1,))
    loss = nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits,
                                                name=name)
    return _compute_weighted_loss(loss, weights) 
Example #26
Source File: head.py    From lambda-packs with MIT License
def _poisson_loss(labels, logits, weights=None):
  """Computes poisson loss from logits."""
  with ops.name_scope(None, "_poisson_loss", (logits, labels)) as name:
    logits = ops.convert_to_tensor(logits)
    labels = ops.convert_to_tensor(labels)
    # To prevent broadcasting inside "-".
    if len(labels.get_shape()) == 1:
      labels = array_ops.expand_dims(labels, dim=(1,))
    # TODO(zakaria): make sure it does not recreate the broadcast bug.
    if len(logits.get_shape()) == 1:
      logits = array_ops.expand_dims(logits, dim=(1,))
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    loss = nn.log_poisson_loss(labels, logits, compute_full_loss=True,
                               name=name)
    return _compute_weighted_loss(loss, weights) 
Example #27
Source File: linalg_grad.py    From auto-alt-text-lambda-api with MIT License
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  v = op.outputs[1]
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
  with ops.control_dependencies([grad_e.op, grad_v.op]):
    if grad_v is not None:
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only defined
      # up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.reciprocal(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e) + f * math_ops.matmul(
                  v, grad_v, adjoint_a=True),
              v,
              adjoint_b=True))
    else:
      grad_a = math_ops.matmul(
          v, math_ops.matmul(
              array_ops.matrix_diag(grad_e), v, adjoint_b=True))
    # The forward op only depends on the lower triangular part of a, so here we
    # symmetrize and take the lower triangle
    grad_a = array_ops.matrix_band_part(
        grad_a + array_ops.matrix_transpose(grad_a), -1, 0)
    grad_a = array_ops.matrix_set_diag(grad_a,
                                       0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a 
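A sketch of the pairwise-difference trick used to build the matrix f above, assuming TensorFlow 2.x: expanding the eigenvalue vector along two different axes and subtracting yields all pairwise differences at once.

import tensorflow as tf

e = tf.constant([1.0, 2.0, 4.0])
diff = tf.expand_dims(e, -2) - tf.expand_dims(e, -1)   # diff[i, j] = e[j] - e[i]
# [[ 0.,  1.,  3.],
#  [-1.,  0.,  2.],
#  [-3., -2.,  0.]]
f = tf.linalg.set_diag(tf.math.reciprocal(diff), tf.zeros_like(e))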
Example #28
Source File: head.py    From lambda-packs with MIT License
def _mean_squared_loss(labels, logits, weights=None):
  with ops.name_scope(None, "mean_squared_loss", (logits, labels)) as name:
    logits = ops.convert_to_tensor(logits)
    labels = ops.convert_to_tensor(labels)
    # To prevent broadcasting inside "-".
    if len(labels.get_shape()) == 1:
      labels = array_ops.expand_dims(labels, dim=(1,))
    # TODO(zakaria): make sure it does not recreate the broadcast bug.
    if len(logits.get_shape()) == 1:
      logits = array_ops.expand_dims(logits, dim=(1,))
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    loss = math_ops.square(logits - math_ops.to_float(labels), name=name)
    return _compute_weighted_loss(loss, weights) 
Example #29
Source File: rnn_cell.py    From auto-alt-text-lambda-api with MIT License
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell with attention (LSTMA)."""
    with vs.variable_scope(scope or "attention_cell_wrapper"):
      if self._state_is_tuple:
        state, attns, attn_states = state
      else:
        states = state
        state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
        attns = array_ops.slice(
            states, [0, self._cell.state_size], [-1, self._attn_size])
        attn_states = array_ops.slice(
            states, [0, self._cell.state_size + self._attn_size],
            [-1, self._attn_size * self._attn_length])
      attn_states = array_ops.reshape(attn_states,
                                      [-1, self._attn_length, self._attn_size])
      input_size = self._input_size
      if input_size is None:
        input_size = inputs.get_shape().as_list()[1]
      inputs = _linear([inputs, attns], input_size, True)
      lstm_output, new_state = self._cell(inputs, state)
      if self._state_is_tuple:
        new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
      else:
        new_state_cat = new_state
      new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
      with vs.variable_scope("attn_output_projection"):
        output = _linear([lstm_output, new_attns], self._attn_size, True)
      new_attn_states = array_ops.concat(
          [new_attn_states, array_ops.expand_dims(output, 1)], 1)
      new_attn_states = array_ops.reshape(
          new_attn_states, [-1, self._attn_length * self._attn_size])
      new_state = (new_state, new_attns, new_attn_states)
      if not self._state_is_tuple:
        new_state = array_ops.concat(list(new_state), 1)
      return output, new_state 
Example #30
Source File: image_ops_impl.py    From auto-alt-text-lambda-api with MIT License
def rgb_to_grayscale(images, name=None):
  """Converts one or more images from RGB to Grayscale.

  Outputs a tensor of the same `DType` and rank as `images`.  The size of the
  last dimension of the output is 1, containing the Grayscale value of the
  pixels.

  Args:
    images: The RGB tensor to convert. Last dimension must have size 3 and
      should contain RGB values.
    name: A name for the operation (optional).

  Returns:
    The converted grayscale image(s).
  """
  with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name:
    images = ops.convert_to_tensor(images, name='images')
    # Remember original dtype so we can convert back if needed
    orig_dtype = images.dtype
    flt_image = convert_image_dtype(images, dtypes.float32)

    # Reference for converting between RGB and grayscale.
    # https://en.wikipedia.org/wiki/Luma_%28video%29
    rgb_weights = [0.2989, 0.5870, 0.1140]
    rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0)
    gray_float = math_ops.reduce_sum(flt_image * rgb_weights,
                                     rank_1,
                                     keep_dims=True)
    gray_float.set_shape(images.get_shape()[:-1].concatenate([1]))
    return convert_image_dtype(gray_float, orig_dtype, name=name)
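A sketch of the luma-weighted reduction, assuming TensorFlow 2.x; the public tf.image.rgb_to_grayscale wraps the same computation.

import tensorflow as tf

rgb = tf.random.uniform([2, 4, 4, 3])                           # a batch of RGB images
weights = tf.constant([0.2989, 0.5870, 0.1140])
gray = tf.reduce_sum(rgb * weights, axis=-1, keepdims=True)     # shape (2, 4, 4, 1)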