Python tensorflow.python.ops.array_ops.ones() Examples

The following are 30 code examples of tensorflow.python.ops.array_ops.ones(), drawn from open-source projects. Each example lists the source file and project it comes from. You may also want to check out the other available functions and classes of the module tensorflow.python.ops.array_ops.
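Before the examples, a minimal sketch of the function itself, assuming a TF 1.x-style environment where the internal array_ops module is importable (the public tf.ones is the supported equivalent):

from tensorflow.python.ops import array_ops

ones_2x3 = array_ops.ones([2, 3])              # float32 tensor of 1.0s, shape [2, 3]
ones_int = array_ops.ones([4], dtype='int32')  # [1, 1, 1, 1]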
Example #1
Source File: backend.py    From lambda-packs with MIT License
def random_binomial(shape, p=0.0, dtype=None, seed=None):
  """Returns a tensor with random binomial distribution of values.

  Arguments:
      shape: A tuple of integers, the shape of tensor to create.
      p: A float, `0. <= p <= 1`, probability of binomial distribution.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  return array_ops.where(
      random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
      array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype)) 
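A hypothetical call, for illustration: with p=0.5 roughly half of the entries come back as ones.

mask = random_binomial((2, 3), p=0.5, seed=42)  # entries are 0. or 1., shape (2, 3)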
Example #2
Source File: student_t.py    From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed=None):
    # The sampling method comes from the fact that if:
    #   X ~ Normal(0, 1)
    #   Z ~ Chi2(df)
    #   Y = X / sqrt(Z / df)
    # then:
    #   Y ~ StudentT(df).
    shape = array_ops.concat([[n], self.batch_shape()], 0)
    normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
    df = self.df * array_ops.ones(self.batch_shape(), dtype=self.dtype)
    gamma_sample = random_ops.random_gamma(
        [n],
        0.5 * df,
        beta=0.5,
        dtype=self.dtype,
        seed=distribution_util.gen_new_seed(seed, salt="student_t"))
    samples = normal_sample / math_ops.sqrt(gamma_sample / df)
    return samples * self.sigma + self.mu  # Abs(sigma) not wanted. 
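The construction described in the comment can be checked in plain NumPy; this is a sketch with illustrative names, not code from the source:

import numpy as np

rng = np.random.RandomState(0)
df, n = 5.0, 100000
x = rng.standard_normal(n)   # X ~ Normal(0, 1)
z = rng.chisquare(df, n)     # Z ~ Chi2(df)
y = x / np.sqrt(z / df)      # Y ~ StudentT(df)
# For df > 2, the empirical variance of y approaches df / (df - 2) = 5/3.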
Example #3
Source File: bijector.py    From auto-alt-text-lambda-api with MIT License
def _process_matrix(self, matrix, min_rank, event_ndims):
    """Helper to __init__ which gets matrix in batch-ready form."""
    # Pad the matrix so that matmul works in the case of a matrix and vector
    # input.  Keep track if the matrix was padded, to distinguish between a
    # rank 3 tensor and a padded rank 2 tensor.
    # TODO(srvasude): Remove side-effects from functions. It's currently unbroken
    # but error-prone since the function call order may change in the future.
    self._rank_two_event_ndims_one = math_ops.logical_and(
        math_ops.equal(array_ops.rank(matrix), min_rank),
        math_ops.equal(event_ndims, 1))
    left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
    pad = array_ops.concat(
        [array_ops.ones(
            [left], dtype=dtypes.int32), array_ops.shape(matrix)],
        0)
    return array_ops.reshape(matrix, pad) 
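For intuition about the padding (a hedged NumPy analogue, not from the source): a rank-2 matrix paired with event_ndims=1 gains a leading singleton batch dimension, so batched matmul treats it uniformly with genuine rank-3 inputs.

import numpy as np

m = np.ones((2, 2))                 # rank-2 "matrix" input, event_ndims == 1
padded = m.reshape((1,) + m.shape)  # -> shape (1, 2, 2): batch-ready form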
Example #4
Source File: dirichlet.py    From lambda-packs with MIT License
def _mode(self):
    k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
    mode = (self.concentration - 1.) / (
        self.total_concentration[..., array_ops.newaxis] - k)
    if self.allow_nan_stats:
      nan = array_ops.fill(
          array_ops.shape(mode),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(
          math_ops.reduce_all(self.concentration > 1., axis=-1),
          mode, nan)
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones([], self.dtype),
            self.concentration,
            message="Mode undefined when any concentration <= 1"),
    ], mode) 
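A worked instance of the mode formula above, with illustrative numbers: for concentration [2., 3., 4.], k = 3 and the total concentration is 9, so the mode is (concentration - 1) / (9 - 3).

import numpy as np

alpha = np.array([2., 3., 4.])
mode = (alpha - 1.) / (alpha.sum() - alpha.size)  # [1/6, 1/3, 1/2]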
Example #5
Source File: dirichlet.py    From auto-alt-text-lambda-api with MIT License
def _mode(self):
    mode = ((self.alpha - 1.) /
            (array_ops.expand_dims(self.alpha_sum, dim=-1) -
             math_ops.cast(self.event_shape()[0], self.dtype)))
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      shape = array_ops.concat((self.batch_shape(), self.event_shape()), 0)
      return array_ops.where(
          math_ops.greater(self.alpha, 1.),
          mode,
          array_ops.fill(shape, nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), dtype=self.dtype), self.alpha,
              message="mode not defined for components of alpha <= 1")
      ], mode) 
Example #6
Source File: relaxed_onehot_categorical.py    From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed=None):
    sample_shape = array_ops.concat(([n], array_ops.shape(self.logits)), 0)
    logits = self.logits * array_ops.ones(sample_shape)
    if logits.get_shape().ndims == 2:
      logits_2d = logits
    else:
      logits_2d = array_ops.reshape(logits, [-1, self.num_classes])
    np_dtype = self.dtype.as_numpy_dtype()
    minval = np.nextafter(np_dtype(0), np_dtype(1))
    uniform = random_ops.random_uniform(shape=array_ops.shape(logits_2d),
                                        minval=minval,
                                        maxval=1,
                                        dtype=self.dtype,
                                        seed=seed)
    gumbel = - math_ops.log(- math_ops.log(uniform))
    noisy_logits = math_ops.div(gumbel + logits_2d, self.temperature)
    samples = nn_ops.log_softmax(noisy_logits)
    ret = array_ops.reshape(samples, sample_shape)
    return ret 
Example #7
Source File: student_t.py    From lambda-packs with MIT License
def _mean(self):
    mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
                                     dtype=self.dtype)
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          math_ops.greater(
              self.df,
              array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          mean,
          array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              check_ops.assert_less(
                  array_ops.ones([], dtype=self.dtype),
                  self.df,
                  message="mean not defined for components of df <= 1"),
          ],
          mean) 
Example #8
Source File: backend.py    From lambda-packs with MIT License
def ones_like(x, dtype=None, name=None):
  """Instantiates an all-ones variable of the same shape as another tensor.

  Arguments:
      x: Keras variable or tensor.
      dtype: String, dtype of returned Keras variable.
           None uses the dtype of x.
      name: String, name for the variable to create.

  Returns:
      A Keras variable with the shape of x filled with ones.

  Example:
  ```python
      >>> from keras import backend as K
      >>> kvar = K.variable(np.random.random((2,3)))
      >>> kvar_ones = K.ones_like(kvar)
      >>> K.eval(kvar_ones)
      array([[ 1.,  1.,  1.],
             [ 1.,  1.,  1.]], dtype=float32)
  ```
  """
  return array_ops.ones_like(x, dtype=dtype, name=name) 
Example #9
Source File: student_t.py    From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
    # The sampling method comes from the fact that if:
    #   X ~ Normal(0, 1)
    #   Z ~ Chi2(df)
    #   Y = X / sqrt(Z / df)
    # then:
    #   Y ~ StudentT(df).
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
    df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
    gamma_sample = random_ops.random_gamma(
        [n],
        0.5 * df,
        beta=0.5,
        dtype=self.dtype,
        seed=distribution_util.gen_new_seed(seed, salt="student_t"))
    samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
    return samples * self.scale + self.loc  # Abs(scale) not wanted. 
Example #10
Source File: affine_impl.py    From lambda-packs with MIT License
def _process_matrix(self, matrix, min_rank, event_ndims):
    """Helper to __init__ which gets matrix in batch-ready form."""
    # Pad the matrix so that matmul works in the case of a matrix and vector
    # input. Keep track if the matrix was padded, to distinguish between a
    # rank 3 tensor and a padded rank 2 tensor.
    # TODO(srvasude): Remove side-effects from functions. It's currently unbroken
    # but error-prone since the function call order may change in the future.
    self._rank_two_event_ndims_one = math_ops.logical_and(
        math_ops.equal(array_ops.rank(matrix), min_rank),
        math_ops.equal(event_ndims, 1))
    left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
    pad = array_ops.concat(
        [array_ops.ones(
            [left], dtype=dtypes.int32), array_ops.shape(matrix)],
        0)
    return array_ops.reshape(matrix, pad) 
Example #11
Source File: relaxed_onehot_categorical.py    From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
    sample_shape = array_ops.concat([[n], array_ops.shape(self.logits)], 0)
    logits = self.logits * array_ops.ones(sample_shape)
    logits_2d = array_ops.reshape(logits, [-1, self.event_size])
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    uniform = random_ops.random_uniform(
        shape=array_ops.shape(logits_2d),
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    gumbel = -math_ops.log(-math_ops.log(uniform))
    noisy_logits = math_ops.div(gumbel + logits_2d, self._temperature_2d)
    samples = nn_ops.log_softmax(noisy_logits)
    ret = array_ops.reshape(samples, sample_shape)
    return ret 
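The Gumbel trick used in the body above, isolated as a NumPy sketch (names are illustrative, not from the source):

import numpy as np

def relaxed_onehot_sample(logits, temperature, rng):
  # Uniforms on the open interval (tiny, 1), as the comment above explains,
  # so that -log(-log(u)) stays finite.
  tiny = np.finfo(logits.dtype).tiny
  u = rng.uniform(low=tiny, high=1.0, size=logits.shape)
  gumbel = -np.log(-np.log(u))
  noisy = (logits + gumbel) / temperature
  m = noisy.max(axis=-1, keepdims=True)  # stable log-softmax
  return noisy - (m + np.log(np.exp(noisy - m).sum(axis=-1, keepdims=True)))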
Example #12
Source File: core_test.py    From auto-alt-text-lambda-api with MIT License
def test_axis_order_scope(self):
    xz_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'z'])
    yz_lt = core.LabeledTensor(array_ops.ones((4, 3)), ['y', 'z'])

    _, _, broadcast_axes = core.align(xz_lt, yz_lt)
    self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z'])

    _, _, broadcast_axes = core.align(yz_lt, xz_lt)
    self.assertEqual(list(broadcast_axes.keys()), ['y', 'x', 'z'])

    with core.axis_order_scope(['x', 'y', 'z']):
      _, _, broadcast_axes = core.align(yz_lt, xz_lt)
      self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z'])

    with core.axis_order_scope(['x', 'y']):
      with self.assertRaises(core.AxisOrderError):
        core.align(xz_lt, yz_lt)
      with self.assertRaises(core.AxisOrderError):
        core.align(yz_lt, xz_lt) 
Example #13
Source File: linear_operator_addition.py    From lambda-packs with MIT License
def _add(self, op1, op2, operator_name, hints):
    # Will build a LinearOperatorScaledIdentity.

    if _type(op1) == _SCALED_IDENTITY:
      multiplier_1 = op1.multiplier
    else:
      multiplier_1 = array_ops.ones(op1.batch_shape_tensor(), dtype=op1.dtype)

    if _type(op2) == _SCALED_IDENTITY:
      multiplier_2 = op2.multiplier
    else:
      multiplier_2 = array_ops.ones(op2.batch_shape_tensor(), dtype=op2.dtype)

    return linear_operator_identity.LinearOperatorScaledIdentity(
        num_rows=op1.range_dimension_tensor(),
        multiplier=multiplier_1 + multiplier_2,
        is_non_singular=hints.is_non_singular,
        is_self_adjoint=hints.is_self_adjoint,
        is_positive_definite=hints.is_positive_definite,
        name=operator_name) 
Example #14
Source File: ops_test.py    From auto-alt-text-lambda-api with MIT License
def test_invalid(self):
    scalar_lt = core.LabeledTensor(array_ops.ones(()), [])
    x_lt = core.LabeledTensor(array_ops.ones((2,)), ['x'])
    x2_lt = core.LabeledTensor(array_ops.ones((3,)), ['x'])
    y_lt = core.LabeledTensor(array_ops.ones((3,)), ['y'])
    xy_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
    xyz_lt = core.LabeledTensor(array_ops.ones((2, 3, 1)), ['x', 'y', 'z'])

    with self.assertRaisesRegexp(ValueError, 'inputs with at least rank'):
      ops.matmul(x_lt, scalar_lt)

    with self.assertRaises(NotImplementedError):
      ops.matmul(x_lt, xyz_lt)

    with self.assertRaisesRegexp(ValueError, 'exactly one axis in common'):
      ops.matmul(x_lt, y_lt)

    with self.assertRaises(NotImplementedError):
      ops.matmul(xy_lt, xy_lt)

    with self.assertRaisesRegexp(ValueError, 'does not match'):
      ops.matmul(x_lt, x2_lt) 
Example #15
Source File: beta.py    From auto-alt-text-lambda-api with MIT License
def _mode(self):
    mode = (self.a - 1.) / (self.a_b_sum - 2.)
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          math_ops.logical_and(
              math_ops.greater(self.a, 1.),
              math_ops.greater(self.b, 1.)),
          mode,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), dtype=self.dtype), self.a,
              message="Mode not defined for components of a <= 1."),
          check_ops.assert_less(
              array_ops.ones((), dtype=self.dtype), self.b,
              message="Mode not defined for components of b <= 1."),
      ], mode) 
Example #16
Source File: layers.py    From tensornets with MIT License
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
     tensor: An `int` `Tensor` to be converted to a `Sparse`.
     eos_token: An integer. It is part of the target label that signifies the
       end of a sentence.
     outputs_collections: Collection to add the outputs.
     scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs) 
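A hypothetical use with padded labels (the eos_token doubles as padding here):

labels = constant_op.constant([[3, 1, 4, 0, 0],
                               [2, 7, 0, 0, 0]])
sparse = dense_to_sparse(labels, eos_token=0)
# sparse.values evaluates to [3, 1, 4, 2, 7]; sparse.indices records where
# each non-padding entry sat in the dense tensor.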
Example #17
Source File: ops_test.py    From auto-alt-text-lambda-api with MIT License
def test_name(self):
    x_lt = core.LabeledTensor(array_ops.ones((3,)), ['x'])
    matmul_lt = ops.matmul(x_lt, x_lt)
    self.assertIn('lt_matmul', matmul_lt.name) 
Example #18
Source File: core_test.py    From auto-alt-text-lambda-api with MIT License
def test_invalid(self):
    axis_order = ['w', 'x', 'y', 'z']
    lt = core.LabeledTensor(array_ops.ones((1, 1, 1, 1)), axis_order)
    with self.assertRaises(core.AxisOrderError):
      core.check_axis_order(lt)
    with self.assertRaises(core.AxisOrderError):
      core.check_axis_order(lt, axis_order[:-1])
    with self.assertRaises(core.AxisOrderError):
      core.check_axis_order(lt, axis_order[::-1]) 
Example #19
Source File: exponential.py    From auto-alt-text-lambda-api with MIT License
def __init__(self,
               lam,
               validate_args=False,
               allow_nan_stats=True,
               name="Exponential"):
    """Construct Exponential distribution with parameter `lam`.

    Args:
      lam: Floating point tensor, the rate of the distribution(s).
        `lam` must contain only positive values.
      validate_args: `Boolean`, default `False`.  Whether to assert that
        `lam > 0`, and that `x > 0` in the methods `prob(x)` and `log_prob(x)`.
        If `validate_args` is `False` and the inputs are invalid, correct
        behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prepend to all ops created by this distribution.
    """
    parameters = locals()
    parameters.pop("self")
    # Even though all statistics are defined for valid inputs, this is not
    # true in the parent class "Gamma." Therefore, passing allow_nan_stats=True
    # through to the parent class results in unnecessary asserts.
    with ops.name_scope(name, values=[lam]) as ns:
      self._lam = ops.convert_to_tensor(lam, name="lam")
    super(Exponential, self).__init__(
        alpha=array_ops.ones((), dtype=self._lam.dtype),
        beta=self._lam,
        allow_nan_stats=allow_nan_stats,
        validate_args=validate_args,
        name=ns)
    # While the Gamma distribution is not re-parameterizable, the
    # exponential distribution is.
    self._is_reparameterized = True
    self._parameters = parameters
    self._graph_parents += [self._lam] 
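The Gamma(alpha=1, beta=lam) parameterization used here can be sanity-checked in plain NumPy (a sketch; note np.random.gamma takes a scale, i.e. 1/beta):

import numpy as np

rng = np.random.RandomState(0)
lam = 2.0
exp_draws = rng.exponential(scale=1.0 / lam, size=100000)
gamma_draws = rng.gamma(shape=1.0, scale=1.0 / lam, size=100000)
# Both empirical means approach 1 / lam = 0.5.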
Example #20
Source File: inverse_gamma.py    From auto-alt-text-lambda-api with MIT License
def _mean(self):
    mean = self.beta / (self.alpha - 1.)
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          self.alpha > 1., mean,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), self.dtype), self.alpha,
              message="mean not defined for components of self.alpha <= 1"),
      ], mean) 
Example #21
Source File: student_t.py    From auto-alt-text-lambda-api with MIT License
def _entropy(self):
    v = array_ops.ones(self.batch_shape(), dtype=self.dtype)[..., None]
    u = v * self.df[..., None]
    beta_arg = array_ops.concat([u, v], -1) / 2.
    return (math_ops.log(math_ops.abs(self.sigma)) +
            0.5 * math_ops.log(self.df) +
            special_math_ops.lbeta(beta_arg) +
            0.5 * (self.df + 1.) *
            (math_ops.digamma(0.5 * (self.df + 1.)) -
             math_ops.digamma(0.5 * self.df))) 
Example #22
Source File: gamma.py    From auto-alt-text-lambda-api with MIT License
def _mode(self):
    mode = (self.alpha - 1.) / self.beta
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          self.alpha >= 1.,
          mode,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), self.dtype),
              self.alpha,
              message="mode not defined for components of alpha <= 1"),
          ], mode) 
Example #23
Source File: operator_pd_identity.py    From auto-alt-text-lambda-api with MIT License
def _sqrt_to_dense(self):
    diag = array_ops.ones(self.vector_shape(), dtype=self.dtype)
    dense = array_ops.matrix_diag(diag)
    dense.set_shape(self.get_shape())
    return math_ops.sqrt(self._scale) * dense 
Example #24
Source File: operator_pd_identity.py    From auto-alt-text-lambda-api with MIT License
def _to_dense(self):
    diag = array_ops.ones(self.vector_shape(), dtype=self.dtype)
    dense = array_ops.matrix_diag(diag)
    dense.set_shape(self.get_shape())
    return self._scale * dense 
Example #25
Source File: operator_pd_identity.py    From auto-alt-text-lambda-api with MIT License
def _batch_log_det(self):
    rank = array_ops.size(self._shape_arg)
    last_dim = math_ops.cast(
        array_ops.gather(self._shape_arg, rank - 1), dtype=self.dtype)
    log_det = (last_dim * math_ops.log(math_ops.abs(self._scale)) *
               array_ops.ones(self.batch_shape(), dtype=self.dtype))
    log_det.set_shape(self.get_batch_shape())
    return log_det 
Example #26
Source File: layers.py    From auto-alt-text-lambda-api with MIT License
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
  """Normalizes the given input across the specified dimension to unit length.

  Note that the rank of `input` must be known.

  Args:
    inputs: A `Tensor` of arbitrary size.
    dim: The dimension along which the input is normalized.
    epsilon: A small value to add to the inputs to avoid dividing by zero.
    scope: Optional scope for variable_scope.

  Returns:
    The normalized `Tensor`.

  Raises:
    ValueError: If dim is smaller than the number of dimensions in 'inputs'.
  """
  with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
    if not inputs.get_shape():
      raise ValueError('The input rank must be known.')
    input_rank = len(inputs.get_shape().as_list())
    if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be non-negative but smaller than the input rank.')

    lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(
        math_ops.square(inputs), dim, True))
    multiples = []
    if dim > 0:
      multiples.append(array_ops.ones([dim], dtypes.int32))
    multiples.append(
        array_ops.strided_slice(array_ops.shape(inputs), [dim], [dim + 1]))
    if dim < (input_rank - 1):
      multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
    multiples = array_ops.concat(multiples, 0)
    return math_ops.div(inputs, array_ops.tile(lengths, multiples)) 
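The same normalization written with NumPy broadcasting, which plays the role of the explicit array_ops.tile above (a sketch, not from the source):

import numpy as np

def unit_norm_np(x, dim, epsilon=1e-7):
  lengths = np.sqrt(epsilon + np.sum(np.square(x), axis=dim, keepdims=True))
  return x / lengths  # broadcasting expands lengths back along `dim`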
Example #27
Source File: linear.py    From auto-alt-text-lambda-api with MIT License
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
                     columns_to_variables):
  """Adds a fake bias feature column filled with all 1s."""
  # TODO(b/31008490): Move definition to a common constants place.
  bias_column_name = "tf_virtual_bias_column"
  if any(col.name == bias_column_name for col in feature_columns):
    raise ValueError("%s is a reserved column name." % bias_column_name)
  if not feature_columns:
    raise ValueError("feature_columns can't be empty.")

  # Loop through input tensors until we can figure out batch_size.
  batch_size = None
  for column in columns_to_tensors.values():
    if isinstance(column, tuple):
      column = column[0]
    if isinstance(column, sparse_tensor.SparseTensor):
      shape = tensor_util.constant_value(column.dense_shape)
      if shape is not None:
        batch_size = shape[0]
        break
    else:
      batch_size = array_ops.shape(column)[0]
      break
  if batch_size is None:
    raise ValueError("Could not infer batch size from input features.")

  bias_column = layers.real_valued_column(bias_column_name)
  columns_to_tensors[bias_column] = array_ops.ones([batch_size, 1],
                                                   dtype=dtypes.float32)
  columns_to_variables[bias_column] = [bias_variable] 
Example #28
Source File: clustering_ops.py    From auto-alt-text-lambda-api with MIT License
def _init_clusters(self):
    """Initialization of clusters.

    Returns:
    Tuple with following elements:
      cluster_centers: a Tensor for storing cluster centers
      cluster_counts: a Tensor for storing counts of points assigned to this
        cluster. This is used by mini-batch training.
    """
    init = self._initial_clusters
    if init == RANDOM_INIT:
      clusters_init = self._init_clusters_random()
    elif init == KMEANS_PLUS_PLUS_INIT:
      # Points from only the first shard are used for initializing centers.
      # TODO(ands): Use all points.
      clusters_init = gen_clustering_ops.kmeans_plus_plus_initialization(
          self._inputs[0], self._num_clusters, self._random_seed,
          self._kmeans_plus_plus_num_retries)
    elif callable(init):
      clusters_init = init(self._inputs, self._num_clusters)
    elif not isinstance(init, str):
      clusters_init = init
    else:
      assert False, 'Unsupported init passed to Kmeans %s' % str(init)
    if self._distance_metric == COSINE_DISTANCE and clusters_init is not None:
      clusters_init = nn_impl.l2_normalize(clusters_init, dim=1)
    clusters_init = clusters_init if clusters_init is not None else []
    cluster_centers = variables.Variable(
        clusters_init, name='clusters', validate_shape=False)
    cluster_counts = (variables.Variable(
        array_ops.ones(
            [self._num_clusters], dtype=dtypes.int64)) if self._use_mini_batch
                      else None)
    return cluster_centers, cluster_counts 
Example #29
Source File: nn_impl.py    From auto-alt-text-lambda-api with MIT License
def _sum_rows(x):
  """Returns a vector summing up each row of the matrix x."""
  # _sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is
  # a matrix.  The gradient of _sum_rows(x) is more efficient than
  # reduce_sum(x, 1)'s gradient in today's implementation. Therefore,
  # we use _sum_rows(x) in the nce_loss() computation since the loss
  # is mostly used for training.
  cols = array_ops.shape(x)[1]
  ones_shape = array_ops.stack([cols, 1])
  ones = array_ops.ones(ones_shape, x.dtype)
  return array_ops.reshape(math_ops.matmul(x, ones), [-1]) 
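The matmul-with-ones trick is easy to check in NumPy:

import numpy as np

x = np.arange(6.0).reshape(2, 3)
ones = np.ones((3, 1))
row_sums = (x @ ones).reshape(-1)   # [3., 12.]
assert np.allclose(row_sums, x.sum(axis=1))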
Example #30
Source File: image_ops_impl.py    From auto-alt-text-lambda-api with MIT License
def grayscale_to_rgb(images, name=None):
  """Converts one or more images from Grayscale to RGB.

  Outputs a tensor of the same `DType` and rank as `images`.  The size of the
  last dimension of the output is 3, containing the RGB value of the pixels.

  Args:
    images: The Grayscale tensor to convert. Last dimension must be size 1.
    name: A name for the operation (optional).

  Returns:
    The converted grayscale image(s).
  """
  with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name:
    images = ops.convert_to_tensor(images, name='images')
    rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0)
    shape_list = (
        [array_ops.ones(rank_1,
                        dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)])
    multiples = array_ops.concat(shape_list, 0)
    rgb = array_ops.tile(images, multiples, name=name)
    rgb.set_shape(images.get_shape()[:-1].concatenate([3]))
    return rgb
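A hypothetical call: a single-channel image has its channel dimension tiled three times.

gray = array_ops.ones([4, 4, 1])   # grayscale: last dimension is size 1
rgb = grayscale_to_rgb(gray)       # shape [4, 4, 3], channel value repeated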

