Python tensorflow.python.framework.constant_op.constant() Examples

The following are 30 code examples of tensorflow.python.framework.constant_op.constant(), drawn from open-source projects; the source file and project for each example are noted above it. You may also want to check out all available functions/classes of the module tensorflow.python.framework.constant_op, or try the search function.
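
As a minimal sketch of the call itself (a hedged illustration; the argument names follow the public tf.constant signature that this internal symbol backs):

from tensorflow.python.framework import constant_op

# Scalars, Python lists, and numpy arrays are all accepted;
# dtype, shape, and name are optional.
c0 = constant_op.constant(42)                      # scalar, dtype inferred as int32
c1 = constant_op.constant([1.0, 2.0], name="vec")  # 1-D float32 tensor
c2 = constant_op.constant(7, shape=[2, 3])         # scalar broadcast to a 2x3 tensor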
Example #1
Source File: session_debug_testlib.py    From lambda-packs with MIT License
def testDebugWhileLoopWatchingWholeGraphWorks(self):
    with session.Session() as sess:
      loop_body = lambda i: math_ops.add(i, 2)
      loop_cond = lambda i: math_ops.less(i, 16)

      i = constant_op.constant(10, name="i")
      loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(run_options,
                              sess.graph,
                              debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(
          16, sess.run(loop, options=run_options, run_metadata=run_metadata))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      self.assertEqual(
          [[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
      self.assertEqual(
          [[12], [14], [16]],
          dump.get_tensors("while/NextIteration", 0, "DebugIdentity")) 
Example #2
Source File: normal.py    From lambda-packs with MIT License
def _kl_normal_normal(n_a, n_b, name=None):
  """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.

  Args:
    n_a: instance of a Normal distribution object.
    n_b: instance of a Normal distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_normal_normal".

  Returns:
    Batchwise KL(n_a || n_b)
  """
  with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
    one = constant_op.constant(1, dtype=n_a.dtype)
    two = constant_op.constant(2, dtype=n_a.dtype)
    half = constant_op.constant(0.5, dtype=n_a.dtype)
    s_a_squared = math_ops.square(n_a.scale)
    s_b_squared = math_ops.square(n_b.scale)
    ratio = s_a_squared / s_b_squared
    return (math_ops.square(n_a.loc - n_b.loc) / (two * s_b_squared) +
            half * (ratio - one - math_ops.log(ratio))) 
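
For reference, the value returned above is the standard closed form for the KL divergence between univariate Gaussians, computed batchwise:

KL(N(mu_a, s_a^2) || N(mu_b, s_b^2))
    = (mu_a - mu_b)^2 / (2 * s_b^2) + 0.5 * (s_a^2 / s_b^2 - 1 - log(s_a^2 / s_b^2))

The constants one, two, and half are built with constant_op.constant in the distributions' dtype so every term of the formula stays in one precision.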
Example #3
Source File: array_ops.py    From lambda-packs with MIT License
def rank_internal(input, name=None, optimize=True):
  # pylint: disable=redefined-builtin
  """Returns the rank of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the rank as a constant when possible.

  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, "Rank", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_array_ops.size(input.dense_shape, name=name)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.ndims is not None:
        return constant(input_shape.ndims, dtypes.int32, name=name)
      return gen_array_ops.rank(input, name=name) 
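
A short sketch of the constant-folding path, assuming the public array_ops.rank wrapper that delegates to rank_internal:

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops

x = constant_op.constant([[1, 2], [3, 4]])
r = array_ops.rank(x)  # static shape (2, 2) is known, so this folds to a
                       # constant 2 instead of emitting a runtime Rank op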
Example #4
Source File: array_ops.py    From lambda-packs with MIT License
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the shape as a constant when possible.
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`.

  """
  with ops.name_scope(name, "Shape", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_math_ops.cast(input.dense_shape, out_type)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.is_fully_defined():
        return constant(input_shape.as_list(), out_type, name=name)
      return gen_array_ops.shape(input, name=name, out_type=out_type) 
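
The same folding applies to shapes; a sketch assuming the public array_ops.shape wrapper:

from tensorflow.python.framework import constant_op, dtypes
from tensorflow.python.ops import array_ops

x = constant_op.constant([[1., 2., 3.], [4., 5., 6.]])
s = array_ops.shape(x)                           # fully defined: constant [2, 3]
s64 = array_ops.shape(x, out_type=dtypes.int64)  # same values, emitted as int64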
Example #5
Source File: string_ops.py    From lambda-packs with MIT License
def _reduce_join_reduction_dims(x, axis, reduction_indices):
  """Returns range(rank(x) - 1, 0, -1) if reduction_indices is None."""
  # TODO(aselle): Remove this after deprecation
  if reduction_indices is not None:
    if axis is not None:
      raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
    axis = reduction_indices
  if axis is not None:
    return axis
  else:
    # Fast path: avoid creating Rank and Range ops if ndims is known.
    if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
      return constant_op.constant(
          np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)

    # Otherwise, we rely on Range and Rank to do the right thing at run-time.
    return math_ops.range(array_ops.rank(x) - 1, -1, -1) 
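
So for a rank-3 tensor whose rank is statically known, the default comes out as the constant [2, 1, 0]: reduce over every axis, innermost first, without building Rank or Range ops in the graph.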
Example #6
Source File: layers.py    From tensornets with MIT License
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
     tensor: An `int` `Tensor` to be converted to a `Sparse`.
     eos_token: An integer. It is part of the target label that signifies the
       end of a sentence.
     outputs_collections: Collection to add the outputs.
     scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs) 
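
A usage sketch with hypothetical zero-padded labels, assuming the tf.contrib.layers export of this function:

import tensorflow as tf

labels = tf.constant([[1, 2, 3, 0, 0],
                      [4, 5, 0, 0, 0]])
sparse_labels = tf.contrib.layers.dense_to_sparse(labels, eos_token=0)
# sparse_labels.values evaluates to [1, 2, 3, 4, 5]; indices holds the
# coordinates of every entry that differs from eos_token.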
Example #7
Source File: nn.py    From kvae with MIT License
def log_likelihood(mu, var, x, muq, varq, a, mask_flat, config):
    if config.out_distr == 'bernoulli':
        log_lik = log_bernoulli(x, mu, eps=1e-6)  # (bs*L, d1*d2)
    elif config.out_distr == 'gaussian':
        log_lik = log_gaussian(x, mu, var)
    else:
        raise ValueError("Unknown output distribution: %s" % config.out_distr)

    log_lik = tf.reduce_sum(log_lik, 1)  # (bs*L, )
    log_lik = tf.multiply(mask_flat, log_lik)
    # TODO: dropout scales the output as input/keep_prob. Issue?
    if config.ll_keep_prob < 1.0:
        log_lik = tf.layers.dropout(log_lik, config.ll_keep_prob)

    # We compute the log-likelihood *per frame*
    num_el = tf.reduce_sum(mask_flat)
    log_px_given_a = tf.truediv(tf.reduce_sum(log_lik), num_el)  # ()

    if config.use_vae:
        log_qa_given_x = tf.reduce_sum(log_gaussian(a, muq, varq), 1)  # (bs*L, )
        log_qa_given_x = tf.multiply(mask_flat, log_qa_given_x)
        log_qa_given_x = tf.truediv(tf.reduce_sum(log_qa_given_x), num_el)  # ()
    else:
        log_qa_given_x = tf.constant(0.0, dtype=tf.float32, shape=())

    LL = log_px_given_a - log_qa_given_x
    return LL, log_px_given_a, log_qa_given_x 
Example #8
Source File: nn.py    From kvae with MIT License
def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype

        if len(shape) == 1:
            return constant_op.constant(0., dtype=dtype, shape=shape)
        elif len(shape) == 2 and shape[0] == shape[1]:
            # Build the identity in numpy; constant() handles the dtype cast.
            return constant_op.constant(np.identity(shape[0]), dtype=dtype)
        elif len(shape) == 4 and shape[2] == shape[3]:
            array = np.zeros(shape, dtype=float)
            cx, cy = shape[0] // 2, shape[1] // 2  # integer indices of the spatial center
            for i in range(shape[2]):
                array[cx, cy, i, i] = 1
            return constant_op.constant(array, dtype=dtype)
        else:
            return constant_op.constant(0., dtype=dtype, shape=shape)
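
Taken together, the branches implement an identity-like initializer: rank-1 bias shapes get zeros, square matrices get np.identity, and 4-D conv kernels with equal input/output channel counts get a single 1 at the spatial center of each channel pair, i.e. a convolution that initially passes its input through unchanged.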
Example #9
Source File: backend.py    From lambda-packs with MIT License
def constant(value, dtype=None, shape=None, name=None):
  if dtype is None:
    dtype = floatx()
  return constant_op.constant(value, dtype=dtype, shape=shape, name=name) 
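
A hedged usage sketch of this Keras-backend wrapper via the public alias (the exact backend path varies across TF versions):

import tensorflow as tf

K = tf.keras.backend
c = K.constant([1, 2, 3])  # dtype falls back to K.floatx(), typically 'float32'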
Example #10
Source File: clustering_ops.py    From lambda-packs with MIT License
def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
    """Creates an op for training for full batch case.

    Args:
      inputs: list of input Tensors.
      cluster_idx_list: A vector (or list of vectors). Each element in the
        vector corresponds to an input row in 'inp' and specifies the cluster id
        corresponding to the input.
      cluster_centers: Tensor Ref of cluster centers.

    Returns:
      An op for doing an update of full-batch k-means.
    """
    cluster_sums = []
    cluster_counts = []
    epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
    for inp, cluster_idx in zip(inputs, cluster_idx_list):
      with ops.colocate_with(inp):
        cluster_sums.append(
            math_ops.unsorted_segment_sum(inp, cluster_idx, self._num_clusters))
        cluster_counts.append(
            math_ops.unsorted_segment_sum(
                array_ops.reshape(
                    array_ops.ones(
                        array_ops.reshape(array_ops.shape(inp)[0], [-1])),
                    [-1, 1]), cluster_idx, self._num_clusters))
    with ops.colocate_with(cluster_centers):
      new_clusters_centers = math_ops.add_n(cluster_sums) / (math_ops.cast(
          math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
      if self._clusters_l2_normalized():
        new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
    return state_ops.assign(cluster_centers, new_clusters_centers) 
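
In formula form, the update is c_j <- (sum of inputs assigned to cluster j) / (count assigned to j + epsilon), where epsilon = 1e-6 keeps empty clusters from dividing by zero; the result optionally gets L2-normalized per row before being assigned back into cluster_centers.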
Example #11
Source File: proximal_adagrad.py    From lambda-packs with MIT License
def _create_slots(self, var_list):
    for v in var_list:
      with ops.colocate_with(v):
        val = constant_op.constant(self._initial_accumulator_value,
                                   shape=v.get_shape(),
                                   dtype=v.dtype.base_dtype)
      self._get_or_make_slot(v, val, "accumulator", self._name) 
Example #12
Source File: saver.py    From lambda-packs with MIT License
def _AddShardedSaveOps(self, filename_tensor, per_device):
    """Add ops to save the params per shard.

    Args:
      filename_tensor: a scalar String Tensor.
      per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs, as
        returned by _GroupByDevices().

    Returns:
      An op to save the variables.
    """
    if self._write_version == saver_pb2.SaverDef.V2:
      return self._AddShardedSaveOpsForV2(filename_tensor, per_device)

    num_shards = len(per_device)
    sharded_saves = []
    num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
    for shard, (device, saveables) in enumerate(per_device):
      with ops.device(device):
        sharded_filename = self.sharded_filename(filename_tensor, shard,
                                                 num_shards_tensor)
        sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))
    # Return the sharded name for the save path.
    with ops.control_dependencies([x.op for x in sharded_saves]):
      # pylint: disable=protected-access
      return gen_io_ops._sharded_filespec(filename_tensor, num_shards_tensor) 
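
Note the early return at the top: checkpoint format V2 takes a separate code path, so the constant num_shards tensor and the per-device save ops below serve only the legacy V1 format.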
Example #13
Source File: input.py    From lambda-packs with MIT License
def limit_epochs(tensor, num_epochs=None, name=None):
  """Returns tensor `num_epochs` times and then raises an `OutOfRange` error.

  Note: creates local counter `epochs`. Use `local_variables_initializer()` to
  initialize local variables.

  Args:
    tensor: Any `Tensor`.
    num_epochs: A positive integer (optional).  If specified, limits the number
      of steps the output tensor may be evaluated.
    name: A name for the operations (optional).

  Returns:
    tensor or `OutOfRange`.

  Raises:
    ValueError: if `num_epochs` is invalid.
  """
  if num_epochs is None:
    return tensor
  if num_epochs <= 0:
    raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
  with ops.name_scope(name, "limit_epochs", [tensor]) as name:
    zero64 = constant_op.constant(0, dtype=dtypes.int64)
    epochs = vs.variable(
        zero64, name="epochs", trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES])
    counter = epochs.count_up_to(num_epochs)
    with ops.control_dependencies([counter]):
      return array_ops.identity(tensor, name=name) 
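
A runnable sketch of the contract, assuming the tf.train.limit_epochs export and a TF 1.x session:

import tensorflow as tf

t = tf.constant([1, 2, 3])
limited = tf.train.limit_epochs(t, num_epochs=2)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # initializes the local epochs counter
    sess.run(limited)   # 1st evaluation: [1, 2, 3]
    sess.run(limited)   # 2nd evaluation: [1, 2, 3]
    # a 3rd evaluation raises tf.errors.OutOfRangeError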
Example #14
Source File: ftrl.py    From lambda-packs with MIT License
def _create_slots(self, var_list):
    # Create the "accum" and "linear" slots.
    for v in var_list:
      with ops.colocate_with(v):
        val = constant_op.constant(
            self._initial_accumulator_value, dtype=v.dtype, shape=v.get_shape())
        self._get_or_make_slot(v, val, "accum", self._accum_name or self._name)
        self._zeros_slot(v, "linear", self._linear_name or self._name) 
Example #15
Source File: test_utils.py    From lambda-packs with MIT License
def _encoder(image, image_format):
  assert image_format in ['jpeg', 'png']
  if image_format == 'jpeg':
    tf_image = constant_op.constant(image, dtype=dtypes.uint8)
    return image_ops.encode_jpeg(tf_image)
  if image_format == 'png':
    tf_image = constant_op.constant(image, dtype=dtypes.uint8)
    return image_ops.encode_png(tf_image) 
Example #16
Source File: math_grad.py    From lambda-packs with MIT License
def _ErfcGrad(op, grad):
  """Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  minus_two_over_root_pi = constant_op.constant(
      -2 / np.sqrt(np.pi), dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x)) 
Example #17
Source File: math_grad.py    From lambda-packs with MIT License
def _ErfGrad(op, grad):
  """Returns grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x)) 
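
Both registrations are the chain rule applied to d/dx erf(x) = 2/sqrt(pi) * exp(-x^2); since erfc(x) = 1 - erf(x), the erfc gradient in Example #16 is the same factor negated. The math_ops.conj call keeps the rule correct for complex inputs.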
Example #18
Source File: beta.py    From lambda-packs with MIT License
def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32) 
Example #19
Source File: categorical.py    From lambda-packs with MIT License
def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32) 
Example #20
Source File: normal.py    From lambda-packs with MIT License
def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32) 
Example #21
Source File: laplace.py    From lambda-packs with MIT License
def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32) 
Example #22
Source File: identity_bijector.py    From lambda-packs with MIT License
def _forward_log_det_jacobian(self, x):
    return constant_op.constant(0., dtype=x.dtype) 
Example #23
Source File: special_math.py    From lambda-packs with MIT License
def _ndtr(x):
  """Implements ndtr core logic."""
  half_sqrt_2 = constant_op.constant(
      0.5 * math.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
  w = x * half_sqrt_2
  z = math_ops.abs(w)
  y = array_ops.where(math_ops.less(z, half_sqrt_2),
                      1. + math_ops.erf(w),
                      array_ops.where(math_ops.greater(w, 0.),
                                      2. - math_ops.erfc(z),
                                      math_ops.erfc(z)))
  return 0.5 * y 
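
The identity at work is ndtr(x) = Phi(x) = 0.5 * (1 + erf(x / sqrt(2))) = 0.5 * erfc(-x / sqrt(2)). The nested where picks the erf form near zero and the appropriate erfc tail otherwise, since each expression is the more numerically accurate one on its branch.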
Example #24
Source File: uniform.py    From lambda-packs with MIT License
def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32) 
Example #25
Source File: student_t.py    From lambda-packs with MIT License
def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32)
Example #26
Source File: transformed_distribution.py    From lambda-packs with MIT License
def _ndims_from_shape(shape):
  """Returns `Tensor`'s `rank` implied by a `Tensor` shape."""
  if shape.get_shape().ndims not in (None, 1):
    raise ValueError("input is not a valid shape: not 1D")
  if not shape.dtype.is_integer:
    raise TypeError("input is not a valid shape: wrong dtype")
  if shape.get_shape().is_fully_defined():
    return constant_op.constant(shape.get_shape().as_list()[0])
  return array_ops.shape(shape)[0] 
Example #27
Source File: transformed_distribution.py    From lambda-packs with MIT License
def _concat_vectors(*args):
  """Convenience function which concatenates input vectors."""
  args_ = [_static_value(x) for x in args]
  if any(x_ is None for x_ in args_):
    return array_ops.concat(args, 0)
  return constant_op.constant([x_ for vec_ in args_ for x_ in vec_]) 
Example #28
Source File: transformed_distribution.py    From lambda-packs with MIT License
def _logical_equal(x, y):
  """Convenience function which attempts to statically compute `x == y`."""
  x_ = _static_value(x)
  y_ = _static_value(y)
  if x_ is None or y_ is None:
    return math_ops.equal(x, y)
  return constant_op.constant(np.array_equal(x_, y_)) 
Example #29
Source File: transformed_distribution.py    From lambda-packs with MIT License
def _logical_and(*args):
  """Convenience function which attempts to statically `reduce_all`."""
  args_ = [_static_value(x) for x in args]
  if any(x is not None and not bool(x) for x in args_):
    return constant_op.constant(False)
  if all(x is not None and bool(x) for x in args_):
    return constant_op.constant(True)
  if len(args) == 2:
    return math_ops.logical_and(*args)
  return math_ops.reduce_all(args) 
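
The three private helpers in Examples #27-#29 share one pattern: when every operand has a statically computable value, answer with a Python-evaluated constant_op.constant and put no comparison op in the graph; only otherwise fall back to the runtime math_ops call. For instance, _logical_equal on two statically known equal vectors folds directly to constant_op.constant(True).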
Example #30
Source File: tensor_forest.py    From lambda-packs with MIT License
def _get_loss(self, features, labels):
    """Constructs, caches, and returns the inference-based loss."""
    if self._loss is not None:
      return self._loss

    def _average_loss():
      probs = self.inference_graph(features)
      return math_ops.reduce_sum(self.loss_fn(
          probs, labels)) / math_ops.to_float(array_ops.shape(labels)[0])

    self._loss = control_flow_ops.cond(
        self.average_size() > 0, _average_loss,
        lambda: constant_op.constant(sys.maxsize, dtype=dtypes.float32))

    return self._loss