Python tensorflow.python.framework.tensor_shape.scalar() Examples

The following are 30 code examples of tensorflow.python.framework.tensor_shape.scalar(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module tensorflow.python.framework.tensor_shape.
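Before the examples, a quick orientation: `tensor_shape.scalar()` simply builds a rank-0 `TensorShape`, i.e. the shape of a single value with no dimensions. A minimal sketch, assuming a TF 1.x environment where this private module is importable (newer TF versions drop `scalar()` in favour of writing `TensorShape([])` directly):

```python
from tensorflow.python.framework import tensor_shape

shape = tensor_shape.scalar()   # equivalent to tensor_shape.TensorShape([])
print(shape.ndims)              # 0 -- rank zero, i.e. a scalar
print(shape.is_compatible_with(tensor_shape.TensorShape([])))  # True
```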
Example #1
Source File: retrain.py    From tensorflow-image-detection with MIT License
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data into.

  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      prediction = tf.argmax(result_tensor, 1)
      correct_prediction = tf.equal(
          prediction, tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction 
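A hedged usage sketch for `add_evaluation_step`: the placeholder names `final_tensor` and `ground_truth_input` below are illustrative stand-ins for the graph that retrain.py normally builds, and the snippet assumes a TF 1.x graph/session setup.

```python
import numpy as np
import tensorflow as tf

# Two placeholders stand in for the softmax output and one-hot labels that
# retrain.py would normally wire in.
final_tensor = tf.placeholder(tf.float32, [None, 3], name='final_result')
ground_truth_input = tf.placeholder(tf.float32, [None, 3], name='ground_truth')
evaluation_step, prediction = add_evaluation_step(final_tensor, ground_truth_input)

with tf.Session() as sess:
    acc, preds = sess.run(
        [evaluation_step, prediction],
        feed_dict={
            final_tensor: np.eye(3, dtype=np.float32),        # "perfect" scores
            ground_truth_input: np.eye(3, dtype=np.float32),  # matching labels
        })
    print(acc)    # 1.0
    print(preds)  # [0 1 2]
```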
Example #2
Source File: dataset_ops.py    From lambda-packs with MIT License
def enumerate(self, start=0):
    """Enumerate the elements of this dataset.  Similar to Python's `enumerate`.

    For example:

    ```python
    # NOTE: The following examples use `{ ... }` to represent the
    # contents of a dataset.
    a = { 1, 2, 3 }
    b = { (7, 8), (9, 10), (11, 12) }

    # The nested structure of the `datasets` argument determines the
    # structure of elements in the resulting dataset.
    a.enumerate(start=5) == { (5, 1), (6, 2), (7, 3) }
    b.enumerate() == { (0, (7, 8)), (1, (9, 10)), (2, (11, 12)) }
    ```

    Args:
      start: A `tf.int64` scalar `tf.Tensor`, representing the start
        value for enumeration.

    Returns:
      A `Dataset`.
    """
    max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
    return Dataset.zip((Dataset.range(start, max_value), self)) 
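The method is just `Dataset.zip` over an int64 range. A sketch of the same construction with the public `tf.data` API, assuming a TF 1.x session-based setup:

```python
import numpy as np
import tensorflow as tf

items = tf.data.Dataset.from_tensor_slices([10, 20, 30])
# Pair each element with an int64 counter starting at 5, exactly as
# enumerate(start=5) does above.
enumerated = tf.data.Dataset.zip(
    (tf.data.Dataset.range(5, np.iinfo(np.int64).max), items))

nxt = enumerated.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    for _ in range(3):
        print(sess.run(nxt))   # (5, 10), (6, 20), (7, 30)
```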
Example #3
Source File: lookup_ops.py    From lambda-packs with MIT License
def __init__(self, table_ref, default_value, initializer):
    """Construct a table object from a table reference.

    It requires a table initializer object (a subclass of
    `TableInitializerBase`), which provides the table key and value types, as
    well as the op to initialize the table. The caller is responsible for
    executing the initialization op.

    Args:
      table_ref: The table reference, i.e. the output of the lookup table ops.
      default_value: The value to use if a key is missing in the table.
      initializer: The table initializer to use.
    """
    super(InitializableLookupTableBase,
          self).__init__(initializer.key_dtype, initializer.value_dtype,
                         table_ref.op.name.split("/")[-1])
    self._table_ref = table_ref
    self._default_value = ops.convert_to_tensor(
        default_value, dtype=self._value_dtype)
    self._default_value.get_shape().merge_with(tensor_shape.scalar())
    self._init = initializer.initialize(self) 
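To see the scalar constraint on `default_value` from the user's side, here is a hedged illustration with the public `tf.contrib.lookup` API in TF 1.x, whose `HashTable` builds on this base class; the scalar `-1` default is exactly what the `merge_with(tensor_shape.scalar())` check above accepts:

```python
import tensorflow as tf

table = tf.contrib.lookup.HashTable(
    tf.contrib.lookup.KeyValueTensorInitializer(
        keys=tf.constant(['a', 'b', 'c']),
        values=tf.constant([0, 1, 2], dtype=tf.int64)),
    default_value=-1)  # must be a scalar, checked via tensor_shape.scalar()

with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(table.size()))                            # 3
    print(sess.run(table.lookup(tf.constant(['b', 'z']))))   # [ 1 -1]
```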
Example #4
Source File: dataset_ops.py    From lambda-packs with MIT License
def __init__(self, iterator_resource, initializer, output_types,
               output_shapes):
    """Creates a new iterator from the given iterator resource.

    NOTE(mrry): Most users will not call this initializer directly, and will
    instead use `Iterator.from_dataset()` or `Dataset.make_one_shot_iterator()`.

    Args:
      iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
        iterator.
      initializer: A `tf.Operation` that should be run to initialize this
        iterator.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this iterator.
      output_shapes: A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset.
    """
    self._iterator_resource = iterator_resource
    self._initializer = initializer
    self._output_types = output_types
    self._output_shapes = output_shapes 
Example #5
Source File: lookup_ops.py    From lambda-packs with MIT License
def __init__(self, table_ref, default_value, initializer):
    """Construct a table object from a table reference.

    It requires a table initializer object (a subclass of
    `TableInitializerBase`), which provides the table key and value types, as
    well as the op to initialize the table. The caller is responsible for
    executing the initialization op.

    Args:
      table_ref: The table reference, i.e. the output of the lookup table ops.
      default_value: The value to use if a key is missing in the table.
      initializer: The table initializer to use.
    """
    super(InitializableLookupTableBase, self).__init__(
        initializer.key_dtype, initializer.value_dtype,
        table_ref.op.name.split("/")[-1])
    self._table_ref = table_ref
    self._default_value = ops.convert_to_tensor(default_value,
                                                dtype=self._value_dtype)
    self._default_value.get_shape().merge_with(tensor_shape.scalar())
    self._init = initializer.initialize(self) 
Example #6
Source File: lookup_ops.py    From auto-alt-text-lambda-api with MIT License
def __init__(self, table_ref, default_value, initializer):
    """Construct a table object from a table reference.

    It requires a table initializer object (a subclass of
    `TableInitializerBase`), which provides the table key and value types, as
    well as the op to initialize the table. The caller is responsible for
    executing the initialization op.

    Args:
      table_ref: The table reference, i.e. the output of the lookup table ops.
      default_value: The value to use if a key is missing in the table.
      initializer: The table initializer to use.
    """
    super(InitializableLookupTableBase, self).__init__(
        initializer.key_dtype, initializer.value_dtype,
        table_ref.op.name.split("/")[-1])
    self._table_ref = table_ref
    self._default_value = ops.convert_to_tensor(default_value,
                                                dtype=self._value_dtype)
    self._default_value.get_shape().merge_with(tensor_shape.scalar())
    self._init = initializer.initialize(self) 
Example #7
Source File: dataset_ops.py    From lambda-packs with MIT License
def map(self, map_func, num_threads=None, output_buffer_size=None):
    """Maps `map_func` across this dataset.

    Args:
      map_func: A function mapping a nested structure of tensors (having
        shapes and types defined by `self.output_shapes` and
        `self.output_types`) to another nested structure of tensors.
      num_threads: (Optional.) A `tf.int32` scalar `tf.Tensor`, representing
        the number of threads to use for processing elements in parallel. If
        not specified, elements will be processed sequentially without
        buffering.
      output_buffer_size: (Optional.) A `tf.int64` scalar `tf.Tensor`,
        representing the maximum number of processed elements that will be
        buffered when processing in parallel.

    Returns:
      A `Dataset`.
    """
    return MapDataset(self, map_func, num_threads, output_buffer_size) 
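In the public `tf.data` API the equivalent knobs are `num_parallel_calls` plus an explicit `prefetch()` rather than `num_threads`/`output_buffer_size`; a small sketch, assuming TF 1.4+ with a session:

```python
import tensorflow as tf

ds = (tf.data.Dataset.range(8)
      .map(lambda x: x * 2, num_parallel_calls=4)  # process elements in parallel
      .prefetch(16))                               # buffer processed elements

nxt = ds.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print([sess.run(nxt) for _ in range(8)])   # [0, 2, 4, 6, 8, 10, 12, 14]
```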
Example #8
Source File: dataset_ops.py    From lambda-packs with MIT License
def _padding_value_to_tensor(value, output_type):
  """Converts the padding value to a tensor.

  Args:
    value: The padding value.
    output_type: Its expected dtype.

  Returns:
    A scalar `Tensor`.

  Raises:
    ValueError: if the padding value is not a scalar.
    TypeError: if the padding value's type does not match `output_type`.
  """
  value = ops.convert_to_tensor(value, name="padding_value")
  if not value.shape.is_compatible_with(tensor_shape.scalar()):
    raise ValueError(
        "Padding value should be a scalar, but is not: %s" % value)
  if value.dtype != output_type:
    raise TypeError(
        "Padding value tensor (%s) does not match output type: %s"
        % (value, output_type))
  return value 
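A quick illustration of the scalar check performed above, using the same private TF 1.x modules:

```python
from tensorflow.python.framework import ops, tensor_shape

scalar_value = ops.convert_to_tensor(0)       # shape ()
vector_value = ops.convert_to_tensor([0, 0])  # shape (2,)
print(scalar_value.shape.is_compatible_with(tensor_shape.scalar()))   # True
print(vector_value.shape.is_compatible_with(tensor_shape.scalar()))   # False -> ValueError above
```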
Example #9
Source File: retrain.py    From diabetic-retinopathy-screening with GNU General Public License v3.0
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data into.

  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      prediction = tf.argmax(result_tensor, 1)
      correct_prediction = tf.equal(
          prediction, tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction 
Example #10
Source File: retrain.py    From Music_player_with_Emotions_recognition with MIT License
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data into.

  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      prediction = tf.argmax(result_tensor, 1)
      correct_prediction = tf.equal(
          prediction, tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction 
Example #11
Source File: retrain.py    From powerai-transfer-learning with Apache License 2.0
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data into.

  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      prediction = tf.argmax(result_tensor, 1)
      correct_prediction = tf.equal(
          prediction, tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction 
Example #12
Source File: strcuture.py    From BERT with Apache License 2.0
def _flat_shapes(self):
    # A TensorArray is represented via its variant object, which is a scalar.
    return [tensor_shape.scalar()] 
Example #13
Source File: strcuture.py    From BERT with Apache License 2.0
def _from_tensor_list(self, flat_value):
    if (len(flat_value) != 1 or flat_value[0].dtype != dtypes.variant or
        not flat_value[0].shape.is_compatible_with(tensor_shape.scalar())):
      raise ValueError("TensorArrayStructure corresponds to a single "
                       "tf.variant scalar.")
    return self._from_compatible_tensor_list(flat_value) 
Example #14
Source File: lookup_ops.py    From auto-alt-text-lambda-api with MIT License
def size(self, name=None):
    """Compute the number of elements in this table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    with ops.name_scope(name, "%s_Size" % self._name,
                        [self._table_ref]) as name:
      # pylint: disable=protected-access
      return gen_data_flow_ops._lookup_table_size(self._table_ref, name=name) 
Example #15
Source File: general.py    From BERT with Apache License 2.0
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0, fixedPointVar=1.0,
                 noise_shape=None, seed=None, name=None, training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1-binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))

        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
            lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
            lambda: array_ops.identity(x)) 
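A hedged usage sketch for `dropout_selu`; the `is_training` placeholder below is illustrative, and the snippet assumes a TF 1.x graph/session setup in which `utils.smart_cond` accepts a boolean tensor predicate:

```python
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4])
is_training = tf.placeholder_with_default(False, shape=[])
y = dropout_selu(x, rate=0.1, training=is_training)

with tf.Session() as sess:
    out = sess.run(y, feed_dict={x: np.ones((2, 4), np.float32),
                                 is_training: True})
    print(out.shape)   # (2, 4); roughly 10% of entries pushed toward alpha
```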
Example #16
Source File: retrain.py    From Music_player_with_Emotions_recognition with MIT License
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var) 
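A hypothetical usage of `variable_summaries`: attach the summaries to a weight variable, merge them, and write one record (the log directory below is illustrative):

```python
import tensorflow as tf

weights = tf.Variable(tf.truncated_normal([10, 5], stddev=0.1), name='weights')
variable_summaries(weights)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('/tmp/variable_summaries_demo', sess.graph)
    writer.add_summary(sess.run(merged), global_step=0)
    writer.close()
```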
Example #17
Source File: logistic.py    From auto-alt-text-lambda-api with MIT License
def _get_event_shape(self):
    return tensor_shape.scalar() 
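This and the similar distribution examples below return `tensor_shape.scalar()` to declare a univariate distribution: each event is a single value. What that looks like through the public `tf.distributions` API, assuming a TF 1.x version where that namespace exists:

```python
import tensorflow as tf

dist = tf.distributions.Normal(loc=0., scale=1.)
print(dist.event_shape)   # () -- each sampled event is a scalar
print(dist.batch_shape)   # () here too, since the parameters are scalars
```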
Example #18
Source File: inverse_gamma.py    From lambda-packs with MIT License
def _event_shape(self):
    return tensor_shape.scalar() 
Example #19
Source File: inverse_gamma.py    From auto-alt-text-lambda-api with MIT License
def _get_event_shape(self):
    return tensor_shape.scalar() 
Example #20
Source File: retrain.py    From tensorflow-image-detection with MIT License
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var) 
Example #21
Source File: gamma.py    From auto-alt-text-lambda-api with MIT License
def _get_event_shape(self):
    return tensor_shape.scalar() 
Example #22
Source File: common_shapes.py    From auto-alt-text-lambda-api with MIT License
def scalar_shape(unused_op):
  """Shape function for ops that output a scalar value."""
  return [tensor_shape.scalar()] 
Example #23
Source File: lookup_ops.py    From lambda-packs with MIT License
def size(self, name=None):
    """Compute the number of elements in this table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    with ops.name_scope(name, "%s_Size" % self._name,
                        [self._table_ref]) as name:
      # pylint: disable=protected-access
      return gen_lookup_ops._lookup_table_size(self._table_ref, name=name) 
Example #24
Source File: lookup_ops.py    From lambda-packs with MIT License
def size(self, name=None):
    """Compute the number of elements in this table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    with ops.name_scope(name, "%s_Size" % self._name,
                        [self._table_ref]) as scope:
      # pylint: disable=protected-access
      return gen_lookup_ops._lookup_table_size(self._table_ref, name=scope)
      # pylint: enable=protected-access 
Example #25
Source File: binomial.py    From lambda-packs with MIT License
def _event_shape(self):
    return tensor_shape.scalar() 
Example #26
Source File: gumbel.py    From lambda-packs with MIT License
def _event_shape(self):
    return tensor_shape.scalar() 
Example #27
Source File: deterministic.py    From lambda-packs with MIT License
def _event_shape(self):
    return tensor_shape.scalar() 
Example #28
Source File: poisson.py    From lambda-packs with MIT License
def _event_shape(self):
    return tensor_shape.scalar() 
Example #29
Source File: logistic.py    From lambda-packs with MIT License
def _event_shape(self):
    return tensor_shape.scalar() 
Example #30
Source File: retrain.py    From deep_image_model with Apache License 2.0
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)