Python tensorflow.uint64() Examples

The following are 9 code examples of tensorflow.uint64(). The original project and source file for each example are listed above it. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
Example #1
Source File: tfrecord_test.py    From nobrainer with Apache License 2.0
def test__dtype_to_bytes():
    np_tf_dt = [
        (np.uint8, tf.uint8, b"uint8"),
        (np.uint16, tf.uint16, b"uint16"),
        (np.uint32, tf.uint32, b"uint32"),
        (np.uint64, tf.uint64, b"uint64"),
        (np.int8, tf.int8, b"int8"),
        (np.int16, tf.int16, b"int16"),
        (np.int32, tf.int32, b"int32"),
        (np.int64, tf.int64, b"int64"),
        (np.float16, tf.float16, b"float16"),
        (np.float32, tf.float32, b"float32"),
        (np.float64, tf.float64, b"float64"),
    ]

    for npd, tfd, dt in np_tf_dt:
        npd = np.dtype(npd)
        assert tfrecord._dtype_to_bytes(npd) == dt
        assert tfrecord._dtype_to_bytes(tfd) == dt

    assert tfrecord._dtype_to_bytes("float32") == b"float32"
    assert tfrecord._dtype_to_bytes("foobar") == b"foobar" 
Example #2
Source File: tensorflow_util.py    From MedicalDataAugmentationTool with GNU General Public License v3.0
def reduce_mean_support_empty(input, keepdims=False):
    return tf.cond(tf.size(input) > 0, lambda: tf.reduce_mean(input, keepdims=keepdims), lambda: tf.zeros_like(input))


# def bit_tensor_list(input):
#     assert input.dtype in [tf.uint8, tf.uint16, tf.uint32, tf.uint64], 'unsupported data type, must be uint*'
#     num_bits = 0
#     if input.dtype == tf.uint8:
#         num_bits = 8
#     elif input.dtype == tf.uint16:
#         num_bits = 16
#     elif input.dtype == tf.uint32:
#         num_bits = 32
#     elif input.dtype == tf.uint64:
#         num_bits = 64
#     bit_tensors = []
#     for i in range(num_bits):
#         current_bit = 1 << i
#         current_bit_tensor = tf.greater(tf.bitwise.bitwise_and(input, current_bit), 0)
#         bit_tensors.append(current_bit_tensor)
#     print(bit_tensors)
#     return bit_tensors 
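A hedged usage sketch of reduce_mean_support_empty as defined above, assuming TF 2.x eager execution: tf.reduce_mean of an empty tensor yields NaN, which the helper avoids by falling back to zeros.

import tensorflow as tf

empty = tf.constant([], dtype=tf.float32)
vals = tf.constant([1.0, 2.0, 3.0])

print(tf.reduce_mean(empty).numpy())             # nan
print(reduce_mean_support_empty(empty).numpy())  # [] (zeros_like of the empty input)
print(reduce_mean_support_empty(vals).numpy())   # 2.0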
Example #3
Source File: mod.py    From onnx-tensorflow with Apache License 2.0
def args_check(cls, node, **kwargs):
    unsupported_dtype = [
        tf.int8, tf.int16, tf.uint8, tf.uint16, tf.uint32, tf.uint64
    ]
    x = kwargs["tensor_dict"][node.inputs[0]]
    y = kwargs["tensor_dict"][node.inputs[1]]
    if x.dtype in unsupported_dtype:
      exception.OP_UNSUPPORTED_EXCEPT("Mod Dividend in " + str(x.dtype),
                                      "Tensorflow")
    if y.dtype in unsupported_dtype:
      exception.OP_UNSUPPORTED_EXCEPT("Mod Divisor in " + str(y.dtype),
                                      "Tensorflow") 
Example #4
Source File: clip.py    From onnx-tensorflow with Apache License 2.0
def args_check(cls, node, **kwargs):
    x = kwargs["tensor_dict"][node.inputs[0]]
    # uint64 cannot be upcast to any TensorFlow-supported datatype
    # for tf.clip_by_value without losing precision
    if x.dtype == tf.uint64:
      exception.OP_UNSUPPORTED_EXCEPT(
          "Clip input, min and max in " + str(x.dtype) + " datatype",
          "Tensorflow") 
Example #5
Source File: tf_utils.py    From transform with Apache License 2.0
def reduce_batch_minus_min_and_max(x, reduce_instance_dims):
  """Computes the -min and max of a tensor x.

  Args:
    x: A `tf.Tensor`.
    reduce_instance_dims: A bool indicating whether this should collapse the
      batch and instance dimensions to arrive at a single scalar output, or only
      collapse the batch dimension and outputs a vector of the same shape as the
      input.

  Returns:
    The computed `tf.Tensor`s (batch -min, batch max) pair.
  """
  output_dtype = x.dtype

  if x.dtype == tf.uint8 or x.dtype == tf.uint16:
    x = tf.cast(x, tf.int32)

  elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
    raise TypeError('Tensor type %r is not supported' % x.dtype)

  if reduce_instance_dims:
    if isinstance(x, tf.SparseTensor):
      x = x.values

    x_batch_max = tf.reduce_max(input_tensor=x)
    x_batch_minus_min = tf.reduce_max(input_tensor=tf.zeros_like(x) - x)
    x_batch_minus_min, x_batch_max = assert_same_shape(x_batch_minus_min,
                                                       x_batch_max)
  elif isinstance(x, tf.SparseTensor):
    x_batch_minus_min, x_batch_max = (
        _sparse_minus_reduce_min_and_reduce_max(x))
  else:
    x_batch_max = tf.reduce_max(input_tensor=x, axis=0)
    x_batch_minus_min = tf.reduce_max(input_tensor=0 - x, axis=0)

  # TODO(b/112309021): Remove workaround once tf.reduce_max of a tensor of all
  # NaNs produces -inf.
  return (_inf_to_nan(x_batch_minus_min, output_dtype),
          _inf_to_nan(x_batch_max, output_dtype)) 
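A hedged sketch (TF 2.x eager assumed) of the identity the function above relies on: the batch -min is computed as reduce_max(0 - x), so a single reduction op produces both statistics.

import tensorflow as tf

x = tf.constant([[1.0, -4.0], [3.0, 2.0]])

batch_max = tf.reduce_max(input_tensor=x)                           # 3.0
batch_minus_min = tf.reduce_max(input_tensor=tf.zeros_like(x) - x)  # 4.0

print(batch_max.numpy(), batch_minus_min.numpy())
print(batch_minus_min.numpy() == -tf.reduce_min(x).numpy())         # True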
Example #6
Source File: analyzers.py    From transform with Apache License 2.0
def sum(x, reduce_instance_dims=True, name=None):  # pylint: disable=redefined-builtin
  """Computes the sum of the values of a `Tensor` over the whole dataset.

  Args:
    x: A `Tensor` or `SparseTensor`. Its type must be floating point
        (float{16|32|64}), integral (int{8|16|32|64}), or
        unsigned integral (uint{8|16}).
    reduce_instance_dims: By default collapses the batch and instance dimensions
        to arrive at a single scalar output. If False, only collapses the batch
        dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the sum. If `x` is float32 or float64, the sum will
    have the same type as `x`. If `x` is float16, the output is cast to float32.
    If `x` is integral, the output is cast to [u]int64. If `x` is sparse and
    reduce_instance_dims is False, the sum is 0 for columns that have no values
    across batches.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'sum'):
    if reduce_instance_dims:
      if isinstance(x, tf.SparseTensor):
        x = x.values
      x = tf.reduce_sum(input_tensor=x)
    elif isinstance(x, tf.SparseTensor):
      if x.dtype == tf.uint8 or x.dtype == tf.uint16:
        x = tf.cast(x, tf.int64)
      elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
        raise TypeError('Data type %r is not supported' % x.dtype)
      x = tf.sparse.reduce_sum(x, axis=0)
    else:
      x = tf.reduce_sum(input_tensor=x, axis=0)
    output_dtype, sum_fn = _sum_combine_fn_and_dtype(x.dtype)
    return _numeric_combine([x], sum_fn, reduce_instance_dims,
                            [output_dtype])[0] 
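A hedged sketch of the sparse, per-column branch above, mirroring its uint8 -> int64 cast before tf.sparse.reduce_sum: with int64 values the per-column sums do not wrap around, and columns with no values come out as 0, as the docstring describes.

import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [1, 0], [1, 2]],
                     values=tf.constant([200, 100, 7], dtype=tf.uint8),
                     dense_shape=[2, 3])
sp = tf.SparseTensor(sp.indices, tf.cast(sp.values, tf.int64), sp.dense_shape)

col_sums = tf.sparse.reduce_sum(sp, axis=0)
print(col_sums.numpy())  # [300   0   7] -- no uint8 wrap-around, 0 for the empty column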
Example #7
Source File: layers.py    From fold with Apache License 2.0
def _create_variables(self):
    if self.input_type.ndim != 0:
      raise TypeError('Embeddings take scalar inputs.')
    dtype = tf.as_dtype(self.input_type.dtype)
    if not dtype.is_integer: raise TypeError('Embeddings take integer inputs.')
    if dtype not in (tf.int32, tf.int64):  # only dtypes supported by tf.gather
      if np.iinfo(dtype.as_numpy_dtype).max > 2147483647:
        # pedantic future-proofing to handle hypothetical tf.uint64
        raise TypeError('cannot gather or upcast dtype %s' % dtype)
      self._cast = True
    else:
      self._cast = False
    self._weights = tf.get_variable(
        'weights', self._weights_shape, initializer=self._initializer,
        trainable=self._trainable) 
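A hedged sketch of the cast path above; the excerpt does not show the cast target, but the int32-max guard suggests indices are cast to an integer type tf.gather accepts (int32 here), which is lossless for dtypes such as uint16.

import numpy as np
import tensorflow as tf

weights = tf.random.uniform([10, 4])
ids = tf.constant([1, 5, 9], dtype=tf.uint16)

# uint16's max fits in int32, so the cast before tf.gather loses nothing.
assert np.iinfo(np.uint16).max <= 2147483647
embedded = tf.gather(weights, tf.cast(ids, tf.int32))
print(embedded.shape)  # (3, 4)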
Example #8
Source File: tensorflow_util.py    From MedicalDataAugmentationTool with GNU General Public License v3.0
def masked_bit(input, bit_index):
    """
    Returns a boolean tensor that is True wherever the bit at bit_index is set.
    :param input: The input tensor to check.
    :param bit_index: The index of the bit to test with a bitwise and. (LSB 0 order)
    :return: The boolean tensor.
    """
    assert input.dtype in [tf.int8, tf.int16, tf.int32, tf.int64, tf.uint8, tf.uint16, tf.uint32, tf.uint64], 'unsupported data type, must be *int*'
    current_bit = tf.bitwise.left_shift(tf.constant(1, dtype=input.dtype), tf.cast(bit_index, dtype=input.dtype))
    return tf.greater(tf.bitwise.bitwise_and(input, current_bit), 0) 
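A hedged usage sketch of masked_bit above (TF 2.x eager assumed): test whether bit 2, i.e. the value 4, is set in each element of a uint64 tensor.

import tensorflow as tf

labels = tf.constant([0, 4, 5, 8], dtype=tf.uint64)
print(masked_bit(labels, 2).numpy())  # [False  True  True False]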
Example #9
Source File: tf_utils.py    From transform with Apache License 2.0
def reduce_batch_minus_min_and_max_per_key(x, key):
  """Computes the -min and max of a tensor x.

  Args:
    x: A `tf.Tensor` or `SparseTensor`.
    key: A `Tensor` or `SparseTensor`.
        Must meet one of the following conditions:
        1. Both x and key are dense,
        2. Both x and key are sparse and `key` must exactly match `x` in
        everything except values,
        3. The axis=1 index of each x matches its index of dense key.
  Returns:
    A 3-tuple containing the `Tensor`s (key_vocab, min_per_key, max_per_key).
  """
  output_dtype = x.dtype

  if x.dtype == tf.uint8 or x.dtype == tf.uint16:
    x = tf.cast(x, tf.int32)

  elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
    raise TypeError('Tensor type %r is not supported' % x.dtype)

  x, key = _validate_and_get_dense_value_key_inputs(x, key)

  def get_batch_max_per_key(tensor, key_uniques, dtype):  # pylint: disable=missing-docstring
    if tensor.get_shape().ndims < 2:
      row_maxes = tensor
    else:
      row_maxes = tf.reduce_max(
          tensor, axis=tf.range(1, tensor.get_shape().ndims))
    batch_max = tf.math.unsorted_segment_max(
        row_maxes, key_uniques.idx, tf.size(input=key_uniques.y))

    # TODO(b/112309021): Remove workaround once tf.reduce_max of a tensor of all
    # NaNs produces -inf.
    return _inf_to_nan(batch_max, dtype)

  unique = tf.unique_with_counts(key, out_idx=tf.int64)
  x_batch_maxes = get_batch_max_per_key(x, unique, output_dtype)
  x_batch_minus_mins = get_batch_max_per_key(-x, unique, output_dtype)

  x_batch_minus_mins, x_batch_maxes = assert_same_shape(x_batch_minus_mins,
                                                        x_batch_maxes)

  return (unique.y, x_batch_minus_mins, x_batch_maxes)
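Finally, a hedged standalone sketch of the per-key pattern above: tf.unique_with_counts assigns every key an index, and tf.math.unsorted_segment_max reduces x (and -x for the -min) per key.

import tensorflow as tf

x = tf.constant([1.0, -4.0, 3.0, 2.0])
key = tf.constant([b"a", b"b", b"a", b"b"])

unique = tf.unique_with_counts(key, out_idx=tf.int64)
max_per_key = tf.math.unsorted_segment_max(x, unique.idx, tf.size(input=unique.y))
minus_min_per_key = tf.math.unsorted_segment_max(-x, unique.idx, tf.size(input=unique.y))

print(unique.y.numpy())           # [b'a' b'b']
print(max_per_key.numpy())        # [3. 2.]
print(minus_min_per_key.numpy())  # [-1.  4.]  i.e. -min per key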