Python tensorflow.python.framework.dtypes.uint16() Examples

The following are 20 code examples of tensorflow.python.framework.dtypes.uint16(), collected from open-source projects. The source file, originating project, and license are noted above each example.
Example #1
Source File: backend.py    From lambda-packs with MIT License
def _convert_string_dtype(dtype):
  if dtype == 'float16':
    return dtypes_module.float16
  elif dtype == 'float32':
    return dtypes_module.float32
  elif dtype == 'float64':
    return dtypes_module.float64
  elif dtype == 'int16':
    return dtypes_module.int16
  elif dtype == 'int32':
    return dtypes_module.int32
  elif dtype == 'int64':
    return dtypes_module.int64
  elif dtype == 'uint8':
    return dtypes_module.uint8
  elif dtype == 'uint16':
    return dtypes_module.uint16
  else:
    raise ValueError('Unsupported dtype:', dtype) 
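A minimal usage sketch for the helper above (not part of the original source; it assumes `dtypes_module` is `tensorflow.python.framework.dtypes`, as in the surrounding file):

from tensorflow.python.framework import dtypes as dtypes_module

assert _convert_string_dtype('uint16') == dtypes_module.uint16
assert _convert_string_dtype('float32') == dtypes_module.float32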
Example #2
Source File: image_ops_test.py    From deep_image_model with Apache License 2.0
def testConvertBetweenInt16AndInt8(self):
    with self.test_session(use_gpu=True):
      # uint8, uint16
      self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8,
                    [0, 255])
      self._convert([0, 255], dtypes.uint8, dtypes.uint16,
                    [0, 255 * 256])
      # int8, uint16
      self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8,
                    [0, 127])
      self._convert([0, 127], dtypes.int8, dtypes.uint16,
                    [0, 127 * 2 * 256])
      # int16, uint16
      self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16,
                    [0, 255 * 128])
      self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16,
                    [0, 255 * 256]) 
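The `_convert` helper is defined elsewhere in this test class; the scaling behavior it checks can be sketched directly with the public `tf.image.convert_image_dtype` API (an illustration under TF 1.x session semantics, not part of the original test):

import tensorflow as tf

with tf.Session() as sess:
  img8 = tf.constant([0, 255], dtype=tf.uint8)
  # Converting uint8 -> uint16 rescales by (65535 + 1) // (255 + 1) == 256.
  img16 = tf.image.convert_image_dtype(img8, tf.uint16)
  print(sess.run(img16))  # [0, 255 * 256] == [0, 65280]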
Example #3
Source File: image_ops_test.py    From deep_image_model with Apache License 2.0
def testSyntheticTwoChannelUint16(self):
    with self.test_session(use_gpu=True) as sess:
      # Strip the b channel from an rgb image to get a two-channel image.
      gray_alpha = _SimpleColorRamp()[:, :, 0:2]
      image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
      png0 = image_ops.encode_png(image0, compression=7)
      image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
      png0, image0, image1 = sess.run([png0, image0, image1])
      self.assertEqual(2, image0.shape[-1])
      self.assertAllEqual(image0, image1) 
Example #4
Source File: tensor_util.py    From keras-lambda with MIT License
def ExtractBitsFromFloat16(x):
  return np.asscalar(np.asarray(x, dtype=np.float16).view(np.uint16)) 
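`np.asscalar` was deprecated and later removed from NumPy; a roughly equivalent standalone sketch of the same bit reinterpretation (an illustration, not TensorFlow source) uses `.item()` instead:

import numpy as np

def extract_bits_from_float16(x):
  # View the two bytes of a float16 value as an unsigned 16-bit integer.
  return np.asarray(x, dtype=np.float16).view(np.uint16).item()

print(extract_bits_from_float16(1.0))  # 15360 == 0x3C00, the float16 encoding of 1.0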
Example #5
Source File: tensor_util.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def ExtractBitsFromFloat16(x):
  return np.asscalar(np.asarray(x, dtype=np.float16).view(np.uint16)) 
Example #6
Source File: util.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _is_known_unsigned_by_dtype(dt):
  """Helper returning True if dtype is known to be unsigned."""
  return {
      dtypes.bool: True,
      dtypes.uint8: True,
      dtypes.uint16: True,
  }.get(dt.base_dtype, False) 
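A minimal usage sketch (assuming `dtypes` is imported from `tensorflow.python.framework`, as in the surrounding file):

from tensorflow.python.framework import dtypes

print(_is_known_unsigned_by_dtype(dtypes.uint16))  # True
print(_is_known_unsigned_by_dtype(dtypes.int16))   # False (not listed as known-unsigned)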
Example #7
Source File: backend.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _convert_string_dtype(dtype):
  """Get the type from a string.

  Arguments:
      dtype: A string representation of a type.

  Returns:
      The type requested.

  Raises:
      ValueError: if `dtype` is not supported.
  """
  if dtype == 'float16':
    return dtypes_module.float16
  elif dtype == 'float32':
    return dtypes_module.float32
  elif dtype == 'float64':
    return dtypes_module.float64
  elif dtype == 'int16':
    return dtypes_module.int16
  elif dtype == 'int32':
    return dtypes_module.int32
  elif dtype == 'int64':
    return dtypes_module.int64
  elif dtype == 'uint8':
    return dtypes_module.uint8
  elif dtype == 'uint16':
    return dtypes_module.uint16
  else:
    raise ValueError('Unsupported dtype:', dtype) 
Example #8
Source File: image_ops_test.py    From deep_image_model with Apache License 2.0
def testSyntheticUint16(self):
    with self.test_session(use_gpu=True) as sess:
      # Encode it, then decode it
      image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)
      png0 = image_ops.encode_png(image0, compression=7)
      image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
      png0, image0, image1 = sess.run([png0, image0, image1])

      # PNG is lossless
      self.assertAllEqual(image0, image1)

      # Smooth ramps compress well, but not too well
      self.assertGreaterEqual(len(png0), 800)
      self.assertLessEqual(len(png0), 1500) 
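The same 16-bit PNG round trip can be sketched outside the test harness; the ramp below is a small stand-in for `_SimpleColorRamp()`, and the session usage assumes a TF 1.x runtime:

import numpy as np
import tensorflow as tf

with tf.Session() as sess:
  # Synthetic uint16 RGB data standing in for _SimpleColorRamp().
  ramp = np.arange(4 * 4 * 3, dtype=np.uint16).reshape(4, 4, 3) * 1000
  image0 = tf.constant(ramp, dtype=tf.uint16)
  png = tf.image.encode_png(image0, compression=7)
  image1 = tf.image.decode_png(png, dtype=tf.uint16)
  out0, out1 = sess.run([image0, image1])
  assert (out0 == out1).all()  # PNG is lossless, including for 16-bit data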
Example #9
Source File: tensor_util.py    From auto-alt-text-lambda-api with MIT License
def ExtractBitsFromFloat16(x):
  return np.asscalar(np.asarray(x, dtype=np.float16).view(np.uint16)) 
Example #10
Source File: tensor_util.py    From lambda-packs with MIT License
def ExtractBitsFromFloat16(x):
  return np.asscalar(np.asarray(x, dtype=np.float16).view(np.uint16)) 
Example #11
Source File: math_ops.py    From deep_image_model with Apache License 2.0
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the first
  element of the input is identical to the first element of the output:
  ```prettyprint
  tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed
  instead:
  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction:
  ```prettyprint
  tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
  ```
  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:
  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        x, axis, exclusive=exclusive, reverse=reverse, name=name) 
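As the Args list notes, `uint16` is among the accepted input types. A short usage sketch (assuming a TF 1.x session):

import tensorflow as tf

with tf.Session() as sess:
  x = tf.constant([1, 2, 3, 4], dtype=tf.uint16)
  print(sess.run(tf.cumprod(x)))                  # [ 1  2  6 24]
  print(sess.run(tf.cumprod(x, exclusive=True)))  # [1 1 2 6]
  print(sess.run(tf.cumprod(x, reverse=True)))    # [24 24 12  4]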
Example #12
Source File: math_ops.py    From deep_image_model with Apache License 2.0
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:
  ```prettyprint
  tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
  instead:
  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
  ```

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction:
  ```prettyprint
  tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
  ```
  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:
  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        x, axis, exclusive=exclusive, reverse=reverse, name=name) 
Example #13
Source File: math_ops.py    From lambda-packs with MIT License
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:
  ```prettyprint
  tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
  instead:
  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
  ```

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction:
  ```prettyprint
  tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
  ```
  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:
  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        x, axis, exclusive=exclusive, reverse=reverse, name=name) 
Example #14
Source File: math_ops.py    From auto-alt-text-lambda-api with MIT License
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the first
  element of the input is identical to the first element of the output:
  ```prettyprint
  tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed
  instead:
  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction:
  ```prettyprint
  tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
  ```
  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:
  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        x, axis, exclusive=exclusive, reverse=reverse, name=name) 
Example #15
Source File: math_ops.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:

  ```python
  tf.cumsum([a, b, c])  # [a, a + b, a + b + c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
  instead:

  ```python
  tf.cumsum([a, b, c], exclusive=True)  # [0, a, a + b]
  ```

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction:

  ```python
  tf.cumsum([a, b, c], reverse=True)  # [a + b + c, b + c, c]
  ```

  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.cumsum([a, b, c], exclusive=True, reverse=True)  # [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        x, axis, exclusive=exclusive, reverse=reverse, name=name) 
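A matching usage sketch for `cumsum` with a `uint16` input (again assuming a TF 1.x session):

import tensorflow as tf

with tf.Session() as sess:
  x = tf.constant([1, 2, 3, 4], dtype=tf.uint16)
  print(sess.run(tf.cumsum(x)))                                # [ 1  3  6 10]
  print(sess.run(tf.cumsum(x, exclusive=True)))                # [0 1 3 6]
  print(sess.run(tf.cumsum(x, exclusive=True, reverse=True)))  # [9 7 4 0]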
Example #16
Source File: math_ops.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the
  first element of the input is identical to the first element of the output:

  ```python
  tf.cumprod([a, b, c])  # [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed
  instead:

  ```python
  tf.cumprod([a, b, c], exclusive=True)  # [1, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction:

  ```python
  tf.cumprod([a, b, c], reverse=True)  # [a * b * c, b * c, c]
  ```

  This is more efficient than using separate `tf.reverse` ops.
  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.cumprod([a, b, c], exclusive=True, reverse=True)  # [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        x, axis, exclusive=exclusive, reverse=reverse, name=name) 
Example #17
Source File: math_ops.py    From auto-alt-text-lambda-api with MIT License
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:
  ```prettyprint
  tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
  instead:
  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
  ```

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction:
  ```prettyprint
  tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
  ```
  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:
  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        x, axis, exclusive=exclusive, reverse=reverse, name=name) 
Example #18
Source File: math_ops.py    From keras-lambda with MIT License
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:
  ```prettyprint
  tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
  instead:
  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
  ```

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction:
  ```prettyprint
  tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
  ```
  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:
  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        x, axis, exclusive=exclusive, reverse=reverse, name=name) 
Example #19
Source File: math_ops.py    From keras-lambda with MIT License
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the first
  element of the input is identical to the first element of the output:
  ```prettyprint
  tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed
  instead:
  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction:
  ```prettyprint
  tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
  ```
  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:
  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        x, axis, exclusive=exclusive, reverse=reverse, name=name) 
Example #20
Source File: math_ops.py    From lambda-packs with MIT License
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the first
  element of the input is identical to the first element of the output:
  ```prettyprint
  tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed
  instead:
  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction:
  ```prettyprint
  tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
  ```
  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:
  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        x, axis, exclusive=exclusive, reverse=reverse, name=name)