Python tensorflow.int8 Examples
The following are code examples of tensorflow.int8.
You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
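As a quick orientation before the examples: tf.int8 is not a function but a DType object describing a signed 8-bit integer, and it can be passed anywhere TensorFlow accepts a dtype. The following minimal sketch is not taken from any project below and assumes TensorFlow 2.x eager execution:

import numpy as np
import tensorflow as tf

# tf.int8 describes a signed 8-bit integer with the usual two's-complement range.
assert tf.int8.min == -128 and tf.int8.max == 127

# It is interchangeable with np.int8 via tf.as_dtype.
assert tf.as_dtype(np.int8) is tf.int8

# And it is usable anywhere a dtype argument is accepted.
x = tf.constant([1, 2, 3], dtype=tf.int8)
z = tf.zeros([2, 3], dtype=tf.int8)
print(x.dtype, z.dtype)  # <dtype: 'int8'> <dtype: 'int8'>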

Example #1
Source File: test_node.py From onnx-tensorflow with Apache License 2.0

def test_dequantize_linear(self):
    node_def = helper.make_node("DequantizeLinear",
                                ["x", "x_scale", "x_zero_point"], ["y"])
    for x, x_zero_point in [
        [self._get_rnd_int(-128, 127, [2, 6], np.int8),
         self._get_rnd_int(-128, 127, dtype=np.int8)],
        [self._get_rnd_int(0, 255, [2, 6], np.uint8),
         self._get_rnd_int(0, 255, dtype=np.uint8)],
        [self._get_rnd_int(-512, 512, [2, 6]), np.int32(0)],
    ]:
        x_scale = self._get_rnd_float32(-10., 10)
        y = np.subtract(np.float32(x), np.float32(x_zero_point))
        y = np.multiply(y, x_scale)
        output = run_node(node_def, [x, x_scale, x_zero_point])
        np.testing.assert_almost_equal(output["y"], y)
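For reference, DequantizeLinear computes y = (x - x_zero_point) * x_scale elementwise, exactly as the NumPy expectation above does. For instance, an int8 value x = -120 with x_zero_point = -128 and x_scale = 0.5 dequantizes to (-120 - (-128)) * 0.5 = 4.0.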
Example #2
Source File: test_node.py From onnx-tensorflow with Apache License 2.0

def test_max_pool_2d_dilations_ceil_pads_int8(self):
    if legacy_opset_pre_ver(12):
        raise unittest.SkipTest(
            "ONNX version {} does not support int8 input type.".format(
                defs.onnx_opset_version()))
    kernel_shape = [3, 3]
    strides = [2, 2]
    dilations = [3, 3]
    pads = [1, 1, 2, 2]
    ceil_mode = 1
    input_shape = [10, 3, 23, 23]
    self._test_pooling(input_shape=input_shape,
                       kernel_shape=kernel_shape,
                       strides=strides,
                       dilations=dilations,
                       pads=pads,
                       ceil_mode=ceil_mode,
                       input_dtype=np.int8)
Example #3
Source File: test_node.py From onnx-tensorflow with Apache License 2.0

def test_quantize_linear(self):
    node_def = helper.make_node("QuantizeLinear",
                                ["x", "y_scale", "y_zero_point"], ["y"])
    for x in [
        self._get_rnd_float32(-512., 512., [2, 6]),
        self._get_rnd_int(-512, 512, [2, 6])
    ]:
        y_scale = self._get_rnd_float32(-10., 10.)
        for y_zero_point in [
            self._get_rnd_int(-128, 127, dtype=np.int8),
            self._get_rnd_int(0, 255, dtype=np.uint8)
        ]:
            y = np.divide(x, y_scale)
            y = np.round(y)
            y = np.add(y, y_zero_point)
            if y_zero_point.dtype.type is np.int8:
                y = np.clip(y, -128, 127).astype(np.int8)
            else:
                y = np.clip(y, 0, 255).astype(np.uint8)
            output = run_node(node_def, [x, y_scale, y_zero_point])
            np.testing.assert_almost_equal(output["y"], y)
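QuantizeLinear is the inverse mapping: y = round(x / y_scale) + y_zero_point, clipped to the target range ([-128, 127] for int8, [0, 255] for uint8), which is what the NumPy reference above reproduces step by step.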
Example #4
Source File: constant_op_test.py From deep_image_model with Apache License 2.0

def testDtype(self):
    with self.test_session():
        d = tf.fill([2, 3], 12., name="fill")
        self.assertEqual(d.get_shape(), [2, 3])
        # Test default type for both constant size and dynamic size
        z = tf.zeros([2, 3])
        self.assertEqual(z.dtype, tf.float32)
        self.assertEqual([2, 3], z.get_shape())
        self.assertAllEqual(z.eval(), np.zeros([2, 3]))
        z = tf.zeros(tf.shape(d))
        self.assertEqual(z.dtype, tf.float32)
        self.assertEqual([2, 3], z.get_shape())
        self.assertAllEqual(z.eval(), np.zeros([2, 3]))
        # Test explicit type control
        for dtype in [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16,
                      tf.int8, tf.complex64, tf.complex128, tf.int64, tf.bool]:
            z = tf.zeros([2, 3], dtype=dtype)
            self.assertEqual(z.dtype, dtype)
            self.assertEqual([2, 3], z.get_shape())
            self.assertAllEqual(z.eval(), np.zeros([2, 3]))
            z = tf.zeros(tf.shape(d), dtype=dtype)
            self.assertEqual(z.dtype, dtype)
            self.assertEqual([2, 3], z.get_shape())
            self.assertAllEqual(z.eval(), np.zeros([2, 3]))
Example #5
Source File: constant_op_test.py From deep_image_model with Apache License 2.0

def testDtype(self):
    with self.test_session():
        d = tf.fill([2, 3], 12., name="fill")
        self.assertEqual(d.get_shape(), [2, 3])
        # Test default type for both constant size and dynamic size
        z = tf.ones([2, 3])
        self.assertEqual(z.dtype, tf.float32)
        self.assertEqual([2, 3], z.get_shape())
        self.assertAllEqual(z.eval(), np.ones([2, 3]))
        z = tf.ones(tf.shape(d))
        self.assertEqual(z.dtype, tf.float32)
        self.assertEqual([2, 3], z.get_shape())
        self.assertAllEqual(z.eval(), np.ones([2, 3]))
        # Test explicit type control
        for dtype in (tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16,
                      tf.int8, tf.complex64, tf.complex128, tf.int64, tf.bool):
            z = tf.ones([2, 3], dtype=dtype)
            self.assertEqual(z.dtype, dtype)
            self.assertEqual([2, 3], z.get_shape())
            self.assertAllEqual(z.eval(), np.ones([2, 3]))
            z = tf.ones(tf.shape(d), dtype=dtype)
            self.assertEqual(z.dtype, dtype)
            self.assertEqual([2, 3], z.get_shape())
            self.assertAllEqual(z.eval(), np.ones([2, 3]))
Example #6
Source File: constant_op_test.py From deep_image_model with Apache License 2.0

def testOnesLike(self):
    for dtype in [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16,
                  tf.int8, tf.complex64, tf.complex128, tf.int64]:
        numpy_dtype = dtype.as_numpy_dtype
        with self.test_session():
            # Creates a tensor of non-zero values with shape 2 x 3.
            d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
            # Constructs a tensor of ones of the same dimensions and type as "d".
            z_var = tf.ones_like(d)
            # Test that the type is correct
            self.assertEqual(z_var.dtype, dtype)
            z_value = z_var.eval()
            # Test that the value is correct
            self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
            self.assertEqual([2, 3], z_var.get_shape())
Example #7
Source File: as_string_op_test.py From deep_image_model with Apache License 2.0

def testLargeInt(self):
    # Cannot use values outside -128..127 for test, because we're also
    # testing int8
    s = lambda strs: [x.decode("ascii") for x in strs]
    with self.test_session():
        input_ = tf.placeholder(tf.int32)
        int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max]
        output = tf.as_string(input_)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

        input_ = tf.placeholder(tf.int64)
        int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
        output = tf.as_string(input_)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])
Example #8
Source File: tensor_util_test.py From deep_image_model with Apache License 2.0

def testIntTypes(self):
    for dtype, nptype in [(tf.int32, np.int32),
                          (tf.uint8, np.uint8),
                          (tf.uint16, np.uint16),
                          (tf.int16, np.int16),
                          (tf.int8, np.int8)]:
        # Test with array.
        t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
        self.assertEquals(dtype, t.dtype)
        self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
        a = tensor_util.MakeNdarray(t)
        self.assertEquals(nptype, a.dtype)
        self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
        # Test with ndarray.
        t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
        self.assertEquals(dtype, t.dtype)
        self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
        a = tensor_util.MakeNdarray(t)
        self.assertEquals(nptype, a.dtype)
        self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
Example #9
Source File: dtypes_test.py From deep_image_model with Apache License 2.0

def testNumpyConversion(self):
    self.assertIs(tf.float32, tf.as_dtype(np.float32))
    self.assertIs(tf.float64, tf.as_dtype(np.float64))
    self.assertIs(tf.int32, tf.as_dtype(np.int32))
    self.assertIs(tf.int64, tf.as_dtype(np.int64))
    self.assertIs(tf.uint8, tf.as_dtype(np.uint8))
    self.assertIs(tf.uint16, tf.as_dtype(np.uint16))
    self.assertIs(tf.int16, tf.as_dtype(np.int16))
    self.assertIs(tf.int8, tf.as_dtype(np.int8))
    self.assertIs(tf.complex64, tf.as_dtype(np.complex64))
    self.assertIs(tf.complex128, tf.as_dtype(np.complex128))
    self.assertIs(tf.string, tf.as_dtype(np.object))
    self.assertIs(tf.string, tf.as_dtype(np.array(["foo", "bar"]).dtype))
    self.assertIs(tf.bool, tf.as_dtype(np.bool))
    with self.assertRaises(TypeError):
        tf.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)]))
Example #10
Source File: recommender.py From openrec with Apache License 2.0

def _input(self, dtype='float32', shape=None, name=None):
    """Define an input for the recommender.

    Parameters
    ----------
    dtype: str
        Data type: "float16", "float32", "float64", "int8", "int16",
        "int32", "int64", "bool", or "string".
    shape: list or tuple
        Input shape.
    name: str
        Name of the input.

    Returns
    -------
    Tensorflow placeholder
        Defined tensorflow placeholder.
    """
    if dtype not in self._str_to_dtype:
        raise ValueError("Unsupported dtype: %s" % dtype)
    else:
        return tf.placeholder(self._str_to_dtype[dtype], shape=shape, name=name)
Example #11
Source File: tfrecord_test.py From nobrainer with Apache License 2.0

def test__dtype_to_bytes():
    np_tf_dt = [
        (np.uint8, tf.uint8, b"uint8"),
        (np.uint16, tf.uint16, b"uint16"),
        (np.uint32, tf.uint32, b"uint32"),
        (np.uint64, tf.uint64, b"uint64"),
        (np.int8, tf.int8, b"int8"),
        (np.int16, tf.int16, b"int16"),
        (np.int32, tf.int32, b"int32"),
        (np.int64, tf.int64, b"int64"),
        (np.float16, tf.float16, b"float16"),
        (np.float32, tf.float32, b"float32"),
        (np.float64, tf.float64, b"float64"),
    ]
    for npd, tfd, dt in np_tf_dt:
        npd = np.dtype(npd)
        assert tfrecord._dtype_to_bytes(npd) == dt
        assert tfrecord._dtype_to_bytes(tfd) == dt

    assert tfrecord._dtype_to_bytes("float32") == b"float32"
    assert tfrecord._dtype_to_bytes("foobar") == b"foobar"
Example #12
Source File: tensorflow_backend.py From KerasNeuralFingerprint with MIT License

def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
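A long if/elif chain like this is easy to mistype; a table-driven lookup is a common alternative. A minimal sketch under that design (not part of the original backend):

import tensorflow as tf

_STR_TO_DTYPE = {
    'float16': tf.float16, 'float32': tf.float32, 'float64': tf.float64,
    'int8': tf.int8, 'int16': tf.int16, 'int32': tf.int32, 'int64': tf.int64,
    'uint8': tf.uint8, 'uint16': tf.uint16,
}

def convert_string_dtype(dtype):
    # One dict lookup replaces the whole branch chain.
    try:
        return _STR_TO_DTYPE[dtype]
    except KeyError:
        raise ValueError('Unsupported dtype:', dtype)

# convert_string_dtype('uint8') is tf.uint8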
Example #13
Source File: tensorflow_util.py From MedicalDataAugmentationTool with GNU General Public License v3.0

def reduce_mean_support_empty(input, keepdims=False):
    return tf.cond(tf.size(input) > 0,
                   lambda: tf.reduce_mean(input, keepdims=keepdims),
                   lambda: tf.zeros_like(input))

# def bit_tensor_list(input):
#     assert input.dtype in [tf.uint8, tf.uint16, tf.uint32, tf.uint64], 'unsupported data type, must be uint*'
#     num_bits = 0
#     if input.dtype == tf.uint8:
#         num_bits = 8
#     elif input.dtype == tf.uint16:
#         num_bits = 16
#     elif input.dtype == tf.uint32:
#         num_bits = 32
#     elif input.dtype == tf.uint64:
#         num_bits = 64
#     bit_tensors = []
#     for i in range(num_bits):
#         current_bit = 1 << i
#         current_bit_tensor = tf.bitwise.bitwise_and(input, current_bit) == 1
#         bit_tensors.append(current_bit_tensor)
#     print(bit_tensors)
#     return bit_tensors
Example #14
Source File: tf_utils.py From deepsignal with GNU General Public License v3.0

def parse_a_line_b(value, base_num, signal_num):
    vec = tf.decode_raw(value, tf.int8)
    bases = tf.cast(
        tf.reshape(tf.strided_slice(vec, [0], [base_num]), [base_num]),
        dtype=tf.int32)
    means = tf.bitcast(
        tf.reshape(tf.strided_slice(vec, [base_num], [base_num + base_num * 4]),
                   [base_num, 4]),
        type=tf.float32)
    stds = tf.bitcast(
        tf.reshape(tf.strided_slice(vec, [base_num * 5], [base_num * 5 + base_num * 4]),
                   [base_num, 4]),
        type=tf.float32)
    sanum = tf.cast(
        tf.bitcast(
            tf.reshape(tf.strided_slice(vec, [base_num * 9], [base_num * 9 + base_num * 2]),
                       [base_num, 2]),
            type=tf.int16),
        dtype=tf.int32)
    signals = tf.bitcast(
        tf.reshape(tf.strided_slice(vec, [base_num * 11], [base_num * 11 + 4 * signal_num]),
                   [signal_num, 4]),
        type=tf.float32)
    labels = tf.cast(
        tf.reshape(tf.strided_slice(vec, [base_num * 11 + signal_num * 4],
                                    [base_num * 11 + signal_num * 4 + 1]),
                   [1]),
        dtype=tf.int32)
    return bases, means, stds, sanum, signals, labels
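Reading the slice offsets back, the packed record this parser assumes appears to be laid out as: base_num int8 base codes, then base_num float32 means (4 bytes each), base_num float32 standard deviations, base_num int16 counts (2 bytes each), signal_num float32 signal values, and a single trailing int8 label. Each field is recovered by slicing the raw int8 byte vector with tf.strided_slice and bitcasting groups of bytes back to the wider type.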
Example #15
Source File: test_forward.py From incubator-tvm with Apache License 2.0

def test_tensor_array_write_read():
    def run(dtype_str, infer_shape, element_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
            in_data = [np_data, np_data]
            t1 = tf.constant(np_data, dtype=dtype)
            t2 = tf.constant(np_data, dtype=dtype)
            ta1 = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape,
                                 element_shape=element_shape)
            ta2 = ta1.write(0, t1)
            ta3 = ta2.write(1, t2)
            out = ta3.read(0)
            g = tf.get_default_graph()
            compare_tf_with_tvm([], [], 'TensorArrayReadV3:0', mode='vm')

    for dtype in ["float32", "int8"]:
        run(dtype, False, None)
        run(dtype, False, tf.TensorShape([None, 2]))
        run(dtype, True, None)
Example #16
Source File: test_forward.py From incubator-tvm with Apache License 2.0

def test_tensor_array_scatter():
    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            if infer_shape:
                element_shape = tf.TensorShape([tf.Dimension(None)])
            else:
                element_shape = None
            t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str),
                            dtype=dtype)
            indices = tf.constant([2, 1, 0])
            ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape,
                                 element_shape=element_shape)
            ta2 = ta1.scatter(indices, t)
            out0 = ta2.read(0)
            out1 = ta2.read(1)
            out2 = ta2.read(2)
            g = tf.get_default_graph()
            compare_tf_with_tvm([], [], ['TensorArrayReadV3:0'], mode='vm')
            compare_tf_with_tvm([], [], ['TensorArrayReadV3_1:0'], mode='vm')
            compare_tf_with_tvm([], [], ['TensorArrayReadV3_2:0'], mode='vm')

    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True)
Example #17
Source File: test_forward.py From incubator-tvm with Apache License 2.0

def test_tensor_array_split():
    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            t = tf.constant(
                np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0],
                          [8.0]]).astype(dtype_str),
                dtype=dtype)
            split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
            ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
            ta2 = ta1.split(t, split_length)
            out0 = ta2.read(0)
            out1 = ta2.read(1)
            out2 = ta2.read(2)
            out3 = ta2.read(3)
            g = tf.get_default_graph()
            compare_tf_with_tvm([], [], ['TensorArrayReadV3:0'], mode='debug')
            compare_tf_with_tvm([], [], ['TensorArrayReadV3_1:0'], mode='debug')
            compare_tf_with_tvm([], [], ['TensorArrayReadV3_2:0'], mode='debug')
            compare_tf_with_tvm([], [], ['TensorArrayReadV3_3:0'], mode='debug')

    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True)
Example #18
Source File: dumpTFWts.py From iAI with MIT License

def getTRTType(tensor):
    if tf.as_dtype(tensor.dtype) == tf.float32:
        return 0
    if tf.as_dtype(tensor.dtype) == tf.float16:
        return 1
    if tf.as_dtype(tensor.dtype) == tf.int8:
        return 2
    if tf.as_dtype(tensor.dtype) == tf.int32:
        return 3
    print("Tensor data type of %s is not supported in TensorRT" % (tensor.dtype))
    sys.exit()
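The magic numbers returned here appear to track TensorRT's DataType enum (kFLOAT = 0, kHALF = 1, kINT8 = 2, kINT32 = 3), so the dumped weights can be tagged with the precision TensorRT should assume when reading them back.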
Example #19
Source File: executor.py From EasyRL with Apache License 2.0

def reshape_flattened_obs(self, flattened_obs):
    """Recover the nested structure of flattened_obs.

    Arguments:
        flattened_obs (obj): flattened array.

    Returns:
        the original nested struct of input obs.
    """
    tf2np_dtype = {
        tf.float32: np.float32,
        tf.float64: np.float64,
        tf.bool: np.bool,
        tf.int32: np.int32,
        tf.int64: np.int64,
        tf.int8: np.int8
    }
    if isinstance(self.ob_ph_spec, list):
        restore_obs = []
        cur_idx = 0
        for ph_dtype, ph_shape in self.ob_ph_spec:
            np_type = tf2np_dtype.get(ph_dtype, np.float32)
            restore_obs.append(
                np.asarray(flattened_obs[:, cur_idx:cur_idx +
                                         ph_shape[1]]).astype(np_type))
            cur_idx += ph_shape[1]
    elif isinstance(self.ob_ph_spec, OrderedDict):
        restore_obs = {}
        cur_idx = 0
        for name, ph_tuple in self.ob_ph_spec.items():
            ph_dtype, ph_shape = ph_tuple
            np_type = tf2np_dtype.get(ph_dtype, np.float32)
            restore_obs[name] = np.asarray(
                flattened_obs[:, cur_idx:cur_idx + ph_shape[1]]).astype(np_type)
            cur_idx += ph_shape[1]
    else:
        restore_obs = flattened_obs
    return restore_obs
Example #20
Source File: h3_encoders.py From ludwig with Apache License 2.0

def __call__(self, input_vector, regularizer, dropout_rate, is_training=True):
    """
    :param input_vector: The input vector fed into the encoder.
        Shape: [batch x 19], type tf.int8
    :type input_vector: Tensor
    :param regularizer: The regularizer to use for the weights of the encoder.
    :type regularizer:
    :param dropout_rate: Tensor (tf.float) of the probability of dropout
    :type dropout_rate: Tensor
    :param is_training: Tensor (tf.bool) specifying if in training mode
        (important for dropout)
    :type is_training: Tensor
    """
    # ================ Embeddings ================
    embedded_h3, _ = self.h3_embed(
        input_vector,
        regularizer,
        dropout_rate,
        is_training=is_training
    )

    # ================ RNN ================
    hidden, hidden_size = self.recurrent_stack(
        embedded_h3,
        regularizer=regularizer,
        dropout_rate=dropout_rate,
        is_training=is_training
    )

    return hidden, hidden_size
Example #21
Source File: tensor.py From dgl with Apache License 2.0

def data_type_dict():
    return {'float16': tf.float16,
            'float32': tf.float32,
            'float64': tf.float64,
            'uint8': tf.uint8,
            'int8': tf.int8,
            'int16': tf.int16,
            'int32': tf.int32,
            'int64': tf.int64,
            'bool': tf.bool}
Example #22
Source File: cumsum.py From onnx-tensorflow with Apache License 2.0

def args_check(cls, node, **kwargs):
    supported_dtype = [
        tf.bfloat16, tf.half, tf.float32, tf.float64, tf.uint8, tf.uint16,
        tf.int8, tf.int16, tf.int32, tf.int64, tf.complex64, tf.complex128
    ]
    x = kwargs["tensor_dict"][node.inputs[0]]
    if x.dtype not in supported_dtype:
        exception.OP_UNSUPPORTED_EXCEPT(
            "CumSum input in " + str(x.dtype) + " which", "Tensorflow")
Example #23
Source File: equal.py From onnx-tensorflow with Apache License 2.0

def args_check(cls, node, **kwargs):
    supported_dtype = [
        tf.bfloat16, tf.half, tf.float32, tf.float64, tf.uint8, tf.int8,
        tf.int16, tf.int32, tf.int64, tf.complex64, tf.quint8, tf.qint8,
        tf.qint32, tf.string, tf.bool, tf.complex128
    ]
    x = kwargs["tensor_dict"][node.inputs[0]]
    if x.dtype not in supported_dtype:
        exception.OP_UNSUPPORTED_EXCEPT(
            "Equal inputs in " + str(x.dtype) + " which", "Tensorflow")
Example #24
Source File: mod.py From onnx-tensorflow with Apache License 2.0

def args_check(cls, node, **kwargs):
    unsupported_dtype = [
        tf.int8, tf.int16, tf.uint8, tf.uint16, tf.uint32, tf.uint64
    ]
    x = kwargs["tensor_dict"][node.inputs[0]]
    y = kwargs["tensor_dict"][node.inputs[1]]
    if x.dtype in unsupported_dtype:
        exception.OP_UNSUPPORTED_EXCEPT("Mod Dividend in " + str(x.dtype),
                                        "Tensorflow")
    if y.dtype in unsupported_dtype:
        exception.OP_UNSUPPORTED_EXCEPT("Mod Divisor in " + str(y.dtype),
                                        "Tensorflow")
Example #25
Source File: expand.py From onnx-tensorflow with Apache License 2.0

def version_8(cls, node, **kwargs):
    tensor_dict = kwargs["tensor_dict"]
    x, shape = tensor_dict[node.inputs[0]], tensor_dict[node.inputs[1]]
    # tf.math.multiply does not support bool, therefore use int8
    if x.dtype is tf.bool:
        ones = tf.ones(shape, dtype=tf.int8)
        r = tf.cast(x, tf.int8) * ones
        return [tf.cast(r, tf.bool)]
    else:
        ones = tf.ones(shape, dtype=x.dtype)
        return [x * ones]
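The bool branch above is a self-contained trick worth isolating: Expand broadcasts by multiplying with a ones tensor, but multiply has no bool kernel, so the tensor makes a round trip through int8. A minimal standalone sketch of the same workaround, assuming TensorFlow 2.x eager execution:

import tensorflow as tf

x = tf.constant([[True], [False]])  # shape [2, 1], tf.bool
shape = [2, 3]                      # target broadcast shape

# Cast to int8, broadcast by multiplying with ones, then cast back to bool.
ones = tf.ones(shape, dtype=tf.int8)
expanded = tf.cast(tf.cast(x, tf.int8) * ones, tf.bool)
print(expanded.numpy())
# [[ True  True  True]
#  [False False False]]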
Example #26
Source File: clip.py From onnx-tensorflow with Apache License 2.0

def _common(cls, node, **kwargs):
    tensor_dict = kwargs["tensor_dict"]
    x = tensor_dict[node.inputs[0]]
    x_dtype = x.dtype

    if cls.SINCE_VERSION < 11:
        # min/max were required and passed as attributes
        clip_value_min = node.attrs.get("min", tf.reduce_min(x))
        clip_value_max = node.attrs.get("max", tf.reduce_max(x))
    else:
        # min/max are optional and passed as inputs
        clip_value_min = tensor_dict[node.inputs[1]] if len(
            node.inputs) > 1 and node.inputs[1] != "" else x_dtype.min
        clip_value_max = tensor_dict[node.inputs[2]] if len(
            node.inputs) > 2 and node.inputs[2] != "" else x_dtype.max

    # tf.clip_by_value doesn't support uint8, uint16, uint32, int8 and int16
    # dtype for x, therefore need to upcast it to tf.int32 or tf.int64
    if x_dtype in [tf.uint8, tf.uint16, tf.uint32, tf.int8, tf.int16]:
        cast_to = tf.int64 if x_dtype == tf.uint32 else tf.int32
        x = tf.cast(x, cast_to)
        clip_value_min = tf.cast(clip_value_min, cast_to)
        clip_value_max = tf.cast(clip_value_max, cast_to)
        y = tf.clip_by_value(x, clip_value_min, clip_value_max)
        y = tf.cast(y, x_dtype)
    else:
        y = tf.clip_by_value(x, clip_value_min, clip_value_max)

    return [y]
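The widen-clip-narrow pattern in the second branch is reusable whenever an op lacks a narrow-integer kernel. A minimal standalone sketch for int8, assuming TensorFlow 2.x (recent TensorFlow versions may clip int8 directly, in which case the detour is merely redundant):

import tensorflow as tf

x = tf.constant([-100, -5, 0, 5, 100], dtype=tf.int8)

# Widen to int32, clip there, then narrow back to the original dtype.
y = tf.cast(tf.clip_by_value(tf.cast(x, tf.int32), -10, 10), tf.int8)
print(y.numpy())  # [-10  -5   0   5  10]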
Example #27
Source File: test_tensorflow.py From training_results_v0.6 with Apache License 2.0

def test_horovod_allgather(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    with self.test_session(config=self.config) as session:
        dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
                  tf.int32, tf.int64, tf.float16, tf.float32,
                  tf.float64, tf.bool]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            tensor = tf.ones([17] * dim) * rank
            if dtype == tf.bool:
                tensor = tensor % 2
            tensor = tf.cast(tensor, dtype=dtype)
            gathered = hvd.allgather(tensor)

            gathered_tensor = session.run(gathered)
            self.assertEqual(list(gathered_tensor.shape),
                             [17 * size] + [17] * (dim - 1))

            for i in range(size):
                rank_tensor = tf.slice(gathered_tensor,
                                       [i * 17] + [0] * (dim - 1),
                                       [17] + [-1] * (dim - 1))
                self.assertEqual(list(rank_tensor.shape), [17] * dim)
                # tf.equal() does not support tf.uint16 as of TensorFlow 1.2,
                # so need to cast rank_tensor to tf.int32.
                if dtype != tf.bool:
                    value = i
                else:
                    value = i % 2
                self.assertTrue(
                    session.run(tf.reduce_all(
                        tf.equal(tf.cast(rank_tensor, tf.int32), value))),
                    "hvd.allgather produces incorrect gathered tensor")
Example #28
Source File: test_tensorflow.py From training_results_v0.6 with Apache License 2.0

def test_horovod_broadcast(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        return

    with self.test_session(config=self.config) as session:
        dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,
                  tf.int32, tf.int64, tf.float16, tf.float32,
                  tf.float64, tf.bool]
        dims = [1, 2, 3]
        root_ranks = list(range(size))
        for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
            tensor = tf.ones([17] * dim) * rank
            root_tensor = tf.ones([17] * dim) * root_rank
            if dtype == tf.bool:
                tensor = tensor % 2
                root_tensor = root_tensor % 2
            tensor = tf.cast(tensor, dtype=dtype)
            root_tensor = tf.cast(root_tensor, dtype=dtype)
            broadcasted_tensor = hvd.broadcast(tensor, root_rank)
            self.assertTrue(
                session.run(tf.reduce_all(tf.equal(
                    tf.cast(root_tensor, tf.int32),
                    tf.cast(broadcasted_tensor, tf.int32)))),
                "hvd.broadcast produces incorrect broadcasted tensor")