Python tensorflow.quint8 Examples

The following are 23 code examples of tensorflow.quint8, TensorFlow's quantized unsigned 8-bit dtype, drawn from open source projects. The source file and project are noted above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
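Before the examples, a minimal sketch of the dtype itself (TF 1.x API, as used throughout these examples): tf.quint8 is a DType object, not a callable, and it exposes a companion numpy scalar type used when feeding quantized data.

import tensorflow as tf

print(tf.quint8)                            # <dtype: 'quint8'>
print(tf.quint8.is_quantized)               # True
print(tf.as_dtype("quint8") is tf.quint8)   # True
np_type = tf.quint8.as_numpy_dtype          # numpy scalar type for feeding quint8 tensors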
Example #1
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def eightbitize_reshape_node(self, original_node):
    """Replaces a Reshape node with the eight bit equivalent sub-graph.

    The quantized sub-graph is added to the output graph in place; the
    function itself returns nothing.

    Args:
      original_node: Float node to be converted.
    """
    namespace_prefix = original_node.name + "_eightbit"
    quantized_reshape_name = namespace_prefix + "_quantized_reshape"
    reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
        namespace_prefix)
    shape_input_name = original_node.input[1]
    quantize_input_name, min_input_name, max_input_name = (
        self.eightbitize_input_to_node(namespace_prefix, original_node.input[0],
                                       reshape_dims_name, reduction_dims_name))
    quantized_reshape_node = create_node(
        "QuantizedReshape", quantized_reshape_name,
        [quantize_input_name, shape_input_name, min_input_name, max_input_name])
    set_attr_dtype(quantized_reshape_node, "T", tf.quint8)
    self.add_output_graph_node(quantized_reshape_node)
    self.add_dequantize_result_node(quantized_reshape_name, original_node.name) 
Example #2
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def add_dequantize_result_node(self, quantized_output_name,
                                 original_node_name, min_tensor_index=1):
    min_max_inputs = [
        "%s:%s" % (quantized_output_name, min_tensor_index),
        "%s:%s" % (quantized_output_name, (min_tensor_index + 1))]
    dequantize_name = original_node_name
    if self.should_merge_with_fake_quant_node():
      fake_quant_node = self.state.output_node_stack[-1][0]
      if original_node_name not in self.state.merged_with_fake_quant:
        min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
        self.state.merged_with_fake_quant[original_node_name] = True
      dequantize_name = fake_quant_node.name

    dequantize_node = create_node(
        "Dequantize", dequantize_name,
        [quantized_output_name, min_max_inputs[0], min_max_inputs[1]])
    set_attr_dtype(dequantize_node, "T", tf.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node) 
Example #3
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def eightbitize_mat_mul_node(self, original_node):
    """Replaces a MatMul node with the eight bit equivalent sub-graph."""
    quantized_mat_mul_name = original_node.name + "_eightbit_quantized_mat_mul"
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_mat_mul_node = create_node(
        "QuantizedMatMul", quantized_mat_mul_name,
        all_input_names)
    set_attr_dtype(quantized_mat_mul_node, "T1", tf.quint8)
    set_attr_dtype(quantized_mat_mul_node, "T2", tf.quint8)
    set_attr_dtype(quantized_mat_mul_node, "Toutput", tf.qint32)
    copy_attr(quantized_mat_mul_node, "transpose_a",
              original_node.attr["transpose_a"])
    copy_attr(quantized_mat_mul_node, "transpose_b",
              original_node.attr["transpose_b"])
    self.add_output_graph_node(quantized_mat_mul_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                      quantized_mat_mul_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name) 
Example #4
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def eightbitize_bias_add_node(self, original_node):
    """Replaces a BiasAdd node with the eight bit equivalent sub-graph."""
    quantized_bias_add_name = (original_node.name +
                               "_eightbit_quantized_bias_add")
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_bias_add_node = create_node(
        "QuantizedBiasAdd", quantized_bias_add_name,
        all_input_names)
    set_attr_dtype(quantized_bias_add_node, "T1", tf.quint8)
    set_attr_dtype(quantized_bias_add_node, "T2", tf.quint8)
    set_attr_dtype(quantized_bias_add_node, "out_type", tf.qint32)
    self.add_output_graph_node(quantized_bias_add_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                      quantized_bias_add_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name) 
Example #5
Source File: estimator.py    From WorkControl with Apache License 2.0
def inference(self, npimg, resize_to_default=True, upsample_size=1.0):
        if npimg is None:
            raise Exception('The image is not valid. Please check that your image exists.')

        if resize_to_default:
            upsample_size = [int(self.target_size[1] / 8 * upsample_size), int(self.target_size[0] / 8 * upsample_size)]
        else:
            upsample_size = [int(npimg.shape[0] / 8 * upsample_size), int(npimg.shape[1] / 8 * upsample_size)]

        if self.tensor_image.dtype == tf.quint8:
            # quantize input image
            npimg = TfPoseEstimator._quantize_img(npimg)

        logger.debug('inference+ original shape=%dx%d' % (npimg.shape[1], npimg.shape[0]))
        img = npimg
        if resize_to_default:
            img = self._get_scaled_img(npimg, None)[0][0]
        peaks, heatMat_up, pafMat_up = self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up], feed_dict={
                self.tensor_image: [img], self.upsample_size: upsample_size
            })
        peaks = peaks[0]
        self.heatMat = heatMat_up[0]
        self.pafMat = pafMat_up[0]
        logger.debug('inference- heatMat=%dx%d pafMat=%dx%d' % (
            self.heatMat.shape[1], self.heatMat.shape[0], self.pafMat.shape[1], self.pafMat.shape[0]))

        t = time.time()
        humans = PoseEstimator.estimate_paf(peaks, self.heatMat, self.pafMat)
        logger.debug('estimate time=%.5f' % (time.time() - t))
        return humans 
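The _quantize_img helper called above is not shown in these examples. A plausible sketch of such a helper (hypothetical, not the project's actual code): clamp the image to the uint8 range and retag it with the quint8-compatible numpy type so it can be fed into a quint8 placeholder.

import numpy as np
import tensorflow as tf

def _quantize_img_sketch(npimg):
    # Hypothetical stand-in for TfPoseEstimator._quantize_img: clip to
    # [0, 255] and view the data as TF's quint8 numpy scalar type.
    arr = np.clip(npimg, 0, 255).astype(np.uint8)
    return arr.astype(tf.quint8.as_numpy_dtype)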
Example #6
Source File: estimator.py    From tf-pose-estimation with Apache License 2.0
def inference(self, npimg, resize_to_default=True, upsample_size=1.0):
        if npimg is None:
            raise Exception('The image is not valid. Please check that your image exists.')

        if resize_to_default:
            upsample_size = [int(self.target_size[1] / 8 * upsample_size), int(self.target_size[0] / 8 * upsample_size)]
        else:
            upsample_size = [int(npimg.shape[0] / 8 * upsample_size), int(npimg.shape[1] / 8 * upsample_size)]

        if self.tensor_image.dtype == tf.quint8:
            # quantize input image
            npimg = TfPoseEstimator._quantize_img(npimg)

        logger.debug('inference+ original shape=%dx%d' % (npimg.shape[1], npimg.shape[0]))
        img = npimg
        if resize_to_default:
            img = self._get_scaled_img(npimg, None)[0][0]
        peaks, heatMat_up, pafMat_up = self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up], feed_dict={
                self.tensor_image: [img], self.upsample_size: upsample_size
            })
        peaks = peaks[0]
        self.heatMat = heatMat_up[0]
        self.pafMat = pafMat_up[0]
        logger.debug('inference- heatMat=%dx%d pafMat=%dx%d' % (
            self.heatMat.shape[1], self.heatMat.shape[0], self.pafMat.shape[1], self.pafMat.shape[0]))

        t = time.time()
        humans = PoseEstimator.estimate_paf(peaks, self.heatMat, self.pafMat)
        logger.debug('estimate time=%.5f' % (time.time() - t))
        return humans 
Example #7
Source File: test_converter.py    From utensor_cgen with Apache License 2.0
def test_quint8():
    tf_quint8 = as_dtype(tf.quint8).as_datatype_enum
    np_uint8 = DataTypeConverter.get_generic_value(tf_quint8)
    assert np_uint8 == np.dtype('uint8')
    assert isinstance(np_uint8, DataTypeConverter.__utensor_generic_type__)
    assert isinstance(DataTypeConverter.get_tf_value(np_uint8),
                      DataTypeConverter.__tfproto_type__) 
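For context, the round trip under test is tf.quint8 -> its DataType protobuf enum -> a generic numpy dtype (plain uint8, since quint8 shares its one-byte storage). The TF side of that mapping, independent of utensor_cgen's converter:

import tensorflow as tf

enum_value = tf.as_dtype(tf.quint8).as_datatype_enum
print(enum_value)                            # 12, i.e. DataType.DT_QUINT8
print(tf.as_dtype(enum_value) is tf.quint8)  # the enum round-trips to the same DType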
Example #8
Source File: estimator.py    From MobileNetV2-PoseEstimation with MIT License
def inference(self, npimg, resize_to_default=True, upsample_size=1.0):
        if npimg is None:
            raise Exception('The image is not valid. Please check that your image exists.')

        if resize_to_default:
            upsample_size = [int(self.target_size[1] / 8 * upsample_size), int(self.target_size[0] / 8 * upsample_size)]
        else:
            upsample_size = [int(npimg.shape[0] / 8 * upsample_size), int(npimg.shape[1] / 8 * upsample_size)]

        if self.tensor_image.dtype == tf.quint8:
            # quantize input image
            npimg = TfPoseEstimator._quantize_img(npimg)

        logger.debug('inference+ original shape=%dx%d' % (npimg.shape[1], npimg.shape[0]))
        img = npimg
        if resize_to_default:
            img = self._get_scaled_img(npimg, None)[0][0]
        peaks, heatMat_up, pafMat_up = self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up], feed_dict={
                self.tensor_image: [img], self.upsample_size: upsample_size
            })
        peaks = peaks[0]
        self.heatMat = heatMat_up[0]
        self.pafMat = pafMat_up[0]

        print("peaks.shape==", peaks.shape)
        print("heatMat.shape==", self.heatMat.shape)
        print("pafMat.shape==", self.pafMat.shape)

        logger.debug('inference- heatMat=%dx%d pafMat=%dx%d' % (
            self.heatMat.shape[1], self.heatMat.shape[0], self.pafMat.shape[1], self.pafMat.shape[0]))

        t = time.time()
        humans = PoseEstimator.estimate_paf(peaks, self.heatMat, self.pafMat)
        logger.debug('estimate time=%.5f' % (time.time() - t))
        return humans 
Example #9
Source File: tensorflow_frozenparser.py    From MMdnn with MIT License
def rename_QuantizedConv2D(self, source_node):
        IR_node = self._convert_identity_operation(source_node, new_op = 'QConv')
        kwargs = {}
        kwargs['strides'] = source_node.get_attr('strides')
        kwargs['padding'] = source_node.get_attr('padding')

        # weights
        input_node = self.src_graph.get_parent(source_node.name, [1])
        tensor_content = input_node.get_attr('value')
        W = tensor_util.MakeNdarray(tensor_content)
        W = W.astype(np.uint8)

        kwargs['kernel_shape'] = self.tensor_shape_to_list(input_node.get_attr('_output_shapes'))[0]

        input_node_minw = self.src_graph.get_parent(source_node.name, [4])
        min_W = input_node_minw.get_attr('value').float_val[0]
        input_node_maxw = self.src_graph.get_parent(source_node.name, [5])
        max_W = input_node_maxw.get_attr('value').float_val[0]

        if source_node.get_attr('Tfilter') == tensorflow.quint8:
            W = ((max_W - min_W)/255.0) * W + min_W
        else:
            assert False, ('Only quint8 weights are currently handled by the converter')

        self.set_weight(source_node.name, 'kernel_weights', W)
        assign_IRnode_values(IR_node, kwargs) 
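The line W = ((max_W - min_W)/255.0) * W + min_W is the standard affine dequantization: a stored 8-bit code w in [0, 255] represents the float value min_W + w * (max_W - min_W) / 255. A quick numpy check with illustrative range values:

import numpy as np

W_q = np.array([0, 128, 255], dtype=np.uint8)
min_W, max_W = -1.0, 1.0   # example range; real values come from the min/max const nodes
W = ((max_W - min_W) / 255.0) * W_q + min_W
print(W)   # [-1.0, ~0.004, 1.0]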
Example #10
Source File: estimator.py    From Gesture-Recognition with MIT License
def inference(self, npimg, resize_to_default=True, upsample_size=1.0):
        if npimg is None:
            raise Exception('The image is not valid. Please check that your image exists.')

        if resize_to_default:
            upsample_size = [int(self.target_size[1] / 8 * upsample_size), int(self.target_size[0] / 8 * upsample_size)]
        else:
            upsample_size = [int(npimg.shape[0] / 8 * upsample_size), int(npimg.shape[1] / 8 * upsample_size)]

        if self.tensor_image.dtype == tf.quint8:
            # quantize input image
            npimg = TfPoseEstimator._quantize_img(npimg)

        logger.debug('inference+ original shape=%dx%d' % (npimg.shape[1], npimg.shape[0]))
        img = npimg
        if resize_to_default:
            img = self._get_scaled_img(npimg, None)[0][0]
        peaks, heatMat_up, pafMat_up = self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up], feed_dict={
                self.tensor_image: [img], self.upsample_size: upsample_size
            })
        peaks = peaks[0]
        self.heatMat = heatMat_up[0]
        self.pafMat = pafMat_up[0]
        logger.debug('inference- heatMat=%dx%d pafMat=%dx%d' % (
            self.heatMat.shape[1], self.heatMat.shape[0], self.pafMat.shape[1], self.pafMat.shape[0]))

        t = time.time()
        humans = PoseEstimator.estimate_paf(peaks, self.heatMat, self.pafMat)
        logger.debug('estimate time=%.5f' % (time.time() - t))
        return humans 
Example #11
Source File: quantize_graph_test.py    From deep_image_model with Apache License 2.0
def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map,
                                      output_names, input_range):
    if sys.version_info[0] == 3:
      # uint8->quint8 conversion for numpy is not working currently.
      return

    quantized_input_map = {}
    for k, v in input_map.items():
      arr = [
          int(round((n-input_range[0])*255/(input_range[1]-input_range[0])))
          for n in v.flat]
      arr = np.array(arr, np.uint8)
      arr = arr.reshape(v.shape)
      arr = arr.astype(tf.quint8.as_numpy_dtype)
      quantized_input_map[k] = arr
    output_tensors = [output_name + ":0" for output_name in output_names]
    float_results = run_graph_def(float_graph_def, input_map, output_tensors)

    # Quantize treating the input as quantized in range <input_range>.
    rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
                                            input_range)
    graph_def = rewriter.rewrite(output_names)
    results = run_graph_def(graph_def, quantized_input_map, output_tensors)
    for expected, result in zip(float_results, results):
      assert are_tensors_near(expected, result, .5)
    ops = [node.op for node in graph_def.node]
    self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
    self.assertEqual(len(output_names), ops.count("Dequantize"))

    # Quantize without treating input as quantized.
    rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
                                            quantized_input_range=None)
    graph_def = rewriter.rewrite(output_names)
    results = run_graph_def(graph_def, input_map, output_tensors)
    for expected, result in zip(float_results, results):
      assert are_tensors_near(expected, result, .5)
    ops = [node.op for node in graph_def.node]
    self.assertEqual(len(input_map),
                     ops.count("QuantizeV2") + ops.count("Quantize"))
    self.assertEqual(len(output_names), ops.count("Dequantize")) 
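The per-element loop above implements the usual range quantization q = round((x - min) * 255 / (max - min)). A vectorized numpy equivalent of how the test builds its quint8 feed (note the test skips this on Python 3, where the uint8-to-quint8 astype was broken at the time):

import numpy as np
import tensorflow as tf

def quantize_to_quint8(v, input_range):
    lo, hi = input_range
    q = np.round((v - lo) * 255.0 / (hi - lo))
    return q.astype(np.uint8).astype(tf.quint8.as_numpy_dtype)

# quantize_to_quint8(np.array([0.0, 0.5, 1.0]), (0.0, 1.0)) -> codes 0, 128, 255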
Example #12
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def eightbitize_placeholder_node(self, current_node):
    """Replaces a placeholder node with a quint8 placeholder node+dequantize."""
    name = current_node.name

    # Convert the placeholder into a quantized type.
    output_node = tf.NodeDef()
    output_node.CopyFrom(current_node)
    set_attr_dtype(output_node, "dtype", tf.quint8)
    output_node.name += "_original_input"
    self.add_output_graph_node(output_node)

    # Add a dequantize to convert back to float.
    dequantize_node = create_node(
        "Dequantize", name,
        [output_node.name, "quantized_input_min_value",
         "quantized_input_max_value"])
    set_attr_dtype(dequantize_node, "T", tf.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node)

    # For the descent over the graph to work, the dequantize node must be named
    # current_node.name.  However, for the feeding of the graph to work, the
    # placeholder must have the name current_node.name; so record a final set
    # of renames to apply after all processing has been done.
    self.final_node_renames[output_node.name] = name
    self.final_node_renames[dequantize_node.name] = name + "_dequantize" 
Example #13
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def add_relu_function(self, unused_arg_node, quantized_op_node):
    set_attr_dtype(quantized_op_node, "Tinput", tf.quint8) 
Example #14
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def add_pool_function(self, original_node, quantized_op_node):
    set_attr_dtype(quantized_op_node, "T", tf.quint8)
    copy_attr(quantized_op_node, "ksize", original_node.attr["ksize"])
    copy_attr(quantized_op_node, "strides", original_node.attr["strides"])
    copy_attr(quantized_op_node, "padding", original_node.attr["padding"]) 
Example #15
Source File: equal.py    From onnx-tensorflow with Apache License 2.0
def args_check(cls, node, **kwargs):
    supported_dtype = [
        tf.bfloat16, tf.half, tf.float32, tf.float64, tf.uint8, tf.int8,
        tf.int16, tf.int32, tf.int64, tf.complex64, tf.quint8, tf.qint8,
        tf.qint32, tf.string, tf.bool, tf.complex128
    ]
    x = kwargs["tensor_dict"][node.inputs[0]]
    if x.dtype not in supported_dtype:
      exception.OP_UNSUPPORTED_EXCEPT(
          "Equal inputs in " + str(x.dtype) + " which", "Tensorflow") 
Example #16
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def add_quantize_down_nodes(self, original_node, quantized_output_name):
    quantized_outputs = [
        quantized_output_name, quantized_output_name + ":1",
        quantized_output_name + ":2"
    ]
    min_max_inputs = None
    if self.should_merge_with_fake_quant_node():
      # Use the inputs to the FakeQuantWithMinMaxVars node as the inputs to
      # Requantize.
      fake_quant_node = self.state.output_node_stack[-1][0]
      min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
      assert original_node.name not in self.state.merged_with_fake_quant
      self.state.merged_with_fake_quant[original_node.name] = True
    elif self.fallback_quantization_range:
      min_max_inputs = ["fallback_quantization_min_value:0",
                        "fallback_quantization_max_value:0"]
    else:
      # Add a RequantizationRange node for finding the min and max values.
      requant_range_node = create_node(
          "RequantizationRange", original_node.name + "_eightbit_requant_range",
          quantized_outputs)
      set_attr_dtype(requant_range_node, "Tinput", tf.qint32)
      self.add_output_graph_node(requant_range_node)
      min_max_inputs = [requant_range_node.name + ":0",
                        requant_range_node.name + ":1"]
    requantize_node = create_node(
        "Requantize", original_node.name + "_eightbit_requantize",
        quantized_outputs + min_max_inputs)
    set_attr_dtype(requantize_node, "Tinput", tf.qint32)
    set_attr_dtype(requantize_node, "out_type", tf.quint8)
    self.add_output_graph_node(requantize_node)
    return requantize_node.name 
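The Requantize node created here maps the 32-bit accumulator back down to quint8 using the min/max found above. Under the usual interpretation, where a qint32 tensor with float range [in_min, in_max] spans the full int32 code space linearly, the arithmetic is roughly as in this hypothetical numpy sketch (not TF's exact kernel):

import numpy as np

def requantize_sketch(q32, in_min, in_max, out_min, out_max):
    # Hypothetical sketch: recover approximate float values from the
    # qint32 codes, then re-quantize into [out_min, out_max] as quint8.
    i32 = np.iinfo(np.int32)
    real = in_min + (q32.astype(np.float64) - i32.min) * \
           (in_max - in_min) / float(i32.max - i32.min)
    q8 = np.round((real - out_min) * 255.0 / (out_max - out_min))
    return np.clip(q8, 0, 255).astype(np.uint8)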
Example #17
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def eightbitize_input_to_node(self, namespace_prefix, original_input_name,
                                reshape_dims_name, reduction_dims_name):
    """Takes one float input to an op, and converts it to quantized form."""
    unique_input_name = unique_node_name_from_input(original_input_name)
    reshape_input_name = namespace_prefix + "_reshape_" + unique_input_name
    min_input_name = namespace_prefix + "_min_" + unique_input_name
    max_input_name = namespace_prefix + "_max_" + unique_input_name
    quantize_input_name = namespace_prefix + "_quantize_" + unique_input_name
    reshape_input_node = create_node("Reshape", reshape_input_name,
                                     [original_input_name, reshape_dims_name])
    set_attr_dtype(reshape_input_node, "T", tf.float32)
    self.add_output_graph_node(reshape_input_node)
    min_input_node = create_node("Min", min_input_name, [reshape_input_name,
                                                         reduction_dims_name])
    set_attr_dtype(min_input_node, "T", tf.float32)
    set_attr_bool(min_input_node, "keep_dims", False)
    self.add_output_graph_node(min_input_node)
    max_input_node = create_node("Max", max_input_name, [reshape_input_name,
                                                         reduction_dims_name])
    set_attr_dtype(max_input_node, "T", tf.float32)
    set_attr_bool(max_input_node, "keep_dims", False)
    self.add_output_graph_node(max_input_node)
    quantize_input_node = create_node("QuantizeV2", quantize_input_name,
                                      [original_input_name, min_input_name,
                                       max_input_name])
    set_attr_dtype(quantize_input_node, "T", tf.quint8)
    set_attr_string(quantize_input_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(quantize_input_node)
    min_output_name = quantize_input_name + ":1"
    max_output_name = quantize_input_name + ":2"
    return quantize_input_name, min_output_name, max_output_name 
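The QuantizeV2 node created here uses mode MIN_FIRST, whose documented formula rounds the input and the offset separately so that range_min lands exactly on code 0: q = round(x * scale) - round(range_min * scale), with scale = 255 / (range_max - range_min) for quint8. A numpy sketch of that forward map:

import numpy as np

def quantize_min_first(x, range_min, range_max):
    # MIN_FIRST mode for quint8: two separate roundings, then clamp.
    scale = 255.0 / (range_max - range_min)
    q = np.round(x * scale) - np.round(range_min * scale)
    return np.clip(q, 0, 255).astype(np.uint8)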
Example #18
Source File: dtypes_test.py    From deep_image_model with Apache License 2.0
def testStringConversion(self):
    self.assertIs(tf.float32, tf.as_dtype("float32"))
    self.assertIs(tf.float64, tf.as_dtype("float64"))
    self.assertIs(tf.int32, tf.as_dtype("int32"))
    self.assertIs(tf.uint8, tf.as_dtype("uint8"))
    self.assertIs(tf.uint16, tf.as_dtype("uint16"))
    self.assertIs(tf.int16, tf.as_dtype("int16"))
    self.assertIs(tf.int8, tf.as_dtype("int8"))
    self.assertIs(tf.string, tf.as_dtype("string"))
    self.assertIs(tf.complex64, tf.as_dtype("complex64"))
    self.assertIs(tf.complex128, tf.as_dtype("complex128"))
    self.assertIs(tf.int64, tf.as_dtype("int64"))
    self.assertIs(tf.bool, tf.as_dtype("bool"))
    self.assertIs(tf.qint8, tf.as_dtype("qint8"))
    self.assertIs(tf.quint8, tf.as_dtype("quint8"))
    self.assertIs(tf.qint32, tf.as_dtype("qint32"))
    self.assertIs(tf.bfloat16, tf.as_dtype("bfloat16"))
    self.assertIs(tf.float32_ref, tf.as_dtype("float32_ref"))
    self.assertIs(tf.float64_ref, tf.as_dtype("float64_ref"))
    self.assertIs(tf.int32_ref, tf.as_dtype("int32_ref"))
    self.assertIs(tf.uint8_ref, tf.as_dtype("uint8_ref"))
    self.assertIs(tf.int16_ref, tf.as_dtype("int16_ref"))
    self.assertIs(tf.int8_ref, tf.as_dtype("int8_ref"))
    self.assertIs(tf.string_ref, tf.as_dtype("string_ref"))
    self.assertIs(tf.complex64_ref, tf.as_dtype("complex64_ref"))
    self.assertIs(tf.complex128_ref, tf.as_dtype("complex128_ref"))
    self.assertIs(tf.int64_ref, tf.as_dtype("int64_ref"))
    self.assertIs(tf.bool_ref, tf.as_dtype("bool_ref"))
    self.assertIs(tf.qint8_ref, tf.as_dtype("qint8_ref"))
    self.assertIs(tf.quint8_ref, tf.as_dtype("quint8_ref"))
    self.assertIs(tf.qint32_ref, tf.as_dtype("qint32_ref"))
    self.assertIs(tf.bfloat16_ref, tf.as_dtype("bfloat16_ref"))
    with self.assertRaises(TypeError):
      tf.as_dtype("not_a_type") 
Example #19
Source File: dequantize_op_test.py    From deep_image_model with Apache License 2.0
def testBasicQuint8(self):
    self._testDequantizeOp(np.array([0, 128, 255]),
                           0.0, 6.0, tf.quint8)
    self._testDequantizeOp(np.array([0, 128, 255]),
                           0.0, 123.456, tf.quint8)
    self._testDequantizeOp(np.array([0, 4, 42, 108, 243]),
                           5.0, 200.2, tf.quint8) 
Example #20
Source File: dequantize_op_test.py    From deep_image_model with Apache License 2.0
def _testDequantizeOp(self, inputs, min_range, max_range, dtype):
    with self.test_session():
      input_op = tf.constant(inputs, shape=[len(inputs)], dtype=dtype)
      dequantized = tf.dequantize(
          input_op, min_range, max_range)
      tf_ans = dequantized.eval()

    # TODO(vrv): Add support for DT_QINT32 quantization if needed.
    type_dict = {
        tf.quint8: np.uint8,
        tf.qint8: np.int8,
        tf.quint16: np.uint16,
        tf.qint16: np.int16
        }
    self.assertTrue(dtype in type_dict.keys())
    v_max = np.iinfo(type_dict[dtype]).max
    v_min = np.iinfo(type_dict[dtype]).min
    self.assertTrue(min_range >= v_min)
    self.assertTrue(max_range <= v_max)
    type_range = v_max - v_min
    if v_min < 0:
      half_range = (type_range + 1) / 2
    else:
      half_range = 0.0

    np_ans = ((inputs.astype(np.float32) + half_range) *
              (max_range - min_range) / type_range) + min_range
    self.assertAllClose(tf_ans, np_ans) 
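A quick numeric check of the expected-value formula for the quint8 case (half_range is 0 and type_range is 255, since the type is unsigned): code 128 with range [0.0, 6.0] dequantizes to 128 * 6 / 255, about 3.0118.

import numpy as np

inputs = np.array([0, 128, 255], dtype=np.uint8)
min_range, max_range = 0.0, 6.0
np_ans = inputs.astype(np.float32) * (max_range - min_range) / 255.0 + min_range
print(np_ans)   # [0.0, ~3.0118, 6.0]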
Example #21
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def quantize_node(self, input_node):
    """Handles quantizing a single node."""
    input_name = input_node.name
    if input_name in self.already_quantized:
      return
    self.already_quantized[input_name] = True
    original_input_name = input_name + "_original"
    reshape_name = input_name + "_reshape"
    reshape_dims_name = input_name + "_reshape_dims"
    max_name = input_name + "_max"
    min_name = input_name + "_min"
    dims_name = input_name + "_dims"
    quantize_name = input_name + "_quantize"
    dequantize_name = input_name
    original_input_node = tf.NodeDef()
    original_input_node.CopyFrom(input_node)
    original_input_node.name = original_input_name
    self.add_output_graph_node(original_input_node)
    reshape_dims_node = create_constant_node(reshape_dims_name, -1, tf.int32,
                                             [1])
    self.add_output_graph_node(reshape_dims_node)
    reshape_node = create_node("Reshape", reshape_name, [original_input_name,
                                                         reshape_dims_name])
    set_attr_dtype(reshape_node, "T", tf.float32)
    self.add_output_graph_node(reshape_node)
    dims_node = create_constant_node(dims_name, 0, tf.int32, [1])
    self.add_output_graph_node(dims_node)
    max_node = create_node("Max", max_name, [reshape_name, dims_name])
    set_attr_dtype(max_node, "T", tf.float32)
    set_attr_bool(max_node, "keep_dims", False)
    self.add_output_graph_node(max_node)
    min_node = create_node("Min", min_name, [reshape_name, dims_name])
    set_attr_dtype(min_node, "T", tf.float32)
    set_attr_bool(min_node, "keep_dims", False)
    self.add_output_graph_node(min_node)
    quantize_node = create_node("Quantize", quantize_name, [original_input_name,
                                                            min_name, max_name])
    set_attr_dtype(quantize_node, "T", tf.quint8)
    set_attr_string(quantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(quantize_node)
    dequantize_node = create_node("Dequantize", dequantize_name,
                                  [quantize_name, min_name, max_name])
    set_attr_dtype(dequantize_node, "T", tf.quint8)
    set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
    self.add_output_graph_node(dequantize_node) 
Example #22
Source File: tensor_util_test.py    From deep_image_model with Apache License 2.0
def testQuantizedTypes(self):
    # Test with array.
    data = [(21,), (22,), (23,)]

    t = tensor_util.make_tensor_proto(data, dtype=tf.qint32)
    self.assertProtoEquals("""
      dtype: DT_QINT32
      tensor_shape { dim { size: 3 } }
      tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(tf.qint32.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)

    t = tensor_util.make_tensor_proto(data, dtype=tf.quint8)
    self.assertProtoEquals("""
      dtype: DT_QUINT8
      tensor_shape { dim { size: 3 } }
      tensor_content: "\025\026\027"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(tf.quint8.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)

    t = tensor_util.make_tensor_proto(data, dtype=tf.qint8)
    self.assertProtoEquals("""
      dtype: DT_QINT8
      tensor_shape { dim { size: 3 } }
      tensor_content: "\025\026\027"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(tf.qint8.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)

    t = tensor_util.make_tensor_proto(data, dtype=tf.quint16)
    self.assertProtoEquals("""
      dtype: DT_QUINT16
      tensor_shape { dim { size: 3 } }
      tensor_content: "\025\000\026\000\027\000"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(tf.quint16.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)

    t = tensor_util.make_tensor_proto(data, dtype=tf.qint16)
    self.assertProtoEquals("""
      dtype: DT_QINT16
      tensor_shape { dim { size: 3 } }
      tensor_content: "\025\000\026\000\027\000"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(tf.qint16.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a) 
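The tensor_content strings above use octal escapes: \025, \026, \027 are bytes 21, 22, 23, i.e. the data values, serialized one byte apiece for the 8-bit quantized types and as two little-endian bytes for the 16-bit types. A quick check:

print(list(b"\025\026\027"))               # [21, 22, 23]  (quint8/qint8 layout)
print(list(b"\025\000\026\000\027\000"))   # [21, 0, 22, 0, 23, 0]  (16-bit, little-endian)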
Example #23
Source File: quantized_conv_ops_test.py    From deep_image_model with Apache License 2.0
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                    expected):
    """Verifies the output values of the convolution function.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      stride: Stride.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = np.array([f for f in range(1, total_size_1 + 1)])
    x1 = x1.astype(np.uint8).reshape(tensor_in_sizes)
    x1_min = 0.0
    x1_max = 255.0
    x2 = np.array([f for f in range(1, total_size_2 + 1)])
    x2 = x2.astype(np.uint8).reshape(filter_in_sizes)
    x2_min = 0.0
    x2_max = 255.0
    with self.test_session(use_gpu=False) as sess:
      t1 = tf.constant(x1, shape=tensor_in_sizes, dtype=tf.quint8)
      t2 = tf.constant(x2, shape=filter_in_sizes, dtype=tf.quint8)
      conv = tf.nn.quantized_conv2d(t1,
                                    t2,
                                    out_type=tf.qint32,
                                    strides=[1, stride,
                                             stride, 1],
                                    padding=padding,
                                    min_input=x1_min,
                                    max_input=x1_max,
                                    min_filter=x2_min,
                                    max_filter=x2_max)
      value = sess.run(conv)
    quantized_output = value[0]
    output_min = value[1]
    output_max = value[2]
    float_output = self._QuantizedOutputToFloat(quantized_output, output_min,
                                                output_max)
    self.assertArrayNear(expected, float_output.flatten(), 1.0)
    self.assertEqual(value[0].shape, conv[0].get_shape())
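The helper _QuantizedOutputToFloat is not shown in this example. A plausible sketch (hypothetical, following the same linear-map convention as the other examples): map the qint32 codes, which span the int32 range, onto [output_min, output_max].

import numpy as np

def quantized_output_to_float_sketch(quantized, output_min, output_max):
    # Hypothetical stand-in: linearly map int32 codes to the float range.
    i32 = np.iinfo(np.int32)
    scale = (output_max - output_min) / float(i32.max - i32.min)
    return output_min + (quantized.astype(np.float64) - i32.min) * scale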