Python tensorflow.python.framework.dtypes.qint32() Examples

The following are code examples that use tensorflow.python.framework.dtypes.qint32. All of them come from quantize_graph.py, TensorFlow's eight-bit graph quantization tool, which ships as an identical vendored copy in several open-source projects: tensorflow-for-poets-2 (Apache License 2.0), MobileNet (Apache License 2.0), pokemon-mini (Apache License 2.0), AudioNet (MIT License), and sketch-to-react-native (MIT License). Each distinct function is shown once below, credited to one of those copies. You may also want to check out the other available functions and classes of the module tensorflow.python.framework.dtypes.
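
Before the examples, it helps to see what qint32 actually is: a DType object describing TensorFlow's 32-bit signed quantized integer type, used throughout the code below as the wide accumulator for ops whose quint8 inputs would overflow a narrower output. A minimal inspection sketch:

from tensorflow.python.framework import dtypes

# qint32 is a DType instance, not a callable factory; the "()" in the
# page title is just an indexing convention.
print(dtypes.qint32.name)              # 'qint32'
print(dtypes.qint32.is_quantized)      # True
print(dtypes.qint32.size)              # 4 (bytes per element)
print(dtypes.qint32.as_datatype_enum)  # the DT_QINT32 value from types.proto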
Example #1
Source File: quantize_graph.py    From tensorflow-for-poets-2 with Apache License 2.0
def eightbitize_mat_mul_node(self, original_node):
    """Replaces a MatMul node with the eight bit equivalent sub-graph."""
    quantized_mat_mul_name = original_node.name + "_eightbit_quantized_mat_mul"
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_mat_mul_node = create_node("QuantizedMatMul",
                                         quantized_mat_mul_name,
                                         all_input_names)
    set_attr_dtype(quantized_mat_mul_node, "T1", dtypes.quint8)
    set_attr_dtype(quantized_mat_mul_node, "T2", dtypes.quint8)
    set_attr_dtype(quantized_mat_mul_node, "Toutput", dtypes.qint32)
    copy_attr(quantized_mat_mul_node, "transpose_a",
              original_node.attr["transpose_a"])
    copy_attr(quantized_mat_mul_node, "transpose_b",
              original_node.attr["transpose_b"])
    self.add_output_graph_node(quantized_mat_mul_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                      quantized_mat_mul_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name) 
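
Every example relies on the same small helpers defined elsewhere in quantize_graph.py. A minimal sketch of their likely shape, for orientation only (the real file adds more error handling):

from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import node_def_pb2


def create_node(op, name, inputs):
  # Build a bare NodeDef with the given op type, node name, and inputs.
  new_node = node_def_pb2.NodeDef()
  new_node.op = op
  new_node.name = name
  for input_name in inputs:
    new_node.input.extend([input_name])
  return new_node


def set_attr_dtype(node, key, value):
  # Store a DType such as dtypes.qint32 as a type attr on the node.
  node.attr[key].CopyFrom(
      attr_value_pb2.AttrValue(type=value.as_datatype_enum))


def copy_attr(node, key, attr_value):
  # Carry an existing attr (e.g. "strides") over from the original node.
  node.attr[key].CopyFrom(attr_value)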
Example #2
Source File: quantize_graph.py    From sketch-to-react-native with MIT License
def eightbitize_conv_node(self, original_node):
    """Replaces a Conv2D node with the eight bit equivalent sub-graph."""
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_conv_name = original_node.name + "_eightbit_quantized_conv"
    quantized_conv_node = create_node("QuantizedConv2D", quantized_conv_name,
                                      all_input_names)
    copy_attr(quantized_conv_node, "strides", original_node.attr["strides"])
    copy_attr(quantized_conv_node, "padding", original_node.attr["padding"])
    set_attr_dtype(quantized_conv_node, "Tinput", dtypes.quint8)
    set_attr_dtype(quantized_conv_node, "Tfilter", dtypes.quint8)
    set_attr_dtype(quantized_conv_node, "out_type", dtypes.qint32)
    self.add_output_graph_node(quantized_conv_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                      quantized_conv_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name) 
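
The out_type of qint32 here is not arbitrary: each product of two eight-bit values needs up to 16 bits, and a convolution sums many such products per output element. A back-of-the-envelope check with hypothetical filter dimensions:

# Worst-case accumulation for a quantized conv (hypothetical sizes):
# a 3x3 filter over 256 input channels sums 2304 products per output.
products_per_output = 3 * 3 * 256
worst_case_sum = 255 * 255 * products_per_output  # every input at max
print(worst_case_sum)          # 149817600
print(worst_case_sum < 2**15)  # False: overflows a 16-bit accumulator
print(worst_case_sum < 2**31)  # True: fits comfortably in qint32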
Example #3
Source File: quantize_graph.py    From AudioNet with MIT License
def eightbitize_bias_add_node(self, original_node):
    """Replaces a BiasAdd node with the eight bit equivalent sub-graph."""
    quantized_bias_add_name = (
        original_node.name + "_eightbit_quantized_bias_add")
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_bias_add_node = create_node("QuantizedBiasAdd",
                                          quantized_bias_add_name,
                                          all_input_names)
    set_attr_dtype(quantized_bias_add_node, "T1", dtypes.quint8)
    set_attr_dtype(quantized_bias_add_node, "T2", dtypes.quint8)
    set_attr_dtype(quantized_bias_add_node, "out_type", dtypes.qint32)
    self.add_output_graph_node(quantized_bias_add_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                      quantized_bias_add_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name) 
Example #4
Source File: quantize_graph.py    From AudioNet with MIT License
def add_quantize_down_nodes(self, original_node, quantized_output_name):
    """Adds nodes that requantize a quantized op's qint32 output to quint8."""
    quantized_outputs = [
        quantized_output_name, quantized_output_name + ":1",
        quantized_output_name + ":2"
    ]
    min_max_inputs = None
    if self.should_merge_with_fake_quant_node():
      # Use the inputs to the FakeQuantWithMinMaxVars node as the inputs to
      # Requantize.
      fake_quant_node = self.state.output_node_stack[-1][0]
      min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
      assert original_node.name not in self.state.merged_with_fake_quant
      self.state.merged_with_fake_quant[original_node.name] = True
    elif self.fallback_quantization_range:
      min_max_inputs = [
          "fallback_quantization_min_value:0",
          "fallback_quantization_max_value:0"
      ]
    else:
      # Add a RequantizationRange node for finding the min and max values.
      requant_range_node = create_node(
          "RequantizationRange", original_node.name + "_eightbit_requant_range",
          quantized_outputs)
      set_attr_dtype(requant_range_node, "Tinput", dtypes.qint32)
      self.add_output_graph_node(requant_range_node)
      min_max_inputs = [
          requant_range_node.name + ":0", requant_range_node.name + ":1"
      ]
    requantize_node = create_node("Requantize",
                                  original_node.name + "_eightbit_requantize",
                                  quantized_outputs + min_max_inputs)
    set_attr_dtype(requantize_node, "Tinput", dtypes.qint32)
    set_attr_dtype(requantize_node, "out_type", dtypes.quint8)
    self.add_output_graph_node(requantize_node)
    return requantize_node.name 
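
After any of these quantized ops the graph holds qint32 values plus min/max tensors describing the float range they represent; the Requantize node built above rescales them into quint8 for the next layer. A toy illustration of that mapping in plain Python (not the exact kernel arithmetic):

def requantize_to_quint8(value, min_val, max_val):
  # Map a value known to lie in [min_val, max_val] onto [0, 255].
  scale = 255.0 / (max_val - min_val)
  return int(round((value - min_val) * scale))


print(requantize_to_quint8(-1000.0, -1000.0, 1000.0))  # 0
print(requantize_to_quint8(0.0, -1000.0, 1000.0))      # 128
print(requantize_to_quint8(1000.0, -1000.0, 1000.0))   # 255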
Example #5
Source File: quantize_graph.py    From sketch-to-react-native with MIT License
def eightbitize_batch_norm_node(self, original_node):
    """Replaces a MatMul node with the eight bit equivalent sub-graph."""
    namespace_prefix = original_node.name + "_eightbit"
    original_input_name = original_node.input[0]
    original_mean_name = original_node.input[1]
    original_variance_name = original_node.input[2]
    original_beta_name = original_node.input[3]
    original_gamma_name = original_node.input[4]
    quantized_batch_norm_name = namespace_prefix + "_quantized_batch_norm"

    reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
        namespace_prefix)
    quantize_input_name, min_input_name, max_input_name = (
        self.eightbitize_input_to_node(namespace_prefix, original_input_name,
                                       reshape_dims_name, reduction_dims_name))
    quantize_mean_name, min_mean_name, max_mean_name = (
        self.eightbitize_input_to_node(namespace_prefix, original_mean_name,
                                       reshape_dims_name, reduction_dims_name))
    quantize_variance_name, min_variance_name, max_variance_name = (
        self.eightbitize_input_to_node(namespace_prefix, original_variance_name,
                                       reshape_dims_name, reduction_dims_name))
    quantize_beta_name, min_beta_name, max_beta_name = (
        self.eightbitize_input_to_node(namespace_prefix, original_beta_name,
                                       reshape_dims_name, reduction_dims_name))
    quantize_gamma_name, min_gamma_name, max_gamma_name = (
        self.eightbitize_input_to_node(namespace_prefix, original_gamma_name,
                                       reshape_dims_name, reduction_dims_name))
    quantized_batch_norm_node = create_node(
        "QuantizedBatchNormWithGlobalNormalization", quantized_batch_norm_name,
        [
            quantize_input_name, min_input_name, max_input_name,
            quantize_mean_name, min_mean_name, max_mean_name,
            quantize_variance_name, min_variance_name, max_variance_name,
            quantize_beta_name, min_beta_name, max_beta_name,
            quantize_gamma_name, min_gamma_name, max_gamma_name
        ])
    set_attr_dtype(quantized_batch_norm_node, "Tinput", dtypes.quint8)
    set_attr_dtype(quantized_batch_norm_node, "out_type", dtypes.qint32)
    copy_attr(quantized_batch_norm_node, "scale_after_normalization",
              original_node.attr["scale_after_normalization"])
    copy_attr(quantized_batch_norm_node, "variance_epsilon",
              original_node.attr["variance_epsilon"])
    self.add_output_graph_node(quantized_batch_norm_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                      quantized_batch_norm_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name) 
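
Finally, a hedged sketch of how these eightbitize_* methods are reached end to end. It assumes quantize_graph.py from one of the projects above is importable as a module, the frozen-model path and output node name are hypothetical, and the GraphRewriter constructor arguments follow the TF 1.x tool:

from tensorflow.core.framework import graph_pb2

import quantize_graph  # the file all of the examples above come from

graph_def = graph_pb2.GraphDef()
with open("frozen_model.pb", "rb") as f:  # hypothetical frozen graph
  graph_def.ParseFromString(f.read())

rewriter = quantize_graph.GraphRewriter(
    graph_def, mode="eightbit", quantized_input_range=None)
output_graph_def = rewriter.rewrite(["softmax"])  # hypothetical output node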