Python google.protobuf.text_format.Parse() Examples

The following are 30 code examples showing how to use google.protobuf.text_format.Parse(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module google.protobuf.text_format, or try the search function.

Example 1
Project: DOTA_models   Author: ringringyi   File: graph_builder_test.py    License: Apache License 2.0 6 votes vote down vote up
def RunTraining(self, hyperparam_config):
    """Builds a training graph from the linked master spec and runs one step.

    Args:
      hyperparam_config: spec_pb2.GridPoint holding training hyperparameters.
    """
    master_spec = self.LoadSpec('master_spec_link.textproto')

    self.assertTrue(isinstance(hyperparam_config, spec_pb2.GridPoint))
    # Parse two text-format gold sentences to use as training input.
    gold_doc = sentence_pb2.Sentence()
    text_format.Parse(_DUMMY_GOLD_SENTENCE, gold_doc)
    gold_doc_2 = sentence_pb2.Sentence()
    text_format.Parse(_DUMMY_GOLD_SENTENCE_2, gold_doc_2)
    # The reader op consumes serialized Sentence protos as a string batch.
    reader_strings = [
        gold_doc.SerializeToString(), gold_doc_2.SerializeToString()
    ]
    tf.logging.info('Generating graph with config: %s', hyperparam_config)
    with tf.Graph().as_default():
      builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)

      # Build a training target covering all components.
      target = spec_pb2.TrainTarget()
      target.name = 'testTraining-all'
      train = builder.add_training_from_config(target)
      with self.test_session() as sess:
        logging.info('Initializing')
        sess.run(tf.global_variables_initializer())

        # Run one iteration of training and verify nothing crashes.
        logging.info('Training')
        sess.run(train['run'], feed_dict={train['input_batch']: reader_strings}) 
Example 2
Project: DOTA_models   Author: ringringyi   File: bulk_component_test.py    License: Apache License 2.0 6 votes vote down vote up
def testFailsOnFixedFeature(self):
    """The bulk annotator must reject a fixed feature with a real embedding."""
    spec = spec_pb2.ComponentSpec()
    text_format.Parse("""
        name: "annotate"
        network_unit {
          registered_name: "IdentityNetwork"
        }
        fixed_feature {
          name: "fixed" embedding_dim: 32 size: 1
        }
        """, spec)
    with tf.Graph().as_default():
      builder = bulk_component.BulkAnnotatorComponentBuilder(
          self.master, spec)

      # Building the training graph should fail at runtime because of the
      # fixed feature.
      with self.assertRaises(RuntimeError):
        builder.build_greedy_training(self.master_state, self.network_states)
Example 3
Project: DOTA_models   Author: ringringyi   File: bulk_component_test.py    License: Apache License 2.0 6 votes vote down vote up
def testBulkFeatureIdExtractorOkWithOneFixedFeature(self):
    """A single non-embedded fixed feature works with the ID extractor."""
    spec = spec_pb2.ComponentSpec()
    text_format.Parse("""
        name: "test"
        network_unit {
          registered_name: "IdentityNetwork"
        }
        fixed_feature {
          name: "fixed" embedding_dim: -1 size: 1
        }
        """, spec)
    with tf.Graph().as_default():
      builder = bulk_component.BulkFeatureIdExtractorComponentBuilder(
          self.master, spec)

      # Both the training and inference graphs should build without errors.
      for build in (builder.build_greedy_training,
                    builder.build_greedy_inference):
        self.network_states[spec.name] = component.NetworkState()
        build(self.master_state, self.network_states)
Example 4
Project: DOTA_models   Author: ringringyi   File: bulk_component_test.py    License: Apache License 2.0 6 votes vote down vote up
def testBulkFeatureIdExtractorFailsOnLinkedFeature(self):
    """The ID extractor must reject any linked feature at construction time."""
    spec = spec_pb2.ComponentSpec()
    text_format.Parse("""
        name: "test"
        network_unit {
          registered_name: "IdentityNetwork"
        }
        fixed_feature {
          name: "fixed" embedding_dim: -1 size: 1
        }
        linked_feature {
          name: "linked" embedding_dim: -1 size: 1
          source_translator: "identity"
          source_component: "mock"
        }
        """, spec)
    with tf.Graph().as_default():
      # Construction itself should fail because of the linked feature.
      with self.assertRaises(ValueError):
        bulk_component.BulkFeatureIdExtractorComponentBuilder(
            self.master, spec)
Example 5
Project: ffn   Author: google   File: run_inference.py    License: Apache License 2.0 6 votes vote down vote up
def main(unused_argv):
  """Runs FFN inference over the bounding box given on the command line."""
  request = inference_flags.request_from_flags()

  # Make sure the output directory exists before the runner writes into it.
  if not gfile.Exists(request.segmentation_output_dir):
    gfile.MakeDirs(request.segmentation_output_dir)

  bbox = bounding_box_pb2.BoundingBox()
  text_format.Parse(FLAGS.bounding_box, bbox)

  corner = (bbox.start.z, bbox.start.y, bbox.start.x)
  size = (bbox.size.z, bbox.size.y, bbox.size.x)

  runner = inference.Runner()
  runner.start(request)
  runner.run(corner, size)

  # Counters are only dumped the first time this output directory is used;
  # an existing counters.txt is left untouched.
  counter_path = os.path.join(request.segmentation_output_dir, 'counters.txt')
  if not gfile.Exists(counter_path):
    runner.counters.dump(counter_path)
Example 6
Project: lingvo   Author: tensorflow   File: predictor.py    License: Apache License 2.0 6 votes vote down vote up
def LoadInferenceGraph(path, clear_device_placement=False):
  """Parses the file at the given path as an InferenceGraph proto.

  Args:
    path: The path of the text-format proto file to load.
    clear_device_placement: If true, clears the device field from nodes in the
      graph and from nodes of every library function.

  Returns:
    An InferenceGraph object.
  """
  with tf.io.gfile.GFile(path, "r") as f:
    graph_text = f.read()
  inference_graph = inference_graph_pb2.InferenceGraph()
  text_format.Parse(graph_text, inference_graph)
  if clear_device_placement:
    graph_def = inference_graph.graph_def
    for node in graph_def.node:
      node.ClearField("device")
    for function in graph_def.library.function:
      for node_def in function.node_def:
        node_def.ClearField("device")
  return inference_graph
Example 7
Project: mobile-ai-bench   Author: XiaoMi   File: bench_engine.py    License: Apache License 2.0 6 votes vote down vote up
def _parse_and_serialize(meta_path, proto, out_path, push_list):
    """Parses a text-format proto from |meta_path|, writes its binary form.

    Args:
        meta_path: Path of the text-format (.meta) proto file to read.
        proto: Message instance to parse into (mutated in place).
        out_path: Path of the binary .pb file to write.
        push_list: List of output paths; |out_path| is appended on success.
    """
    with open(meta_path, "rb") as fin:
        text_format.Parse(fin.read(), proto)
    with open(out_path, "wb") as fout:
        fout.write(proto.SerializeToString())
        push_list.append(out_path)


def get_proto(push_list, output_dir):
    """Converts the benchmark and model text protos to binary form.

    Args:
        push_list: List of file paths to push to the device; the generated
            .pb files are appended to it.
        output_dir: Directory where the binary protos are written.

    Returns:
        A (bench_factory, model_factory) tuple of the parsed protos.

    Raises:
        IOError: If either text proto fails to parse.
    """
    bench_factory = aibench_pb2.BenchFactory()
    model_factory = aibench_pb2.ModelFactory()
    try:
        # The two conversions are identical except for their inputs/outputs,
        # so share one helper instead of duplicating the logic.
        _parse_and_serialize("aibench/proto/benchmark.meta", bench_factory,
                             output_dir + "/benchmark.pb", push_list)
        _parse_and_serialize("aibench/proto/model.meta", model_factory,
                             output_dir + "/model.pb", push_list)
    except text_format.ParseError as e:
        raise IOError("Cannot parse file.", e)

    return bench_factory, model_factory
Example 8
Project: lambda-packs   Author: ryfeus   File: text_format_test.py    License: MIT License 6 votes vote down vote up
def testParseExotic(self, message_module):
    """Parse() handles extreme ints, exponents, and exotic string escapes."""
    message = message_module.TestAllTypes()
    # Covers int64 min, uint64 max, float exponents, octal/hex escapes,
    # adjacent string literal concatenation, and raw UTF-8 bytes.
    text = ('repeated_int64: -9223372036854775808\n'
            'repeated_uint64: 18446744073709551615\n'
            'repeated_double: 123.456\n'
            'repeated_double: 1.23e+22\n'
            'repeated_double: 1.23e-18\n'
            'repeated_string: \n'
            '"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
            'repeated_string: "foo" \'corge\' "grault"\n'
            'repeated_string: "\\303\\274\\352\\234\\237"\n'
            'repeated_string: "\\xc3\\xbc"\n'
            'repeated_string: "\xc3\xbc"\n')
    text_format.Parse(text, message)

    self.assertEqual(-9223372036854775808, message.repeated_int64[0])
    self.assertEqual(18446744073709551615, message.repeated_uint64[0])
    self.assertEqual(123.456, message.repeated_double[0])
    self.assertEqual(1.23e22, message.repeated_double[1])
    self.assertEqual(1.23e-18, message.repeated_double[2])
    # Adjacent quoted strings are concatenated into a single value.
    self.assertEqual('\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
    self.assertEqual('foocorgegrault', message.repeated_string[1])
    self.assertEqual('\u00fc\ua71f', message.repeated_string[2])
    self.assertEqual('\u00fc', message.repeated_string[3]) 
Example 9
Project: lambda-packs   Author: ryfeus   File: text_format_test.py    License: MIT License 6 votes vote down vote up
def testParseStringFieldUnescape(self, message_module):
    """Parse() unescapes \\x sequences with varying numbers of backslashes."""
    message = message_module.TestAllTypes()
    # Raw string: each line doubles the number of leading backslashes so the
    # parser alternates between a literal backslash and an escape sequence.
    text = r'''repeated_string: "\xf\x62"
               repeated_string: "\\xf\\x62"
               repeated_string: "\\\xf\\\x62"
               repeated_string: "\\\\xf\\\\x62"
               repeated_string: "\\\\\xf\\\\\x62"
               repeated_string: "\x5cx20"'''

    text_format.Parse(text, message)

    SLASH = '\\'
    self.assertEqual('\x0fb', message.repeated_string[0])
    self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1])
    self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2])
    self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
                     message.repeated_string[3])
    self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
                     message.repeated_string[4])
    self.assertEqual(SLASH + 'x20', message.repeated_string[5]) 
Example 10
Project: lambda-packs   Author: ryfeus   File: text_format_test.py    License: MIT License 6 votes vote down vote up
def testParseMessageSet(self):
    """Parse() fills ordinary repeated fields and MessageSet extensions."""
    message = unittest_pb2.TestAllTypes()
    text_format.Parse('repeated_uint64: 1\nrepeated_uint64: 2\n', message)
    self.assertEqual(1, message.repeated_uint64[0])
    self.assertEqual(2, message.repeated_uint64[1])

    # MessageSet extensions are addressed by their bracketed full name.
    message_set_text = ('message_set {\n'
                        '  [protobuf_unittest.TestMessageSetExtension1] {\n'
                        '    i: 23\n'
                        '  }\n'
                        '  [protobuf_unittest.TestMessageSetExtension2] {\n'
                        '    str: "foo"\n'
                        '  }\n'
                        '}\n')
    message = unittest_mset_pb2.TestMessageSetContainer()
    text_format.Parse(message_set_text, message)
    extension1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
    extension2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
    self.assertEqual(23, message.message_set.Extensions[extension1].i)
    self.assertEqual('foo', message.message_set.Extensions[extension2].str)
Example 11
Project: lambda-packs   Author: ryfeus   File: text_format_test.py    License: MIT License 6 votes vote down vote up
def testMergeExpandedAny(self):
    """Merge() and Parse() both accept the expanded Any syntax."""
    message = any_test_pb2.TestAny()
    text = ('any_value {\n'
            '  [type.googleapis.com/protobuf_unittest.OneString] {\n'
            '    data: "string"\n'
            '  }\n'
            '}\n')

    def unpacked_data():
      # Unpack the Any payload and return its string field.
      unpacked = unittest_pb2.OneString()
      message.any_value.Unpack(unpacked)
      return unpacked.data

    text_format.Merge(text, message)
    self.assertEqual('string', unpacked_data())
    message.Clear()
    text_format.Parse(text, message)
    self.assertEqual('string', unpacked_data())
Example 12
Project: auto-alt-text-lambda-api   Author: abhisuri97   File: text_format_test.py    License: MIT License 6 votes vote down vote up
def testParseStringFieldUnescape(self, message_module):
    """Parse() unescapes \\x sequences with varying numbers of backslashes."""
    message = message_module.TestAllTypes()
    # Raw string: each line doubles the number of leading backslashes so the
    # parser alternates between a literal backslash and an escape sequence.
    text = r'''repeated_string: "\xf\x62"
               repeated_string: "\\xf\\x62"
               repeated_string: "\\\xf\\\x62"
               repeated_string: "\\\\xf\\\\x62"
               repeated_string: "\\\\\xf\\\\\x62"
               repeated_string: "\x5cx20"'''

    text_format.Parse(text, message)

    SLASH = '\\'
    self.assertEqual('\x0fb', message.repeated_string[0])
    self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1])
    self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2])
    self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
                     message.repeated_string[3])
    self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
                     message.repeated_string[4])
    self.assertEqual(SLASH + 'x20', message.repeated_string[5]) 
Example 13
Project: auto-alt-text-lambda-api   Author: abhisuri97   File: text_format_test.py    License: MIT License 6 votes vote down vote up
def testParseMessageSet(self):
    """Parse() handles plain repeated fields as well as MessageSet entries."""
    plain = unittest_pb2.TestAllTypes()
    text_format.Parse('repeated_uint64: 1\nrepeated_uint64: 2\n', plain)
    self.assertEqual(1, plain.repeated_uint64[0])
    self.assertEqual(2, plain.repeated_uint64[1])

    # Extensions inside a MessageSet are named by their bracketed full name.
    container = unittest_mset_pb2.TestMessageSetContainer()
    text_format.Parse('message_set {\n'
                      '  [protobuf_unittest.TestMessageSetExtension1] {\n'
                      '    i: 23\n'
                      '  }\n'
                      '  [protobuf_unittest.TestMessageSetExtension2] {\n'
                      '    str: "foo"\n'
                      '  }\n'
                      '}\n', container)
    ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
    ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
    self.assertEqual(23, container.message_set.Extensions[ext1].i)
    self.assertEqual('foo', container.message_set.Extensions[ext2].str)
Example 14
Project: DOTA_models   Author: ringringyi   File: lexicon_test.py    License: Apache License 2.0 5 votes vote down vote up
def testCreateLexiconContext(self):
    """The generated lexicon context must match the golden textproto."""
    golden = text_format.Parse(_EXPECTED_CONTEXT, task_spec_pb2.TaskSpec())
    actual = lexicon.create_lexicon_context('/tmp')
    self.assertProtoEquals(actual, golden)
Example 15
Project: DOTA_models   Author: ringringyi   File: graph_builder_test.py    License: Apache License 2.0 5 votes vote down vote up
def LoadSpec(self, spec_path):
    """Loads a MasterSpec textproto from the test data directory.

    Args:
      spec_path: Spec filename, relative to the testdata directory.

    Returns:
      The parsed spec_pb2.MasterSpec, with 'TESTDATA' placeholders replaced
      by the actual testdata directory path.
    """
    master_spec = spec_pb2.MasterSpec()
    testdata = os.path.join(FLAGS.test_srcdir,
                            'dragnn/core/testdata')
    # `file()` is a Python-2-only builtin; use `open()` so this also runs
    # under Python 3.
    with open(os.path.join(testdata, spec_path), 'r') as fin:
      text_format.Parse(fin.read().replace('TESTDATA', testdata), master_spec)
      return master_spec
Example 16
Project: DOTA_models   Author: ringringyi   File: bulk_component_test.py    License: Apache License 2.0 5 votes vote down vote up
def testFailsOnNonIdentityTranslator(self):
    """Bulk builders reject linked features with a non-identity translator."""
    spec = spec_pb2.ComponentSpec()
    text_format.Parse("""
        name: "test"
        network_unit {
          registered_name: "IdentityNetwork"
        }
        linked_feature {
          name: "features" embedding_dim: -1 size: 1
          source_translator: "history"
          source_component: "mock"
        }
        """, spec)

    # Both the feature extractor and the annotator should raise
    # NotImplementedError because of the "history" translator.
    builder_classes = (bulk_component.BulkFeatureExtractorComponentBuilder,
                       bulk_component.BulkAnnotatorComponentBuilder)
    for builder_class in builder_classes:
      with tf.Graph().as_default():
        builder = builder_class(self.master, spec)

        with self.assertRaises(NotImplementedError):
          builder.build_greedy_training(self.master_state, self.network_states)
Example 17
Project: DOTA_models   Author: ringringyi   File: bulk_component_test.py    License: Apache License 2.0 5 votes vote down vote up
def testFailsOnRecurrentLinkedFeature(self):
    """Bulk builders reject a linked feature that recurrently links to its
    own component's layer."""
    spec = spec_pb2.ComponentSpec()
    text_format.Parse("""
        name: "test"
        network_unit {
          registered_name: "FeedForwardNetwork"
          parameters {
            key: 'hidden_layer_sizes' value: '64'
          }
        }
        linked_feature {
          name: "features" embedding_dim: -1 size: 1
          source_translator: "identity"
          source_component: "test"
          source_layer: "layer_0"
        }
        """, spec)

    # Both the feature extractor and the annotator should raise RuntimeError
    # because the linked feature points back at its own component (recurrence
    # is not supported in bulk mode).
    builder_classes = (bulk_component.BulkFeatureExtractorComponentBuilder,
                       bulk_component.BulkAnnotatorComponentBuilder)
    for builder_class in builder_classes:
      with tf.Graph().as_default():
        builder = builder_class(self.master, spec)

        with self.assertRaises(RuntimeError):
          builder.build_greedy_training(self.master_state, self.network_states)
Example 18
Project: DOTA_models   Author: ringringyi   File: bulk_component_test.py    License: Apache License 2.0 5 votes vote down vote up
def testNormalFixedFeaturesAreDifferentiable(self):
    """Gradients w.r.t. a normal fixed-feature embedding matrix exist."""
    component_spec = spec_pb2.ComponentSpec()
    text_format.Parse("""
        name: "test"
        network_unit {
          registered_name: "IdentityNetwork"
        }
        fixed_feature {
          name: "fixed" embedding_dim: 32 size: 1
          pretrained_embedding_matrix { part {} }
          vocab { part {} }
        }
        component_builder {
          registered_name: "bulk_component.BulkFeatureExtractorComponentBuilder"
        }
        """, component_spec)
    with tf.Graph().as_default():
      comp = bulk_component.BulkFeatureExtractorComponentBuilder(
          self.master, component_spec)

      # Get embedding matrix variables.  The builder created them above, so
      # reuse=True looks up the existing variable by name.
      with tf.variable_scope(comp.name, reuse=True):
        fixed_embedding_matrix = tf.get_variable(
            network_units.fixed_embeddings_name(0))

      # Get output layer.
      comp.build_greedy_training(self.master_state, self.network_states)
      activations = self.network_states[comp.name].activations
      outputs = activations[comp.network.layers[0].name].bulk_tensor

      # Compute the gradient of the output layer w.r.t. the embedding matrix.
      # This should be well-defined for in the normal case.
      gradients = tf.gradients(outputs, fixed_embedding_matrix)
      self.assertEqual(len(gradients), 1)
      self.assertFalse(gradients[0] is None) 
Example 19
Project: DOTA_models   Author: ringringyi   File: bulk_component_test.py    License: Apache License 2.0 5 votes vote down vote up
def testBulkFeatureIdExtractorFailsOnEmbeddedFixedFeature(self):
    """The ID extractor rejects fixed features that have a real embedding."""
    spec = spec_pb2.ComponentSpec()
    text_format.Parse("""
        name: "test"
        network_unit {
          registered_name: "IdentityNetwork"
        }
        fixed_feature {
          name: "fixed" embedding_dim: 2 size: 1
        }
        """, spec)
    with tf.Graph().as_default():
      # Construction should fail because embedding_dim != -1.
      with self.assertRaises(ValueError):
        bulk_component.BulkFeatureIdExtractorComponentBuilder(
            self.master, spec)
Example 20
Project: DOTA_models   Author: ringringyi   File: spec_builder_test.py    License: Apache License 2.0 5 votes vote down vote up
def assertSpecEqual(self, expected_spec_text, spec):
    """Asserts that |spec| matches the textproto |expected_spec_text|."""
    expected = text_format.Parse(expected_spec_text, spec_pb2.ComponentSpec())
    self.assertProtoEquals(expected, spec)
Example 21
Project: DOTA_models   Author: ringringyi   File: model_trainer.py    License: Apache License 2.0 5 votes vote down vote up
def _read_text_proto(path, proto_type):
  """Reads a text-format instance of |proto_type| from the |path|."""
  with tf.gfile.FastGFile(path) as proto_file:
    contents = proto_file.read()
  return text_format.Parse(contents, proto_type())
Example 22
Project: recipes-py   Author: luci   File: definition.py    License: Apache License 2.0 5 votes vote down vote up
def parse_warning_definitions(file_path):
  """Parse the warning definition file at the given absolute path. The file
  content is expected to be in text proto format of warning.DefinitionCollection
  proto message. Duplicate warning names will be raised. Each warning definition
  will be validated. The conditions are documented in warning.proto.

  Args:
    * file_path (str) - Absolute path to warning definition file

  Returns a dict of warning name to warning.Definition proto message instance

  Raises:
    ValueError: If two definitions share the same warning name.
    IOError: If reading the file fails for any reason other than absence.
  """
  raw_text = ''
  try:
    with open(file_path, encoding='utf-8') as f:
      raw_text = f.read()
  except IOError as ex:
    if ex.errno == errno.ENOENT:
      # No warning defined
      return {}
    # Bare `raise` re-raises the active exception with its original traceback,
    # unlike `raise ex` which appends the current frame.
    raise

  from PB.recipe_engine.warning import DefinitionCollection
  definition_collection = textpb.Parse(raw_text, DefinitionCollection())
  definitions = list(definition_collection.warning)

  # Propagate collection-level monorail defaults into individual definitions.
  if definition_collection.HasField('monorail_bug_default'):
    _populate_monorail_bug_default_fields(
      definitions, definition_collection.monorail_bug_default)

  ret = {}
  for definition in definitions:
    if definition.name in ret:
      raise ValueError(
        'Found warning definitions with duplicate name: %s' % definition.name)
    _validate(definition)
    ret[definition.name] = definition
  return ret
Example 23
Project: vehicle_counting_tensorflow   Author: ahmetozlu   File: exporter_test.py    License: MIT License 5 votes vote down vote up
def _load_inference_graph(self, inference_graph_path, is_binary=True):
    """Loads a GraphDef from a binary or text-format proto file.

    Args:
      inference_graph_path: Path to the serialized GraphDef.
      is_binary: If True, the file holds a binary proto; otherwise text format.

    Returns:
      A tf.Graph with the loaded GraphDef imported.
    """
    od_graph = tf.Graph()
    with od_graph.as_default():
      od_graph_def = tf.GraphDef()
      # ParseFromString needs bytes; reading a binary proto through a
      # text-mode file handle fails under Python 3, so pick the mode here.
      mode = 'rb' if is_binary else 'r'
      with tf.gfile.GFile(inference_graph_path, mode) as fid:
        if is_binary:
          od_graph_def.ParseFromString(fid.read())
        else:
          text_format.Parse(fid.read(), od_graph_def)
        tf.import_graph_def(od_graph_def, name='')
    return od_graph
Example 24
Project: turkish-morphology   Author: google-research   File: pretty_print_test.py    License: Apache License 2.0 5 votes vote down vote up
def _read_analysis(basename):
  """Parses the testdata Analysis textproto identified by |basename|."""
  pbtxt_path = os.path.join(_TESTDATA_DIR, basename + ".pbtxt")
  analysis = analysis_pb2.Analysis()
  return text_format.Parse(_read_file(pbtxt_path), analysis)
Example 25
Project: turkish-morphology   Author: google-research   File: decompose_test.py    License: Apache License 2.0 5 votes vote down vote up
def _read_analysis(basename):
  """Parses the testdata Analysis textproto identified by |basename|."""
  pbtxt_path = os.path.join(_TESTDATA_DIR, basename + ".pbtxt")
  analysis = analysis_pb2.Analysis()
  return text_format.Parse(_read_file(pbtxt_path), analysis)
Example 26
Project: turkish-morphology   Author: google-research   File: validate_test.py    License: Apache License 2.0 5 votes vote down vote up
def _read_analysis(basename):
  """Parses the testdata Analysis textproto identified by |basename|."""
  pbtxt_path = os.path.join(_TESTDATA_DIR, basename + ".pbtxt")
  analysis = analysis_pb2.Analysis()
  return text_format.Parse(_read_file(pbtxt_path), analysis)
Example 27
Project: turkish-morphology   Author: google-research   File: generate_test.py    License: Apache License 2.0 5 votes vote down vote up
def _read_analysis(basename):
  """Parses the testdata Analysis textproto identified by |basename|."""
  pbtxt_path = os.path.join(_TESTDATA_DIR, basename + ".pbtxt")
  analysis = analysis_pb2.Analysis()
  return text_format.Parse(_read_file(pbtxt_path), analysis)
Example 28
Project: turkish-morphology   Author: google-research   File: parser_test.py    License: Apache License 2.0 5 votes vote down vote up
def test_success(self, entries, expected_pbtxt):
    """Parsing |entries| must yield the rule set given by |expected_pbtxt|."""
    expected = text_format.Parse(expected_pbtxt, rule_pb2.RewriteRuleSet())
    actual = parser.parse(entries)
    self.assertEqual(expected, actual)
Example 29
Project: ffn   Author: google   File: inference_flags.py    License: Apache License 2.0 5 votes vote down vote up
def options_from_flags():
  """Builds an InferenceOptions proto from the --inference_options flag."""
  options = inference_pb2.InferenceOptions()
  flag_value = FLAGS.inference_options
  # An empty flag leaves the proto at its defaults.
  if flag_value:
    text_format.Parse(flag_value, options)
  return options
Example 30
Project: ffn   Author: google   File: inference_flags.py    License: Apache License 2.0 5 votes vote down vote up
def request_from_flags():
  """Builds an InferenceRequest proto from the --inference_request flag."""
  request = inference_pb2.InferenceRequest()
  flag_value = FLAGS.inference_request
  # An empty flag leaves the proto at its defaults.
  if flag_value:
    text_format.Parse(flag_value, request)
  return request