Python tensorflow.contrib.predictor.from_saved_model() Examples
The following are 27 code examples of tensorflow.contrib.predictor.from_saved_model(), drawn from open-source projects. The source file, originating project, and license are noted above each example. You may also want to check out the other available functions and classes of the tensorflow.contrib.predictor module.
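Before the examples, a brief orientation: from_saved_model() returns a Predictor object that wraps its own tf.Session; calling it with a dict keyed by the serving signature's input names returns a dict keyed by the output names. Below is a minimal sketch of that pattern, in which the export path and the 'inputs'/'scores' keys are placeholders for whatever your own SavedModel's signature defines.

import numpy as np
from tensorflow.contrib import predictor

# Path to a SavedModel export directory (hypothetical; usually a timestamped
# subdirectory created by estimator.export_saved_model).
export_dir = '/tmp/my_model/export/1565432345'

# Build a predictor that owns its own session and graph.
predict_fn = predictor.from_saved_model(export_dir)

# The feed/fetch tensor names come from the serving signature of the export.
print(predict_fn.feed_tensors.keys())
print(predict_fn.fetch_tensors.keys())

# Calling the predictor runs a single session.run() over the serving signature.
outputs = predict_fn({'inputs': np.zeros((1, 28, 28), dtype=np.float32)})
print(outputs['scores'])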
Example #1
Source File: estimator.py From spleeter with MIT License
def to_predictor(estimator, directory=DEFAULT_EXPORT_DIRECTORY):
    """ Exports given estimator as predictor into the given directory
    and returns associated tf.predictor instance.

    :param estimator: Estimator to export.
    :param directory: (Optional) path to write exported model into.
    """
    input_provider = InputProviderFactory.get(estimator.params)

    def receiver():
        features = input_provider.get_input_dict_placeholders()
        return tf.estimator.export.ServingInputReceiver(features, features)

    estimator.export_saved_model(directory, receiver)
    versions = [
        model for model in Path(directory).iterdir()
        if model.is_dir() and 'temp' not in str(model)]
    latest = str(sorted(versions)[-1])
    return predictor.from_saved_model(latest)
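The last few lines of this helper show a pattern that recurs in several examples below: estimator.export_saved_model() writes each export into a fresh timestamped subdirectory, so the caller has to pick the most recent one before handing it to from_saved_model(). A standalone sketch of that selection step follows; the export base directory is hypothetical.

from pathlib import Path
from tensorflow.contrib import predictor


def latest_predictor(export_base='/tmp/exports'):
    """Load a predictor from the newest timestamped export subdirectory."""
    versions = [p for p in Path(export_base).iterdir()
                if p.is_dir() and 'temp' not in str(p)]
    # Timestamped directory names normally sort chronologically as strings.
    return predictor.from_saved_model(str(sorted(versions)[-1]))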
Example #2
Source File: model.py From MAX-Question-Answering with Apache License 2.0
def __init__(self, path=DEFAULT_MODEL_PATH):
    logger.info('Loading model from: {}...'.format(path))

    # Parameters for inference (need to be the same values the model was trained with)
    self.max_seq_length = 512
    self.doc_stride = 128
    self.max_query_length = 64
    self.max_answer_length = 30

    # Initialize the tokenizer
    self.tokenizer = FullTokenizer(vocab_file='assets/vocab.txt', do_lower_case=True)

    # Load the exported SavedModel from the given path
    self.predict_fn = predictor.from_saved_model(path)
    logger.info('Loaded model')
Example #3
Source File: nq_export_scorer.py From language with Apache License 2.0
def _annotate_long_answer(predict_fn, question, contexts):
    """Applies the model to the (question, contexts) and returns long answer.

    Args:
      predict_fn: Predictor from tf.contrib.predictor.from_saved_model.
      question: string.
      contexts: List of strings.

    Returns:
      long_answer_idx: integer.
      long_answer_score: float.
    """
    # The inputs are not tokenized here because there are multiple contexts.
    inputs = {"question": question, "context": contexts}
    outputs = predict_fn(inputs)
    long_answer_idx = outputs["idx"]
    long_answer_score = outputs["score"]
    return long_answer_idx, float(long_answer_score)
Example #4
Source File: nq_export_scorer.py From language with Apache License 2.0
def _annotate_short_answer(predict_fn, question_tokens, context_tokens):
    """Applies the model to the (question, context) tokens and returns the short answer span.

    Args:
      predict_fn: Predictor from tf.contrib.predictor.from_saved_model.
      question_tokens: List of strings.
      context_tokens: List of strings.

    Returns:
      start_idx: integer.
      end_idx: integer.
      short_answer_score: float.
    """
    # The inputs are tokenized, unlike in the long answer case, since the goal
    # is to pick out a particular span in a single context.
    inputs = {"question": question_tokens, "context": context_tokens}
    outputs = predict_fn(inputs)
    start_idx = outputs["start_idx"]
    end_idx = outputs["end_idx"]
    short_answer_score = outputs["score"]
    return start_idx, end_idx, float(short_answer_score)
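A hypothetical driver for the long-answer helper above might look like the following sketch; the export path, the example question, and the contexts are illustrative and not taken from the original scoring script.

from tensorflow.contrib import predictor

# Hypothetical export path; the real script receives it via a command-line flag.
long_answer_fn = predictor.from_saved_model('/tmp/nq_long_answer_export')

question = 'who wrote the declaration of independence'
contexts = ['Thomas Jefferson was the principal author of the Declaration.',
            'The document was adopted on July 4, 1776.']
idx, score = _annotate_long_answer(long_answer_fn, question, contexts)
print('best context: {} (score={:.3f})'.format(idx, score))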
Example #5
Source File: bert_predict.py From FoolNLTK with Apache License 2.0
def __init__(self, export_model_path, vocab_file):
    self.export_model_path = export_model_path
    self.vocab_file = vocab_file
    self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=True)
    self.predict_fn = predictor.from_saved_model(self.export_model_path)
    self.label_map = pickle.load(open(LABEL_FILE, 'rb'))
    self.id_to_label = {v: k for k, v in self.label_map.items()}
Example #6
Source File: pipeline_invoke_python.py From models with Apache License 2.0
def _initialize_upon_import():
    """Initialize / Restore Model Object."""
    saved_model_path = './pipeline_tfserving/0'
    return predictor.from_saved_model(saved_model_path)


# This is called unconditionally at *module import time*...
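The trailing comment hints at the surrounding pattern: the loader runs once when the module is imported, and the resulting predictor is cached in a module-level variable that request handlers then reuse. A minimal sketch of that idea follows; the _model and invoke names are illustrative, not necessarily the ones used in the project.

# Loaded once per process, at module import time.
_model = _initialize_upon_import()


def invoke(request):
    """Run one prediction; `request` is a dict mapping input names to numpy arrays."""
    return _model(request)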
Example #7
Source File: pipeline_invoke_python_all_tf.py From models with Apache License 2.0
def _initialize_upon_import():
    try:
        saved_model_path = './pipeline_tfserving/0'
        return predictor.from_saved_model(saved_model_path)
    except Exception:
        _logger.error('pipeline_invoke_python._initialize_upon_import.Exception:',
                      exc_info=True)
        return None
Example #8
Source File: pipeline_invoke_python.py From models with Apache License 2.0
def _initialize_upon_import():
    try:
        saved_model_path = './pipeline_tfserving/0'
        return predictor.from_saved_model(saved_model_path)
    except Exception:
        _logger.error('pipeline_invoke_python._initialize_upon_import.Exception:',
                      exc_info=True)
        return None
Example #9
Source File: pipeline_invoke_python_normal_transformer.py From models with Apache License 2.0
def _initialize_upon_import():
    try:
        saved_model_path = './pipeline_tfserving/0'
        return predictor.from_saved_model(saved_model_path)
    except Exception:
        _logger.error('pipeline_invoke_python._initialize_upon_import.Exception:',
                      exc_info=True)
        return None
Example #10
Source File: pipeline_invoke_python.py From models with Apache License 2.0
def _initialize_upon_import():
    """Initialize / Restore Model Object."""
    saved_model_path = './pipeline_tfserving/0'
    return predictor.from_saved_model(saved_model_path)


# This is called unconditionally at *module import time*...
Example #11
Source File: pipeline_invoke_python.py From models with Apache License 2.0
def _initialize_upon_import():
    """Initialize / Restore Model Object."""
    saved_model_path = './pipeline_tfserving/0'
    return predictor.from_saved_model(saved_model_path)


# This is called unconditionally at *module import time*...
Example #12
Source File: pipeline_invoke_python.py From models with Apache License 2.0
def _initialize_upon_import():
    """Initialize / Restore Model Object."""
    saved_model_path = './pipeline_tfserving/0'
    return predictor.from_saved_model(saved_model_path)


# This is called unconditionally at *module import time*...
Example #13
Source File: ngram_http.py From seq2annotation with Apache License 2.0
def load_predict_fn(export_dir):
    global predict_fn
    predict_fn = predictor.from_saved_model(export_dir)
    return predict_fn
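Here the predictor is stashed in a module-level global so that HTTP handlers can reuse it across requests. A minimal sketch of that flow follows; the export path and the 'words'/'words_len' feed keys are placeholders, since the real feed names depend on the exported serving signature.

# Called once at server startup (export path is illustrative).
load_predict_fn('/tmp/seq2annotation_export')


def handle_request(tokens):
    # Reuses the module-level predict_fn installed by load_predict_fn().
    return predict_fn({'words': [tokens], 'words_len': [len(tokens)]})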
Example #14
Source File: tensorflow_inference.py From seq2annotation with Apache License 2.0
def __init__(self, model_path):
    # load model
    self.model_dir = model_path
    self.predict_fn = predictor.from_saved_model(model_path)
Example #15
Source File: lookup_http.py From seq2annotation with Apache License 2.0
def load_predict_fn(export_dir):
    global predict_fn
    predict_fn = predictor.from_saved_model(export_dir)
    return predict_fn
Example #16
Source File: tensorflow_inference.py From seq2annotation with Apache License 2.0
def instance_predict_fn(self):
    return predictor.from_saved_model(self.model_path)
Example #17
Source File: lm_predict.py From BERT_TF with Apache License 2.0
def __init__(self, pb_path):
    subdirs = [x for x in Path(pb_path).iterdir()
               if x.is_dir() and 'temp' not in str(x)]
    latest_model = str(sorted(subdirs)[-1])
    self.predict_fn = predictor.from_saved_model(latest_model)
Example #18
Source File: run_predict.py From BERT_TF with Apache License 2.0
def __init__(self, pb_path):
    subdirs = [x for x in Path(pb_path).iterdir()
               if x.is_dir() and 'temp' not in str(x)]
    latest = str(sorted(subdirs)[-1])
    self.predict_fn = predictor.from_saved_model(latest)
    self.vocab_idx, self.idx_vocab = vocab_idx, idx_vocab
Example #19
Source File: default_export_generator_test.py From tensor2robot with Apache License 2.0
def test_create_serving_input_receiver_numpy(self):
    (model_dir, mock_t2r_model,
     prediction_ref) = self._train_and_eval_reference_model('numpy')
    exporter = default_export_generator.DefaultExportGenerator()
    exporter.set_specification_from_model(mock_t2r_model)

    # Export trained serving estimator.
    estimator_exporter = tf.estimator.Estimator(
        model_fn=mock_t2r_model.model_fn,
        config=tf.estimator.RunConfig(model_dir=model_dir))

    serving_input_receiver_fn = (
        exporter.create_serving_input_receiver_numpy_fn())
    exported_savedmodel_path = estimator_exporter.export_saved_model(
        export_dir_base=model_dir,
        serving_input_receiver_fn=serving_input_receiver_fn,
        checkpoint_path=tf.train.latest_checkpoint(model_dir))

    # Load trained and exported serving estimator, run prediction and assert
    # it is the same as before exporting.
    feed_predictor_fn = contrib_predictor.from_saved_model(
        exported_savedmodel_path)
    mock_input_generator = mocks.MockInputGenerator(batch_size=BATCH_SIZE)
    features, labels = mock_input_generator.create_numpy_data()
    for pos, value in enumerate(prediction_ref):
        actual = feed_predictor_fn(
            {'x': features[pos, :].reshape(1, -1)})['logit'].flatten()
        predicted = value['logit'].flatten()
        np.testing.assert_almost_equal(
            actual=actual, desired=predicted, decimal=4)
        if labels[pos] > 0:
            self.assertGreater(predicted[0], 0)
        else:
            self.assertLess(predicted[0], 0)
Example #20
Source File: embedding_bert_intent_estimator_classifier.py From rasa_nlu_gq with Apache License 2.0
def load(cls,
         meta,
         model_dir=None,  # type: Text
         model_metadata=None,  # type: Metadata
         cached_component=None,  # type: Optional[Component]
         **kwargs  # type: **Any
         ):
    # type: (...) -> EmbeddingBertIntentEstimatorClassifier

    config_proto = cls.get_config_proto(meta)
    print("bert model loaded")

    if model_dir and meta.get("file"):
        file_name = meta.get("file")
        # Use tensorflow.contrib.predictor to load the model file, which may
        # give roughly a 10x speed-up at predict time.
        predict = Pred.from_saved_model(
            export_dir=os.path.join(model_dir, file_name),
            config=config_proto)

        with io.open(os.path.join(
                model_dir, file_name + "_inv_intent_dict.pkl"), 'rb') as f:
            inv_intent_dict = pickle.load(f)
        with io.open(os.path.join(
                model_dir, file_name + "_encoded_all_intents.pkl"), 'rb') as f:
            encoded_all_intents = pickle.load(f)

        return EmbeddingBertIntentEstimatorClassifier(
            component_config=meta,
            inv_intent_dict=inv_intent_dict,
            encoded_all_intents=encoded_all_intents,
            predictor=predict
        )
    else:
        logger.warning("Failed to load nlu model. Maybe path {} "
                       "doesn't exist"
                       "".format(os.path.abspath(model_dir)))
        return EmbeddingBertIntentEstimatorClassifier(component_config=meta)
Example #21
Source File: test.py From nlp_research with MIT License
def __init__(self, conf, **kwargs):
    self.conf = conf
    for attr in conf:
        setattr(self, attr, conf[attr])
    self.zdy = {}
    # init embedding
    self.init_embedding()
    # load train data
    csv = pd.read_csv(self.ori_path, header=0, sep="\t", error_bad_lines=False)
    if 'text' in csv.keys() and 'target' in csv.keys():
        # format: text \t target
        # for this format, the size for each class should be larger than 2
        self.text_list = list(csv['text'])
        self.label_list = list(csv['target'])
    elif 'text_a' in csv.keys() and 'text_b' in csv.keys() and 'target' in csv.keys():
        # format: text_a \t text_b \t target
        # for this format, the target value can only be chosen from 0 or 1
        self.text_a_list = list(csv['text_a'])
        self.text_b_list = list(csv['text_b'])
        self.text_list = self.text_a_list + self.text_b_list
        self.label_list = list(csv['target'])
    subdirs = [os.path.join(self.export_dir_path, x)
               for x in os.listdir(self.export_dir_path) if 'temp' not in x]
    latest = str(sorted(subdirs)[-1])
    self.predict_fn = predictor.from_saved_model(latest)
Example #22
Source File: example.py From BERT with Apache License 2.0
def load_model(self):
    self.graph = tf.Graph()
    with self.graph.as_default():
        self.predict_fn = predictor.from_saved_model(self.config['model'])
Example #23
Source File: default_export_generator_test.py From tensor2robot with Apache License 2.0
def test_create_serving_input_receiver_tf_example(self, multi_dataset):
    (model_dir, mock_t2r_model,
     prediction_ref) = self._train_and_eval_reference_model(
         'tf_example', multi_dataset=multi_dataset)

    # Now we can actually export our serving estimator.
    estimator_exporter = tf.estimator.Estimator(
        model_fn=mock_t2r_model.model_fn,
        config=tf.estimator.RunConfig(model_dir=model_dir))

    exporter = default_export_generator.DefaultExportGenerator()
    exporter.set_specification_from_model(mock_t2r_model)
    serving_input_receiver_fn = (
        exporter.create_serving_input_receiver_tf_example_fn())
    exported_savedmodel_path = estimator_exporter.export_saved_model(
        export_dir_base=model_dir,
        serving_input_receiver_fn=serving_input_receiver_fn,
        checkpoint_path=tf.train.latest_checkpoint(model_dir))

    # Now we can load our exported estimator graph, there are no dependencies
    # on the model_fn or preprocessor anymore.
    feed_predictor_fn = contrib_predictor.from_saved_model(
        exported_savedmodel_path)
    mock_input_generator = mocks.MockInputGenerator(batch_size=BATCH_SIZE)
    features, labels = mock_input_generator.create_numpy_data()
    for pos, value in enumerate(prediction_ref):
        # We have to create our serialized tf.Example proto.
        example = tf.train.Example()
        example.features.feature['measured_position'].float_list.value.extend(
            features[pos])
        serialized_example = np.array(example.SerializeToString()).reshape(1,)
        if multi_dataset:
            feed_dict = {
                'input_example_dataset1': serialized_example,
                'input_example_dataset2': serialized_example
            }
        else:
            feed_dict = {'input_example_tensor': serialized_example}
        actual = feed_predictor_fn(feed_dict)['logit'].flatten()
        predicted = value['logit'].flatten()
        np.testing.assert_almost_equal(
            actual=actual, desired=predicted, decimal=4)
        if labels[pos] > 0:
            self.assertGreater(predicted[0], 0)
        else:
            self.assertLess(predicted[0], 0)
Example #24
Source File: maml_model_test.py From tensor2robot with Apache License 2.0
def test_maml_model(self, num_inner_loop_steps):
    model_dir = os.path.join(FLAGS.test_tmpdir, str(num_inner_loop_steps))
    gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps',
                       _MAX_STEPS // 2)
    if tf.io.gfile.exists(model_dir):
        tf.io.gfile.rmtree(model_dir)

    mock_base_model = mocks.MockT2RModel(
        preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
    mock_tf_model = MockMAMLModel(
        base_model=mock_base_model, num_inner_loop_steps=num_inner_loop_steps)

    # Note, we by choice use the same amount of conditioning samples for
    # inference as well during train and change the model for eval/inference
    # to only produce one output sample.
    mock_input_generator_train = MockMetaInputGenerator(
        batch_size=_BATCH_SIZE,
        num_condition_samples_per_task=_NUM_CONDITION_SAMPLES_PER_TASK,
        num_inference_samples_per_task=_NUM_CONDITION_SAMPLES_PER_TASK)
    mock_input_generator_train.set_specification_from_model(
        mock_tf_model, mode=tf.estimator.ModeKeys.TRAIN)
    mock_input_generator_eval = MockMetaInputGenerator(
        batch_size=_BATCH_SIZE,
        num_condition_samples_per_task=_NUM_CONDITION_SAMPLES_PER_TASK,
        num_inference_samples_per_task=1)
    mock_input_generator_eval.set_specification_from_model(
        mock_tf_model, mode=tf.estimator.ModeKeys.TRAIN)
    mock_export_generator = MockMetaExportGenerator(
        num_condition_samples_per_task=_NUM_CONDITION_SAMPLES_PER_TASK,
        num_inference_samples_per_task=1)

    train_eval.train_eval_model(
        t2r_model=mock_tf_model,
        input_generator_train=mock_input_generator_train,
        input_generator_eval=mock_input_generator_eval,
        max_train_steps=_MAX_STEPS,
        model_dir=model_dir,
        export_generator=mock_export_generator,
        create_exporters_fn=train_eval.create_default_exporters)

    export_dir = os.path.join(model_dir, 'export')
    # (best|latest)_exporter_(numpy|tf_example), servo
    self.assertLen(tf.io.gfile.glob(os.path.join(export_dir, '*')), 5)

    numpy_predictor_fn = contrib_predictor.from_saved_model(
        tf.io.gfile.glob(os.path.join(export_dir, 'best_exporter_numpy', '*'))[-1])
    feed_tensor_keys = sorted(numpy_predictor_fn.feed_tensors.keys())
    self.assertCountEqual(
        ['condition/features/x', 'condition/labels/y', 'inference/features/x'],
        feed_tensor_keys,
    )

    tf_example_predictor_fn = contrib_predictor.from_saved_model(
        tf.io.gfile.glob(
            os.path.join(export_dir, 'best_exporter_tf_example', '*'))[-1])
    self.assertCountEqual(['input_example_tensor'],
                          list(tf_example_predictor_fn.feed_tensors.keys()))
Example #25
Source File: deploy.py From DLTK with Apache License 2.0
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(
        args.csv,
        dtype=object,
        keep_default_na=False,
        na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [os.path.join(args.model_path, o) for o in sorted(
        os.listdir(args.model_path)) if os.path.isdir(
        os.path.join(args.model_path, o)) and o.isdigit()][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Iterate through the files, predict on the full volumes and compute the
    # mean absolute error
    mae = []
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = output['features']['x']
        lbl = output['labels']['y']
        test_id = output['img_id']

        # We know that the training input shape of [64, 96, 96] will work with
        # our model strides, so we collect several crops of the test image and
        # average the predictions. Alternatively, we could pad or crop the input
        # to any shape that is compatible with the resolution scales of the
        # model:
        num_crop_predictions = 4
        crop_batch = extract_random_example_array(
            image_list=img,
            example_size=[64, 96, 96],
            n_examples=num_crop_predictions)

        y_ = my_predictor.session.run(
            fetches=my_predictor._fetch_tensors['logits'],
            feed_dict={my_predictor._feed_tensors['x']: crop_batch})

        # Average the predictions on the cropped test inputs:
        y_ = np.mean(y_)

        # Calculate the absolute error for this subject
        mae.append(np.abs(y_ - lbl))

        # Print outputs
        print('id={}; pred={:0.2f} yrs; true={:0.2f} yrs; run time={:0.2f} s; '
              ''.format(test_id, y_, lbl[0], time.time() - t0))
    print('mean absolute err={:0.3f} yrs'.format(np.mean(mae)))
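As an aside, this example drives my_predictor.session directly (through the private _feed_tensors/_fetch_tensors maps) so it can pick out the 'logits' head explicitly. Assuming the same serving-signature names, an equivalent formulation through the predictor's public call interface would be the short sketch below.

# Equivalent to the session.run() call above, assuming the serving signature
# exposes an input named 'x' and an output named 'logits':
outputs = my_predictor({'x': crop_batch})
y_ = np.mean(outputs['logits'])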
Example #26
Source File: deploy.py From DLTK with Apache License 2.0
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(
        args.csv,
        dtype=object,
        keep_default_na=False,
        na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = \
        [os.path.join(args.model_path, o) for o in sorted(os.listdir(args.model_path))
         if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Iterate through the files, predict on the full volumes and compute the
    # classification accuracy
    accuracy = []
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = output['features']['x']
        lbl = output['labels']['y']
        test_id = output['img_id']

        # We know that the training input shape of [64, 96, 96] will work with
        # our model strides, so we collect several crops of the test image and
        # average the predictions. Alternatively, we could pad or crop the input
        # to any shape that is compatible with the resolution scales of the
        # model:
        num_crop_predictions = 4
        crop_batch = extract_random_example_array(
            image_list=img,
            example_size=[64, 96, 96],
            n_examples=num_crop_predictions)

        y_ = my_predictor.session.run(
            fetches=my_predictor._fetch_tensors['y_prob'],
            feed_dict={my_predictor._feed_tensors['x']: crop_batch})

        # Average the predictions on the cropped test inputs:
        y_ = np.mean(y_, axis=0)
        predicted_class = np.argmax(y_)

        # Calculate the accuracy for this subject
        accuracy.append(predicted_class == lbl)

        # Print outputs
        print('id={}; pred={}; true={}; run time={:0.2f} s; '
              ''.format(test_id, predicted_class, lbl[0], time.time() - t0))
    print('accuracy={}'.format(np.mean(accuracy)))
Example #27
Source File: deploy.py From DLTK with Apache License 2.0
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(
        args.csv,
        dtype=object,
        keep_default_na=False,
        na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [os.path.join(args.model_path, o)
                  for o in os.listdir(args.model_path)
                  if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = np.nanmean(metrics.dice(pred, lbl, num_classes)[1:])

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))
        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])
        sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc, time.time() - t0, output_fn))