Python tensorflow.python.training.evaluation._evaluate_once() Examples
The following are 4 code examples of tensorflow.python.training.evaluation._evaluate_once(), taken from open-source projects; the source file, project, and license are noted above each example.
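Before the examples, here is a minimal sketch of the call pattern they all share. It is not runnable verbatim: it assumes a trained checkpoint already exists at the hypothetical path, the metric is made up, and both _evaluate_once and _StopAfterNEvalsHook are internal TensorFlow APIs whose signatures may differ across releases.

import tensorflow.compat.v1 as tf
from tensorflow.python.training import evaluation

graph = tf.Graph()
with graph.as_default():
  # Each tf.metrics entry is a (value_tensor, update_op) pair.
  labels = tf.constant([1.0, 0.0, 1.0])
  predictions = tf.constant([0.8, 0.1, 0.6])
  mae_value, mae_update = tf.metrics.mean_absolute_error(labels, predictions)

  results = evaluation._evaluate_once(  # pylint: disable=protected-access
      checkpoint_path='/tmp/model/model.ckpt-1000',  # hypothetical checkpoint
      eval_ops=mae_update,           # run once per evaluation step
      final_ops={'mae': mae_value},  # fetched after the final step
      hooks=[evaluation._StopAfterNEvalsHook(num_evals=1)])  # internal hook
  print(results['mae'])

The four Estimator examples below follow exactly this shape: eval_ops is the grouped metric update op, final_ops is the dict of metric value tensors, and the returned dict holds the final metric values.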
Example #1
Source File: estimator.py From estimator with Apache License 2.0
def _evaluate_run(self, checkpoint_path, scaffold, update_op, eval_dict,
                  all_hooks, output_dir):
  """Run evaluation."""
  eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
      checkpoint_path=checkpoint_path,
      master=self._config.evaluation_master,
      scaffold=scaffold,
      eval_ops=update_op,
      final_ops=eval_dict,
      hooks=all_hooks,
      config=self._session_config)

  current_global_step = eval_results[tf.compat.v1.GraphKeys.GLOBAL_STEP]

  _write_dict_to_summary(
      output_dir=output_dir,
      dictionary=eval_results,
      current_global_step=current_global_step)

  if checkpoint_path:
    _write_checkpoint_path_to_summary(
        output_dir=output_dir,
        checkpoint_path=checkpoint_path,
        current_global_step=current_global_step)

  return eval_results
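All four examples hand the returned metrics dict to a private _write_dict_to_summary helper whose implementation is not shown on this page. The following is only a hedged sketch of what such a helper plausibly does, written against the public TF1 summary-writer API; the function name and details are assumptions, not the Estimator source.

import tensorflow.compat.v1 as tf

def write_dict_to_summary(output_dir, dictionary, current_global_step):
  """Hypothetical stand-in for _write_dict_to_summary (not the real source)."""
  writer = tf.summary.FileWriterCache.get(output_dir)
  summary = tf.Summary()
  for key, value in dictionary.items():
    if key == tf.GraphKeys.GLOBAL_STEP:
      continue  # the step is the x-axis in TensorBoard, not a plotted value
    summary.value.add(tag=key, simple_value=float(value))
  writer.add_summary(summary, current_global_step)
  writer.flush()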
Example #2
Source File: estimator.py From lambda-packs with MIT License
def _evaluate_model(self, input_fn, hooks=None, checkpoint_path=None, name=''):
  """Evaluates the model using the training.evaluation library."""
  # Check that model has been trained (if nothing has been set explicitly).
  if not checkpoint_path:
    latest_path = saver.latest_checkpoint(self._model_dir)
    if not latest_path:
      raise ValueError('Could not find trained model in model_dir: {}.'.format(
          self._model_dir))
    checkpoint_path = latest_path

  # Setup output directory.
  eval_dir = os.path.join(self._model_dir,
                          'eval' if not name else 'eval_' + name)

  with ops.Graph().as_default() as g:
    random_seed.set_random_seed(self._config.tf_random_seed)
    global_step_tensor = training.create_global_step(g)
    features, labels = input_fn()
    estimator_spec = self._call_model_fn(
        features, labels, model_fn_lib.ModeKeys.EVAL)

    if model_fn_lib.MetricKeys.LOSS in estimator_spec.eval_metric_ops:
      raise ValueError(
          'Metric with name "%s" is not allowed, because Estimator ' %
          (model_fn_lib.MetricKeys.LOSS) +
          'already defines a default metric with the same name.')
    estimator_spec.eval_metric_ops[
        model_fn_lib.MetricKeys.LOSS] = metrics_lib.mean(estimator_spec.loss)

    update_op, eval_dict = _extract_metric_update_ops(
        estimator_spec.eval_metric_ops)

    if ops.GraphKeys.GLOBAL_STEP in eval_dict:
      raise ValueError(
          'Metric with name `global_step` is not allowed, because Estimator '
          'already defines a default metric with the same name.')
    eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

    eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
        checkpoint_path=checkpoint_path,
        master=self._config.evaluation_master,
        scaffold=estimator_spec.scaffold,
        eval_ops=update_op,
        final_ops=eval_dict,
        hooks=hooks,
        config=self._session_config)

    _write_dict_to_summary(
        output_dir=eval_dir,
        dictionary=eval_results,
        current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

  return eval_results
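The _extract_metric_update_ops helper used above also isn't shown on this page. Assuming the standard eval_metric_ops layout of {name: (value_tensor, update_op)}, a minimal sketch of what it does is: split the dict into one grouped update op (passed as eval_ops) and a dict of value tensors (passed as final_ops). The name and body below are reconstructed for illustration, not copied from the Estimator source.

import tensorflow.compat.v1 as tf

def extract_metric_update_ops(eval_metric_ops):
  """Hypothetical mirror of the private _extract_metric_update_ops."""
  update_ops = []
  value_ops = {}
  for name, (value_op, update_op) in sorted(eval_metric_ops.items()):
    update_ops.append(update_op)  # run once per batch during evaluation
    value_ops[name] = value_op    # read once at the end via final_ops
  grouped_update_op = tf.group(*update_ops) if update_ops else None
  return grouped_update_op, value_ops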
Example #3
Source File: estimator_v2.py From boxnet with GNU General Public License v3.0
def _evaluate_model(self, input_fn, hooks=None, checkpoint_path=None, name=''):
  """Evaluates the model using the training.evaluation library."""
  # Check that model has been trained (if nothing has been set explicitly).
  if not checkpoint_path:
    latest_path = saver.latest_checkpoint(self._model_dir)
    if not latest_path:
      raise ValueError('Could not find trained model in model_dir: {}.'.format(
          self._model_dir))
    checkpoint_path = latest_path

  # Setup output directory.
  eval_dir = os.path.join(self._model_dir,
                          'eval' if not name else 'eval_' + name)

  with ops.Graph().as_default() as g:
    random_seed.set_random_seed(self._config.tf_random_seed)
    global_step_tensor = self._create_and_assert_global_step(g)
    features, labels = self._get_features_and_labels_from_input_fn(
        input_fn, model_fn_lib.ModeKeys.EVAL)
    estimator_spec = self._call_model_fn(
        features, labels, model_fn_lib.ModeKeys.EVAL, self.config)

    if model_fn_lib.LOSS_METRIC_KEY in estimator_spec.eval_metric_ops:
      raise ValueError(
          'Metric with name "%s" is not allowed, because Estimator ' %
          (model_fn_lib.LOSS_METRIC_KEY) +
          'already defines a default metric with the same name.')
    estimator_spec.eval_metric_ops[
        model_fn_lib.LOSS_METRIC_KEY] = metrics_lib.mean(estimator_spec.loss)

    update_op, eval_dict = _extract_metric_update_ops(
        estimator_spec.eval_metric_ops)

    if ops.GraphKeys.GLOBAL_STEP in eval_dict:
      raise ValueError(
          'Metric with name `global_step` is not allowed, because Estimator '
          'already defines a default metric with the same name.')
    eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

    all_hooks = list(hooks or [])
    all_hooks.extend(list(estimator_spec.evaluation_hooks or []))

    eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
        checkpoint_path=checkpoint_path,
        master=self._config.evaluation_master,
        scaffold=estimator_spec.scaffold,
        eval_ops=update_op,
        final_ops=eval_dict,
        hooks=all_hooks,
        config=self._session_config)

    _write_dict_to_summary(
        output_dir=eval_dir,
        dictionary=eval_results,
        current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

  return eval_results
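The checkpoint-discovery step at the top of each _evaluate_model variant can be reproduced with the public API. A small sketch, assuming a hypothetical model_dir and using the public tf.train.latest_checkpoint in place of the internal saver module:

import tensorflow.compat.v1 as tf

model_dir = '/tmp/my_model'  # hypothetical directory
checkpoint_path = tf.train.latest_checkpoint(model_dir)
if not checkpoint_path:
  raise ValueError(
      'Could not find trained model in model_dir: {}.'.format(model_dir))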
Example #4
Source File: estimator.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _evaluate_model(self, input_fn, hooks=None, checkpoint_path=None, name=''):
  """Evaluates the model using the training.evaluation library."""
  # Check that model has been trained (if nothing has been set explicitly).
  if not checkpoint_path:
    latest_path = saver.latest_checkpoint(self._model_dir)
    if not latest_path:
      raise ValueError('Could not find trained model in model_dir: {}.'.format(
          self._model_dir))
    checkpoint_path = latest_path

  # Setup output directory.
  eval_dir = os.path.join(self._model_dir,
                          'eval' if not name else 'eval_' + name)

  with ops.Graph().as_default() as g:
    random_seed.set_random_seed(self._config.tf_random_seed)
    global_step_tensor = self._create_and_assert_global_step(g)
    features, labels = self._get_features_and_labels_from_input_fn(
        input_fn, model_fn_lib.ModeKeys.EVAL)
    estimator_spec = self._call_model_fn(
        features, labels, model_fn_lib.ModeKeys.EVAL, self.config)

    if model_fn_lib.LOSS_METRIC_KEY in estimator_spec.eval_metric_ops:
      raise ValueError(
          'Metric with name "%s" is not allowed, because Estimator ' %
          (model_fn_lib.LOSS_METRIC_KEY) +
          'already defines a default metric with the same name.')
    estimator_spec.eval_metric_ops[
        model_fn_lib.LOSS_METRIC_KEY] = metrics_lib.mean(estimator_spec.loss)

    update_op, eval_dict = _extract_metric_update_ops(
        estimator_spec.eval_metric_ops)

    if ops.GraphKeys.GLOBAL_STEP in eval_dict:
      raise ValueError(
          'Metric with name `global_step` is not allowed, because Estimator '
          'already defines a default metric with the same name.')
    eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

    all_hooks = list(hooks or [])
    all_hooks.extend(list(estimator_spec.evaluation_hooks or []))

    eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
        checkpoint_path=checkpoint_path,
        master=self._config.evaluation_master,
        scaffold=estimator_spec.scaffold,
        eval_ops=update_op,
        final_ops=eval_dict,
        hooks=all_hooks,
        config=self._session_config)

    _write_dict_to_summary(
        output_dir=eval_dir,
        dictionary=eval_results,
        current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

  return eval_results
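One small detail shared by the three _evaluate_model variants is the output-directory convention: a named evaluation writes to its own eval_<name> subdirectory, so summaries from different evaluation runs (for example, training vs. validation data) don't overwrite each other. A self-contained illustration of that naming rule, with hypothetical paths:

import os

def eval_directory(model_dir, name=''):
  # Mirrors the eval_dir expression used in the examples above.
  return os.path.join(model_dir, 'eval' if not name else 'eval_' + name)

print(eval_directory('/tmp/my_model'))           # -> /tmp/my_model/eval
print(eval_directory('/tmp/my_model', 'valid'))  # -> /tmp/my_model/eval_valid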