Python tensorflow.contrib.metrics.streaming_mean() Examples

The following are 30 code examples of tensorflow.contrib.metrics.streaming_mean(), collected from open-source projects. Each example notes its source file, the project it comes from, and that project's license, so you can trace it back to the original code. You may also want to look at the other functions and classes available in the tensorflow.contrib.metrics module.
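Before the project examples, here is a minimal sketch of the API they all rely on (TensorFlow 1.x; the placeholder and batch values below are illustrative only): streaming_mean(values, weights=None) returns a (mean, update_op) pair backed by local variables, so running update_op once per batch accumulates a running mean that you read from the mean tensor.

import tensorflow as tf
from tensorflow.contrib import metrics as metrics_lib

values = tf.placeholder(tf.float32, shape=[None])
mean, update_op = metrics_lib.streaming_mean(values)

with tf.Session() as sess:
  # The metric accumulators are local variables and must be initialized.
  sess.run(tf.local_variables_initializer())
  for batch in ([1.0, 2.0], [3.0, 4.0]):
    sess.run(update_op, feed_dict={values: batch})
  print(sess.run(mean))  # 2.5, the mean over every value seen so far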
Example #1
Source File: head.py    From lambda-packs with MIT License
def _metrics(self, eval_loss, predictions, labels, weights):
    """Returns a dict of metrics keyed by name."""
    del predictions, labels, weights  # Unused by this head.
    with ops.name_scope("metrics", values=[eval_loss]):
      return {
          _summary_key(self.head_name, mkey.LOSS):
              metrics_lib.streaming_mean(eval_loss)} 
Example #2
Source File: head.py    From keras-lambda with MIT License
def _predictions_streaming_mean(predictions,
                                labels,
                                weights=None,
                                class_id=None):
  del labels
  if class_id is not None:
    predictions = predictions[:, class_id]
  return metrics_lib.streaming_mean(predictions, weights=weights)


# TODO(ptucker): Add support for SparseTensor labels. 
Example #3
Source File: head.py    From keras-lambda with MIT License
def _indicator_labels_streaming_mean(predictions,
                                     labels,
                                     weights=None,
                                     class_id=None):
  del predictions
  if class_id is not None:
    labels = labels[:, class_id]
  return metrics_lib.streaming_mean(labels, weights=weights) 
Example #4
Source File: head.py    From keras-lambda with MIT License
def _weighted_average_loss_metric_spec(loss_fn, pred_key, label_key,
                                       weight_key):

  def _streaming_weighted_average_loss(predictions, labels, weights=None):
    loss_unweighted = loss_fn(predictions, labels)
    if weights is not None:
      weights = math_ops.to_float(weights)
    _, weighted_average_loss = _loss(loss_unweighted, weights, name="eval_loss")
    return metrics_lib.streaming_mean(weighted_average_loss)

  return metric_spec.MetricSpec(_streaming_weighted_average_loss, pred_key,
                                label_key, weight_key) 
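As a point of reference for the weighted-loss pattern above, here is a hedged, self-contained sketch (the loss and weight constants are made up): passing weights directly to streaming_mean produces a running weighted mean, i.e. sum(w * v) / sum(w) over all updates so far.

import tensorflow as tf
from tensorflow.contrib import metrics as metrics_lib

losses = tf.constant([1.0, 3.0])
weights = tf.constant([1.0, 3.0])
weighted_mean, update_op = metrics_lib.streaming_mean(losses, weights=weights)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(weighted_mean))  # (1*1 + 3*3) / (1 + 3) = 2.5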
Example #5
Source File: estimator.py    From keras-lambda with MIT License
def _get_eval_ops(self, features, labels, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      `ModelFnOps` object.

    Raises:
      ValueError: if `metrics` don't match `labels`.
    """
    model_fn_ops = self._call_model_fn(
        features, labels, model_fn_lib.ModeKeys.EVAL)

    # Custom metrics should overwrite defaults.
    if metrics:
      model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
          metrics, features, labels, model_fn_ops.predictions))

    if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
      model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
          metrics_lib.streaming_mean(model_fn_ops.loss))
    return model_fn_ops 
Example #6
Source File: metric_specs.py    From seq2seq with Apache License 2.0
def create_metric_ops(self, _inputs, labels, predictions):
    """Creates the metric op"""
    loss_mask = tf.sequence_mask(
        lengths=tf.to_int32(labels["target_len"] - 1),
        maxlen=tf.to_int32(tf.shape(predictions["losses"])[1]))
    return metrics.streaming_mean(predictions["losses"], loss_mask) 
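A hedged, self-contained sketch of the masking pattern above (the loss values and sequence lengths are made up): a sequence mask passed as the weights argument makes streaming_mean average only over real, non-padded timesteps.

import tensorflow as tf
from tensorflow.contrib import metrics

# Per-timestep losses for two sequences of true length 2 and 1;
# the 9.9 entries stand in for padding and are masked out.
losses = tf.constant([[0.5, 0.7, 9.9],
                      [0.2, 9.9, 9.9]])
mask = tf.sequence_mask(lengths=[2, 1], maxlen=3, dtype=tf.float32)
mean_loss, update_op = metrics.streaming_mean(losses, mask)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(mean_loss))  # (0.5 + 0.7 + 0.2) / 3 ≈ 0.467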
Example #7
Source File: metric_specs.py    From conv_seq2seq with Apache License 2.0
def create_metric_ops(self, _inputs, labels, predictions):
    """Creates the metric op"""
    loss_mask = tf.sequence_mask(
        lengths=tf.to_int32(labels["target_len"] - 1),
        maxlen=tf.to_int32(tf.shape(predictions["losses"])[1]))
    return metrics.streaming_mean(predictions["losses"], loss_mask) 
Example #8
Source File: metric_specs.py    From reaction_prediction_seq2seq with Apache License 2.0
def create_metric_ops(self, _inputs, labels, predictions):
    """Creates the metric op"""
    loss_mask = tf.sequence_mask(
        lengths=tf.to_int32(labels["target_len"] - 1),
        maxlen=tf.to_int32(tf.shape(predictions["losses"])[1]))
    return metrics.streaming_mean(predictions["losses"], loss_mask) 
Example #9
Source File: metric_specs.py    From natural-language-summary-generation-from-structured-data with MIT License
def create_metric_ops(self, _inputs, labels, predictions):
    """Creates the metric op"""
    loss_mask = tf.sequence_mask(
        lengths=tf.to_int32(labels["target_len"] - 1),
        maxlen=tf.to_int32(tf.shape(predictions["losses"])[1]))
    return metrics.streaming_mean(predictions["losses"], loss_mask) 
Example #10
Source File: target_column.py    From deep_image_model with Apache License 2.0
def _predictions_streaming_mean(predictions, unused_labels, weights=None):
  return metrics_lib.streaming_mean(predictions, weights=weights) 
Example #11
Source File: target_column.py    From deep_image_model with Apache License 2.0
def _labels_streaming_mean(unused_predictions, labels, weights=None):
  return metrics_lib.streaming_mean(labels, weights=weights) 
Example #12
Source File: target_column.py    From deep_image_model with Apache License 2.0
def get_eval_ops(self, features, logits, labels, metrics=None):
    loss = self.loss(logits, labels, features)
    result = {"loss": metrics_lib.streaming_mean(loss)}
    if metrics:
      predictions = self.logits_to_predictions(logits, proba=False)
      result.update(_run_metrics(predictions, labels, metrics,
                                 self.get_weight_tensor(features)))
    return result 
Example #13
Source File: composable_model_test.py    From deep_image_model with Apache License 2.0
def _get_eval_ops(self, features, labels, metrics=None):
    logits = self._model.build_model(
        features, self._feature_columns, is_training=False)
    model_fn_ops = self._head.head_ops(features, labels,
                                       tf.contrib.learn.ModeKeys.TRAIN,
                                       _noop_training_fn, logits=logits)
    return {'loss': metrics_lib.streaming_mean(model_fn_ops.loss)} 
Example #14
Source File: logistic_regressor.py    From deep_image_model with Apache License 2.0
def _labels_streaming_mean(unused_predictions, labels):
  return metrics_lib.streaming_mean(labels) 
Example #15
Source File: head.py    From deep_image_model with Apache License 2.0
def _predictions_streaming_mean(predictions, unused_labels, weights=None):
  return metrics_lib.streaming_mean(predictions, weights=weights) 
Example #16
Source File: head.py    From deep_image_model with Apache License 2.0
def _labels_streaming_mean(unused_predictions, labels, weights=None):
  return metrics_lib.streaming_mean(labels, weights=weights) 
Example #17
Source File: estimator.py    From deep_image_model with Apache License 2.0
def _get_eval_ops(self, features, labels, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      metrics: `dict` of `Tensor` objects.

    Raises:
      ValueError: if `metrics` don't match `labels`.
    """
    model_fn_ops = self._call_model_fn(features, labels, ModeKeys.EVAL)

    all_metrics = model_fn_ops.default_metrics
    # Custom metrics should overwrite defaults.
    if metrics:
      all_metrics.update(metrics)

    result = _make_metrics_ops(all_metrics, features, labels,
                               model_fn_ops.predictions)
    if metric_key.MetricKey.LOSS not in result:
      result[metric_key.MetricKey.LOSS] = metrics_lib.streaming_mean(
          model_fn_ops.loss)
    return result 
Example #18
Source File: estimator.py    From lambda-packs with MIT License
def _get_eval_ops(self, features, labels, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      `ModelFnOps` object.

    Raises:
      ValueError: if `metrics` don't match `labels`.
    """
    model_fn_ops = self._call_model_fn(
        features, labels, model_fn_lib.ModeKeys.EVAL)

    features, labels = self._feature_engineering_fn(features, labels)
    # Custom metrics should overwrite defaults.
    if metrics:
      model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
          metrics, features, labels, model_fn_ops.predictions))

    if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
      model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
          metrics_lib.streaming_mean(model_fn_ops.loss))
    return model_fn_ops 
Example #19
Source File: head.py    From lambda-packs with MIT License
def _metrics(self, eval_loss, predictions, labels, weights):
    """Returns a dict of metrics keyed by name."""
    with ops.name_scope(
        "metrics",
        values=((eval_loss, self._labels(labels), self._label_ids(labels),
                 weights) + tuple(six.itervalues(predictions)))):
      logits = predictions[prediction_key.PredictionKey.LOGITS]
      probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]
      classes = predictions[prediction_key.PredictionKey.CLASSES]

      metrics = {_summary_key(self.head_name, mkey.LOSS):
                 metrics_lib.streaming_mean(eval_loss)}
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
          metrics_lib.streaming_accuracy(
              classes, self._labels(labels), weights))

      if not self._label_keys:
        # Classes are IDs. Add some metrics.
        for class_id in self._metric_class_ids:
          metrics[_summary_key(
              self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (
                  _class_predictions_streaming_mean(classes, weights, class_id))
          # TODO(ptucker): Add per-class accuracy, precision, recall.
          metrics[_summary_key(
              self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (
                  _class_labels_streaming_mean(
                      self._label_ids(labels), weights, class_id))
          metrics[_summary_key(
              self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (
                  _predictions_streaming_mean(probabilities, weights, class_id))
          metrics[_summary_key(
              self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (
                  _predictions_streaming_mean(logits, weights, class_id))

    return metrics 
Example #20
Source File: head.py    From lambda-packs with MIT License
def _metrics(self, eval_loss, predictions, labels, weights):
    """See `_MultiClassHead`."""
    with ops.name_scope("metrics", values=(
        [eval_loss, labels, weights] + list(six.itervalues(predictions)))):
      metrics = {_summary_key(self.head_name, mkey.LOSS):
                 metrics_lib.streaming_mean(eval_loss)}

      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      classes = predictions[prediction_key.PredictionKey.CLASSES]
      metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
          metrics_lib.streaming_accuracy(classes, labels, weights))
      # TODO(sibyl-vie3Poto): add more metrics relevant for svms.

    return metrics 
Example #21
Source File: head.py    From lambda-packs with MIT License
def _indicator_labels_streaming_mean(labels, weights=None, class_id=None):
  labels = math_ops.to_float(labels)
  weights = _float_weights_or_none(weights)
  if weights is not None:
    weights = weights_broadcast_ops.broadcast_weights(weights, labels)
  if class_id is not None:
    if weights is not None:
      weights = weights[:, class_id]
    labels = labels[:, class_id]
  return metrics_lib.streaming_mean(labels, weights=weights) 
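A hedged sketch of what the helper above computes, using plain broadcasting in place of weights_broadcast_ops (the labels and weights are made up): after broadcasting the per-example weights to the indicator labels and slicing one class column, streaming_mean gives the weighted base rate of that class.

import tensorflow as tf
from tensorflow.contrib import metrics as metrics_lib

labels = tf.constant([[1., 0.], [0., 1.], [1., 0.]])  # indicator labels
weights = tf.constant([[1.], [2.], [1.]])             # per-example weights
broadcast = tf.ones_like(labels) * weights            # broadcast to label shape
class_0_rate, update_op = metrics_lib.streaming_mean(
    labels[:, 0], weights=broadcast[:, 0])

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(class_0_rate))  # (1*1 + 0*2 + 1*1) / (1 + 2 + 1) = 0.5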
Example #22
Source File: head.py    From lambda-packs with MIT License
def _predictions_streaming_mean(predictions,
                                weights=None,
                                class_id=None):
  predictions = math_ops.to_float(predictions)
  weights = _float_weights_or_none(weights)
  if weights is not None:
    weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
  if class_id is not None:
    if weights is not None:
      weights = weights[:, class_id]
    predictions = predictions[:, class_id]
  return metrics_lib.streaming_mean(predictions, weights=weights)


# TODO(ptucker): Add support for SparseTensor labels. 
Example #23
Source File: head.py    From lambda-packs with MIT License
def _class_predictions_streaming_mean(predictions, weights, class_id):
  return metrics_lib.streaming_mean(
      array_ops.where(
          math_ops.equal(
              math_ops.to_int32(class_id), math_ops.to_int32(predictions)),
          array_ops.ones_like(predictions),
          array_ops.zeros_like(predictions)),
      weights=weights) 
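A hedged, self-contained sketch of the helper above (the class ids are made up): turning the predicted classes into a 0/1 indicator for one class and taking its streaming_mean yields the running fraction of examples predicted as that class.

import tensorflow as tf
from tensorflow.contrib import metrics as metrics_lib

predicted_classes = tf.constant([0, 2, 2, 1], dtype=tf.int32)
class_id = 2
indicator = tf.where(tf.equal(class_id, predicted_classes),
                     tf.ones_like(predicted_classes, dtype=tf.float32),
                     tf.zeros_like(predicted_classes, dtype=tf.float32))
frequency, update_op = metrics_lib.streaming_mean(indicator)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(frequency))  # 0.5: half the batch is predicted as class 2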
Example #24
Source File: head.py    From lambda-packs with MIT License
def _class_labels_streaming_mean(labels, weights, class_id):
  return metrics_lib.streaming_mean(
      array_ops.where(
          math_ops.equal(
              math_ops.to_int32(class_id), math_ops.to_int32(labels)),
          array_ops.ones_like(labels), array_ops.zeros_like(labels)),
      weights=weights) 
Example #25
Source File: estimator.py    From auto-alt-text-lambda-api with MIT License
def _get_eval_ops(self, features, labels, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      `ModelFnOps` object.

    Raises:
      ValueError: if `metrics` don't match `labels`.
    """
    model_fn_ops = self._call_model_fn(
        features, labels, model_fn_lib.ModeKeys.EVAL)

    # Custom metrics should overwrite defaults.
    if metrics:
      model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
          metrics, features, labels, model_fn_ops.predictions))

    if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
      model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
          metrics_lib.streaming_mean(model_fn_ops.loss))
    return model_fn_ops 
Example #26
Source File: head.py    From auto-alt-text-lambda-api with MIT License
def _weighted_average_loss_metric_spec(loss_fn, pred_key, label_key,
                                       weight_key):

  def _streaming_weighted_average_loss(predictions, labels, weights=None):
    loss_unweighted = loss_fn(predictions, labels)
    if weights is not None:
      weights = math_ops.to_float(weights)
    _, weighted_average_loss = _loss(loss_unweighted, weights, name="eval_loss")
    return metrics_lib.streaming_mean(weighted_average_loss)

  return metric_spec.MetricSpec(_streaming_weighted_average_loss, pred_key,
                                label_key, weight_key) 
Example #27
Source File: head.py    From auto-alt-text-lambda-api with MIT License
def _indicator_labels_streaming_mean(predictions,
                                     labels,
                                     weights=None,
                                     class_id=None):
  del predictions
  if class_id is not None:
    labels = labels[:, class_id]
  return metrics_lib.streaming_mean(labels, weights=weights) 
Example #28
Source File: head.py    From auto-alt-text-lambda-api with MIT License
def _predictions_streaming_mean(predictions,
                                labels,
                                weights=None,
                                class_id=None):
  del labels
  if class_id is not None:
    predictions = predictions[:, class_id]
  return metrics_lib.streaming_mean(predictions, weights=weights)


# TODO(ptucker): Add support for SparseTensor labels. 
Example #29
Source File: head.py    From deep_image_model with Apache License 2.0
def _weighted_average_loss_metric_spec(loss_fn, prediction_key,
                                       label_key, weight_key):
  def _streaming_weighted_average_loss(predictions, labels, weights=None):
    loss_unweighted = loss_fn(predictions, labels)
    if weights is not None:
      weights = math_ops.to_float(weights)
    _, weighted_average_loss = _loss(loss_unweighted,
                                     weights,
                                     name="eval_loss")
    return metrics_lib.streaming_mean(weighted_average_loss)
  return metric_spec.MetricSpec(_streaming_weighted_average_loss,
                                prediction_key, label_key, weight_key)
Example #30
Source File: logistic_regressor.py    From lambda-packs with MIT License
def _make_logistic_eval_metric_ops(labels, predictions, thresholds):
  """Returns a dictionary of evaluation metric ops for logistic regression.

  Args:
    labels: The labels `Tensor`, or a dict with only one `Tensor` keyed by name.
    predictions: The predictions `Tensor`.
    thresholds: List of floating point thresholds to use for accuracy,
      precision, and recall metrics.

  Returns:
    A dict of metric results keyed by name.
  """
  # If labels is a dict with a single key, unpack into a single tensor.
  labels_tensor = labels
  if isinstance(labels, dict) and len(labels) == 1:
    labels_tensor = list(labels.values())[0]  # list() keeps this working on Python 3

  metrics = {}
  metrics[metric_key.MetricKey.PREDICTION_MEAN] = metrics_lib.streaming_mean(
      predictions)
  metrics[metric_key.MetricKey.LABEL_MEAN] = metrics_lib.streaming_mean(
      labels_tensor)
  # Also include the streaming mean of the label as an accuracy baseline, as
  # a reminder to users.
  metrics[metric_key.MetricKey.ACCURACY_BASELINE] = metrics_lib.streaming_mean(
      labels_tensor)

  metrics[metric_key.MetricKey.AUC] = metrics_lib.streaming_auc(
      labels=labels_tensor, predictions=predictions)

  for threshold in thresholds:
    predictions_at_threshold = math_ops.to_float(
        math_ops.greater_equal(predictions, threshold),
        name='predictions_at_threshold_%f' % threshold)
    metrics[metric_key.MetricKey.ACCURACY_MEAN % threshold] = (
        metrics_lib.streaming_accuracy(labels=labels_tensor,
                                       predictions=predictions_at_threshold))
    # Precision for positive examples.
    metrics[metric_key.MetricKey.PRECISION_MEAN % threshold] = (
        metrics_lib.streaming_precision(labels=labels_tensor,
                                        predictions=predictions_at_threshold))
    # Recall for positive examples.
    metrics[metric_key.MetricKey.RECALL_MEAN % threshold] = (
        metrics_lib.streaming_recall(labels=labels_tensor,
                                     predictions=predictions_at_threshold))

  return metrics
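A hedged sketch of the "accuracy baseline" above (the labels are made up): the streaming mean of binary labels is the positive-label rate, i.e. the accuracy a constant always-predict-positive classifier would achieve, which is why it is reported alongside the thresholded accuracy metrics.

import tensorflow as tf
from tensorflow.contrib import metrics as metrics_lib

labels = tf.constant([0., 0., 0., 1.])
baseline, update_op = metrics_lib.streaming_mean(labels)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(baseline))  # 0.25: the positive-label rate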