Python tensorflow.metrics() Examples

The following are 30 code examples of the tensorflow.metrics module, collected from open-source projects. The source file, originating project, and license are noted above each example. You may also want to check out all of the other available functions and classes of the tensorflow module.
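Every metric used in these examples follows the same streaming contract: a tf.metrics function creates local accumulator variables and returns a (value_op, update_op) pair, where update_op folds one batch into the accumulators and value_op reads the current result. A minimal sketch of that pattern, assuming TensorFlow 1.x-style graph execution via tf.compat.v1:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # v1 streaming metrics need graph mode

labels = tf.compat.v1.placeholder(tf.float32, shape=[None])
predictions = tf.compat.v1.placeholder(tf.float32, shape=[None])

# value_op is idempotent; update_op folds one batch into the running state.
accuracy, accuracy_update = tf.compat.v1.metrics.accuracy(labels, predictions)

with tf.compat.v1.Session() as sess:
  # The metric's accumulators are local variables, so initialize those.
  sess.run(tf.compat.v1.local_variables_initializer())
  for batch_labels, batch_preds in [([1., 0.], [1., 1.]), ([1., 1.], [1., 1.])]:
    sess.run(accuracy_update,
             feed_dict={labels: batch_labels, predictions: batch_preds})
  print(sess.run(accuracy))  # accuracy accumulated over both batches: 0.75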
Example #1
Source File: ops.py    From basenji with Apache License 2.0
def per_target_r2(preds, targets, weights):
  """Returns ops for per-target R2 statistic following the tf.metrics API.

  Args:
    preds: arbitrarily shaped predictions, with the final dimension
           indexing distinct targets
    targets: targets (same shape as predictions)
    weights: per-instance weights (same shape as predictions)

  Returns:
    r2: idempotent [preds.shape[-1]] tensor of r2 values for each target.
    update_op: op for updating the value given new data
  """

  res_ss, res_ss_update = _per_target_mean(tf.square(preds - targets), weights)

  tot_ss, tot_ss_update = _per_target_variance(targets, weights)
  r2 = 1. - res_ss / tot_ss

  update_op = tf.group(res_ss_update, tot_ss_update)
  return r2, update_op 
Example #2
Source File: lstm_models.py    From synvae with MIT License
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
    b_enc, b_dec = tf.split(
        flat_rnn_output,
        [self._nade.num_hidden, self._output_depth], axis=1)
    ll, cond_probs = self._nade.log_prob(
        flat_x_target, b_enc=b_enc, b_dec=b_dec)
    r_loss = -ll
    flat_truth = tf.cast(flat_x_target, tf.bool)
    flat_predictions = tf.greater_equal(cond_probs, 0.5)

    metric_map = {
        'metrics/accuracy':
            tf.metrics.mean(
                tf.reduce_all(tf.equal(flat_truth, flat_predictions), axis=-1)),
        'metrics/recall':
            tf.metrics.recall(flat_truth, flat_predictions),
        'metrics/precision':
            tf.metrics.precision(flat_truth, flat_predictions),
    }

    return r_loss, metric_map 
Example #3
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def get_metric_ops(
      self, features_dict: types.TensorTypeMaybeDict,
      predictions_dict: types.TensorTypeMaybeDict,
      labels_dict: types.TensorTypeMaybeDict
  ) -> Dict[Text, Tuple[types.TensorType, types.TensorType]]:
    """Returns the metric_ops entry for this metric.

    Note that the metric will be added to metric_ops via
    metric_ops.update(metric.get_metric_ops()).

    Args:
      features_dict: Dictionary containing references to the features Tensors
        for the model.
      predictions_dict: Dictionary containing references to the predictions
        Tensors for the model.
      labels_dict: Dictionary containing references to the labels Tensors for
        the model.

    Returns:
      A metric op dictionary,
      i.e. a dictionary[metric_name] = (value_op, update_op) containing all
      the metrics and ops for this metric.
    """
    raise NotImplementedError('not implemented') 
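For context, a hypothetical concrete override of get_metric_ops is sketched below: it must return a dict mapping metric name to a (value_op, update_op) pair, so a tf.metrics-style result can be returned directly. The class name, metric key string, and treatment of labels_dict as a single tensor are illustrative assumptions, not the actual model-analysis code.

class _LabelMean(_PostExportMetric):  # base class name assumed for illustration

  def get_metric_ops(self, features_dict, predictions_dict, labels_dict):
    # Assumes labels_dict is a single labels tensor; the real metrics resolve
    # prediction/label keys first via helper methods.
    labels = _flatten_to_one_dim(tf.cast(labels_dict, tf.float64))
    return {
        self._metric_key('post_export_metrics/label_mean'):
            tf.compat.v1.metrics.mean(labels)
    }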
Example #4
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def populate_stats_and_pop(
      self, slice_key: slicer.SliceKeyType, combined_metrics: Dict[Text, Any],
      output_metrics: Dict[Text, metrics_pb2.MetricValue]) -> None:
    """Converts the metric in `combined_metrics` to `output_metrics` and pops.

    Please override this method if the metric is not a plot type and should be
    converted into a non-float type. The metric should also be popped out of
    `combined_metrics` after conversion. By default, this method does nothing;
    the metric, along with the rest of the metrics in `combined_metrics`, will
    be converted into float values afterwards.

    Args:
      slice_key: The name of the slice.
      combined_metrics: The dict containing the raw TFMA metrics.
      output_metrics: The dict that the converted metrics are written into.
    """
    pass 
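As a complement, here is a hypothetical override sketch showing the write-then-pop mechanics described above; the metric key and the double_value proto field are assumptions for illustration (a real non-float metric would write a different field of metrics_pb2.MetricValue).

def populate_stats_and_pop(self, slice_key, combined_metrics, output_metrics):
  # Hypothetical: write the converted value into the output proto, then pop
  # the raw entry so the default float conversion does not handle it again.
  key = self._metric_key(metric_keys.EXAMPLE_COUNT)
  output_metrics[key].double_value.value = float(combined_metrics.pop(key))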
Example #5
Source File: base_model.py    From synvae with MIT License
def reconstruction_loss(self, x_input, x_target, x_length, z=None,
                          c_input=None):
    """Reconstruction loss calculation.

    Args:
      x_input: Batch of decoder input sequences for teacher forcing, sized
          `[batch_size, max(x_length), output_depth]`.
      x_target: Batch of expected output sequences to compute loss against,
          sized `[batch_size, max(x_length), output_depth]`.
      x_length: Length of input/output sequences, sized `[batch_size]`.
      z: (Optional) Latent vectors. Required if model is conditional. Sized
          `[n, z_size]`.
      c_input: (Optional) Batch of control sequences, sized
          `[batch_size, max(x_length), control_depth]`. Required if conditioning
          on control sequences.

    Returns:
      r_loss: The reconstruction loss for each sequence in the batch.
      metric_map: Map from metric name to tf.metrics return values for logging.
    """
    pass 
Example #6
Source File: metrics.py    From professional-services with Apache License 2.0
def eval_metric_fn(labels, predictions, params):
    """Returns dict of <metric name>: <tf.metrics metric>.

    Args:
         labels: Ground truth values in [survival array | failure array] format
         predictions: Conditional likelihoods of surviving each interval
         params: Dict containing model parameters, including  classification
                    threshold
    """
    metrics = {}
    num_unbounded_intervals = metadata.NUM_INTERVALS + 1
    labels_value = get_label(labels)
    class_preds = get_class(predictions, params['threshold'])
    accuracy = tf.metrics.accuracy(labels_value,
                                   class_preds,
                                   name='acc_op')
    metrics['accuracy'] = accuracy
    accuracy_per_class = tf.metrics.mean_per_class_accuracy(
        labels=labels_value,
        predictions=class_preds,
        num_classes=num_unbounded_intervals,
        name='accuracy_per_class_op')
    metrics['accuracy_per_class'] = accuracy_per_class

    return metrics 
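A hedged sketch of how a metrics dict like this is typically consumed: it becomes the eval_metric_ops of an Estimator's model_fn. build_model and compute_loss are hypothetical stand-ins for the project's own model and loss functions.

def model_fn(features, labels, mode, params):
    predictions = build_model(features, params)   # hypothetical model builder
    loss = compute_loss(labels, predictions)      # hypothetical loss function
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            eval_metric_ops=eval_metric_fn(labels, predictions, params))
    # TRAIN and PREDICT branches omitted from this sketch.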
Example #7
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def get_metric_ops(
      self, features_dict: types.TensorTypeMaybeDict,
      predictions_dict: types.TensorTypeMaybeDict,
      labels_dict: types.TensorTypeMaybeDict
  ) -> Dict[Text, Tuple[types.TensorType, types.TensorType]]:
    # Note that we have to squeeze predictions, labels, weights so they are all
    # N element vectors (otherwise some of them might be N x 1 tensors, and
    # multiplying a N element vector with a N x 1 tensor uses matrix
    # multiplication rather than element-wise multiplication).
    predictions, labels = self._get_labels_and_predictions(
        predictions_dict, labels_dict)
    predictions = _flatten_to_one_dim(tf.cast(predictions, tf.float64))
    labels = _flatten_to_one_dim(tf.cast(labels, tf.float64))
    weights = tf.ones_like(predictions)
    if self._example_weight_key:
      weights = _flatten_to_one_dim(
          tf.cast(features_dict[self._example_weight_key], tf.float64))
    return {
        self._metric_key(metric_keys.SQUARED_PEARSON_CORRELATION):
            metrics.squared_pearson_correlation(predictions, labels, weights)
    } 
Example #8
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def __init__(self,
               example_weight_key: Optional[Text] = None,
               target_prediction_keys: Optional[List[Text]] = None,
               labels_key: Optional[Text] = None,
               metric_tag: Optional[Text] = None,
               tensor_index: Optional[int] = None):
    """Create a metric that computes calibration.

    Args:
      example_weight_key: The key of the example weight column in the features
        dict. If None, all predictions are given a weight of 1.0.
      target_prediction_keys: If provided, the prediction keys to look for in
        order.
      labels_key: If provided, a custom label key.
      metric_tag: If provided, a custom metric tag. Only necessary to
        disambiguate instances of the same metric on different predictions.
      tensor_index: Optional index to specify class predictions to calculate
        metrics on in the case of multi-class models.
    """

    self._example_weight_key = example_weight_key
    super(_Calibration, self).__init__(
        target_prediction_keys=target_prediction_keys,
        labels_key=labels_key,
        metric_tag=metric_tag) 
Example #9
Source File: ops.py    From basenji with Apache License 2.0
def _per_target_variance(data, weights=None):
  """Returns the variance of input tensor t, each entry weighted by the
  corresponding index in weights.

  Follows the tf.metrics API for an idempotent tensor and an update tensor.

  Args:
    data: input tensor of arbitrary shape.
    weights: input tensor of same shape as `t`. When None, use a weight of 1 for
      all inputs.

  Returns:
    variance_value: idempotent tensor containing the variance of `t`, whose
      shape is `[1]`
    update_op: A (non-idempotent) op to update the variance value
  """
  if weights is None:
    weights = tf.ones(shape=data.shape, dtype=tf.float32)

  tsquared_mean, tsquared_update = _per_target_mean(tf.square(data), weights)
  mean_t, t_update = _per_target_mean(data, weights)
  variance_value = tsquared_mean - mean_t * mean_t
  update_op = tf.group(tsquared_update, t_update)

  return variance_value, update_op 
Example #10
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def get_metric_ops(
      self, features_dict: types.TensorTypeMaybeDict,
      predictions_dict: types.TensorTypeMaybeDict,
      labels_dict: types.TensorTypeMaybeDict
  ) -> Dict[Text, Tuple[types.TensorType, types.TensorType]]:
    prediction_tensor, label_tensor = self._get_labels_and_predictions(
        predictions_dict, labels_dict)
    # metrics.calibration expects tensors with shape (n,)
    prediction_tensor = _flatten_to_one_dim(prediction_tensor)
    label_tensor = _flatten_to_one_dim(label_tensor)
    if self._example_weight_key:
      weights = _flatten_to_one_dim(features_dict[self._example_weight_key])
    else:
      weights = tf.ones_like(prediction_tensor)
    return {
        self._metric_key(metric_keys.CALIBRATION):
            metrics.calibration(prediction_tensor, label_tensor, weights)
    } 
Example #11
Source File: ops.py    From basenji with Apache License 2.0
def r2_metric(preds, targets, weights):
  """Returns ops for R2 statistic following the tf.metrics API.
  Args:
    preds: predictions (arbitrary shape)
    targets: targets (same shape as predictions)
    weights: per-instance weights (same shape as predictions)

  Returns:
    r2: idempotent tensor containing the r2 value
    update_op: op for updating the value given new data
  """

  res_ss, res_ss_update = tf.metrics.mean(tf.square(preds - targets), weights)

  tot_ss, tot_ss_update = variance(targets, weights)
  r2 = 1. - res_ss / tot_ss

  update_op = tf.group(res_ss_update, tot_ss_update)
  return r2, update_op 
Example #12
Source File: ops.py    From basenji with Apache License 2.0
def variance(data, weights=None):
  """Returns the variance of input tensor t, each entry weighted by the
  corresponding index in weights.

  Follows the tf.metrics API for an idempotent tensor and an update tensor.

  Args:
    data: input tensor of arbitrary shape.
    weights: input tensor of same shape as `t`. When None, use a weight of 1 for
      all inputs.

  Returns:
    variance_value: idempotent tensor containing the variance of `t`, whose
      shape is `[1]`
    update_op: A (non-idempotent) op to update the variance value
  """
  if weights is None:
    weights = tf.ones(shape=data.shape, dtype=tf.float32)

  tsquared_mean, tsquared_update = tf.metrics.mean(tf.square(data), weights)
  mean_t, t_update = tf.metrics.mean(data, weights)
  variance_value = tsquared_mean - mean_t * mean_t
  update_op = tf.group(tsquared_update, t_update)

  return variance_value, update_op 
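A minimal usage sketch for the variance/r2_metric pair above, assuming TensorFlow 1.x graph mode (where tf.metrics.mean is available) and that both functions are already defined in scope:

import tensorflow as tf

targets = tf.constant([[1.0], [2.0], [3.0], [4.0]])
preds = tf.constant([[1.1], [1.9], [3.2], [3.8]])
weights = tf.ones_like(targets)

r2, r2_update = r2_metric(preds, targets, weights)

with tf.Session() as sess:
  # tf.metrics accumulators live in local variables.
  sess.run(tf.local_variables_initializer())
  sess.run(r2_update)  # fold in this batch
  print(sess.run(r2))  # streaming R2 accumulated so far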
Example #13
Source File: metrics.py    From light-head-rcnn with MIT License
def evaluate(self, iou_threshold=0.5):

        self.metrics = {}
        for label in range(self.num_classes):
            self.metrics[label] = evaluate_detector(
                self.groundtruth[label],
                self.detections[label],
                iou_threshold
            )

        if self.num_classes > 1:
            APs = [
                self.metrics[label]['AP']
                for label in range(self.num_classes)
            ]
            self.metrics['mAP'] = np.mean(APs) 
Example #14
Source File: library_matching.py    From deep-molecular-massspec with Apache License 2.0
def _make_logging_ops(true_keys, predicted_keys, ranks, log_dir):
  """tf.metrics-compatible ops for saving and logging results."""
  all_true_keys = []
  all_predicted_keys = []
  all_ranks = []

  def _extend_keys(true_batch_keys, predicted_batch_keys, batch_ranks):
    all_true_keys.extend(true_batch_keys)
    all_predicted_keys.extend(predicted_batch_keys)
    all_ranks.extend(batch_ranks)
    return np.int32(0)

  update_op = tf.py_func(_extend_keys, [true_keys, predicted_keys, ranks],
                         [tf.int32])[0]

  def _write_log_to_file(global_step):
    return _log_predictions(all_true_keys, all_predicted_keys, all_ranks,
                            global_step, log_dir)

  value_op = tf.py_func(_write_log_to_file,
                        [tf.train.get_or_create_global_step()], [tf.int32])[0]

  return (value_op, update_op) 
Example #15
Source File: utils.py    From DeepCTR with Apache License 2.0
def get_metrics():
    if tf.__version__ < "2.0.0":
        return tf.metrics
    else:
        return tf.compat.v1.metrics 
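A short usage sketch for the shim above: call sites fetch the v1-style metrics module once and use it identically under TF 1.x and TF 2.x; graph mode is still required for these streaming metrics, which this sketch assumes.

import tensorflow as tf

if tf.__version__ >= "2.0.0":
    tf.compat.v1.disable_eager_execution()  # v1 streaming metrics need graph mode

metrics_module = get_metrics()
labels = tf.constant([1.0, 0.0, 1.0])
predictions = tf.constant([1.0, 1.0, 1.0])
# The same (value_op, update_op) pair regardless of the installed TF version.
acc, acc_update = metrics_module.accuracy(labels, predictions)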
Example #16
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def __init__(self,
               example_weight_key: Optional[Text] = None,
               target_prediction_keys: Optional[List[Text]] = None,
               labels_key: Optional[Text] = None,
               metric_tag: Optional[Text] = None,
               tensor_index: Optional[int] = None):
    """Creates a metric that computes mean absolute error.

    Labels and predictions can take any of the float values.

    Args:
      example_weight_key: The key of the example weight column in the features
        dict. If None, all predictions are given a weight of 1.0.
      target_prediction_keys: Optional acceptable keys in predictions_dict in
        descending order of precedence.
      labels_key: Optionally, the key from labels_dict to use.
      metric_tag: If provided, a custom metric tag. Only necessary to
        disambiguate instances of the same metric on different predictions or
        for readability concerns in tool output.
      tensor_index: Optional index to specify class predictions to calculate
        metrics on in the case of multi-class models.
    """
    self._example_weight_key = example_weight_key
    super(_MeanAbsoluteError, self).__init__(
        metric_keys.MEAN_ABSOLUTE_ERROR,
        tf.compat.v1.metrics.mean_absolute_error,
        example_weight_key,
        target_prediction_keys,
        labels_key,
        metric_tag,
        tensor_index=tensor_index) 
Example #17
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def __init__(self,
               example_weight_key: Optional[Text] = None,
               target_prediction_keys: Optional[List[Text]] = None,
               labels_key: Optional[Text] = None,
               metric_tag: Optional[Text] = None,
               tensor_index: Optional[int] = None):
    """Creates a metric that computes mean squared error.

    Labels and predictions can take any of the float values.

    Args:
      example_weight_key: The key of the example weight column in the features
        dict. If None, all predictions are given a weight of 1.0.
      target_prediction_keys: Optional acceptable keys in predictions_dict in
        descending order of precedence.
      labels_key: Optionally, the key from labels_dict to use.
      metric_tag: If provided, a custom metric tag. Only necessary to
        disambiguate instances of the same metric on different predictions or
        for readability concerns in tool output.
      tensor_index: Optional index to specify class predictions to calculate
        metrics on in the case of multi-class models.
    """
    self._example_weight_key = example_weight_key
    super(_MeanSquaredError, self).__init__(
        metric_keys.MEAN_SQUARED_ERROR,
        tf.compat.v1.metrics.mean_squared_error,
        example_weight_key,
        target_prediction_keys,
        labels_key,
        metric_tag,
        tensor_index=tensor_index) 
Example #18
Source File: hooks.py    From -Learn-Artificial-Intelligence-with-TensorFlow with MIT License
def __init__(self, metric, max_metric=0.99, patience=5, epsilon=5e-4):
        """
        Args:
            metric: tuple of (tensor, update_op) as returned by any
                tf.metrics function or any custom_ops metric.
            max_metric: (float) threshold for `metric` such that if
                `metric` exceeds `max_metric`, training will be terminated.
            patience: number of consecutive runs the `metric` value is allowed
                to stay unchanged (within plus or minus `epsilon`) before stopping.
            epsilon: small value used to determine whether `metric` has
                changed significantly.
        """
        self.metric = metric
        self.max_metric = max_metric
        self.max_window = MaxWindow(patience, epsilon) 
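The project's run logic is not shown in this snippet; below is a hedged sketch of how a hook like this typically consumes the (value_op, update_op) pair using the standard tf.estimator.SessionRunHook API. The class and its method bodies are illustrative, not the repository's actual implementation.

import tensorflow as tf

class EarlyStoppingSketchHook(tf.estimator.SessionRunHook):
    """Illustrative hook: stop training once a streaming metric gets high enough."""

    def __init__(self, metric, max_metric=0.99):
        self._metric_value, self._metric_update = metric
        self._max_metric = max_metric

    def before_run(self, run_context):
        # Ask the session to run the update op and fetch the current value.
        return tf.estimator.SessionRunArgs(
            [self._metric_update, self._metric_value])

    def after_run(self, run_context, run_values):
        _, metric_value = run_values.results
        if metric_value > self._max_metric:
            run_context.request_stop()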
Example #19
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def __init__(self,
               metric_name: Text,
               metric_fn: Callable[
                   [types.TensorType, types.TensorType, types.TensorType],
                   Tuple[types.TensorOrOperationType,
                         types.TensorOrOperationType]],
               example_weight_key: Optional[Text] = None,
               target_prediction_keys: Optional[List[Text]] = None,
               labels_key: Optional[Text] = None,
               metric_tag: Optional[Text] = None,
               tensor_index: Optional[int] = None):
    """Initiates the base metric class.

    Labels and predictions can take any of the float values.

    Args:
      metric_name: Name of the metric to be computed.
      metric_fn: TF metric fn used to calculate the metric. This function
        should take three arguments, in the following order: the label tensor,
        the prediction tensor, and the weight tensor.
      example_weight_key: The key of the example weight column in the features
        dict. If None, all predictions are given a weight of 1.0.
      target_prediction_keys: Optional acceptable keys in predictions_dict in
        descending order of precedence.
      labels_key: Optionally, the key from labels_dict to use.
      metric_tag: If provided, a custom metric tag. Only necessary to
        disambiguate instances of the same metric on different predictions or
        for readability concerns in tool output.
      tensor_index: Optional index to specify class predictions to calculate
        metrics on in the case of multi-class models.
    """
    self._metric_name = metric_name
    self._metric_fn = metric_fn
    self._example_weight_key = example_weight_key
    super(_TFMetricBaseClass, self).__init__(
        target_prediction_keys,
        labels_key,
        metric_tag,
        tensor_index=tensor_index) 
Example #20
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def get_metric_ops(
      self, features_dict: types.TensorTypeMaybeDict,
      predictions_dict: types.TensorTypeMaybeDict,
      labels_dict: types.TensorTypeMaybeDict
  ) -> Dict[Text, Tuple[types.TensorType, types.TensorType]]:
    value = features_dict[self._example_weight_key]
    return {self._metric_key(metric_keys.EXAMPLE_WEIGHT): metrics.total(value)} 
Example #21
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def get_metric_ops(
      self, features_dict: types.TensorTypeMaybeDict,
      predictions_dict: types.TensorTypeMaybeDict,
      labels_dict: types.TensorTypeMaybeDict
  ) -> Dict[Text, Tuple[types.TensorType, types.TensorType]]:
    # Note that we have to squeeze predictions, labels, weights so they are all
    # N element vectors (otherwise some of them might be N x 1 tensors, and
    # multiplying a N element vector with a N x 1 tensor uses matrix
    # multiplication rather than element-wise multiplication).
    squeezed_weights = None
    if self._example_weight_key:
      squeezed_weights = tf.squeeze(features_dict[self._example_weight_key])
    prediction_tensor, label_tensor = self._get_labels_and_predictions(
        predictions_dict, labels_dict)
    return {
        self._metric_key(metric_keys.CALIBRATION_PLOT_MATRICES):
            metrics.calibration_plot(
                predictions=tf.squeeze(prediction_tensor),
                labels=tf.squeeze(label_tensor),
                left=0.0,
                right=1.0,
                num_buckets=self._num_buckets,
                weights=squeezed_weights),
        self._metric_key(metric_keys.CALIBRATION_PLOT_BOUNDARIES):
            (tf.range(0.0, self._num_buckets + 1) / self._num_buckets,
             tf.no_op()),
    } 
Example #22
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def __init__(self,
               example_weight_key: Optional[Text] = None,
               num_buckets: int = _DEFAULT_NUM_BUCKETS,
               target_prediction_keys: Optional[List[Text]] = None,
               labels_key: Optional[Text] = None,
               metric_tag: Optional[Text] = None,
               tensor_index: Optional[int] = None) -> None:
    """Create a plot metric for AUROC and AUPRC.

    Predictions should be one of:
      (a) a single float in [0, 1]
      (b) a dict containing the LOGISTIC key
      (c) a dict containing the PREDICTIONS key, where the prediction is
          in [0, 1]

    Label should be a single float that is in [0, 1] (string labels will be
    converted to 0 or 1 using ALL_CLASSES tensor if present).

    Args:
      example_weight_key: The key of the example weight column in the features
        dict. If None, all predictions are given a weight of 1.0.
      num_buckets: The number of buckets used for plot.
      target_prediction_keys: If provided, the prediction keys to look for in
        order.
      labels_key: If provided, a custom label key.
      metric_tag: If provided, a custom metric tag. Only necessary to
        disambiguate instances of the same metric on different predictions.
      tensor_index: Optional index to specify class predictions to calculate
        metrics on in the case of multi-class models.
    """
    thresholds = [i * 1.0 / num_buckets for i in range(0, num_buckets + 1)]
    thresholds = [-1e-6] + thresholds
    super(_AucPlots, self).__init__(
        example_weight_key=example_weight_key,
        thresholds=thresholds,
        target_prediction_keys=target_prediction_keys,
        labels_key=labels_key,
        metric_tag=metric_tag,
        tensor_index=tensor_index) 
Example #23
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def __init__(self,
               thresholds: List[float],
               example_weight_key: Optional[Text] = None,
               target_prediction_keys: Optional[List[Text]] = None,
               labels_key: Optional[Text] = None,
               metric_tag: Optional[Text] = None,
               tensor_index: Optional[int] = None) -> None:
    """Create a metric that computes the confusion matrix at given thresholds.

    Predictions should be one of:
      (a) a single float in [0, 1]
      (b) a dict containing the LOGISTIC key
      (c) a dict containing the PREDICTIONS key, where the prediction is
          in [0, 1]

    Label should be a single float that is in [0, 1] (string labels will be
    converted to 0 or 1 using ALL_CLASSES tensor if present).

    Args:
      thresholds: List of thresholds to compute the confusion matrix at.
      example_weight_key: The key of the example weight column in the features
        dict. If None, all predictions are given a weight of 1.0.
      target_prediction_keys: If provided, the prediction keys to look for in
        order.
      labels_key: If provided, a custom label key.
      metric_tag: If provided, a custom metric tag. Only necessary to
        disambiguate instances of the same metric on different predictions.
      tensor_index: Optional index to specify class predictions to calculate
        metrics on in the case of multi-class models.
    """
    self._example_weight_key = example_weight_key
    self._thresholds = sorted(thresholds)
    super(_ConfusionMatrixBasedMetric, self).__init__(
        target_prediction_keys,
        labels_key,
        metric_tag,
        tensor_index=tensor_index) 
Example #24
Source File: post_export_metrics.py    From model-analysis with Apache License 2.0
def __init__(self,
               example_weight_key: Optional[Text] = None,
               target_prediction_keys: Optional[List[Text]] = None,
               labels_key: Optional[Text] = None,
               metric_tag: Optional[Text] = None,
               tensor_index: Optional[int] = None):
    """Creates a metric that computes root mean squared error.

    Labels and predictions can take any of the float values.

    Args:
      example_weight_key: The key of the example weight column in the features
        dict. If None, all predictions are given a weight of 1.0.
      target_prediction_keys: Optional acceptable keys in predictions_dict in
        descending order of precedence.
      labels_key: Optionally, the key from labels_dict to use.
      metric_tag: If provided, a custom metric tag. Only necessary to
        disambiguate instances of the same metric on different predictions or
        for readability concerns in tool output.
      tensor_index: Optional index to specify class predictions to calculate
        metrics on in the case of multi-class models.
    """
    self._example_weight_key = example_weight_key
    super(_RootMeanSquaredError, self).__init__(
        metric_keys.ROOT_MEAN_SQUARED_ERROR,
        tf.compat.v1.metrics.root_mean_squared_error,
        example_weight_key,
        target_prediction_keys,
        labels_key,
        metric_tag,
        tensor_index=tensor_index) 
Example #25
Source File: library_matching.py    From deep-molecular-massspec with Apache License 2.0
def _find_query_rank(similarities, library_keys, query_keys):
  """tf.py_func wrapper around _find_query_rank_helper.

  Args:
    similarities: [batch_size, num_library_elements] float Tensor. These are not
      assumed to be sorted in any way.
    library_keys: [num_library_elements] string Tensor, where each column j of
      similarities corresponds to library_key j.
    query_keys: [num_queries] string Tensor
  Returns:
    query_ranks: a dictionary with keys 'highest', 'lowest' and 'avg', where
      each value is a [batch_size] Tensor. The 'lowest' Tensor contains
      for each batch the lowest index of a library key that matches the query
      key for that batch element when the library keys are sorted in descending
      order by similarity score. The 'highest' and 'avg'
      Tensors are defined similarly. The first two are tf.int32 and the
      final is a tf.float32.

      Note that the behavior of these metrics is undefined when there are ties
      within a row of similarities.
    best_query_similarities: the value of the similarities evaluated at
      the lowest query rank.
  """

  (highest_rank, lowest_rank, avg_rank, best_query_similarities) = tf.py_func(
      _find_query_rank_helper, [similarities, library_keys, query_keys],
      (tf.int32, tf.int32, tf.float32, tf.float32),
      stateful=False)

  query_ranks = {
      'highest': highest_rank,
      'lowest': lowest_rank,
      'avg': avg_rank
  }

  return query_ranks, best_query_similarities 
Example #26
Source File: model_agnostic_evaluate_graph_test.py    From model-analysis with Apache License 2.0
def add_mean_callback(features_dict, predictions_dict, labels_dict):
  """Callback to add our custom post-export metrics."""
  del features_dict

  metric_ops = {}

  # Adding a tf.metrics metric.
  all_values = list(labels_dict.values()) + list(predictions_dict.values())
  metric_ops['tf_metric_mean'] = tf.compat.v1.metrics.mean(all_values)

  # Defining and adding a py_func metric
  # Note that for py_func metrics, you must still store the metric state in
  # tf.Variables.
  total_label = tf.compat.v1.Variable(
      initial_value=0.0,
      dtype=tf.float64,
      trainable=False,
      collections=[
          tf.compat.v1.GraphKeys.METRIC_VARIABLES,
          tf.compat.v1.GraphKeys.LOCAL_VARIABLES
      ],
      validate_shape=True,
      name='total_label')

  def my_func(x):
    return np.sum(x, dtype=np.float64)

  value_op = tf.identity(total_label)
  update_op = tf.compat.v1.assign_add(
      total_label, tf.compat.v1.py_func(my_func, [all_values], tf.float64))

  metric_ops['py_func_total_label'] = value_op, update_op

  return metric_ops 
Example #27
Source File: lstm_models.py    From synvae with MIT License
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
    flat_logits = flat_rnn_output
    flat_truth = tf.argmax(flat_x_target, axis=1)
    flat_predictions = tf.argmax(flat_logits, axis=1)
    r_loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=flat_x_target, logits=flat_logits)

    metric_map = {
        'metrics/accuracy':
            tf.metrics.accuracy(flat_truth, flat_predictions),
        'metrics/mean_per_class_accuracy':
            tf.metrics.mean_per_class_accuracy(
                flat_truth, flat_predictions, flat_x_target.shape[-1].value),
    }
    return r_loss, metric_map 
Example #28
Source File: lstm_models.py    From synvae with MIT License
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
    """Core loss calculation method for flattened outputs.

    Args:
      flat_x_target: The flattened ground truth vectors, sized
        `[sum(x_length), self._output_depth]`.
      flat_rnn_output: The flattened output from all timesteps of the RNN,
        sized `[sum(x_length), rnn_output_size]`.
    Returns:
      r_loss: The unreduced reconstruction losses, sized `[sum(x_length)]`.
      metric_map: A map of metric names to tuples, each of which contain the
        pair of (value_tensor, update_op) from a tf.metrics streaming metric.
    """
    pass 
Example #29
Source File: base_model.py    From synvae with MIT License
def eval(self, input_sequence, output_sequence, sequence_length,
           control_sequence=None):
    """Evaluate on the given sequences, returning metric update ops.

    Args:
      input_sequence: The sequence to be fed to the encoder.
      output_sequence: The sequence expected from the decoder.
      sequence_length: The length of the given sequences (which must be
        identical).
      control_sequence: (Optional) sequence on which to condition the decoder.

    Returns:
      metric_update_ops: tf.metrics update ops.
    """
    metric_map, scalars_to_summarize = self._compute_model_loss(
        input_sequence, output_sequence, sequence_length, control_sequence)

    for n, t in scalars_to_summarize.items():
      metric_map[n] = tf.metrics.mean(t)

    metrics_to_values, metrics_to_updates = (
        tf.contrib.metrics.aggregate_metric_map(metric_map))

    for metric_name, metric_value in metrics_to_values.items():
      tf.summary.scalar(metric_name, metric_value)

    return metrics_to_updates.values() 
Example #30
Source File: vae.py    From disentanglement_lib with Apache License 2.0
def make_metric_fn(*names):
  """Utility function to report tf.metrics in model functions."""

  def metric_fn(*args):
    return {name: tf.metrics.mean(vec) for name, vec in zip(names, args)}

  return metric_fn
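A minimal sketch of what make_metric_fn produces, assuming TensorFlow 1.x graph mode where tf.metrics.mean is available; the tensor names are placeholders. The returned dict maps each name to a (value_op, update_op) pair, which is the format expected by Estimator eval_metric_ops and, via a (metric_fn, tensors) tuple, by TPUEstimator-style eval_metrics.

import tensorflow as tf

metric_fn = make_metric_fn("reconstruction_loss", "kl_loss")

recon_loss = tf.constant([0.4, 0.6, 0.5])  # placeholder per-example losses
kl_loss = tf.constant([0.1, 0.2, 0.3])

# {'reconstruction_loss': (value_op, update_op),
#  'kl_loss': (value_op, update_op)}
metric_ops = metric_fn(recon_loss, kl_loss)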