Python numpy.nanmean() Examples

The following are 30 code examples showing how to use numpy.nanmean(). These examples are extracted from open source projects; the project, author, source file, and license are listed above each example.


You may also want to check out the other available functions and classes of the numpy module.
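
For orientation, here is a minimal, self-contained sketch of what numpy.nanmean() does: it computes the arithmetic mean while ignoring NaN entries, either over the flattened array or along a given axis.

import numpy as np

a = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan]])

np.mean(a)             # -> nan, a single NaN poisons the ordinary mean
np.nanmean(a)          # -> 3.25, the mean of the four non-NaN values
np.nanmean(a, axis=0)  # -> array([2.5, 5., 3.]), column means ignoring NaNs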

Example 1
Project: NiBetaSeries   Author: HBClab   File: test_nistats.py   License: MIT License
def test_select_confounds(confounds_file, selected_confounds, nan_confounds,
                          expanded_confounds):
    import pandas as pd
    import numpy as np

    confounds_df = pd.read_csv(str(confounds_file), sep='\t', na_values='n/a')

    res_df = _select_confounds(str(confounds_file), selected_confounds)

    # check if the correct columns are selected
    assert set(expanded_confounds) == set(res_df.columns)

    # check if nans are being imputed when expected
    if nan_confounds:
        for nan_c in nan_confounds:
            vals = confounds_df[nan_c].values
            expected_result = np.nanmean(vals[vals != 0])
            assert res_df[nan_c][0] == expected_result 
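
The assertion above relies on the imputation convention of replacing an initial NaN with the mean of the remaining non-zero values. A minimal sketch of that pattern with made-up numbers (the column name is hypothetical):

import numpy as np
import pandas as pd

# hypothetical confound column: NaN for the first volume, zeros as padding
col = pd.Series([np.nan, 0.0, 0.31, 0.27, 0.35], name="framewise_displacement")
vals = col.values
fill_value = np.nanmean(vals[vals != 0])  # NaN != 0 is True, so nanmean sees [nan, 0.31, 0.27, 0.35] and returns 0.31
imputed = col.fillna(fill_value)          # the first entry becomes the mean of the valid values
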
Example 2
Project: TVQAplus   Author: jayleicn   File: voc_eval.py   License: MIT License
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
    """Evaluate on voc dataset.
    Args:
        pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
        gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
        iou_thresh: iou thresh
        use_07_metric: boolean
    Returns:
        a dict representing the results
    """
    assert len(gt_boxlists) == len(
        pred_boxlists
    ), "Length of gt and pred lists need to be same."
    prec, rec, n_tp, n_fp, n_pos = calc_detection_voc_prec_rec(
        pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
    )
    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
    prec = {k: v.tolist() for k, v in prec.items()}
    rec = {k: v.tolist() for k, v in rec.items()}
    res = [{"ap": ap[k], "class_id": k, "precisions": prec[k], "recalls": rec[k],
            "n_tp": n_tp[k], "n_fp": n_fp[k], "n_positives": n_pos[k]} for k in ap.keys()]
    return res, np.nanmean(list(ap.values()))  # list() so nanmean reduces over the AP values, not a dict view
Example 3
Project: NeuroKit   Author: neuropsychology   File: signal_synchrony.py   License: MIT License
def _signal_synchrony_correlation(signal1, signal2, window_size, center=False):
    """Calculates pairwise rolling correlation at each time. Grabs the upper triangle, at each timepoints.

    - window: window size of rolling corr in samples
    - center: whether to center result (Default: False, so correlation values are listed on the right.)

    """
    data = pd.DataFrame({"y1": signal1, "y2": signal2})

    rolled = data.rolling(window=window_size, center=center).corr()
    synchrony = rolled["y1"].loc[rolled.index.get_level_values(1) == "y2"].values

    # Realign
    synchrony = np.append(synchrony[int(window_size / 2) :], np.full(int(window_size / 2), np.nan))
    synchrony[np.isnan(synchrony)] = np.nanmean(synchrony)

    return synchrony 
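
The last two lines use a fill pattern worth noting on its own: after re-centring the rolling window, the padding NaNs are replaced in place with the nanmean of the whole array. A minimal sketch of that pattern:

import numpy as np

x = np.array([0.2, 0.8, np.nan, 0.5, np.nan])
x[np.isnan(x)] = np.nanmean(x)  # NaNs replaced by the mean of the valid entries, here 0.5
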
Example 4
Project: DETAD   Author: HumamAlwassel   File: sensitivity_analysis.py   License: MIT License
def compute_average_mAP_N_for_characteristic(sensitivity_analysis, characteristic_name):
    gt_by_characteristic = sensitivity_analysis.ground_truth.groupby(characteristic_name)
    average_mAP_n_by_characteristic_value = OrderedDict()
    
    for characteristic_value, this_characteristic_gt in gt_by_characteristic:
        ap = np.nan*np.zeros(len(sensitivity_analysis.activity_index))
        gt_by_cls = this_characteristic_gt.groupby('label')
        pred_by_cls = sensitivity_analysis.prediction.groupby('label')
        for cls in sensitivity_analysis.activity_index.values():
            this_cls_pred = pred_by_cls.get_group(cls).sort_values(by='score',ascending=False)
            try:
                this_cls_gt = gt_by_cls.get_group(cls)
            except KeyError:  # no ground truth for this class
                continue
            gt_id_to_keep = np.append(this_cls_gt['gt-id'].values, [np.nan])
        
            for tidx, tiou in enumerate(sensitivity_analysis.tiou_thresholds):
                this_cls_pred = this_cls_pred[this_cls_pred[sensitivity_analysis.matched_gt_id_cols[tidx]].isin(gt_id_to_keep)]
            
            ap[cls] = compute_mAP_N(sensitivity_analysis,this_cls_pred,this_cls_gt)

        average_mAP_n_by_characteristic_value[characteristic_value] = np.nanmean(ap)

    return average_mAP_n_by_characteristic_value 
Example 5
Project: Res2Net-maskrcnn   Author: Res2Net   File: voc_eval.py   License: MIT License
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
    """Evaluate on voc dataset.
    Args:
        pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
        gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
        iou_thresh: iou thresh
        use_07_metric: boolean
    Returns:
        a dict representing the results
    """
    assert len(gt_boxlists) == len(
        pred_boxlists
    ), "Length of gt and pred lists need to be same."
    prec, rec = calc_detection_voc_prec_rec(
        pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
    )
    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
    return {"ap": ap, "map": np.nanmean(ap)} 
Example 6
Project: Attention-Gated-Networks   Author: ozan-oktay   File: metrics.py   License: MIT License
def segmentation_scores(label_trues, label_preds, n_class):
    """Returns accuracy score evaluation result.
      - overall accuracy
      - mean accuracy
      - mean IU
      - fwavacc
    """
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()

    return {'overall_acc': acc,
            'mean_acc': acc_cls,
            'freq_w_acc': fwavacc,
            'mean_iou': mean_iu} 
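
np.nanmean matters here because classes that never occur in the ground truth produce 0/0 = NaN in the per-class accuracy and IoU vectors; nanmean then averages over only the classes that are actually present. A small sketch of that effect:

import numpy as np

hist = np.array([[5., 1., 0.],
                 [2., 7., 0.],
                 [0., 0., 0.]])  # the third class never appears
with np.errstate(invalid='ignore'):
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
# iu -> [0.625, 0.7, nan]
np.nanmean(iu)  # -> 0.6625, mean IoU over the two classes that are present
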
Example 7
Project: PLARD   Author: zhechen   File: metrics.py   License: MIT License
def get_scores(self):
        """Returns accuracy score evaluation result.
            - overall accuracy
            - mean accuracy
            - mean IU
            - fwavacc
        """
        hist = self.confusion_matrix
        acc = np.diag(hist).sum() / hist.sum()
        acc_cls = np.diag(hist) / hist.sum(axis=1)
        acc_cls = np.nanmean(acc_cls)
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        mean_iu = np.nanmean(iu)
        freq = hist.sum(axis=1) / hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))

        return {'Overall Acc: \t': acc,
                'Mean Acc : \t': acc_cls,
                'FreqW Acc : \t': fwavacc,
                'Mean IoU : \t': mean_iu,}, cls_iu 
Example 8
Project: quail   Author: ContextLab   File: recmat.py   License: MIT License
def _recmat_smooth(presented, recalled, features, distance, match):

    if match == 'best':
        func = np.argmax
    elif match == 'smooth':
        func = np.nanmean

    simmtx = _similarity_smooth(presented, recalled, features, distance)


    if match == 'best':
        recmat = np.atleast_3d([func(s, 1) for s in simmtx]).astype(np.float64)
        recmat+=1
        recmat[np.isnan(simmtx).any(2)]=np.nan
    elif match == 'smooth':
        recmat = np.atleast_3d([func(s, 0) for s in simmtx]).astype(np.float64)


    return recmat 
Example 9
Project: quail   Author: ContextLab   File: recmat.py   License: MIT License
def _similarity_smooth(presented, recalled, features, distance):
    lists = presented.index.get_values()
    res = np.empty((len(lists), len(features), recalled.iloc[0].shape[0], presented.iloc[0].shape[0]))*np.nan
    for li, l in enumerate(lists):
        p_list = presented.loc[l]
        r_list = recalled.loc[l]
        for i, feature in enumerate(features):
            get_feature = lambda x: np.array(x[feature]) if np.array(pd.notna(x['item'])).any() else np.nan
            p = np.vstack(p_list.apply(get_feature).get_values())
            r = r_list.dropna().apply(get_feature).get_values()
            r = np.vstack(list(filter(lambda x: x is not np.nan, r)))
            tmp = 1 - cdist(r, p, distance)
            res[li, i, :tmp.shape[0], :] =  tmp
    if distance == 'correlation':
        return np.nanmean(res, 1)
    else:
        return np.mean(res, 1) 
Example 10
Project: quail   Author: ContextLab   File: clustering.py   License: MIT License
def _get_weight_best(egg, feature, distdict, permute, n_perms, distance):

    if permute:
        return _permute(egg, feature, distdict, _get_weight_best, n_perms)

    rec = list(egg.get_rec_items().values[0])
    if len(rec) <= 2:
        warnings.warn('Not enough recalls to compute fingerprint, returning default '
              'fingerprint.. (everything is .5)')
        return np.nan

    distmat = get_distmat(egg, feature, distdict)
    matchmat = get_match(egg, feature, distdict)

    ranks = []
    for i in range(len(rec)-1):
        cdx, ndx = np.argmin(matchmat[i, :]), np.argmin(matchmat[i+1, :])
        dists = distmat[cdx, :]
        di = dists[ndx]
        dists_filt = np.array([dist for idx, dist in enumerate(dists)])
        ranks.append(np.mean(np.where(np.sort(dists_filt)[::-1] == di)[0]+1) / len(dists_filt))
    return np.nanmean(ranks) 
Example 11
Project: quail   Author: ContextLab   File: clustering.py   License: MIT License
def _get_weight_smooth(egg, feature, distdict, permute, n_perms, distance):

    if permute:
        return _permute(egg, feature, distdict, _get_weight_smooth, n_perms)

    rec = list(egg.get_rec_items().values[0])
    if len(rec) <= 2:
        warnings.warn('Not enough recalls to compute fingerprint, returning default '
              'fingerprint.. (everything is .5)')
        return np.nan

    distmat = get_distmat(egg, feature, distdict)
    matchmat = get_match(egg, feature, distdict)

    ranks = []
    for i in range(len(rec)-1):
        cdx, ndx = np.argmin(matchmat[i, :]), np.argmin(matchmat[i+1, :])
        dists = distmat[cdx, :]
        di = dists[ndx]
        dists_filt = np.array([dist for idx, dist in enumerate(dists)])
        ranks.append(np.mean(np.where(np.sort(dists_filt)[::-1] == di)[0]+1) / len(dists_filt))
    return np.nanmean(ranks) 
Example 12
Project: R2CNN.pytorch   Author: Xiangyu-CAS   File: voc_eval.py   License: MIT License
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
    """Evaluate on voc dataset.
    Args:
        pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
        gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
        iou_thresh: iou thresh
        use_07_metric: boolean
    Returns:
        a dict representing the results
    """
    assert len(gt_boxlists) == len(
        pred_boxlists
    ), "Length of gt and pred lists need to be same."
    prec, rec = calc_detection_voc_prec_rec(
        pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
    )
    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
    return {"ap": ap, "map": np.nanmean(ap)} 
Example 13
Project: recruit   Author: Frank-qlu   File: nanfunctions.py   License: Apache License 2.0
def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
                           interpolation='linear', keepdims=np._NoValue):
    """Assumes that q is in [0, 1], and is an ndarray"""
    # apply_along_axis in _nanpercentile doesn't handle empty arrays well,
    # so deal with them upfront
    if a.size == 0:
        return np.nanmean(a, axis, out=out, keepdims=keepdims)

    r, k = function_base._ureduce(
        a, func=_nanquantile_ureduce_func, q=q, axis=axis, out=out,
        overwrite_input=overwrite_input, interpolation=interpolation
    )
    if keepdims and keepdims is not np._NoValue:
        return r.reshape(q.shape + k)
    else:
        return r 
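
In this NumPy-internal helper, np.nanmean serves as a shape-preserving way to return an all-NaN result for empty input (it also emits a "Mean of empty slice" RuntimeWarning). A small sketch of that behaviour:

import warnings
import numpy as np

a = np.empty((0, 3))
with warnings.catch_warnings():
    warnings.simplefilter('ignore', RuntimeWarning)  # silence "Mean of empty slice"
    result = np.nanmean(a, axis=0)
# result -> array([nan, nan, nan]), an all-NaN answer with the expected shape
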
Example 14
Project: EMANet   Author: XiaLiPKU   File: metric.py   License: GNU General Public License v3.0
def cal_scores(hist):
    n_class = settings.N_CLASSES
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    cls_iu = dict(zip(label_names, iu))

    return {
        'pAcc': acc,
        'mAcc': acc_cls,
        'fIoU': fwavacc,
        'mIoU': mean_iu,
    }, cls_iu 
Example 15
Project: EMANet   Author: XiaLiPKU   File: metric.py   License: GNU General Public License v3.0
def cal_scores(hist):
    n_class = settings.N_CLASSES
    #acc = np.diag(hist).sum() / hist.sum()
    #acc_cls = np.diag(hist) / hist.sum(axis=1)
    #acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    cls_iu = dict(zip(label_names, iu))

    return {
        #"pAcc": acc,
        #"mAcc": acc_cls,
        "fIoU": fwavacc,
        "mIoU": mean_iu,
    }#, cls_iu 
Example 16
Project: typhon   Author: atmtools   File: scores.py   License: MIT License
def mape(y_pred, y_test):
    r"""
    The Mean Absolute Percentage Error (MAPE)

    The MAPE is computed as the mean of the absolute value of the relative
    error in percent, i.e.:

    .. math::

        \text{MAPE}(\mathbf{y}, \mathbf{y}_{true}) =
        \frac{100\%}{n}\sum_{i = 0}^n \frac{|y_{\text{pred},i} - y_{\text{true},i}|}
             {|y_{\text{true},i}|}

    Arguments:

        y_pred(numpy.array): The predicted scalar values.

        y_test(numpy.array): The true values.

    Returns:

        The MAPE for the given predictions.

    """
    return np.nanmean(100.0 * np.abs(y_test - y_pred.ravel()) / np.abs(y_test).ravel()) 
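
Because the reduction is np.nanmean, samples whose true value is NaN are skipped rather than propagating into the score. A short usage sketch with made-up numbers:

import numpy as np

y_true = np.array([2.0, np.nan, 4.0])
y_pred = np.array([1.8, 3.1, 5.0])
np.nanmean(100.0 * np.abs(y_true - y_pred) / np.abs(y_true))  # -> 17.5, the NaN sample is ignored
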
Example 17
Project: SegmenTron   Author: LikeLy-Journey   File: visualize.py   License: Apache License 2.0
def print_iou(iu, mean_pixel_acc, class_names=None, show_no_back=False):
    n = iu.size
    lines = []
    for i in range(n):
        if class_names is None:
            cls = 'Class %d:' % (i + 1)
        else:
            cls = '%d %s' % (i + 1, class_names[i])
        # lines.append('%-8s: %.3f%%' % (cls, iu[i] * 100))
    mean_IU = np.nanmean(iu)
    mean_IU_no_back = np.nanmean(iu[1:])
    if show_no_back:
        lines.append('mean_IU: %.3f%% || mean_IU_no_back: %.3f%% || mean_pixel_acc: %.3f%%' % (
            mean_IU * 100, mean_IU_no_back * 100, mean_pixel_acc * 100))
    else:
        lines.append('mean_IU: %.3f%% || mean_pixel_acc: %.3f%%' % (mean_IU * 100, mean_pixel_acc * 100))
    lines.append('=================================================')
    line = "\n".join(lines)

    print(line) 
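
The two nanmean calls give the class-averaged IoU with and without the background class (index 0), while still skipping any class whose IoU is NaN. A small sketch with made-up values:

import numpy as np

iu = np.array([0.95, 0.60, np.nan, 0.70])  # index 0 = background, NaN = class absent from this split
np.nanmean(iu)      # -> 0.75, mean IoU over all present classes
np.nanmean(iu[1:])  # -> 0.65, the same mean with the background class excluded
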
Example 18
Project: SPNet   Author: subhc   File: metric.py   License: MIT License
def scores(label_trues, label_preds, n_class):
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        if(lt.size > 0):
            hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    cls_iu = dict(zip(range(n_class), iu))

    return {
        "Overall Acc": acc,
        "Mean Acc": acc_cls,
        "FreqW Acc": fwavacc,
        "Mean IoU": mean_iu,
    }, cls_iu 
Example 19
Project: SPNet   Author: subhc   File: metric2.py   License: MIT License
def scores(label_trues, label_preds, n_class):
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        if(lt.size > 0):
            hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    cls_iu = dict(zip(range(n_class), iu))

    return {
        "Overall Acc": acc,
        "Mean Acc": acc_cls,
        "FreqW Acc": fwavacc,
        "Mean IoU": mean_iu,
    }, cls_iu 
Example 20
Project: mars   Author: mars-project   File: nanmean.py   License: Apache License 2.0
def execute_agg(cls, ctx, op):
        axis = cls.get_axis(op.axis)

        a = ctx[op.inputs[0].key]
        if not isinstance(a, (list, tuple)):
            (inp,), device_id, xp = as_same_device(
                [a], device=op.device, ret_extra=True)

            with device(device_id):
                ctx[op.outputs[0].key] = xp.nanmean(inp, axis=axis, dtype=op.dtype,
                                                    keepdims=bool(op.keepdims))
        else:
            (_data, _count), device_id, xp = as_same_device(
                a, device=op.device, ret_extra=True)

            with device(device_id):
                chunk_count = xp.sum(_count, axis=axis, dtype=op.dtype,
                                     keepdims=bool(op.keepdims))
                chunk_sum = xp.sum(_data, axis=axis, dtype=op.dtype,
                                   keepdims=bool(op.keepdims))
                ctx[op.outputs[0].key] = xp.true_divide(chunk_sum, chunk_count,
                                                        dtype=op.dtype) 
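
The else branch combines partial reductions from several chunks; assuming _count holds the per-chunk counts of non-NaN elements, dividing the summed data by the summed counts reproduces nanmean over the concatenated array. A single-array sketch of that identity (the chunk bookkeeping is omitted):

import numpy as np

x = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan]])
chunk_sum = np.nansum(x, axis=0)            # sums over the non-NaN values
chunk_count = np.sum(~np.isnan(x), axis=0)  # counts of the non-NaN values
np.allclose(chunk_sum / chunk_count, np.nanmean(x, axis=0))  # -> True
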
Example 21
Project: lsm   Author: akar43   File: evaluate.py   License: MIT License
def print_depth_stats(mids, err):
    shape_ids = np.unique([m[0] for m in mids])
    serr = dict()
    for sid in shape_ids:
        serr[sid] = []

    for ex, e in enumerate(err):
        serr[mids[ex][0]].append(e)

    table = []
    smean = []
    for s in serr:
        sm = np.nanmean(serr[s])
        table.append([s, sm])
        smean.append(sm)
    table.append(['Mean', np.nanmean(smean)])
    ptable = tabulate(table, headers=['SID', 'L1 error'], floatfmt=".4f")
    return smean, ptable 
Example 22
Project: PanopticSegmentation   Author: dmechea   File: metric.py   License: MIT License
def scores(label_trues, label_preds, n_class):
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    valid = hist.sum(axis=1) > 0  # added
    mean_iu = np.nanmean(iu[valid])
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    cls_iu = dict(zip(range(n_class), iu))

    return {
        "Overall Acc": acc,
        "Mean Acc": acc_cls,
        "FreqW Acc": fwavacc,
        "Mean IoU": mean_iu,
        "Class IoU": cls_iu,
    } 
Example 23
Project: lambda-packs   Author: ryfeus   File: nanfunctions.py   License: MIT License
def _nanquantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
                           interpolation='linear', keepdims=np._NoValue):
    """Assumes that q is in [0, 1], and is an ndarray"""
    # apply_along_axis in _nanpercentile doesn't handle empty arrays well,
    # so deal with them upfront
    if a.size == 0:
        return np.nanmean(a, axis, out=out, keepdims=keepdims)

    r, k = function_base._ureduce(
        a, func=_nanquantile_ureduce_func, q=q, axis=axis, out=out,
        overwrite_input=overwrite_input, interpolation=interpolation
    )
    if keepdims and keepdims is not np._NoValue:
        return r.reshape(q.shape + k)
    else:
        return r 
Example 24
Project: DOTA_models   Author: ringringyi   File: object_detection_evaluation.py   License: Apache License 2.0
def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      recalls_per_class: List of recalls, each recall is a float numpy array
      corloc_per_class: numpy float array
      mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warn(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      scores = np.concatenate(self.scores_per_class[class_index])
      tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc) 
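
self.average_precision_per_class is presumably initialised with NaN entries, so classes skipped for having no ground truth stay NaN and are left out of mean_ap by np.nanmean rather than being counted as zero. A small sketch of that bookkeeping (sizes and values are made up):

import numpy as np

average_precision_per_class = np.full(4, np.nan)   # hypothetical: 4 classes, all NaN to start
average_precision_per_class[[0, 2]] = [0.8, 0.6]   # only two classes had ground truth
np.nanmean(average_precision_per_class)            # -> 0.7, the skipped classes do not drag mAP down
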
Example 25
Project: Pytorch-Project-Template   Author: moemen95   File: metrics.py   License: MIT License
def evaluate(self):
        acc = np.diag(self.hist).sum() / self.hist.sum()
        acc_cls = np.diag(self.hist) / self.hist.sum(axis=1)
        acc_cls = np.nanmean(acc_cls)
        iu = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
        mean_iu = np.nanmean(iu)
        freq = self.hist.sum(axis=1) / self.hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        return acc, acc_cls, iu, mean_iu, fwavacc 
Example 26
Project: object_detector_app   Author: datitran   File: object_detection_evaluation.py   License: MIT License
def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      recalls_per_class: List of recalls, each recall is a float numpy array
      corloc_per_class: numpy float array
      mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warn(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      scores = np.concatenate(self.scores_per_class[class_index])
      tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class.append(precision)
      self.recalls_per_class.append(recall)
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc) 
Example 27
Project: overhaul-distillation   Author: clovaai   File: metrics.py   License: MIT License
def Pixel_Accuracy_Class(self):
        Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
        Acc = np.nanmean(Acc)
        return Acc 
Example 28
Project: overhaul-distillation   Author: clovaai   File: metrics.py   License: MIT License
def Mean_Intersection_over_Union(self):
        MIoU = np.diag(self.confusion_matrix) / (
                    np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
                    np.diag(self.confusion_matrix))
        MIoU = np.nanmean(MIoU)
        return MIoU 
Example 29
Project: pymoo   Author: msu-coinlab   File: high_tradeoff_inverted.py   License: Apache License 2.0
def _do(self, F, **kwargs):

        n, m = F.shape

        if self.normalize:
            F = normalize(F, self.ideal_point, self.nadir_point, estimate_bounds_if_none=True)

        neighbors_finder = NeighborFinder(F, n_min_neigbors="auto")

        mu = np.full(n, - np.inf)

        # for each solution in the set calculate the least amount of improvement per unit deterioration
        for i in range(n):

            # neighbors to the current point
            neighbors = neighbors_finder.find(i)

            # calculate the trade-off to all neighbours
            diff = F[neighbors] - F[i]

            # calculate sacrifice and gain
            sacrifice = np.maximum(0, diff).sum(axis=1)
            gain = np.maximum(0, -diff).sum(axis=1)

            np.warnings.filterwarnings('ignore')
            tradeoff = sacrifice / gain

            # average the trade-off over all neighbours; 0/0 entries become NaN and are ignored by nanmean
            mu[i] = np.nanmean(tradeoff)

        return find_outliers_upper_tail(mu) 
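
One caveat worth noting: nanmean drops the NaN entries produced by 0/0 trade-offs, but a neighbour with positive sacrifice and zero gain divides to inf and still dominates the mean. A tiny sketch with made-up numbers:

import numpy as np

sacrifice = np.array([0.0, 0.4, 0.2])
gain = np.array([0.0, 0.0, 0.5])
with np.errstate(divide='ignore', invalid='ignore'):
    tradeoff = sacrifice / gain  # -> [nan, inf, 0.4]
np.nanmean(tradeoff)             # -> inf; only the 0/0 NaN is ignored
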
Example 30
Project: NeuroKit   Author: neuropsychology   File: ecg_rsa.py   License: MIT License
def _ecg_rsa_p2t(rsp_onsets, rpeaks, sampling_rate, continuous=False, ecg_period=None, rsp_peaks=None):
    """Peak-to-trough algorithm (P2T)"""

    # Find all RSP cycles and the Rpeaks within
    cycles_rri = []
    for idx in range(len(rsp_onsets) - 1):
        cycle_init = rsp_onsets[idx]
        cycle_end = rsp_onsets[idx + 1]
        cycles_rri.append(rpeaks[np.logical_and(rpeaks >= cycle_init, rpeaks < cycle_end)])

    # Iterate over all cycles
    rsa_values = np.full(len(cycles_rri), np.nan)
    for i, cycle in enumerate(cycles_rri):
        # Estimate of RSA during each breath
        RRis = np.diff(cycle) / sampling_rate
        if len(RRis) > 1:
            rsa_values[i] = np.max(RRis) - np.min(RRis)

    if continuous is False:
        rsa = {"RSA_P2T_Mean": np.nanmean(rsa_values)}
        rsa["RSA_P2T_Mean_log"] = np.log(rsa["RSA_P2T_Mean"])  # pylint: disable=E1111
        rsa["RSA_P2T_SD"] = np.nanstd(rsa_values, ddof=1)
        rsa["RSA_P2T_NoRSA"] = len(pd.Series(rsa_values).index[pd.Series(rsa_values).isnull()])
    else:
        rsa = signal_interpolate(
            x_values=rsp_peaks[~np.isnan(rsa_values)],
            y_values=rsa_values[~np.isnan(rsa_values)],
            x_new=np.arange(len(ecg_period)),
        )

    return rsa
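
Breaths that contained fewer than two R-R intervals keep their NaN placeholder, so np.nanmean and np.nanstd summarise only the breaths where RSA could actually be estimated. A small sketch with made-up values:

import numpy as np

rsa_values = np.array([0.12, np.nan, 0.09, 0.15])  # NaN: a breath with fewer than 2 R-R intervals
np.nanmean(rsa_values)         # -> 0.12, mean over the valid breaths only
np.nanstd(rsa_values, ddof=1)  # -> sample standard deviation over the same valid breaths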