Python sklearn.metrics.roc_curve() Examples

The following are 30 code examples of sklearn.metrics.roc_curve(), collected from open-source projects. Each example is taken from the project and source file named in its header. You may also want to check out the other available functions and classes of the sklearn.metrics module.
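Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the usage pattern they all share: pass ground-truth binary labels and predicted scores to roc_curve(), then pass the resulting FPR and TPR arrays to auc() for a single summary score. The toy data is purely illustrative.

import numpy as np
from sklearn.metrics import roc_curve, auc

# Toy data: binary ground-truth labels and continuous prediction scores.
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

# roc_curve returns the false positive rates, true positive rates and the
# score thresholds at which each (fpr, tpr) point was computed.
fpr, tpr, thresholds = roc_curve(y_true, y_score)

# auc integrates the curve to give a single scalar summary.
roc_auc = auc(fpr, tpr)
print(fpr, tpr, thresholds, roc_auc)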
Example #1
Source File: vanilla_model.py    From OpenChem with MIT License
def fit_model(self, data, cross_val_data, cross_val_labels):
        eval_metrics = []
        for i in range(self.n_ensemble):
            train_sm = np.concatenate(cross_val_data[:i] +
                                      cross_val_data[(i + 1):])
            test_sm = cross_val_data[i]
            train_labels = np.concatenate(cross_val_labels[:i] +
                                          cross_val_labels[(i + 1):])
            test_labels = cross_val_labels[i]
            fp_train = get_fp(train_sm)
            fp_test = get_fp(test_sm)
            self.model[i].fit(fp_train, train_labels.ravel())
            predicted = self.model[i].predict(fp_test)
            if self.model_type == 'classifier':
                fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)
                eval_metrics.append(metrics.auc(fpr, tpr))
                metrics_type = 'AUC'
            elif self.model_type == 'regressor':
                r2 = metrics.r2_score(test_labels, predicted)
                eval_metrics.append(r2)
                metrics_type = 'R^2 score'
        return eval_metrics, metrics_type 
Example #2
Source File: util.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def compute_roc(y_true, y_pred, plot=False):
    """
    TODO
    :param y_true: ground truth
    :param y_pred: predictions
    :param plot:
    :return:
    """
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Example #3
Source File: metrics_util.py    From DeepLearningSmells with Apache License 2.0
def get_all_metrics_(eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))

    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))

    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))

    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))

    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))

    return auc_, precision, recall, f1, average_precision, fpr, tpr 
Example #4
Source File: __init__.py    From EDeN with MIT License
def plot_roc_curve(y_true, y_score, size=None):
    """plot_roc_curve."""
    false_positive_rate, true_positive_rate, thresholds = roc_curve(
        y_true, y_score)
    if size is not None:
        plt.figure(figsize=(size, size))
        plt.axis('equal')
    plt.plot(false_positive_rate, true_positive_rate, lw=2, color='navy')
    plt.plot([0, 1], [0, 1], color='gray', lw=1, linestyle='--')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.ylim([-0.05, 1.05])
    plt.xlim([-0.05, 1.05])
    plt.grid()
    plt.title('Receiver operating characteristic AUC={0:0.2f}'.format(
        roc_auc_score(y_true, y_score))) 
Example #5
Source File: model.py    From fake-news-detection with MIT License
def print_roc(self, y_true, y_scores, filename):
        '''
        Prints the ROC for this model.
        '''
        fpr, tpr, thresholds = metrics.roc_curve(y_true, y_scores)
        plt.figure()
        plt.plot(fpr, tpr, color='darkorange', label='ROC curve (area = %0.2f)' % self.roc_auc)
        plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic')
        plt.legend(loc="lower right")
        plt.savefig(filename)
        plt.close() 
Example #6
Source File: metric.py    From tensorflow_end2end_speech_recognition with MIT License
def compute_auc(y_true, y_pred, label_index):
    """Compute Area Under the Curve (AUC) metric.
    Args:
        y_true: true class
        y_pred: probabilities for a class
        label_index:
            label_index == 1 => laughter (class1) vs. others (class0)
            label_index == 2 => filler (class1) vs. others (class0)
    Returns:
        auc_val: the computed AUC value
    """
    for i in range(y_true.shape[0]):
        y_true[i] = 0 if y_true[i] != label_index else 1

    y_true = np.reshape(y_true, (-1,))
    y_pred = np.reshape(y_pred[:, label_index], (-1,))

    try:
        fpr, tpr, _ = roc_curve(y_true, y_pred, pos_label=1)
    except UndefinedMetricWarning:
        pass
    auc_val = auc(fpr, tpr)
    return auc_val 
Example #7
Source File: util.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Example #8
Source File: classification.py    From Kaggler with MIT License
def plot_roc_curve(y, p):
    fpr, tpr, _ = roc_curve(y, p)

    plt.plot(fpr, tpr)
    plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate') 
Example #9
Source File: noduleCADEvaluationLUNA16.py    From DeepLung with GNU General Public License v3.0
def computeFROC(FROCGTList, FROCProbList, totalNumberOfImages, excludeList):
    # Remove excluded candidates
    FROCGTList_local = []
    FROCProbList_local = []
    for i in range(len(excludeList)):
        if excludeList[i] == False:
            FROCGTList_local.append(FROCGTList[i])
            FROCProbList_local.append(FROCProbList[i])
    
    numberOfDetectedLesions = sum(FROCGTList_local)
    totalNumberOfLesions = sum(FROCGTList)
    totalNumberOfCandidates = len(FROCProbList_local)
    fpr, tpr, thresholds = skl_metrics.roc_curve(FROCGTList_local, FROCProbList_local)
    if sum(FROCGTList) == len(FROCGTList): # Handle the border case when there are no false positives and the ROC analysis gives NaN values.
      print("WARNING, this system has no false positives..")
      fps = np.zeros(len(fpr))
    else:
      fps = fpr * (totalNumberOfCandidates - numberOfDetectedLesions) / totalNumberOfImages
    sens = (tpr * numberOfDetectedLesions) / totalNumberOfLesions
    return fps, sens, thresholds 
Example #10
Source File: link_prediction.py    From edge2vec with BSD 3-Clause "New" or "Revised" License
def plot_roc(true,pred):
    '''
    plot the ROC curve
    '''
    fpr, tpr, thresholds = metrics.roc_curve(true, pred, pos_label=1)
    plt.plot(fpr, tpr,c = "blue",markersize=2,label='edge2vec')
    plt.show() 
Example #11
Source File: test_ranking.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_roc_returns_consistency():
    # Test whether the returned threshold matches up with tpr
    # make small toy dataset
    y_true, _, probas_pred = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)

    # use the given thresholds to determine the tpr
    tpr_correct = []
    for t in thresholds:
        tp = np.sum((probas_pred >= t) & y_true)
        p = np.sum(y_true)
        tpr_correct.append(1.0 * tp / p)

    # compare tpr and tpr_correct to see if the thresholds' order was correct
    assert_array_almost_equal(tpr, tpr_correct, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape) 
Example #12
Source File: results_processor.py    From pipgcn with GNU General Public License v3.0
def roc(self, data, model, tt, name):
        scores = self.get_predictions_loss(data, model, tt)[0]
        labels = [prot["label"][:, 2] for prot in data[tt]]
        fprs = []
        tprs = []
        roc_aucs = []
        for s, l in zip(scores, labels):
            fpr, tpr, _ = roc_curve(l, s)
            roc_auc = auc(fpr, tpr)
            fprs.append(fpr)
            tprs.append(tpr)
            roc_aucs.append(roc_auc)
        auc_prot_med = np.median(roc_aucs)
        auc_prot_ave = np.mean(roc_aucs)
        printt("{} average protein auc: {:0.3f}".format(name, auc_prot_ave))
        printt("{} median protein auc: {:0.3f}".format(name, auc_prot_med))
        return ["auc_prot_ave_" + tt, "auc_prot_med_" + tt], [auc_prot_ave, auc_prot_med] 
Example #13
Source File: metrics_util.py    From DeepLearningSmells with Apache License 2.0
def get_all_metrics(model, eval_data, eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))

    score = model.evaluate(eval_data, eval_labels, verbose=0)
    print("Test accuracy: " + str(score[1]))

    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))

    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))

    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))

    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))

    return auc_, score[1], precision, recall, f1, average_precision, fpr, tpr 
Example #14
Source File: conv_featuremaps_visualization.py    From MCF-3D-CNN with MIT License
def accuracy(y_true, y_pred):        
    # Compute the confusion matrix
    y = np.zeros(len(y_true))
    y_ = np.zeros(len(y_true))    
    for i in range(len(y_true)): 
        y[i] = np.argmax(y_true[i,:])
        y_[i] = np.argmax(y_pred[i,:])
    cnf_mat = confusion_matrix(y, y_)
    
    # Acc = 1.0*(cnf_mat[1][1]+cnf_mat[0][0])/len(y_true)
    # Sens = 1.0*cnf_mat[1][1]/(cnf_mat[1][1]+cnf_mat[1][0])
    # Spec = 1.0*cnf_mat[0][0]/(cnf_mat[0][0]+cnf_mat[0][1])
    
    # # Plot the ROC curve
    # fpr, tpr, thresholds = roc_curve(y_true[:,0], y_pred[:,0])
    # Auc = auc(fpr, tpr)
    
    
    # Compute multi-class evaluation metrics
    Sens = recall_score(y, y_, average='macro')
    Prec = precision_score(y, y_, average='macro')
    F1 = f1_score(y, y_, average='weighted') 
    Support = precision_recall_fscore_support(y, y_, beta=0.5, average=None)
    return Sens, Prec, F1, cnf_mat 
Example #15
Source File: evaluate.py    From object_centric_VAD with MIT License
def compute_eer(loss_file,reverse,smoothing):
    if not os.path.isdir(loss_file):
        loss_file_list = [loss_file]
    else:
        loss_file_list = os.listdir(loss_file)
        loss_file_list = [os.path.join(loss_file, sub_loss_file) for sub_loss_file in loss_file_list]

    optimal_results = RecordResult(auc=np.inf)
    for sub_loss_file in loss_file_list:
        dataset, scores, labels = get_scores_labels(sub_loss_file,reverse,smoothing)
        fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
        eer = cal_eer(fpr, tpr)

        results = RecordResult(fpr, tpr, eer, dataset, sub_loss_file)

        if optimal_results > results:
            optimal_results = results

        if os.path.isdir(loss_file):
            print(results)
    print('##### optimal result and model EER = {}'.format(optimal_results))
    return optimal_results 
Example #16
Source File: metric_interface.py    From FATE with Apache License 2.0
def roc(self, labels, pred_scores):
        if self.eval_type == consts.BINARY:
            fpr, tpr, thresholds = roc_curve(np.array(labels), np.array(pred_scores), drop_intermediate=1)
            fpr, tpr, thresholds = list(map(float, fpr)), list(map(float, tpr)), list(map(float, thresholds))

            filt_thresholds, cuts = self.__filt_threshold(thresholds=thresholds, step=0.01)
            new_thresholds = []
            new_tpr = []
            new_fpr = []
            for threshold in filt_thresholds:
                index = thresholds.index(threshold)
                new_tpr.append(tpr[index])
                new_fpr.append(fpr[index])
                new_thresholds.append(threshold)

            fpr = new_fpr
            tpr = new_tpr
            thresholds = new_thresholds
            return fpr, tpr, thresholds, cuts
        else:
            logging.warning("roc_curve is just suppose Binary Classification! return None as results")
            fpr, tpr, thresholds, cuts = None, None, None, None

            return fpr, tpr, thresholds, cuts 
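The __filt_threshold helper used above is defined elsewhere in FATE and is not shown here. As a rough, hypothetical stand-in (an assumption about its behaviour, not FATE's actual code), it can be thought of as downsampling the sorted threshold list to roughly one threshold per step-sized cut:

import numpy as np

def filt_threshold(thresholds, step):
    # Hypothetical sketch: keep one threshold per `step`-sized fraction of the
    # threshold list, so a step of 0.01 keeps about 100 points on the curve.
    cuts = list(np.arange(0, 1, step))
    index_list = [int(len(thresholds) * cut) for cut in cuts]
    filt_thresholds = [thresholds[idx] for idx in index_list]
    return filt_thresholds, cuts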
Example #17
Source File: auc_test.py    From allennlp with Apache License 2.0
def test_auc_gold_labels_behaviour(self, device: str):
        # Check that it works with different pos_label
        auc = Auc(positive_label=4)

        predictions = torch.randn(8, device=device)
        labels = torch.randint(3, 5, (8,), dtype=torch.long, device=device)
        # We make sure that the positive label is always present.
        labels[0] = 4
        auc(predictions, labels)
        computed_auc_value = auc.get_metric(reset=True)

        false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
            labels.cpu().numpy(), predictions.cpu().numpy(), pos_label=4
        )
        real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
        assert_allclose(real_auc_value, computed_auc_value)

        # Check that it errs on getting more than 2 labels.
        with pytest.raises(ConfigurationError) as _:
            labels = torch.tensor([3, 4, 5, 6, 7, 8, 9, 10], device=device)
            auc(predictions, labels) 
Example #18
Source File: noduleCADEvaluationLUNA16.py    From luna16 with BSD 2-Clause "Simplified" License
def computeFROC(FROCGTList, FROCProbList, totalNumberOfImages, excludeList):
    # Remove excluded candidates
    FROCGTList_local = []
    FROCProbList_local = []
    for i in range(len(excludeList)):
        if excludeList[i] == False:
            FROCGTList_local.append(FROCGTList[i])
            FROCProbList_local.append(FROCProbList[i])

    numberOfDetectedLesions = sum(FROCGTList_local)
    totalNumberOfLesions = sum(FROCGTList)
    totalNumberOfCandidates = len(FROCProbList_local)
    fpr, tpr, thresholds = skl_metrics.roc_curve(FROCGTList_local, FROCProbList_local)
    if sum(FROCGTList) == len(FROCGTList): # Handle the border case when there are no false positives and the ROC analysis gives NaN values.
      print("WARNING, this system has no false positives..")
      fps = np.zeros(len(fpr))
    else:
      fps = fpr * (totalNumberOfCandidates - numberOfDetectedLesions) / totalNumberOfImages
    sens = (tpr * numberOfDetectedLesions) / totalNumberOfLesions
    return fps, sens, thresholds 
Example #19
Source File: test_ranking.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_roc_curve_one_label():
    y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # assert there are warnings
    w = UndefinedMetricWarning
    fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
    # all true labels, all fpr should be nan
    assert_array_equal(fpr, np.full(len(thresholds), np.nan))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # assert there are warnings
    fpr, tpr, thresholds = assert_warns(w, roc_curve,
                                        [1 - x for x in y_true],
                                        y_pred)
    # all negative labels, all tpr should be nan
    assert_array_equal(tpr, np.full(len(thresholds), np.nan))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape) 
Example #20
Source File: display.py    From diogenes with MIT License
def add_graph_for_best(self, func_name):
        """Adds a graph to report that gives performance of the best Trial

        Parameters
        ----------
        func_name : str
            Name of a function that can be run on a Trial that returns a 
            figure. For example 'roc_curve' or 'prec_recall_curve'
        """
        if self.__exp is None:
            raise ReportError('No experiment provided for this report. '
                              'Cannot add graph for best trial.')
        best_trial = max(
            self.__exp.trials, 
            key=lambda trial: trial.average_score())
        fig = getattr(best_trial, func_name)()
        self.add_fig(fig)
        self.add_text('Best trial is trial {} ({})'.format(
            self.__back_indices[best_trial],
            best_trial))
        plt.close() 
Example #21
Source File: test_ranking.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_roc_curve_hard():
    # roc_curve for hard decisions
    y_true, pred, probas_pred = make_prediction(binary=True)

    # always predict one
    trivial_pred = np.ones(y_true.shape)
    fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.50, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # always predict zero
    trivial_pred = np.zeros(y_true.shape)
    fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.50, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # hard decisions
    fpr, tpr, thresholds = roc_curve(y_true, pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.78, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape) 
Example #22
Source File: test_ranking.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_roc_curve_fpr_tpr_increasing():
    # Ensure that fpr and tpr returned by roc_curve are increasing.
    # Construct an edge case with float y_score and sample_weight
    # when some adjacent values of fpr and tpr are actually the same.
    y_true = [0, 0, 1, 1, 1]
    y_score = [0.1, 0.7, 0.3, 0.4, 0.5]
    sample_weight = np.repeat(0.2, 5)
    fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
    assert_equal((np.diff(fpr) < 0).sum(), 0)
    assert_equal((np.diff(tpr) < 0).sum(), 0) 
Example #23
Source File: test_ranking.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_roc_curve_drop_intermediate():
    # Test that drop_intermediate drops the correct thresholds
    y_true = [0, 0, 0, 0, 1, 1]
    y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
    tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
    assert_array_almost_equal(thresholds, [2., 1., 0.7, 0.])

    # Test dropping thresholds with repeating scores
    y_true = [0, 0, 0, 0, 0, 0, 0,
              1, 1, 1, 1, 1, 1]
    y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
               0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
    tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
    assert_array_almost_equal(thresholds,
                              [2.0, 1.0, 0.9, 0.7, 0.6, 0.]) 
Example #24
Source File: analysis.py    From PointNetGPD with MIT License
def roc_curve(self, plot=False, line_width=2, font_size=15, color='b', style='-', label=''):
        pred_probs_vec, labels_vec = self.label_vectors()
        fpr, tpr, thresholds = sm.roc_curve(labels_vec, pred_probs_vec)

        if plot:
            plt.plot(fpr, tpr, linewidth=line_width, color=color, linestyle=style, label=label)
            plt.xlabel('FPR', fontsize=font_size)
            plt.ylabel('TPR', fontsize=font_size)
        return fpr, tpr, thresholds 
Example #25
Source File: auc_regressor.py    From xam with MIT License
def score(self, X, y):
        fpr, tpr, _ = metrics.roc_curve(y, sp.dot(X, self.coef_))
        return metrics.auc(fpr, tpr) 
Example #26
Source File: consensus_test_from_CV.py    From conv_qsar_fast with MIT License
def parity_plot(true, pred, set_label):
		if len(true) == 0:
			print('skipping parity plot for empty dataset')
			return

		try:
			# Trim it to recorded values (not NaN)

			true = np.array(true).flatten()
			pred = np.array(pred).flatten()

			pred = pred[~np.isnan(true)]
			true = true[~np.isnan(true)]

			true = np.array(true).flatten()
			pred = np.array(pred).flatten()

			# For TOX21
			from sklearn.metrics import roc_auc_score, roc_curve, auc
			roc_x, roc_y, _ = roc_curve(true, pred)
			AUC = roc_auc_score(true, pred)
			plt.figure()
			lw = 2
			plt.plot(roc_x, roc_y, color='darkorange',
				lw = lw, label = 'ROC curve (area = %0.3f)' % AUC)
			plt.plot([0, 1], [0, 1], color='navy', lw = lw, linestyle = '--')
			plt.xlim([0.0, 1.0])
			plt.ylim([0.0, 1.05])
			plt.xlabel('False Positive Rate')
			plt.ylabel('True Positive Rate')
			plt.title('ROC for {}'.format(set_label))
			plt.legend(loc = "lower right")
			plt.savefig(os.path.join(test_fpath, ' {} ROC.png'.format(set_label)), bbox_inches = 'tight')
			plt.clf()

		except Exception as e:
			print(e)

	# Create plots for datasets 
Example #27
Source File: auc.py    From allennlp with Apache License 2.0
def get_metric(self, reset: bool = False):
        if self._all_gold_labels.shape[0] == 0:
            return 0.5
        false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
            self._all_gold_labels.cpu().numpy(),
            self._all_predictions.cpu().numpy(),
            pos_label=self._positive_label,
        )
        auc = metrics.auc(false_positive_rates, true_positive_rates)
        if reset:
            self.reset()
        return auc 
Example #28
Source File: auc_test.py    From allennlp with Apache License 2.0
def test_auc_computation(self, device: str):
        auc = Auc()
        all_predictions = []
        all_labels = []
        for _ in range(5):
            predictions = torch.randn(8, device=device)
            labels = torch.randint(0, 2, (8,), dtype=torch.long, device=device)

            auc(predictions, labels)

            all_predictions.append(predictions)
            all_labels.append(labels)

        computed_auc_value = auc.get_metric(reset=True)

        false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
            torch.cat(all_labels, dim=0).cpu().numpy(),
            torch.cat(all_predictions, dim=0).cpu().numpy(),
        )
        real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
        assert_allclose(real_auc_value, computed_auc_value)

        # One more computation to assure reset works.
        predictions = torch.randn(8, device=device)
        labels = torch.randint(0, 2, (8,), dtype=torch.long, device=device)

        auc(predictions, labels)
        computed_auc_value = auc.get_metric(reset=True)

        false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
            labels.cpu().numpy(), predictions.cpu().numpy()
        )
        real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
        assert_allclose(real_auc_value, computed_auc_value) 
Example #29
Source File: test_stacking.py    From civisml-extensions with BSD 3-Clause "New" or "Revised" License
def fit_predict_measure_clf(model, xtrain, ytrain, xtest, ytest):
    model.fit(xtrain, ytrain)
    ypred = model.predict_proba(xtest)
    fpr, tpr, _ = roc_curve(ytest, ypred[:, 1])
    return auc(fpr, tpr) 
Example #30
Source File: auc_regressor.py    From xam with MIT License
def _auc_loss(self, coef, X, y):
        fpr, tpr, _ = metrics.roc_curve(y, sp.dot(X, coef))
        return -metrics.auc(fpr, tpr)