Python sklearn.metrics.label_ranking_loss() Examples

The following are 4 code examples of sklearn.metrics.label_ranking_loss(), drawn from open source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module sklearn.metrics, or try the search function.
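Before the examples, a minimal usage sketch (this mirrors the example in the scikit-learn documentation): the metric averages, per sample, the fraction of relevant/irrelevant label pairs that are ordered incorrectly by the scores.

import numpy as np
from sklearn.metrics import label_ranking_loss

y_true = np.array([[1, 0, 0], [0, 0, 1]])
y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
print(label_ranking_loss(y_true, y_score))  # 0.75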
Example #1
Source File: test_ranking.py    From Mastering-Elasticsearch-7.0 with MIT License
# Imports needed to run this test standalone (the original file takes its
# assert helpers from scikit-learn's test utilities).
from numpy.testing import assert_raises
from sklearn.metrics import label_ranking_loss


def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
    assert_raises(ValueError, label_ranking_loss,
                  [[0, 1], [0, 1]], [[0], [1]])

    assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, label_ranking_loss,
                  [[0], [1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]]) 
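Run outside the test harness, the first of these checks looks like this: a 2-D y_true paired with a 1-D y_score is rejected.

from sklearn.metrics import label_ranking_loss

try:
    label_ranking_loss([[0, 1], [0, 1]], [0, 1])
except ValueError as exc:
    print("rejected:", exc)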
Example #2
Source File: test_ranking.py    From Mastering-Elasticsearch-7.0 with MIT License
# Imports needed to run this test standalone.
from numpy.testing import assert_almost_equal
from sklearn.metrics import label_ranking_loss


def test_ranking_loss_ties_handling():
    # Tie handling: a tie between a relevant and an irrelevant label
    # counts as a mis-ordered pair
    assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1) 
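The expectations above imply that a relevant label must score strictly higher than an irrelevant one; a tie counts as a fully mis-ordered pair. A naive O(n²) reference implementation (a sketch written for this page, not part of scikit-learn) reproduces the values asserted in the test:

import numpy as np

def naive_label_ranking_loss(y_true, y_score):
    # Average, per sample, the fraction of (relevant, irrelevant) label
    # pairs where the relevant label does not score strictly higher.
    losses = []
    for truth, scores in zip(np.asarray(y_true), np.asarray(y_score)):
        relevant = scores[truth == 1]
        irrelevant = scores[truth == 0]
        if len(relevant) == 0 or len(irrelevant) == 0:
            losses.append(0.0)  # no pairs to compare: loss defined as 0
            continue
        bad_pairs = sum(r <= i for r in relevant for i in irrelevant)
        losses.append(bad_pairs / (len(relevant) * len(irrelevant)))
    return float(np.mean(losses))

print(naive_label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]))  # 0.5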
Example #3
Source File: test_ranking.py    From Mastering-Elasticsearch-7.0 with MIT License
# Imports needed to run this test standalone.
import numpy as np
from numpy.testing import assert_almost_equal
from scipy.sparse import csr_matrix
from sklearn.metrics import label_ranking_loss


def test_label_ranking_loss():
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)

    assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
                        0)
    assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
                        2 / 2)
    assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
                        2 / 2)

    # Undefined cases - with all labels relevant or all irrelevant there
    # are no pairs to mis-order, so the loss is 0 and the ranking doesn't matter
    assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
    assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)

    assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
                        0)
    assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)

    # Non-trivial cases
    assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
                                           [[0.1, 10., -3], [0, 1, 3]]),
                        (0 + 2 / 2) / 2.)

    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)

    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)

    # Sparse CSR matrices are accepted for y_true
    assert_almost_equal(label_ranking_loss(
        csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
        [[0.1, 10, -3], [3, 1, 3]]),
        (0 + 2 / 2) / 2.) 
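The final assertion doubles as a usage note: y_true may be a scipy.sparse matrix while y_score stays dense.

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.metrics import label_ranking_loss

y_true = csr_matrix(np.array([[0, 1, 0], [1, 1, 0]]))
y_score = np.array([[0.1, 10, -3], [3, 1, 3]])
print(label_ranking_loss(y_true, y_score))  # 0.5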
Example #4
Source File: evaluate.py    From bird-species-classification with MIT License
# Imports needed by this function (compute_elevation_scores,
# mean_average_precision, area_under_roc_curve, top_n and
# write_to_submission_file are helpers defined elsewhere in the project).
import os
import pickle

import numpy as np
from sklearn import metrics


def evaluate(experiment_path, meta_data=False, xml_dir="", train_dir="",
             submission_file=""):
    pickle_path = os.path.join(experiment_path, "predictions.pkl")
    # Use a name that doesn't shadow the builtin `input`
    with open(pickle_path, 'rb') as infile:
        y_trues = pickle.load(infile)
        y_scores = pickle.load(infile)
        training_segments = pickle.load(infile)

    if meta_data:
        elevation_scores = compute_elevation_scores(training_segments,
                                                    xml_dir, train_dir)

        # Combine the scores using Bayes' theorem.
        normalize = np.array([np.sum(y_s * e_s)
                              for y_s, e_s in zip(y_scores, elevation_scores)])
        y_scores = y_scores * elevation_scores / normalize[:, None]

    if submission_file:
        write_to_submission_file(submission_file, y_scores,
                                 training_segments, train_dir)
        return

    map_score = mean_average_precision(y_trues, y_scores)
    auroc_score = area_under_roc_curve(y_trues, y_scores)

    # Ranking metrics from sklearn.metrics
    coverage_error = metrics.coverage_error(y_trues, y_scores)
    lrap = metrics.label_ranking_average_precision_score(y_trues, y_scores)
    ranking_loss = metrics.label_ranking_loss(y_trues, y_scores)

    print("")
    print("- Top 1:", top_n(y_trues, y_scores, 1))
    print("- Top 2:", top_n(y_trues, y_scores, 2))
    print("- Top 3:", top_n(y_trues, y_scores, 3))
    print("- Top 4:", top_n(y_trues, y_scores, 4))
    print("- Top 5:", top_n(y_trues, y_scores, 5))
    print("")
    print("Mean Average Precision: ", map_score)
    print("Area Under ROC Curve: ", auroc_score)
    print("Coverage Error: ", coverage_error)
    print("Label Ranking Average Precision: ", lrap)
    print("Ranking Loss: ", ranking_loss)
    print("Total predictions: ", len(y_scores))

    return {
        "map": map_score,
        "auroc": auroc_score,
        "coverage_error": coverage_error,
        "lrap": lrap,
        "ranking_loss": ranking_loss,
        "top_1": top_n(y_trues, y_scores, 1),
        "top_5": top_n(y_trues, y_scores, 5),
    }
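A hedged call sketch (the experiment path is a placeholder; evaluate() assumes a predictions.pkl produced by an earlier run, plus the project's helper functions):

results = evaluate("experiments/run_01")  # hypothetical experiment directory
print("Ranking loss:", results["ranking_loss"])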