Python numpy.sum() Examples

The following code examples show how to use numpy.sum(). They are taken from open source Python projects. You can vote up the examples you find useful or vote down the ones you don't.
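
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the numpy.sum() behaviors they rely on: summing all elements, summing along an axis, keeping dimensions, and counting True values in a boolean array.

import numpy as np

a = np.arange(6).reshape(2, 3)           # [[0, 1, 2], [3, 4, 5]]
print(np.sum(a))                         # 15 -- sum over every element
print(np.sum(a, axis=0))                 # [3 5 7] -- column sums
print(np.sum(a, axis=1, keepdims=True))  # [[3], [12]] -- row sums, shape (2, 1)
print(np.sum(a > 2))                     # 3 -- booleans count as 0/1, a common idiom below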

Example 1
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet.py    MIT License 10 votes
def test(model, data):
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-'*50)
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([x_test[:50],x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-'*50)
    plt.imshow(plt.imread("real_and_recon.png", ))
    plt.show() 
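
The accuracy line above reduces to counting matching argmax indices and dividing by the number of samples. A small illustration with made-up arrays:

import numpy as np

y_pred = np.array([[0.8, 0.1, 0.1],   # hypothetical softmax outputs
                   [0.2, 0.7, 0.1],
                   [0.3, 0.3, 0.4]])
y_test = np.array([[1, 0, 0],          # one-hot ground truth
                   [0, 0, 1],
                   [0, 0, 1]])
acc = np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0]
print(acc)                             # 0.666... -- two of three predictions match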
Example 2
Project: StructEngPy   Author: zhuoju36   File: dynamic.py    MIT License 7 votes
def solve_modal(model,k:int):
    """
    Solve the eigenmodes of the MDOF system.
    
    params:
        model: FEModel.
        k: number of modes to extract.
    """
    K_,M_=model.K_,model.M_
    if k>model.DOF:
        logger.info('Warning: the modal number to extract is larger than the system DOFs, only %d modes are available'%model.DOF)
        k=model.DOF
    omega2s,modes = sl.eigsh(K_,k,M_,sigma=0,which='LM')
    delta = modes/np.sum(modes,axis=0)
    model.is_solved=True
    model.mode_=delta
    model.omega_=np.sqrt(omega2s).reshape((k,1)) 
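
The division by np.sum(modes, axis=0) broadcasts one total per column, so each extracted mode shape is rescaled to sum to 1. A tiny sketch of that broadcasting with made-up numbers:

import numpy as np

modes = np.array([[1.0, 2.0],          # rows: DOFs, columns: modes (toy values)
                  [3.0, 2.0]])
delta = modes / np.sum(modes, axis=0)  # divide each column by its own total
print(delta.sum(axis=0))               # [1. 1.]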
Example 3
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License 6 votes
def iter_apply(Xs, Ms, Ys):
    # fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
    logits = []
    cost = 0
    with chainer.using_config('train', False), \
            chainer.using_config('enable_backprop', False):
        for xmb, mmb, ymb in iter_data(
                Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
            n = len(xmb)
            XMB = model.xp.asarray(xmb)
            YMB = model.xp.asarray(ymb)
            MMB = model.xp.asarray(mmb)
            h = model(XMB)
            clf_logits = clf_head(h, XMB)
            clf_logits *= n
            clf_losses = compute_loss_fct(
                XMB, YMB, MMB, clf_logits, only_return_losses=True)
            clf_losses *= n
            logits.append(cuda.to_cpu(clf_logits.array))
            cost += cuda.to_cpu(F.sum(clf_losses).array)
        logits = np.concatenate(logits, 0)
    return logits, cost 
Example 4
Project: rhodonite   Author: nestauk   File: phylomemetic.py    MIT License 6 votes
def community_density(community, co_graph, density_prop, fill=0):
    """community_density
    Calculate the density of a clique based on the number of occurrences
    and cooccurrences of the terms within it. Based on Callon et al. 1991.

    Parameters
    ----------
        community : :obj:`iter` of :obj:`int`
            A set of terms that comprise a single clique.
        co_graph : :obj:`graph_tool.Graph`
            The cooccurrence graph from which the clique originated.
        density_prop : :obj:`graph_tool.PropertyMap`
            A property map holding the precomputed density value of each term.
        fill : :obj:`float`
            A number to fill in for the cooccurrence value if none exists.

    Returns
    -------
        density : :obj:`float`
            The density of the clique.
    """
    card = len(community)
    densities = density_prop.a[community]
    density = 1 / card * np.sum(densities)
    return density 
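
Since 1 / card * np.sum(densities) is just the arithmetic mean of the selected entries, the same value could be obtained with densities.mean(); a quick check with toy numbers:

import numpy as np

densities = np.array([0.2, 0.4, 0.9])            # hypothetical per-term densities
density = 1 / len(densities) * np.sum(densities)
assert np.isclose(density, densities.mean())     # 0.5 either way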
Example 5
Project: prediction-constrained-topic-models   Author: dtak   File: calc_N_d_K__vb_qpiDir_qzCat.py    MIT License 6 votes
def make_initial_P_d_K(
        init_name,
        prng=np.random,
        alpha_K=None,
        init_P_d_K_list=None):
    K = alpha_K.size

    if init_name.count('warm'):
        return init_P_d_K_list.pop()
    elif init_name.count('uniform_sample'):
        return prng.dirichlet(np.ones(K))
    elif init_name.count('prior_sample'):
        return prng.dirichlet(alpha_K)
    elif init_name.count("prior_mean"):
        return alpha_K / np.sum(alpha_K) #np.zeros(K, dtype=alpha_K.dtype)
    else:
        raise ValueError("Unrecognized vb lstep_init_name: " + init_name) 
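
The "prior_mean" branch normalizes the concentration vector into a probability vector, i.e. the mean of the Dirichlet prior. For example, with toy parameters:

import numpy as np

alpha_K = np.array([2.0, 1.0, 1.0])     # toy concentration parameters
prior_mean = alpha_K / np.sum(alpha_K)  # [0.5, 0.25, 0.25]
print(prior_mean.sum())                 # 1.0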
Example 6
Project: DataHack2018   Author: InnovizTech   File: iou_evaluator.py    BSD 3-Clause "New" or "Revised" License 6 votes
def evaluate_frame(gt_labels, pred_labels):
    assert np.all(np.isin(pred_labels, (0, 1))), \
        'Invalid values: pred labels value should be either 0 or 1, got {}'.format(set(pred_labels))

    correct_predictions = gt_labels == pred_labels
    positive_predictions = pred_labels == 1

    # correct, positive prediction -> True positive
    tp = np.sum(correct_predictions & positive_predictions)

    # incorrect, negative prediction (using De Morgan's law) -> False negative
    fn = np.sum(np.logical_not(correct_predictions | positive_predictions))

    # incorrect, positive prediction -> False positive
    fp = np.sum(np.logical_not(correct_predictions) & positive_predictions)

    return tp, fn, fp 
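
Because np.sum treats booleans as 0/1, the three sums above are simply counts of mask entries. Assuming the evaluate_frame defined above is in scope, a toy call looks like this (the IoU line is an added illustration, not part of the original evaluator):

import numpy as np

gt_labels = np.array([1, 1, 0, 0, 1])
pred_labels = np.array([1, 0, 1, 0, 1])
tp, fn, fp = evaluate_frame(gt_labels, pred_labels)
print(tp, fn, fp)               # 2 1 1
print(tp / (tp + fn + fp))      # 0.5 -- intersection over union for the positive class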
Example 7
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License 6 votes
def found_search(self, x, y):
        '''
        This function is applied when the lane lines have been detected in the previous frame.
        It uses a sliding window to search for lane pixels in close proximity (+/- 25 pixels in the x direction)
        around the previously detected polynomial.
        '''
        xvals = []
        yvals = []
        if self.found == True:
            i = 720
            j = 630
            while j >= 0:
                yval = np.mean([i,j])
                xval = (np.mean(self.fit0))*yval**2 + (np.mean(self.fit1))*yval + (np.mean(self.fit2))
                x_idx = np.where((((xval - 25) < x)&(x < (xval + 25))&((y > j) & (y < i))))
                x_window, y_window = x[x_idx], y[x_idx]
                if np.sum(x_window) != 0:
                    xvals.extend(x_window)  # np.append returns a new array; extend the lists in place instead
                    yvals.extend(y_window)
                i -= 90
                j -= 90
        if np.sum(xvals) == 0:
            self.found = False # If no lane pixels were detected then perform blind search
        return xvals, yvals, self.found 
Example 8
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        #feed_dict_pinyin = {inp_pinyin: tX_pinyin[i * batchSize:endOff]}
        #feed_dict_wubi = {inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
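
Examples 8 through 24 come from the same project and repeat this evaluation loop for different model variants; the np.sum accounting at its core is token-level counting, e.g.:

import numpy as np

viterbi_sequence = [0, 1, 2, 2, 3]                # hypothetical decoded tag ids
y_ = np.array([0, 1, 2, 0, 3])                    # gold tags for the same tokens
correct = np.sum(np.equal(viterbi_sequence, y_))  # 4 -- np.equal broadcasts list vs. array
print(100.0 * correct / len(y_))                  # 80.0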
Example 9
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm3_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        #feed_dict_pinyin = {inp_pinyin: tX_pinyin[i * batchSize:endOff]}
        #feed_dict_wubi = {inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 10
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_cnn_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, inp, tX, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX.shape[0]
    numBatch = int((tX.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp: tX[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            best_sequence = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(best_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl

    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 11
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        #feed_dict_pinyin = {inp_pinyin: tX_pinyin[i * batchSize:endOff]}
        #feed_dict_wubi = {inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 12
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm4_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi,tX_char, tX_pinyin, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 13
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_time_paper.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        #feed_dict_pinyin = {inp_pinyin: tX_pinyin[i * batchSize:endOff]}
        #feed_dict_wubi = {inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 14
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp, tX, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX.shape[0]
    numBatch = int((tX.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp: tX[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 15
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_fc_lstm3_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_wubi,tX_char, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 16
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_train_paper.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        #feed_dict_pinyin = {inp_pinyin: tX_pinyin[i * batchSize:endOff]}
        #feed_dict_wubi = {inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 17
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_fc_lstm3_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin,tX_char, tX_pinyin, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 18
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train_paper.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        #feed_dict_pinyin = {inp_pinyin: tX_pinyin[i * batchSize:endOff]}
        #feed_dict_wubi = {inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 19
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi,tX_char, tX_pinyin, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 20
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm3_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp, tX, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX.shape[0]
    numBatch = int((tX.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp: tX[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 21
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        #feed_dict_pinyin = {inp_pinyin: tX_pinyin[i * batchSize:endOff]}
        #feed_dict_wubi = {inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 22
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_share_lstm3_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, tX_char, tX_pinyin, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff]}
        #feed_dict_pinyin = {inp_pinyin: tX_pinyin[i * batchSize:endOff]}
        #feed_dict_wubi = {inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 23
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_share_lstm3_crf_train.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_wubi, tX_char, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        #feed_dict_pinyin = {inp_pinyin: tX_pinyin[i * batchSize:endOff]}
        #feed_dict_wubi = {inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 24
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_time.py    MIT License 5 votes
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi,tX_char, tX_pinyin, tX_wubi, tY):
    totalEqual = 0
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = (i + 1) * batchSize
        if endOff > totalLen:
            endOff = totalLen

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff], inp_pinyin:tX_pinyin[i * batchSize:endOff], inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy 
Example 25
Project: s2g   Author: caesar0301   File: bonus.py    MIT License 5 votes
def line_distance(coords):
    """Return total road distance in kilometers"""
    dist = []
    for i in range(0, len(coords) - 1):
        dist.append(great_circle_dist(coords[i], coords[i + 1]))
    return np.sum(dist) 
Example 26
Project: SpatialPooler   Author: CSDUMMI   File: test_spatial_pooler.py    GNU General Public License v3.0 5 votes
def test_init_collumn():
    test_cols = spatial_pooler_instance.init_collumn(500,600,0.75)

    assert type(test_cols[0]['permanences']) == type(np.array([0.5],dtype=np.float))
    assert np.sum(test_cols[0]['potential_pool'] > 0) > 0.6 # The potential pool should be at least 60 % of all collumns if not more 
Example 27
Project: SpatialPooler   Author: CSDUMMI   File: test_spatial_pooler.py    GNU General Public License v3.0 5 votes
def test_run():
    input_sp = np.random.randn(100) > 0
    output_sp = spatial_pooler_instance.run(input_sp)

    # Has the same shape as desired
    assert output_sp.shape[0] == 50

    # Less than 20% of the output_sp is on.
    # Sparsity
    assert np.sum(output_sp)/output_sp.shape[0] < 0.2

    assert type(output_sp) == type(np.array([],dtype=np.bool_)) 
Example 28
Project: SpatialPooler   Author: CSDUMMI   File: spatial_pooler.py    GNU General Public License v3.0 5 votes
def init_collumn(self,num_collumns,input_size,size_of_potential_pool=0.75):
        collumns = [{} for i in range(num_collumns)]
        for i in range(num_collumns):
            collumns[i]['potential_pool'] = np.random.rand(input_size) > size_of_potential_pool # Create a random potential pool with a certain percentage of potential connections
            collumns[i]['permanences'] = np.random.rand(np.sum(collumns[i]['potential_pool'])) # Random values for the permanence between 0 and 1
        return collumns 
Example 29
Project: SpatialPooler   Author: CSDUMMI   File: spatial_pooler.py    GNU General Public License v3.0 5 votes
def activation(self,state):
        """
        Average the entries of `state` and compare the result to `threshhold_activation`.
        """
        length = state.shape[0]
        return ((np.sum(state)/length) > self.threshhold_activation) 
Example 30
Project: chainer-openai-transformer-lm   Author: soskek   File: utils.py    MIT License 5 votes
def np_softmax(x, t=1):
    x = x / t
    x = x - np.max(x, axis=-1, keepdims=True)
    ex = np.exp(x)
    return ex / np.sum(ex, axis=-1, keepdims=True) 
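
A quick sanity check of np_softmax above (assuming it is in scope): after dividing by the np.sum of exponentials along the last axis, each row sums to 1.

import numpy as np

logits = np.array([[2.0, 1.0, 0.1],
                   [1.0, 3.0, 0.2]])
p = np_softmax(logits)
print(np.sum(p, axis=-1))        # [1. 1.]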
Example 31
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License 5 votes
def __call__(
            self,
            X,
            Y,
            M,
            clf_logits,
            lm_logits=None,
            only_return_losses=False):
        # Language modeling loss
        if lm_logits is not None:
            x_shifted = X[:, :, 1:, 0].reshape(-1)           # Shape: 252
            M = M.reshape(-1, M.shape[2])
            lm_losses = self.lm_criterion(lm_logits, x_shifted)
            lm_losses = lm_losses.reshape(
                X.shape[0] * X.shape[1], X.shape[2] - 1)
            lm_losses = lm_losses * M[:, 1:]
            lm_losses = F.sum(lm_losses, axis=1) / F.sum(M[:, 1:], axis=1)
        # Classification loss
        clf_losses = self.clf_criterion(clf_logits, Y)
        if only_return_losses:
            return (
                clf_losses,
                lm_losses) if lm_logits is not None else clf_losses

        if self.lm_coef > 0 and lm_logits is not None:
            train_loss = F.sum(clf_losses) + self.lm_coef * F.sum(lm_losses)
        else:
            train_loss = F.sum(clf_losses)

        if self.opt is not None:
            self.opt.target.cleargrads()
        train_loss.backward()
        if self.opt is not None:
            self.opt.update()
            self.opt.target.cleargrads()
        return train_loss.array 
Example 32
Project: explirefit   Author: codogogo   File: confusion_matrix.py    Apache License 2.0 5 votes
def compute_all_scores(self):
		self.class_performances = {}
		for i in range(len(self.labels)):
			tp = np.float32(self.matrix[i][i])
			fp_plus_tp = np.float32(np.sum(self.matrix, axis = 0)[i])
			fn_plus_tp = np.float32(np.sum(self.matrix, axis = 1)[i])
			p = tp / fp_plus_tp
			r = tp / fn_plus_tp
			self.class_performances[self.labels[i]] = (p, r, 2*p*r/(p+r))

		self.microf1 = np.float32(np.trace(self.matrix)) / np.sum(self.matrix)
		self.macrof1 = float(sum([x[2] for x in self.class_performances.values()])) / float(len(self.labels))
		self.macroP = float(sum([x[0] for x in self.class_performances.values()])) / float(len(self.labels))
		self.macroR = float(sum([x[1] for x in self.class_performances.values()])) / float(len(self.labels))
		self.accuracy = float(sum([self.matrix[i, i] for i in range(len(self.labels))])) / float(np.sum(self.matrix)) 
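
The same np.sum pattern works on a standalone confusion matrix: the trace counts correct predictions, np.sum(matrix) counts all predictions, and the axis sums give per-class predicted and true totals. A toy matrix (rows assumed to be true labels and columns predictions, matching the axis usage above):

import numpy as np

matrix = np.array([[5, 1, 0],
                   [2, 7, 1],
                   [0, 1, 3]])
micro = np.trace(matrix) / np.sum(matrix)             # 15 / 20 = 0.75
precision = np.diag(matrix) / np.sum(matrix, axis=0)  # tp / (tp + fp) per class
recall = np.diag(matrix) / np.sum(matrix, axis=1)     # tp / (tp + fn) per class
print(micro, precision, recall)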
Example 33
Project: rhodonite   Author: nestauk   File: cumulative.py    MIT License 5 votes
def label_new_vertex(g, o_props, label_steps=False):
    '''label_new_vertex

    Parameters
    ----------
    g : :obj:`graph_tool.Graph` 
        A graph.
    o_props : :obj:`dict` 
        A dictionary of vertex property maps containing the vertex occurrence 
        values at each step.
    label_steps : :obj:`bool` 
        If `True`, returns a vertex property map that indicates the first step
        that each vertex appeared.

    Returns
    -------
        new_vertex_props : :obj:`dict`
        vertex_step_prop : :obj:`graph_tool.PropertyMap`
    '''
    new_vertex_props = {}
    _vertex_tracker = g.new_vertex_property('bool')
    for step, o_prop in o_props.items():
        new_vertex_prop = g.new_vertex_property('bool')
        new_vertex_prop.a = (o_prop.a > 0) & (_vertex_tracker.a == False)
        new_vertex_props[step] = new_vertex_prop
        _vertex_tracker.a = _vertex_tracker.a + new_vertex_prop.a
    if label_steps:
        vertex_step_prop = g.new_vertex_property('int')
        steps = sorted(o_props.keys())  # assumed: the step keys are consecutive integers
        start = 0
        end = np.sum(new_vertex_props[steps[0]].a)
        for i, step in enumerate(steps):
            if i > 0:
                start = int(start + np.sum(new_vertex_props[step - 1].a))
                end = int(end + np.sum(new_vertex_props[step].a))
            vertex_step_prop.a[start:end] = step
        return (new_vertex_props, vertex_step_prop)
    else:
        return new_vertex_props 
Example 34
Project: autodmri   Author: samuelstjean   File: gamma.py    MIT License 5 votes
def maxlk_sigma(m, xold=None, eps=1e-8, max_iter=100):
    '''Maximum likelihood equation to estimate sigma from gamma distributed values'''

    sum_m2 = np.sum(m**2)
    K = m.size
    sum_log_m2 = np.sum(np.log(m**2))

    def f(sigma):
        return digamma(sum_m2/(2*K*sigma**2)) - sum_log_m2/K + np.log(2*sigma**2)

    def fprime(sigma):
        return -sum_m2 * polygamma(1, sum_m2/(2*K*sigma**2)) / (K*sigma**3) + 2/sigma

    if xold is None:
        xold = m.std()

    for _ in range(max_iter):

        xnew = xold - f(xold) / fprime(xold)

        if np.abs(xold - xnew) < eps:
            break

        xold = xnew

    return xnew 
Example 35
Project: keras_mixnets   Author: titu1994   File: mixnets.py    MIT License 5 votes
def _split_channels(total_filters, num_groups):
    split = [total_filters // num_groups for _ in range(num_groups)]
    split[0] += total_filters - sum(split)
    return split


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/mixnet_model.py 
Example 36
Project: Att-ChemdNER   Author: lingluodlut   File: utils.py    Apache License 2.0 5 votes
def shared(shape, name):
#{{{
    """
    Create a shared object of a numpy array.
    """ 
    init=initializations.get('glorot_uniform');
    if len(shape) == 1:
        value = np.zeros(shape)  # biases are initialized with zeros
        return theano.shared(value=value.astype(theano.config.floatX), name=name)
    else:
        drange = np.sqrt(6. / (np.sum(shape)))
        value = drange * np.random.uniform(low=-1.0, high=1.0, size=shape)
        return init(shape=shape,name=name);
#}}} 
Example 37
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: voc_eval.py    MIT License 5 votes
def voc_ap(rec, prec, use_07_metric=False):
  """ ap = voc_ap(rec, prec, [use_07_metric])
  Compute VOC AP given precision and recall.
  If use_07_metric is true, uses the
  VOC 07 11 point method (default:False).
  """
  if use_07_metric:
    # 11 point metric
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
      if np.sum(rec >= t) == 0:
        p = 0
      else:
        p = np.max(prec[rec >= t])
      ap = ap + p / 11.
  else:
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
      mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
  return ap 
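
The final np.sum line integrates precision over the points where recall changes (a rectangle rule on the precision envelope). A toy call with made-up values, assuming the voc_ap above is in scope:

import numpy as np

rec = np.array([0.1, 0.4, 0.7, 1.0])    # hypothetical monotone recall values
prec = np.array([1.0, 0.8, 0.6, 0.5])   # corresponding precision values
print(voc_ap(rec, prec))                # about 0.67 = 0.1*1.0 + 0.3*0.8 + 0.3*0.6 + 0.3*0.5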
Example 38
Project: prediction-constrained-topic-models   Author: dtak   File: calc_N_d_K__vb_qpiDir_qzCat.py    MIT License 5 votes
def calc_elbo_for_single_doc__simplified_from_N_d_K(
        word_ct_d_Ud=None,
        log_lik_d_UdK=None,
        alpha_K=None,
        N_d_K=None):
    theta_d_K = N_d_K + alpha_K
    E_log_pi_d_K = digamma(theta_d_K) - digamma(np.sum(theta_d_K))
    log_resp_d_UK = log_lik_d_UdK + E_log_pi_d_K[np.newaxis,:]
    return (
        np.inner(word_ct_d_Ud, logsumexp(log_resp_d_UK, axis=1))
        + c_Dir_1D(alpha_K) - c_Dir_1D(theta_d_K)
        + np.inner(alpha_K - theta_d_K, E_log_pi_d_K)
        ) 
Example 39
Project: prediction-constrained-topic-models   Author: dtak   File: calc_N_d_K__vb_qpiDir_qzCat.py    MIT License 5 votes
def c_Dir_1D(alpha_K):
    return gammaln(np.sum(alpha_K)) - np.sum(gammaln(alpha_K)) 
Example 40
Project: prediction-constrained-topic-models   Author: dtak   File: calc_coherence_metrics.py    MIT License 5 votes
def calc_pairwise_cooccurance_counts(
        x_csr_DV=None,
        dataset=None,
        ):
    """ Calculate word cooccurances across a corpus of D documents

    Returns
    -------
    ndocs_V : 1D array, size V
        entry v counts the number of documents that contain v at least once
    ndocs_csc_VV : 2D csc sparse matrix, V x V
        entry v,w counts the number of documents which contain
        the word pair (v, w) at least once

    Examples
    --------
    >>> x_DV = np.arange(6)[:,np.newaxis] * np.hstack([np.eye(6), np.zeros((6, 3))])
    >>> x_DV[:3, :3] += 1
    >>> x_DV[4, 5] += 17
    >>> ndocs_V, ndocs_csc_VV = calc_pairwise_cooccurance_counts(x_csr_DV=x_DV)
    >>> ndocs_V.astype(np.int32).tolist()
    [3, 3, 3, 1, 1, 2, 0, 0, 0]
    >>> ndocs_csc_VV.toarray()[:3, :3]
    array([[ 3.,  3.,  3.],
           [ 3.,  3.,  3.],
           [ 3.,  3.,  3.]])
    """
    if x_csr_DV is None:
        x_csr_DV = dataset['x_csr_DV']
    x_csr_DV = scipy.sparse.csr_matrix(x_csr_DV, dtype=np.float64)

    binx_csr_DV = x_csr_DV.copy()
    binx_csr_DV.data[:] = 1.0

    ndocs_V = np.squeeze(np.asarray(binx_csr_DV.sum(axis=0)))

    ndocs_csc_VV = (binx_csr_DV.T * binx_csr_DV).tocsc()
    return ndocs_V, ndocs_csc_VV 
Example 41
Project: prediction-constrained-topic-models   Author: dtak   File: train_and_eval_sklearn_binary_classifier.py    MIT License 5 votes
def calcfrac(bmask):
    return np.sum(bmask) / float(bmask.size) 
Example 42
Project: prediction-constrained-topic-models   Author: dtak   File: calc_roc_auc_via_bootstrap.py    MIT License 5 votes
def verify_min_examples_per_label(y_NC, min_examples_per_label):
    '''
    
    Examples
    --------
    >>> y_all_0 = np.zeros(10)
    >>> y_all_1 = np.ones(30)
    >>> verify_min_examples_per_label(y_all_0, 3)
    False
    >>> verify_min_examples_per_label(y_all_1, 2)
    False
    >>> verify_min_examples_per_label(np.hstack([y_all_0, y_all_1]), 10)
    True
    >>> verify_min_examples_per_label(np.eye(3), 2)
    False
    '''
    if y_NC.ndim < 2:
        y_NC = np.atleast_2d(y_NC).T
    n_C = np.sum(np.isfinite(y_NC), axis=0)
    n_pos_C = n_C * np.nanmean(y_NC, axis=0)
    min_neg = np.max(n_C - n_pos_C)
    min_pos = np.min(n_pos_C)
    if min_pos < min_examples_per_label:
        return False
    elif min_neg < min_examples_per_label:
        return False
    return True 
Example 43
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: matrix_vae.py    MIT License 5 votes
def train_test_validation(self, M, train_idx, test_idx, valid_idx, n_steps=100000, result_path='result/'):
        nonzero_user_idx = M.nonzero()[0]
        nonzero_item_idx = M.nonzero()[1]

        trainM = np.zeros(M.shape)
        trainM[nonzero_user_idx[train_idx], nonzero_item_idx[train_idx]] = M[nonzero_user_idx[train_idx], nonzero_item_idx[train_idx]]

        validM = np.zeros(M.shape)
        validM[nonzero_user_idx[valid_idx], nonzero_item_idx[valid_idx]] = M[nonzero_user_idx[valid_idx], nonzero_item_idx[valid_idx]]

        testM = np.zeros(M.shape)
        testM[nonzero_user_idx[test_idx], nonzero_item_idx[test_idx]] = M[nonzero_user_idx[test_idx], nonzero_item_idx[test_idx]]

        for i in range(self.num_user):
            if np.sum(trainM[i]) == 0:
                testM[i] = 0
                validM[i] = 0

        train_writer = tf.summary.FileWriter(
            result_path + '/train', graph=self.sess.graph)

        best_val_rmse = np.inf
        best_test_rmse = 0

        self.sess.run(tf.global_variables_initializer())
        for step in range(1, n_steps):
            feed_dict = {self.user: trainM, self.valid_rating:validM, self.test_rating:testM}

            _, mse, mae, valid_rmse, test_rmse,  summary_str = self.sess.run(
                [self.train_step, self.MSE, self.MAE, self.valid_RMSE, self.test_RMSE, self.summary_op], feed_dict=feed_dict)
            train_writer.add_summary(summary_str, step)
            print("Iter {0} Train RMSE:{1}, Valid RMSE:{2}, Test RMSE:{3}".format(step, np.sqrt(mse), valid_rmse, test_rmse))

            if best_val_rmse > valid_rmse:
                best_val_rmse = valid_rmse
                best_test_rmse = test_rmse

        self.saver.save(self.sess, result_path + "/model.ckpt")
        return best_test_rmse 
Example 44
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes
def extract_quaternion(R):
    d = np.diagonal(R)
    t = np.sum(d)
    if t + 1 < 0.25:
        symmetric_mat = R + R.T
        asymmetric_mat = R - R.T
        symmetric_diag = np.diagonal(symmetric_mat)
        i_max = np.argmax(symmetric_diag)
        q = np.empty(4)
        if i_max == 0:
            q[1] = np.sqrt(symmetric_diag[0] - t + 1) / 2
            normalizer = 1 / q[1]
            q[2] = symmetric_mat[1, 0] / 4 * normalizer
            q[3] = symmetric_mat[2, 0] / 4 * normalizer
            q[0] = asymmetric_mat[2, 1] / 4 * normalizer
        elif i_max == 1:
            q[2] = np.sqrt(symmetric_diag[1] - t + 1) / 2
            normalizer = 1 / q[2]
            q[1] = symmetric_mat[1, 0] / 4 * normalizer
            q[3] = symmetric_mat[2, 1] / 4 * normalizer
            q[0] = asymmetric_mat[0, 2] / 4 * normalizer
        elif i_max == 2:
            q[3] = np.sqrt(symmetric_diag[2] - t + 1) / 2
            normalizer = 1 / q[3]
            q[1] = symmetric_mat[2, 0] / 4 * normalizer
            q[2] = symmetric_mat[1, 2] / 4 * normalizer
            q[0] = asymmetric_mat[1, 0] / 4 * normalizer
    else:
        r = np.sqrt(1+t)
        s = 0.5 / r
        q = np.array([0.5*r, (R[2, 1] - R[1, 2])*s, (R[0, 2] - R[2, 0])*s, (R[1, 0] - R[0, 1])*s])

    return q 
Example 45
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes
def SID(gt, rc):
    N = gt.shape[0]
    err = np.zeros(N)
    for i in range(N):
        err[i] = abs(np.sum(rc[i] * np.log10((rc[i] + 1e-3) / (gt[i] + 1e-3))) +
                        np.sum(gt[i] * np.log10((gt[i] + 1e-3) / (rc[i] + 1e-3))))
    return err.mean() 
Example 46
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes
def APPSA(gt, rc):
    nom = np.sum(gt * rc, axis=0)
    denom = np.linalg.norm(gt, axis=0) * np.linalg.norm(rc, axis=0)
    
    cos = np.where((nom / (denom + 1e-3)) > 1, 1, (nom / (denom + 1e-3)))
    appsa = np.arccos(cos)
    
    return np.sum(appsa) / (gt.shape[1] * gt.shape[0]) 
Example 47
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes
def SID(gt, rc):
    N = gt.shape[2]
    err = np.zeros(N)
    for i in range(N):
        err[i] = abs(np.sum(rc[:,:,i] * np.log10((rc[:,:,i] + 1e-3)/(gt[:,:,i] + 1e-3))) +
                     np.sum(gt[:,:,i] * np.log10((gt[:,:,i] + 1e-3)/(rc[:,:,i] + 1e-3))))
    SIDs = err / (gt.shape[1] * gt.shape[0])
    return np.mean(SIDs) 
Example 48
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes
def APPSA(gt, rc):
    nom = np.sum(gt * rc, axis=0)
    denom = np.linalg.norm(gt, axis=0) * np.linalg.norm(rc, axis=0)
    
    cos = np.where((nom / (denom + 1e-3)) > 1, 1, (nom / (denom + 1e-3)))
    appsa = np.arccos(cos)
    
    return np.sum(appsa) / (gt.shape[1] * gt.shape[0]) 
Example 49
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License 5 votes
def possibilities_generator(
        prior, min_pos, max_start_pos, constraint_len, total_filled):
    """
    Given a row prior, a min_pos, max_start_pos, and constraint length,
    yield each potential row

    prior is an array of:
        -1 (unknown),
        0 (definitely empty),
        1 (definitely filled)
    """
    prior_filled = np.zeros(len(prior)).astype(bool)
    prior_filled[prior == 1] = True
    prior_empty = np.zeros(len(prior)).astype(bool)
    prior_empty[prior == 0] = True
    for start_pos in range(min_pos, max_start_pos + 1):
        possible = -1 * np.ones(len(prior))
        possible[start_pos:start_pos + constraint_len] = 1
        if start_pos + constraint_len < len(possible):
            possible[start_pos + constraint_len] = 0
        if start_pos > 0:
            possible[start_pos - 1] = 0

        # add in the prior
        possible[np.logical_and(possible == -1, prior == 0)] = 0
        possible[np.logical_and(possible == -1, prior == 1)] = 1

        # if contradiction with prior, continue
        # 1. possible changes prior = 1 to something else
        # 2. possible changes prior = 0 to something else
        # 3. everything is assigned in possible but there are not
        #    enough filled in
        # 4. possible changes nothing about the prior
        if np.any(possible[np.where(prior == 1)[0]] != 1) or \
                np.any(possible[np.where(prior == 0)[0]] != 0) or \
                np.sum(possible == 1) > total_filled or \
                (np.all(possible >= 0) and np.sum(possible == 1) <
                    total_filled) or \
                np.all(prior == possible):
            continue
        yield possible 
Example 50
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License 5 votes
def _puzzle_is_solved(self):
        if np.sum(self.puzzle_state == -1) == 0:
            return True
        return False 
Example 51
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: voc_eval.py    MIT License 5 votes
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # 11 point metric
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap 
Example 52
Project: ml_news_popularity   Author: khaledJabr   File: lr.py    MIT License 5 votes
def loss(self):
        return np.sum(np.square(self.targets - self.predicted))\
            + self.alpha * np.sum(np.abs(self.betas)) 
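
This is a lasso-style objective: the first np.sum is the squared error and the second is the L1 penalty on the coefficients. With toy values:

import numpy as np

targets = np.array([1.0, 2.0, 3.0])
predicted = np.array([1.5, 2.0, 2.0])
betas = np.array([0.5, -1.0])
alpha = 0.1
loss = np.sum(np.square(targets - predicted)) + alpha * np.sum(np.abs(betas))
print(loss)     # 1.25 + 0.1 * 1.5 = 1.4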
Example 53
Project: ml_news_popularity   Author: khaledJabr   File: svr.py    MIT License 5 votes
def calc_K_Mat(self, input_mat, degree, c ) : 
		k_mat = np.ones([input_mat.shape[0]+1 ,input_mat.shape[0]+1 ])
		k_mat[0][0] = 0
		i_matrix = np.identity(input_mat.shape[0])
		for i in range(1, k_mat.shape[0]) : 
		    for j in range (1, k_mat.shape[0]) : 
		    	if i % 500 == 0 : 
		    		print("Fitting : K Matrix ( {} , {} )".format(i , j))
		    	k = (np.sum((input_mat[i-1:i , : ]).T * input_mat[j-1:j , :]) ** degree)  + (1/c) * i_matrix[i-1][j-1]
		    	k_mat[i][j] = k
		return k_mat 
Example 54
Project: ml_news_popularity   Author: khaledJabr   File: svr.py    MIT License 5 votes
def calc_K_matrix_input(self, input_data ): 
		input_k_matrix = np.ones([input_data.shape[0] +1 , self.training_data.shape[0] + 1] )
		input_k_matrix[0][0] = 0 
		for i in range(1, input_k_matrix.shape[0]):
			for j in range(1, input_k_matrix.shape[1]):
				if i % 500 == 0 : 
					print("Predicting : K Matrix ( {} , {} )".format(i , j))
				input_k_matrix[i][j] = (np.sum((input_data[i-1:i , : ]).T * self.training_data[j-1:j , :] ) ** self.degree)

		return input_k_matrix
Example 55
Project: fbpconv_tf   Author: panakino   File: unet.py    GNU General Public License v3.0 5 votes
def error_rate(predictions, labels):
    """
    Return the error rate based on dense predictions and 1-hot labels.
    """

    return 100.0 - ( 100.0 *
        np.sum(np.argmax(predictions, 3) == np.argmax(labels, 3)) /
        (predictions.shape[0]*predictions.shape[1]*predictions.shape[2])) 
Example 56
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License 5 votes
def blind_search(self, x, y, image):
        '''
        This function is applied in the first few frames and/or if the lane was not successfully detected
        in the previous frame. It uses a sliding window approach to detect peaks in a histogram of the
        binary thresholded image. Pixels in close proximity to the detected peaks are considered to belong
        to the lane lines.
        '''
        xvals = []
        yvals = []
        if self.found == False:
            i = 720
            j = 630
            histogram = np.sum(image[image.shape[0]//2:], axis=0)
            if self == Right:
                peak = np.argmax(histogram[image.shape[1]//2:]) + image.shape[1]//2
            else:
                peak = np.argmax(histogram[:image.shape[1]//2])
            while j >= 0:
                x_idx = np.where((((peak - 100) < x)&(x < (peak + 100))&((y > j) & (y < i))))
                x_window, y_window = x[x_idx], y[x_idx]
                if np.sum(x_window) != 0:
                    xvals.extend(x_window)
                    yvals.extend(y_window)
                if np.sum(x_window) > 100:
                    peak = np.int(np.mean(x_window))
                i -= 90
                j -= 90
        if np.sum(xvals) > 0:
            self.found = True
        else:
            yvals = self.Y
            xvals = self.X
        return xvals, yvals, self.found 
Example 57
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet.py    MIT License 5 votes vote down vote up
def margin_loss(y_true, y_pred):
    """
    Margin loss for Eq.(4). When y_true[i, :] contains more than one `1`, this loss should still work, though that case has not been tested.
    :param y_true: [None, n_classes]
    :param y_pred: [None, num_capsule]
    :return: a scalar loss value.
    """
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))

    return K.mean(K.sum(L, 1)) 
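K above is the Keras backend. A plain-NumPy sketch of the same margin loss for a single two-class example (values made up) gives:

import numpy as np

y_true = np.array([[0.0, 1.0]])
y_pred = np.array([[0.2, 0.85]])
L = y_true * np.square(np.maximum(0.0, 0.9 - y_pred)) \
    + 0.5 * (1 - y_true) * np.square(np.maximum(0.0, y_pred - 0.1))
np.mean(np.sum(L, 1))  # 0.0075 = 0.05**2 + 0.5 * 0.1**2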
Example 58
Project: HushUtility   Author: Deathhush   File: StockEvaluation.py    MIT License 5 votes vote down vote up
def generate_daily_df(symbol, year='', file_path = 'D:\\Testland\\stock_data\\'):
    if (year != ''):
        year_part = '\\%s\\' % year
    else:
        year_part = ''  # no year sub-directory; avoids NameError below
    result_df = pandas.read_csv('%s%s%s.csv' % (file_path, year_part, symbol), header=None, names=[u'date', u'time', u'open', u'high', u'low', u'close', u'volume',u'amount'])    
    result_df = result_df.groupby('date').agg({'high':np.max, 'low':np.min, 'volume':np.sum, 'amount':np.sum, 'open':'first', 'close':'last'})        
    result_df['ma5']=pd.rolling_mean(result_df['close'] , 5)
    result_df['ma10']=pd.rolling_mean(result_df['close'] , 10)
    analyzed_path = '%s\\analyzed\\%s.%s.daily.analyzed.csv' % (file_path, symbol, year)
    result_df.to_csv(analyzed_path)
    result_df = pandas.read_csv(analyzed_path)      
    return result_df 
Example 59
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def mle_single(data, x, k=10):
    data = np.asarray(data, dtype=np.float32)
    x = np.asarray(x, dtype=np.float32)
    if x.ndim == 1:
        x = x.reshape((-1, x.shape[0]))
    # dim = x.shape[1]

    k = min(k, len(data)-1)
    f = lambda v: - k / np.sum(np.log(v/v[-1]))
    a = cdist(x, data)
    a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
    a = np.apply_along_axis(f, axis=1, arr=a)
    return a[0]

# lid of a batch of query points X 
Example 60
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def mle_batch(data, batch, k):
    data = np.asarray(data, dtype=np.float32)
    batch = np.asarray(batch, dtype=np.float32)

    k = min(k, len(data)-1)
    f = lambda v: - k / np.sum(np.log(v/v[-1]))
    a = cdist(batch, data)
    a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
    a = np.apply_along_axis(f, axis=1, arr=a)
    return a

# mean distance of x to its k nearest neighbours 
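Both helpers estimate local intrinsic dimensionality (LID) via a maximum-likelihood estimator over k-nearest-neighbour distances, and assume scipy's cdist is imported as in the project's util module. A hypothetical call on random data looks like:

import numpy as np
from scipy.spatial.distance import cdist  # as assumed by mle_single / mle_batch

data = np.random.rand(100, 8)        # reference points
batch = np.random.rand(5, 8)         # query points
lids = mle_batch(data, batch, k=10)  # one LID estimate per query row
lids.shape                           # (5,)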
Example 61
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_keras.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_get_logits(self):
        import tensorflow as tf
        model = KerasModelWrapper(self.model)
        x = tf.placeholder(tf.float32, shape=(None, 100))
        preds = model.get_probs(x)
        logits = model.get_logits(x)

        x_val = np.random.rand(2, 100)
        tf.global_variables_initializer().run(session=self.sess)
        p_val, logits = self.sess.run([preds, logits], feed_dict={x: x_val})
        p_gt = np.exp(logits)/np.sum(np.exp(logits), axis=1, keepdims=True)
        self.assertTrue(np.allclose(p_val, p_gt, atol=1e-6)) 
Example 62
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_keras.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_get_probs(self):
        import tensorflow as tf
        model = KerasModelWrapper(self.model)
        x = tf.placeholder(tf.float32, shape=(None, 100))
        preds = model.get_probs(x)

        x_val = np.random.rand(2, 100)
        tf.global_variables_initializer().run(session=self.sess)
        p_val = self.sess.run(preds, feed_dict={x: x_val})
        self.assertTrue(np.allclose(np.sum(p_val, axis=1), 1, atol=1e-6))
        self.assertTrue(np.all(p_val>=0))
        self.assertTrue(np.all(p_val<=1)) 
Example 63
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np(self):
        x_val = np.random.rand(100, 1000)
        perturbation = self.attack.generate_np(x_val) - x_val
        perturbation_norm = np.sqrt(np.sum(perturbation**2, axis=1))
        # test perturbation norm
        self.assertClose(perturbation_norm, self.attack.eps) 
Example 64
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_attack_strength(self):
        """
        This test generates a random source and guide and feeds them in a
        randomly initialized CNN. Checks if an adversarial example can get
        at least 50% closer to the guide compared to the original distance of
        the source and the guide.
        """
        tf.set_random_seed(1234)
        input_shape = self.input_shape
        x_src = tf.abs(tf.random_uniform(input_shape, 0., 1.))
        x_guide = tf.abs(tf.random_uniform(input_shape, 0., 1.))

        layer = 'fc7'
        attack_params = {'eps': 5./256, 'clip_min': 0., 'clip_max': 1.,
                         'nb_iter': 10, 'eps_iter': 0.005,
                         'layer': layer}
        x_adv = self.attack.generate(x_src, x_guide, **attack_params)
        h_adv = self.model.fprop(x_adv)[layer]
        h_src = self.model.fprop(x_src)[layer]
        h_guide = self.model.fprop(x_guide)[layer]

        init = tf.global_variables_initializer()
        self.sess.run(init)

        ha, hs, hg, xa, xs, xg = self.sess.run(
            [h_adv, h_src, h_guide, x_adv, x_src, x_guide])
        d_as = np.sqrt(((hs-ha)*(hs-ha)).sum())
        d_ag = np.sqrt(((hg-ha)*(hg-ha)).sum())
        d_sg = np.sqrt(((hg-hs)*(hg-hs)).sum())
        print("L2 distance between source and adversarial example `%s`: %.4f" %
              (layer, d_as))
        print("L2 distance between guide and adversarial example `%s`: %.4f" %
              (layer, d_ag))
        print("L2 distance between source and guide `%s`: %.4f" %
              (layer, d_sg))
        print("d_ag/d_sg*100 `%s`: %.4f" % (layer, d_ag*100/d_sg))
        self.assertTrue(d_ag*100/d_sg < 50.) 
Example 65
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_tf.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def numpy_kl_with_logits(p_logits, q_logits):
    def numpy_softmax(logits):
        logits -= np.max(logits, axis=1, keepdims=True)
        exp_logits = np.exp(logits)
        return exp_logits / np.sum(exp_logits, axis=1, keepdims=True)

    p = numpy_softmax(p_logits)
    log_p = p_logits - np.log(np.sum(np.exp(p_logits), axis=1, keepdims=True))
    log_q = q_logits - np.log(np.sum(np.exp(q_logits), axis=1, keepdims=True))
    return (p * (log_p - log_q)).sum(axis=1).mean() 
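A useful sanity check is that the KL divergence of a distribution with itself is (numerically) zero:

import numpy as np

p_logits = np.random.randn(4, 3)
numpy_kl_with_logits(p_logits.copy(), p_logits.copy())  # ~0.0
# copies are passed because numpy_softmax shifts its input in place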
Example 66
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_tf.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_l2_batch_normalize(self):
        with tf.Session() as sess:
            x = tf.random_normal((100, 1000))
            x_norm = sess.run(utils_tf.l2_batch_normalize(x))
            self.assertTrue(
                np.allclose(np.sum(x_norm**2, axis=1), 1, atol=1e-6)) 
Example 67
Project: rhodonite   Author: nestauk   File: cumulative.py    MIT License 4 votes vote down vote up
def label_new_edge(g, co_props, label_old=True, label_steps=False):
    '''label_new_edge
    Determines whether an edge has appeared for the first time at a given step.

    Parameters
    ----------
    g : :obj:`graph_tool.Graph` 
        A graph.
    co_props : :obj:`dict` 
        Contains property maps of edge cooccurrence values.
    label_old : :obj:`bool` 
        If True will also return property maps that indicate whether an edge 
        has existed at a previous step. Defaults to True.
    label_steps : :obj:`bool` 
        If True will also return a property map of type `int` that indicates 
        the step at which an edge first appeared. Defaults to False.

    Returns
    -------
    new_edge_props : :obj:`dict`
        Boolean edge property maps, keyed by step, that are True where an
        edge appears for the first time at that step.
    old_edge_props : :obj:`dict`
        Boolean edge property maps, keyed by step, that are True where an
        edge already existed at an earlier step. Only returned if
        ``label_old`` is True.
    edge_step_prop : :obj:`PropertyMap`
        Integer edge property map giving the step at which each edge first
        appeared. Only returned if ``label_steps`` is True.
    '''
    new_edge_props = {}
    if label_old:
        old_edge_props = {}

    _edge_tracker = g.new_edge_property('bool')

    for step, co_prop in co_props.items():
        new_edge_prop = g.new_edge_property('bool')
        new_edge_prop.a = (co_prop.a > 0) & (_edge_tracker.a == False)
        new_edge_props[step] = new_edge_prop
        if label_old:
            old_edge_prop = g.new_edge_property('bool')
            old_edge_prop.a = _edge_tracker.a
            old_edge_props[step] = old_edge_prop
        _edge_tracker.a = _edge_tracker.a + new_edge_prop.a
        
    if label_steps:
        steps = list(co_props.keys())
        edge_step_prop = g.new_edge_property('int')
        start = 0
        end = np.sum(new_edge_props[steps[0]].a)
        for i, step in enumerate(steps):
            if i > 0:
                start = int(start + np.sum(new_edge_props[steps[i-1]].a))
                end = int(end + np.sum(new_edge_props[step].a))
            edge_step_prop.a[start:end] = step

    if label_old & (not label_steps):
        return (new_edge_props, old_edge_props)
    elif label_steps & (not label_old):
        return (new_edge_props, edge_step_prop)
    elif label_old & label_steps:
        return (new_edge_props, old_edge_props, edge_step_prop)
    else:
        return new_edge_props 
Example 68
Project: rhodonite   Author: nestauk   File: normalise.py    MIT License 4 votes vote down vote up
def association_strength(g, o_vprop, co_eprop, log=False):
    """assocation_strength
    Calculates the association strength, a symmetric and probabalistic cooccurrence
    edge weight normalisation.

    The assocation strength is calculated as defined in van Eck 2009.

    .. math::
       a = \\frac{2 N c_{ij}}{o_{i} o{j}}
    
    if the graph is directed, or

    .. math::
       a = \\frac{N c_{ij}}{o_{i} o{j}}
    
    if the graph is undirected, where N is the total number of cooccurrences, 
    :math:`c_{ij}` is the number of cooccurrences between vertices :math:`i` 
    and :math:`j`, and :math:`o_{i}` and :math:`o_{j}` are the respective 
    occurrence frequencies for those vertices.


    Parameters
    ----------
        g : obj:`Graph` 
            Graph to use to calculate assocation strength.
        o_vprop : obj:`graph_tool.VertexPropertyMap` 
            A vertex property map containing vertex occurrence frequencies.
        co_eprop : obj:`graph_tool.EdgePropertyMap` 
            An edge property map containing edge cooccurrence frequencies.
        log : obj:`bool` 
            If `True` association strength values are logged to base 10. 
            Defaults to `False`.


    Returns
    -------
        a_s : obj:`PropertyMap` Association strength edge property map.
    """
    o_source = edge_endpoint_property(g, o_vprop, 'source')
    o_target = edge_endpoint_property(g, o_vprop, 'target')
    n_cooccurrences = np.sum(co_eprop.get_array())

    a_s = g.new_edge_property('float')
    if g.is_directed():
        a_s.a = (
            (n_cooccurrences * co_eprop.a) / 
            (o_source.a * o_target.a)
                )
    else:
        a_s.a = (
            (2 * n_cooccurrences * co_eprop.a) / 
            (o_source.a * o_target.a)
                )

    if log:
        a_s.a = np.log10(a_s.get_array())
    return a_s 
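Setting the graph_tool machinery aside, the undirected branch reduces to a one-line NumPy expression; the arrays below are invented for illustration:

import numpy as np

co = np.array([4.0, 2.0, 6.0])        # cooccurrence count per edge
o_src = np.array([10.0, 5.0, 12.0])   # occurrence count of each edge's source vertex
o_tgt = np.array([8.0, 6.0, 12.0])    # occurrence count of each edge's target vertex
N = np.sum(co)                        # total number of cooccurrences
a_s = 2 * N * co / (o_src * o_tgt)    # association strength per edge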
Example 69
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions.py    MIT License 4 votes vote down vote up
def log(x):
	''' Compute the natural log of an AutoDiff object and its derivative.
	
	INPUTS
	======
	x: an AutoDiff object
	
	RETURNS
	=======
	A new AutoDiff object with calculated value and derivative.
	
	EXAMPLES
	========
	>>> x = AutoDiff(4, 2)
	>>> myAutoDiff = log(x)
	>>> myAutoDiff.val
	1.3862943611198906
	>>> myAutoDiff.der
	0.5
	>>> myAutoDiff.jacobian
	0.25
	
	'''
	try:
		new_val = np.log(x.val)
		# Derivative not defined when x = 0
		new_der = (1/(x.val*np.sum(1)))*x.der # if x.val != 0 else None
		new_jacobian = (1/(x.val*np.sum(1)))*x.jacobian # if x.val != 0 else None
		return AutoDiff(new_val, new_der, x.n, 0, new_jacobian)
	except AttributeError:
		try:
			if(x.Real==0):
				real = -np.inf
				dual = np.inf
			else:
				real = np.log(x.Real)
				dual = x.Dual/x.Real
			return Dual(real, dual)		
		except AttributeError:
			try:
				return Dual(log(x.Real), x.Dual/x.Real)
			except AttributeError:
			# Constant
				return_val = np.log(x)
				return return_val

# log base 10 
Example 70
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__dataset_manager.py    MIT License 4 votes vote down vote up
def slice_dataset(
        cur_slice=None, dataset=None, 
        max_n_examples_per_slice=np.inf,
        include_rowmask=True,
        **kwargs):
    ''' Create slice of provided dataset.

    Returns
    -------
    slice_dict : dict of subset of data
    '''
    if cur_slice is None:
        cur_slice = slice(0, dataset['n_docs'])
    if cur_slice.stop - cur_slice.start > max_n_examples_per_slice:
        cur_slice = slice(
            cur_slice.start,
            cur_slice.start + max_n_examples_per_slice)
    
    n_vocabs = dataset['n_vocabs']
    doc_indptr_Dp1 = dataset['doc_indptr_Dp1']
    word_id_U = dataset['word_id_U']
    word_ct_U = dataset['word_ct_U']
    u_start = doc_indptr_Dp1[cur_slice.start]
    u_stop = doc_indptr_Dp1[cur_slice.stop]
    d_start = cur_slice.start
    d_stop = cur_slice.stop

    n_docs = cur_slice.stop - cur_slice.start
    word_id_U = word_id_U[u_start:u_stop]
    word_ct_U = word_ct_U[u_start:u_stop]
    doc_indptr_Dp1 = doc_indptr_Dp1[d_start:(d_stop + 1)].copy()
    doc_indptr_Dp1 = doc_indptr_Dp1 - doc_indptr_Dp1[0]

    slice_dict = dict(
        n_docs=n_docs,
        n_vocabs=n_vocabs,
        word_id_U=word_id_U,
        word_ct_U=word_ct_U,
        doc_indptr_Dp1=doc_indptr_Dp1,
        x_csr_DV=scipy.sparse.csr_matrix(
            (word_ct_U, word_id_U, doc_indptr_Dp1),
            shape=(n_docs, n_vocabs)),
        )
    if 'y_DC' in dataset:
        slice_dict['y_DC'] = dataset['y_DC'][cur_slice]
        slice_dict['n_labels'] = slice_dict['y_DC'].shape[1]
    if include_rowmask and 'y_rowmask' in dataset:
        slice_dict['y_rowmask'] = dataset['y_rowmask'][cur_slice]
    elif 'y_rowmask' in dataset:
        # handle case where some things are real nan values
        y_rowmask = np.all(np.isfinite(dataset['y_DC'][cur_slice]), axis=1).astype(np.int32)
        if np.sum(y_rowmask) < y_rowmask.size:
            slice_dict['y_rowmask'] = y_rowmask
    return slice_dict 
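Assuming numpy and scipy.sparse are imported as in the project module, a minimal illustrative dataset dict (field names as in the function above) can be sliced like this:

import numpy as np

dataset = dict(
    n_docs=3, n_vocabs=4,
    doc_indptr_Dp1=np.array([0, 2, 3, 5]),
    word_id_U=np.array([0, 2, 1, 0, 3]),
    word_ct_U=np.array([1.0, 2.0, 1.0, 3.0, 1.0]),
    )
first_two = slice_dataset(cur_slice=slice(0, 2), dataset=dataset)
first_two['n_docs'], first_two['x_csr_DV'].shape  # (2, (2, 4))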
Example 71
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__dataset_manager.py    MIT License 4 votes vote down vote up
def describe_bow_dataset(
        dataset=None,
        dataset_name='Unknown dataset',
        percentiles=[0, 1, 10, 50, 90, 99, 100],
        label_list=None,
        **kwargs):
    n_docs, n_vocabs = dataset['x_csr_DV'].shape
    n_utokens_D = np.diff(dataset['doc_indptr_Dp1'])
    token_ct_D = np.squeeze(
        np.asarray(np.sum(dataset['x_csr_DV'], axis=1)))
    if dataset_name is not None:
        msg = "%s" % dataset_name
    else:
        msg = ""
    msg += "\n%d docs" % (n_docs)
    msg += "\n%d vocab words" % (n_vocabs)
    msg += "\nunique tokens per doc %s" % (
        make_percentile_str(n_utokens_D, percentiles, fmt_str='%7d'))
    msg += "\n total tokens per doc %s" % (
        make_percentile_str(token_ct_D, percentiles, fmt_str='%7d'))
    if 'y_DC' in dataset:
        msg += "\n%d labels" % dataset['n_labels']
        for c in xrange(dataset['n_labels']):
            y_c_D = dataset['y_DC'][:,c]
            try:
                rowmask = dataset['y_rowmask']
                y_c_D = y_c_D[rowmask == 1]
                assert np.all(np.isfinite(y_c_D))
            except KeyError:
                pass
            n_y_docs = y_c_D.size
            if c == 0:
                msg += "\n%.3f (%d/%d) docs are labeled" % (
                    float(n_y_docs)/n_docs, n_y_docs, n_docs)
            n_y_pos = np.sum(y_c_D == 1)
            if label_list:
                fmt_str = '%%-%ds' % (np.max(map(len, label_list)))
                label_c = (fmt_str + ' (%2d/%d)') % (
                    label_list[c], c+1, dataset['n_labels']) 
            else:
                label_c = 'label %2d/%d' % (c+1, dataset['n_labels']) 
            msg += '\n %s frac positive %.3f (%6d/%d)' % (
                label_c,
                float(n_y_pos) / float(n_y_docs), n_y_pos, n_y_docs)
            #msg += '\n frac negative %.3f (%6d/%d)' % (
            #    n_y_neg / float(n_y_docs), n_y_neg, n_y_docs)            
    return msg + "\n" 
Example 72
Project: prediction-constrained-topic-models   Author: dtak   File: calc_coherence_metrics.py    MIT License 4 votes vote down vote up
def calc_umass_coherence_for_top_ranked_terms_in_topic(
        top_vocab_ids=None,
        ndocs_V=None,
        ndocs_csc_VV=None,
        topics_KV=None,
        k=None,
        dataset=None,
        pair_smooth_eps=0.1,
        marg_smooth_eps=1e-9,
        ):
    """ Compute Coherence metric for given topic's top-ranked terms.

    Returns
    -------
    coherence_score : float
        Larger values indicate more coherent topics.

    Examples
    --------
    >>> x_DV = np.arange(6)[:,np.newaxis] * np.hstack([np.eye(6), np.zeros((6, 3))])
    >>> x_DV[:3, :3] += 1
    >>> x_DV[4, 5] += 17
    >>> ndocs_V, ndocs_csc_VV = calc_pairwise_cooccurance_counts(x_csr_DV=x_DV)
    >>> coh = calc_umass_coherence_for_top_ranked_terms_in_topic([0, 8], ndocs_V, ndocs_csc_VV)
    >>> coh2 = np.log(0.1 / 3.0)
    >>> np.allclose(coh, coh2)
    True
    >>> coh_good = calc_umass_coherence_for_top_ranked_terms_in_topic([0, 1, 2], ndocs_V, ndocs_csc_VV)
    >>> coh_bad = calc_umass_coherence_for_top_ranked_terms_in_topic([0, 4, 5], ndocs_V, ndocs_csc_VV)
    >>> coh_worst = calc_umass_coherence_for_top_ranked_terms_in_topic([0, 3, 7], ndocs_V, ndocs_csc_VV)
    >>> coh_good > coh_bad
    True
    >>> coh_bad > coh_worst
    True
    """
    V = ndocs_V.size
    top_vocab_ids = np.asarray(top_vocab_ids, dtype=np.int32)
    M = top_vocab_ids.size
    coherence_score = 0.0
    for mm, v in enumerate(top_vocab_ids[:-1]):
        Mrem = M - mm - 1
        counts_Mrem = ndocs_csc_VV[v, top_vocab_ids[mm+1:]]
        try:
            counts_Mrem = counts_Mrem.toarray()
        except AttributeError:
            pass
        assert counts_Mrem.size == Mrem
        coherence_score += (
            np.sum(np.log(counts_Mrem + pair_smooth_eps))
            - Mrem * np.log(ndocs_V[v] + marg_smooth_eps)
            )
    return coherence_score 
Example 73
Project: prediction-constrained-topic-models   Author: dtak   File: select_best_runs_and_snapshots.py    MIT License 4 votes vote down vote up
def simplify_best_df_and_make_unicode_friendly(
        best_df,
        replacements={'WEIGHT_Y':'λ', '==':'=', "'":""},
        replacements_ascii={'λ':'WEIGHT_Y', '=':''},
        at_best_keys=[
            'LOGPDF_X_PERTOK_AT_BEST_SNAPSHOT',
            'LOGPDF_Y_PERDOC_AT_BEST_SNAPSHOT'],
        ):
    ''' Update legend names to be shorter/unicode friendly.

    Also adds _AT_BEST_SNAPSHOT fields
    '''
    legcolid = best_df.columns.tolist().index('LEGEND_NAME')
    best_df["LEGEND_NAME"] = best_df["LEGEND_NAME"].apply(lambda x: unicode(x))
    best_df["LEGEND_NAME_ASCII"] = best_df["LEGEND_NAME"].apply(lambda x: unicode(x))
    for row_id in range(best_df.shape[0]):
        leg_str = best_df.iloc[row_id, legcolid]
        for before, after in replacements.items():
            leg_str = leg_str.replace(before, after)
        leg_str = ' '.join(leg_str.split())
        best_df.iloc[row_id, legcolid] = leg_str

        # Now make ascii-safe version of each name
        leg_str_ascii = leg_str
        for before, after in replacements_ascii.items():
            leg_str_ascii = leg_str_ascii.replace(before, after)
        best_df.loc[row_id, 'LEGEND_NAME_ASCII'] = (
            ' '.join(leg_str_ascii.decode('ascii').split())).replace(' ', '_')
        
    at_best_row_mask = best_df.IS_BEST_SNAPSHOT.values > 0
    for leg in np.unique(best_df['_UNIQUE_LEGEND_NAME'].values):
        for split in np.unique(best_df['SPLIT_NAME'].values):
            leg_split_row_mask = np.logical_and(
                best_df._UNIQUE_LEGEND_NAME.values == leg,
                best_df.SPLIT_NAME.values == split)
            best_leg_split_row_mask = np.logical_and(
                at_best_row_mask, leg_split_row_mask)

            assert np.sum(best_leg_split_row_mask) == 1
            assert np.sum(best_leg_split_row_mask) < np.sum(leg_split_row_mask)
            for at_best_key in at_best_keys:
                target_key = at_best_key.replace('_AT_BEST_SNAPSHOT', '')
                best_leg_split_row_id = np.flatnonzero(best_leg_split_row_mask)[0]
                val_at_best = best_df[target_key].values[best_leg_split_row_id]
                best_df.loc[leg_split_row_mask, at_best_key] = val_at_best

    # Verify all row indices are unique
    assert best_df.shape[0] == np.unique(best_df.index.values).size

    return best_df 
Example 74
Project: FRIDA   Author: LCAV   File: bands_selection.py    MIT License 4 votes vote down vote up
def select_bands(samples, freq_range, fs, nfft, win, n_bands, div=1):
    '''
    Selects the bins with most energy in a frequency range.

    It is possible to specify a div factor. Then the range is subdivided
    into div equal subbands and n_bands / div per subband are selected.
    '''

    if win is not None and isinstance(win, bool):
        if win:
            win = np.hanning(nfft)
        else:
            win = None

    # Read the signals in a single array
    sig = [wavfile.read(s)[1] for s in samples]
    L = max([s.shape[0] for s in sig])
    signals = np.zeros((L,len(samples)), dtype=np.float32)
    for i in range(signals.shape[1]):
        signals[:sig[i].shape[0],i] = sig[i] / np.std(sig[i][sig[i] > 1e-2])

    sum_sig = np.sum(signals, axis=1)

    sum_STFT = pra.stft(sum_sig, nfft, nfft, win=win, transform=rfft).T
    sum_STFT_avg = np.mean(np.abs(sum_STFT)**2, axis=1)

    # Do some band selection
    bnds = np.linspace(freq_range[0], freq_range[1], div+1)

    freq_hz = np.zeros(n_bands)
    freq_bins = np.zeros(n_bands, dtype=int)

    nsb = n_bands // div

    for i in range(div):

        bl = int(bnds[i] / fs * nfft)
        bh = int(bnds[i+1] / fs * nfft)

        k = np.argsort(sum_STFT_avg[bl:bh])[-nsb:]

        freq_hz[nsb*i:nsb*(i+1)] = (bl + k) / nfft * fs
        freq_bins[nsb*i:nsb*(i+1)] = k + bl

    freq_hz = freq_hz[:n_bands]

    return np.unique(freq_hz), np.unique(freq_bins) 
Example 75
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def __init__(self, mat=None, vecs=None, rt=None):
        """

        :param mat: 4x4 transformation matrix. translation is at the last column.
        :param vecs: tuple of two vectors (rotation, translation). If rotation has 3 elements it is assumed to be
            rotation angles around the axes (x, y, z), if it has 4 elements it is assumed to be quaternion
        :param rt: tuple of a 3x3 matrix and a 3 element vector of rotation and translation respectively
        """
        self._rotation_angles = None
        self._translation = None
        self._axis_angle = None
        self._q = None
        self._mat = None
        assert sum((mat is not None, vecs is not None, rt is not None)) == 1

        # received 4x4 matrix as input
        if mat is not None:
            assert (mat.shape == (4, 4))
            self._mat = mat
            self._rotation = self._mat[:3, :3]
            self._translation = self._mat[:3, 3]
        # received rotation vector and translation vector
        elif vecs is not None:
            assert all(isinstance(vec, np.ndarray) for vec in vecs)
            assert len(vecs) == 2 and vecs[1].shape == (3,)
            if vecs[0].shape == (3,):
                # rotation angles
                self._rotation_angles = vecs[0]
                self._rotation = rotation_angles_to_rotation(vecs[0])
                self._translation = vecs[1]
            else:
                # quaternion
                assert vecs[0].shape == (4,)
                self._q = vecs[0]
                self._rotation = quaternion_to_rotation(vecs[0])
                self._translation = vecs[1]
        # received rotation matrix and translation vector
        else:
            assert len(rt) == 2 and all(isinstance(i, np.ndarray) for i in rt)
            assert rt[0].shape == (3, 3) and rt[1].shape == (3,)
            self._translation = rt[1]
            self._rotation = rt[0] 
Example 76
Project: em-prior-adjust   Author: aesuli   File: em.py    GNU General Public License v3.0 4 votes vote down vote up
def em(y, posteriors_zero, priors_zero, epsilon=1e-6, positive_class=1):
    """
    Implements the prior correction method based on EM presented in:
    "Adjusting the Outputs of a Classifier to New a Priori Probabilities: A Simple Procedure"
    Saerens, Latinne and Decaestecker, 2002
    http://www.isys.ucl.ac.be/staff/marco/Publications/Saerens2002a.pdf

    :param y: true labels of test items, to measure accuracy, precision and recall.
    :param posteriors_zero: posterior probabilities on test items, as returned by a classifier. A 2D-array with shape
    (items, classes).
    :param priors_zero: prior probabilities measured on training set.
    :param epsilon: stopping threshold.
    :param positive_class: class index to be considered the positive one, for precision and recall.
    :return: posteriors_s, priors_s, history: final adjusted posteriors, final adjusted priors, a list of length s
    where each element is a tuple with the step counter, the current priors (as list), the stopping criterion value,
    accuracy, precision and recall.
    """
    s = 0
    priors_s = np.copy(priors_zero)
    posteriors_s = np.copy(posteriors_zero)
    val = 2 * epsilon
    history = list()
    acc = np.mean((y == positive_class) == (posteriors_zero[:, positive_class] > 0.5))
    rec = np.sum(np.logical_and((y == positive_class), (posteriors_zero[:, positive_class] > 0.5))) / np.sum(
        y == positive_class)
    prec = np.sum(np.logical_and((y == positive_class), (posteriors_zero[:, positive_class] > 0.5))) / np.sum(
        posteriors_zero[:, positive_class] > 0.5)
    history.append((s, list(priors_s), 1, acc, prec, rec))
    while not val < epsilon:
        # E step
        ratios = priors_s / priors_zero
        denominators = 0
        for c in range(priors_zero.shape[0]):
            denominators += ratios[c] * posteriors_zero[:, c]
        for c in range(priors_zero.shape[0]):
            posteriors_s[:, c] = ratios[c] * posteriors_zero[:, c] / denominators

        acc = np.mean((y == positive_class) == (posteriors_s[:, positive_class] > 0.5))
        rec = np.sum(np.logical_and((y == positive_class), (posteriors_s[:, positive_class] > 0.5))) / np.sum(
            y == positive_class)
        prec = np.sum(np.logical_and((y == positive_class), (posteriors_s[:, positive_class] > 0.5))) / np.sum(
            posteriors_s[:, positive_class] > 0.5)
        priors_s_minus_one = priors_s.copy()

        # M step
        priors_s = posteriors_s.mean(0)

        # check for stop
        val = 0
        for i in range(len(priors_s_minus_one)):
            val += abs(priors_s_minus_one[i] - priors_s[i])
        s += 1
        history.append((s, list(priors_s), val, acc, prec, rec))

    return posteriors_s, priors_s, history 
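An illustrative call with random posteriors (shapes follow the docstring; all values are made up):

import numpy as np

y = np.random.randint(0, 2, size=200)      # true binary labels
post = np.random.rand(200, 2)
post /= post.sum(axis=1, keepdims=True)    # rows sum to 1
priors = np.array([0.7, 0.3])              # priors measured on the training set
posteriors_s, priors_s, history = em(y, post, priors)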
Example 77
Project: fbpconv_tf   Author: panakino   File: unet.py    GNU General Public License v3.0 4 votes vote down vote up
def _get_cost(self, logits, cost_name, cost_kwargs):
        """
        Constructs the cost function, either cross_entropy, weighted cross_entropy or dice_coefficient.
        Optional arguments are:
        class_weights: weights for the different classes in case of multi-class imbalance
        regularizer: power of the L2 regularizers added to the loss function
        """

        with tf.device('/gpu:0'):
            flat_logits = tf.reshape(logits, [-1, self.n_class])
            flat_labels = tf.reshape(self.y, [-1, self.n_class])
            if cost_name == "cross_entropy":
                class_weights = cost_kwargs.pop("class_weights", None)

                if class_weights is not None:
                    class_weights = tf.constant(np.array(class_weights, dtype=np.float32))

                    weight_map = tf.multiply(flat_labels, class_weights)
                    weight_map = tf.reduce_sum(weight_map, axis=1)

                    loss_map = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits,
                                                                       labels=flat_labels)
                    weighted_loss = tf.multiply(loss_map, weight_map)

                    loss = tf.reduce_mean(weighted_loss)
                else:
                    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits,
                                                                                  labels=flat_labels))
            elif cost_name == "dice_coefficient":
                eps = 1e-5
                prediction = pixel_wise_softmax_2(logits)
                intersection = tf.reduce_sum(prediction * self.y)
                union = eps + tf.reduce_sum(prediction) + tf.reduce_sum(self.y)
                loss = -(2 * intersection / union)
            elif cost_name == "euclidean":
                loss = 100.0 * tf.reduce_mean(tf.square(tf.abs(flat_logits - flat_labels)))
            else:
                raise ValueError("Unknown cost function: %s" % cost_name)

            regularizer = cost_kwargs.pop("regularizer", None)
            if regularizer is not None:
                regularizers = sum([tf.nn.l2_loss(variable) for variable in self.variables])
                loss += (regularizer * regularizers)

        return loss 
Example 78
Project: mmdetection   Author: open-mmlab   File: mean_ap.py    Apache License 2.0 4 votes vote down vote up
def average_precision(recalls, precisions, mode='area'):
    """Calculate average precision (for single or multiple scales).

    Args:
        recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
        precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
        mode (str): 'area' or '11points', 'area' means calculating the area
            under precision-recall curve, '11points' means calculating
            the average precision of recalls at [0, 0.1, ..., 1]

    Returns:
        float or ndarray: calculated average precision
    """
    no_scale = False
    if recalls.ndim == 1:
        no_scale = True
        recalls = recalls[np.newaxis, :]
        precisions = precisions[np.newaxis, :]
    assert recalls.shape == precisions.shape and recalls.ndim == 2
    num_scales = recalls.shape[0]
    ap = np.zeros(num_scales, dtype=np.float32)
    if mode == 'area':
        zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
        ones = np.ones((num_scales, 1), dtype=recalls.dtype)
        mrec = np.hstack((zeros, recalls, ones))
        mpre = np.hstack((zeros, precisions, zeros))
        for i in range(mpre.shape[1] - 1, 0, -1):
            mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
        for i in range(num_scales):
            ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
            ap[i] = np.sum(
                (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
    elif mode == '11points':
        for i in range(num_scales):
            for thr in np.arange(0, 1 + 1e-3, 0.1):
                precs = precisions[i, recalls[i, :] >= thr]
                prec = precs.max() if precs.size > 0 else 0
                ap[i] += prec
        # divide once, after all 11 thresholds have been accumulated for every scale
        ap /= 11
    else:
        raise ValueError(
            'Unrecognized mode, only "area" and "11points" are supported')
    if no_scale:
        ap = ap[0]
    return ap 
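On the same toy curve used for voc_ap in Example 51, both modes can be exercised directly (single-scale input, so 1-D arrays are fine):

import numpy as np

recalls = np.array([0.1, 0.4, 0.7, 1.0])
precisions = np.array([1.0, 0.8, 0.6, 0.5])
average_precision(recalls, precisions, mode='area')      # ~0.67, matches voc_ap
average_precision(recalls, precisions, mode='11points')  # ~0.70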
Example 79
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet-multi-gpu.py    MIT License 4 votes vote down vote up
def test(model, data):
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-'*50)
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([x_test[:50],x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-'*50)
    plt.imshow(plt.imread("real_and_recon.png", ))
    plt.show() 
Example 80
Project: neural-fingerprinting   Author: StephanZheng   File: attacks_tf.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def saliency_map(grads_target, grads_other, search_domain, increase):
    """
    TensorFlow implementation for computing saliency maps
    :param grads_target: a matrix containing forward derivatives for the
                         target class
    :param grads_other: a matrix where every element is the sum of forward
                        derivatives over all non-target classes at that index
    :param search_domain: the set of input indices that we are considering
    :param increase: boolean; true if we are increasing pixels, false otherwise
    :return: (i, j, search_domain) the two input indices selected and the
             updated search domain
    """
    # Compute the size of the input (the number of features)
    nf = len(grads_target)

    # Remove the already-used input features from the search space
    invalid = list(set(range(nf)) - search_domain)
    increase_coef = (2 * int(increase) - 1)
    grads_target[invalid] = -increase_coef * np.max(np.abs(grads_target))
    grads_other[invalid] = increase_coef * np.max(np.abs(grads_other))

    # Create a 2D numpy array of the sum of grads_target and grads_other
    target_sum = grads_target.reshape((1, nf)) + grads_target.reshape((nf, 1))
    other_sum = grads_other.reshape((1, nf)) + grads_other.reshape((nf, 1))

    # Create a mask to only keep features that match saliency map conditions
    if increase:
        scores_mask = ((target_sum > 0) & (other_sum < 0))
    else:
        scores_mask = ((target_sum < 0) & (other_sum > 0))

    # Create a 2D numpy array of the scores for each pair of candidate features
    scores = scores_mask * (-target_sum * other_sum)

    # A pixel can only be selected (and changed) once
    np.fill_diagonal(scores, 0)

    # Extract the best two pixels
    best = np.argmax(scores)
    p1, p2 = best % nf, best // nf

    # Remove used pixels from our search domain
    search_domain.discard(p1)
    search_domain.discard(p2)

    return p1, p2, search_domain
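A hypothetical call on random forward derivatives (six input features) shows the expected shapes; the gradients here are not from a real model:

import numpy as np

nf = 6
grads_target = np.random.randn(nf)
grads_other = np.random.randn(nf)
search_domain = set(range(nf))
p1, p2, search_domain = saliency_map(grads_target, grads_other,
                                     search_domain, increase=True)
# p1, p2 are the two selected feature indices; both are removed from search_domain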