Python numpy.sum() Examples

The following are code examples for showing how to use numpy.sum(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet.py    MIT License 9 votes vote down vote up
def test(model, data):
    """Print test accuracy and save originals + reconstructions side by side."""
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-' * 50)
    # Fraction of samples whose predicted class matches the one-hot label.
    accuracy = np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0]
    print('Test acc:', accuracy)

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    # Tile the first 50 inputs and their reconstructions into one image.
    combined = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    scaled = combined * 255
    Image.fromarray(scaled.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-' * 50)
    plt.imshow(plt.imread("real_and_recon.png", ))
    plt.show()
Example 2
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License 6 votes vote down vote up
def iter_apply(Xs, Ms, Ys):
    """Run the model over a whole dataset in inference mode.

    Iterates mini-batches of (token ids Xs, masks Ms, labels Ys), collecting
    classifier logits (each batch's logits scaled by its batch size) and the
    summed, batch-size-weighted loss.

    Relies on module-level globals: model, clf_head, compute_loss_fct,
    iter_data, n_batch_train, chainer, cuda, F and np.

    Returns:
        (logits, cost): concatenated per-sample logits (numpy, scaled by
        batch size) and the accumulated scalar cost.
    """
    # fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
    logits = []
    cost = 0
    # Disable training-time behaviour (e.g. dropout) and graph construction.
    with chainer.using_config('train', False), \
            chainer.using_config('enable_backprop', False):
        for xmb, mmb, ymb in iter_data(
                Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
            n = len(xmb)
            XMB = model.xp.asarray(xmb)
            YMB = model.xp.asarray(ymb)
            MMB = model.xp.asarray(mmb)
            h = model(XMB)
            clf_logits = clf_head(h, XMB)
            # Scaled by batch size n — presumably so short final batches
            # contribute proportionally when averaged later; confirm callers.
            clf_logits *= n
            clf_losses = compute_loss_fct(
                XMB, YMB, MMB, clf_logits, only_return_losses=True)
            clf_losses *= n
            logits.append(cuda.to_cpu(clf_logits.array))
            cost += cuda.to_cpu(F.sum(clf_losses).array)
        logits = np.concatenate(logits, 0)
    return logits, cost
Example 3
Project: rhodonite   Author: nestauk   File: phylomemetic.py    MIT License 6 votes vote down vote up
def community_density(community, co_graph, density_prop, fill=0):
    """community_density
    Calculate the density of a clique based on the number of occurrences
    and coocurrences of the terms within it. Based on Callon et al. 1991.

    Parameters
    ----------
        community : :obj:`iter` of :obj:`int`:
            A set of terms that comprise a single clique.
        co_graph : :obj:`graph_tool.Graph`
            The coocurrence graph from which the clique originated
            (currently unused).
        density_prop : vertex property map
            Per-vertex density values; its array is indexed by the
            community members.
        fill : :obj:`float`:
            A number to fill in for the cooccurrence value if none exists
            (currently unused).
    Returns
    -------
        density : :obj:`float`: The mean density over the clique members.
    """
    card = len(community)
    densities = density_prop.a[community]
    # Mean density: sum of member densities divided by clique cardinality.
    return np.sum(densities) / card
Example 4
Project: prediction-constrained-topic-models   Author: dtak   File: calc_N_d_K__vb_qpiDir_qzCat.py    MIT License 6 votes vote down vote up
def make_initial_P_d_K(
        init_name,
        prng=np.random,
        alpha_K=None,
        init_P_d_K_list=None):
    """Create an initial K-dim document-topic probability vector.

    Args:
        init_name: str naming the strategy, matched by substring:
            'warm' pops the next vector from init_P_d_K_list;
            'uniform_sample' draws from a symmetric Dirichlet(1);
            'prior_sample' draws from Dirichlet(alpha_K);
            'prior_mean' returns the prior mean alpha_K / sum(alpha_K).
        prng: numpy random state used for sampling strategies.
        alpha_K: 1D array of Dirichlet concentration parameters (length K).
        init_P_d_K_list: list of precomputed vectors, used for warm starts.

    Returns:
        1D numpy array of length K that sums to 1.

    Raises:
        ValueError: if init_name matches no known strategy.
    """
    K = alpha_K.size

    # Substring membership via `in` replaces the str.count() > 0 idiom.
    if 'warm' in init_name:
        return init_P_d_K_list.pop()
    elif 'uniform_sample' in init_name:
        return prng.dirichlet(np.ones(K))
    elif 'prior_sample' in init_name:
        return prng.dirichlet(alpha_K)
    elif 'prior_mean' in init_name:
        return alpha_K / np.sum(alpha_K)
    else:
        raise ValueError("Unrecognized vb lstep_init_name: " + init_name)
Example 5
Project: DataHack2018   Author: InnovizTech   File: iou_evaluator.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def evaluate_frame(gt_labels, pred_labels):
    """Return (tp, fn, fp) counts for one frame of binary predictions."""
    assert np.all(np.isin(pred_labels, (0, 1))), \
        'Invalid values: pred labels value should be either 0 or 1, got {}'.format(set(pred_labels))

    hit = gt_labels == pred_labels
    said_yes = pred_labels == 1

    # True positive: predicted 1 and the prediction is correct.
    tp = np.sum(hit & said_yes)

    # False negative: incorrect and predicted 0, i.e. ~hit & ~said_yes,
    # written via De Morgan as ~(hit | said_yes).
    fn = np.sum(~(hit | said_yes))

    # False positive: incorrect and predicted 1.
    fp = np.sum(~hit & said_yes)

    return tp, fn, fp
Example 6
Project: StructEngPy   Author: zhuoju36   File: dynamic.py    MIT License 6 votes vote down vote up
def solve_modal(model, k: int):
    """
    Solve eigen mode of the MDOF system

    params:
        model: FEModel.
        k: number of modes to extract.
    """
    stiffness = model.K_
    mass = model.M_
    # Cannot extract more modes than the system has degrees of freedom.
    if k > model.DOF:
        logger.info('Warning: the modal number to extract is larger than the system DOFs, only %d modes are available' % model.DOF)
        k = model.DOF
    eigenvalues, eigenvectors = sl.eigsh(stiffness, k, mass, sigma=0, which='LM')
    # Normalize each mode shape by its column sum.
    normalized = eigenvectors / np.sum(eigenvectors, axis=0)
    model.is_solved = True
    model.mode_ = normalized
    model.omega_ = np.sqrt(eigenvalues).reshape((k, 1))
Example 7
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License 6 votes vote down vote up
def found_search(self, x, y):
    """Search for lane pixels near the previously fitted polynomial.

    Applied when the lane lines were detected in the previous frame: slides
    a window down the image (y from 720 to 0 in steps of 90) and collects
    the pixel coordinates that fall within +/- 25 px (x direction) of the
    previously fitted second-order polynomial (self.fit0/fit1/fit2).

    Args:
        x, y: numpy arrays of candidate pixel coordinates (same length).

    Returns:
        (xvals, yvals, found): lists of collected pixel coordinates, and
        self.found (set to False when no lane pixels were collected, so the
        caller falls back to a blind search).
    """
    xvals = []
    yvals = []
    if self.found == True:
        i = 720
        j = 630
        while j >= 0:
            # Evaluate the previous polynomial fit at the window's mid-height.
            yval = np.mean([i, j])
            xval = (np.mean(self.fit0)) * yval ** 2 + (np.mean(self.fit1)) * yval + (np.mean(self.fit2))
            x_idx = np.where((((xval - 25) < x) & (x < (xval + 25)) & ((y > j) & (y < i))))
            x_window, y_window = x[x_idx], y[x_idx]
            if np.sum(x_window) != 0:
                # BUG FIX: the original called np.append(xvals, x_window),
                # which returns a new array and discards it, so nothing was
                # ever collected. Extend the Python lists in place instead.
                xvals.extend(x_window)
                yvals.extend(y_window)
            i -= 90
            j -= 90
    if np.sum(xvals) == 0:
        self.found = False  # No lane pixels detected: perform blind search.
    return xvals, yvals, self.found
Example 8
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds char/pinyin/wubi features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin, inp_wubi: input placeholders.
        tX_char, tX_pinyin, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 9
Project: multi-embedding-cws   Author: wangjksjtu   File: pw_lstm3_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds char/pinyin/wubi features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin, inp_wubi: input placeholders.
        tX_char, tX_pinyin, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 10
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_cnn_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, inp, tX, tY):
    """Compute tagging accuracy (%) over the test set (no CRF decoding).

    Feeds the test data batch by batch and counts per-position matches
    between the network output and the gold labels.

    NOTE(review): the trimmed score rows are compared directly against the
    label rows, so `unary_score` is assumed to already yield predicted
    label ids rather than raw potentials — confirm against the graph
    construction.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position outputs (see note).
        test_sequence_length: tensor yielding true sequence lengths.
        inp: input placeholder.
        tX: test feature matrix.
        tY: gold label matrix, row-aligned with tX.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp: tX[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            best_sequence = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(best_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 11
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds char/pinyin/wubi features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin, inp_wubi: input placeholders.
        tX_char, tX_pinyin, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 12
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm4_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds char/pinyin/wubi features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin, inp_wubi: input placeholders.
        tX_char, tX_pinyin, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 13
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_time_paper.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds char/pinyin/wubi features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin, inp_wubi: input placeholders.
        tX_char, tX_pinyin, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 14
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp, tX, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds the test data batch by batch, Viterbi-decodes each sentence with
    the trained transition matrix and counts per-position label matches.
    Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp: input placeholder.
        tX: test feature matrix.
        tY: gold label matrix, row-aligned with tX.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp: tX[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 15
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_fc_lstm3_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_wubi, tX_char, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set (char + wubi).

    Feeds char/wubi features batch by batch, Viterbi-decodes each sentence
    with the trained transition matrix and counts per-position label
    matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_wubi: input placeholders.
        tX_char, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 16
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm_crf_train_paper.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds char/pinyin/wubi features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin, inp_wubi: input placeholders.
        tX_char, tX_pinyin, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 17
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_fc_lstm3_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, tX_char, tX_pinyin, tY):
    """Compute CRF tagging accuracy (%) over the test set (char + pinyin).

    Feeds char/pinyin features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin: input placeholders.
        tX_char, tX_pinyin: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 18
Project: multi-embedding-cws   Author: wangjksjtu   File: share_lstm3_crf_train_paper.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds char/pinyin/wubi features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin, inp_wubi: input placeholders.
        tX_char, tX_pinyin, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 19
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds char/pinyin/wubi features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin, inp_wubi: input placeholders.
        tX_char, tX_pinyin, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 20
Project: multi-embedding-cws   Author: wangjksjtu   File: lstm3_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp, tX, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds the test data batch by batch, Viterbi-decodes each sentence with
    the trained transition matrix and counts per-position label matches.
    Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp: input placeholder.
        tX: test feature matrix.
        tY: gold label matrix, row-aligned with tX.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp: tX[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 21
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set.

    Feeds char/pinyin/wubi features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin, inp_wubi: input placeholders.
        tX_char, tX_pinyin, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 22
Project: multi-embedding-cws   Author: wangjksjtu   File: nowubi_share_lstm3_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, tX_char, tX_pinyin, tY):
    """Compute CRF tagging accuracy (%) over the test set (char + pinyin).

    Feeds char/pinyin features batch by batch, Viterbi-decodes each
    sentence with the trained transition matrix and counts per-position
    label matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_pinyin: input placeholders.
        tX_char, tX_pinyin: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_pinyin: tX_pinyin[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 23
Project: multi-embedding-cws   Author: wangjksjtu   File: nopy_share_lstm3_crf_train.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_wubi, tX_char, tX_wubi, tY):
    """Compute CRF tagging accuracy (%) over the test set (char + wubi).

    Feeds char/wubi features batch by batch, Viterbi-decodes each sentence
    with the trained transition matrix and counts per-position label
    matches. Prints and returns the accuracy percentage.

    Args:
        sess: active TensorFlow session.
        unary_score: tensor yielding per-position unary potentials.
        test_sequence_length: tensor yielding true sequence lengths.
        transMatrix: trained CRF transition matrix (numpy array).
        inp_char, inp_wubi: input placeholders.
        tX_char, tX_wubi: row-aligned test feature matrices.
        tY: gold label matrix, row-aligned with the features.

    Returns:
        float: label accuracy in percent.
    """
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division: the last batch may be shorter than batchSize.
    numBatch = int((tX_char.shape[0] - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        start = i * batchSize
        endOff = min((i + 1) * batchSize, totalLen)
        y = tY[start:endOff]
        feed_dict = {inp_char: tX_char[start:endOff],
                     inp_wubi: tX_wubi[start:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):
            # Drop padding beyond the true sequence length.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    accuracy = 100.0 * np.float64(correct_labels) / np.float64(total_labels)
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 24
Project: multi-embedding-cws   Author: wangjksjtu   File: fc_lstm3_crf_time.py    MIT License 5 votes vote down vote up
def test_evaluate(sess, unary_score, test_sequence_length, transMatrix, inp_char, inp_pinyin, inp_wubi, tX_char, tX_pinyin, tX_wubi, tY):
    """Evaluate token-level CRF tagging accuracy over the test set.

    Same evaluation loop as the single-embedding variant, but feeds three
    parallel input matrices (character, pinyin and wubi ids).

    Parameters
    ----------
    sess : tf.Session used to run the graph.
    unary_score : tensor of per-token unary potentials.
    test_sequence_length : tensor of true (unpadded) lengths per example.
    transMatrix : CRF transition matrix (numpy array) for Viterbi decoding.
    inp_char, inp_pinyin, inp_wubi : input placeholders.
    tX_char, tX_pinyin, tX_wubi : test inputs, first dimension = examples.
    tY : gold label ids aligned with tX_char.

    Returns
    -------
    float : accuracy in percent (also printed).
    """
    # NOTE: removed unused local `totalEqual`.
    batchSize = FLAGS.batch_size
    totalLen = tX_char.shape[0]
    # Ceiling division so a final partial batch is still evaluated.
    numBatch = int((totalLen - 1) / batchSize) + 1
    correct_labels = 0
    total_labels = 0

    for i in range(numBatch):
        endOff = min((i + 1) * batchSize, totalLen)

        y = tY[i * batchSize:endOff]
        feed_dict = {inp_char: tX_char[i * batchSize:endOff],
                     inp_pinyin: tX_pinyin[i * batchSize:endOff],
                     inp_wubi: tX_wubi[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)

        for tf_unary_scores_, y_, sequence_length_ in zip(
                unary_score_val, y, test_sequence_length_val):

            # Trim padding before decoding; only real tokens count.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]

            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)

            # Evaluate word-level accuracy.
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_

    cl = np.float64(correct_labels)
    tl = np.float64(total_labels)
    accuracy = 100.0 * cl / tl
    print("Accuracy: %.3f%%" % accuracy)
    return accuracy
Example 25
Project: s2g   Author: caesar0301   File: bonus.py    MIT License 5 votes vote down vote up
def line_distance(coords):
    """Return total road distance in kilometers"""
    # Sum the great-circle distance of each consecutive coordinate pair.
    # np.sum of an empty list is 0.0, so fewer than two points yields 0.
    return np.sum([great_circle_dist(a, b)
                   for a, b in zip(coords[:-1], coords[1:])])
Example 26
Project: SpatialPooler   Author: CSDUMMI   File: test_spatial_pooler.py    GNU General Public License v3.0 5 votes vote down vote up
def test_init_collumn():
    cols = spatial_pooler_instance.init_collumn(500, 600, 0.75)

    # Permanences must come back as a float numpy array.
    assert type(cols[0]['permanences']) == type(np.array([0.5], dtype=np.float))
    # NOTE(review): this compares a *count* of pooled inputs against 0.6,
    # so it only asserts the potential pool is non-empty; the claimed
    # "60% of all columns" would need a division by the pool size — confirm intent.
    assert np.sum(cols[0]['potential_pool'] > 0) > 0.6
Example 27
Project: SpatialPooler   Author: CSDUMMI   File: test_spatial_pooler.py    GNU General Public License v3.0 5 votes vote down vote up
def test_run():
    sdr_in = np.random.randn(100) > 0
    sdr_out = spatial_pooler_instance.run(sdr_in)

    # Output SDR has the configured width.
    assert sdr_out.shape[0] == 50

    # Sparsity: fewer than 20% of the output bits may be active.
    assert np.sum(sdr_out) / sdr_out.shape[0] < 0.2

    # Output must be a boolean numpy array.
    assert type(sdr_out) == type(np.array([], dtype=np.bool_))
Example 28
Project: SpatialPooler   Author: CSDUMMI   File: spatial_pooler.py    GNU General Public License v3.0 5 votes vote down vote up
def init_collumn(self, num_collumns, input_size, size_of_potential_pool=0.75):
    """Create `num_collumns` column dicts, each holding a random boolean
    potential pool over `input_size` inputs and one random permanence in
    [0, 1) per pooled input.

    Note: because the pool is `rand(input_size) > size_of_potential_pool`,
    a *larger* threshold produces a *smaller* pool (~25% at the 0.75 default).
    """
    columns = []
    for _ in range(num_collumns):
        pool = np.random.rand(input_size) > size_of_potential_pool
        columns.append({
            'potential_pool': pool,
            'permanences': np.random.rand(np.sum(pool)),
        })
    return columns
Example 29
Project: SpatialPooler   Author: CSDUMMI   File: spatial_pooler.py    GNU General Public License v3.0 5 votes vote down vote up
def activation(self, state):
    """Return whether the fraction of active bits in `state` exceeds
    this pooler's activation threshold."""
    active_fraction = np.sum(state) / state.shape[0]
    return active_fraction > self.threshhold_activation
Example 30
Project: chainer-openai-transformer-lm   Author: soskek   File: utils.py    MIT License 5 votes vote down vote up
def np_softmax(x, t=1):
    """Temperature-scaled softmax over the last axis, numerically stable."""
    scaled = x / t
    # Subtracting the row max keeps exp() from overflowing without
    # changing the result (softmax is shift-invariant).
    exps = np.exp(scaled - np.max(scaled, axis=-1, keepdims=True))
    return exps / np.sum(exps, axis=-1, keepdims=True)
Example 31
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License 5 votes vote down vote up
def __call__(
            self,
            X,
            Y,
            M,
            clf_logits,
            lm_logits=None,
            only_return_losses=False):
        """Compute the classification loss (plus, optionally, the language
        modeling loss), and unless only_return_losses is set, back-propagate
        the combined loss and apply one optimizer step.

        Returns the raw loss array(s): (clf_losses, lm_losses) or clf_losses
        when only_return_losses, otherwise the scalar training loss value.
        """
        # Language modeling loss
        if lm_logits is not None:
            # Next-token targets: drop the first position of each sequence.
            # assumes X is (batch, n_choices, seq, 2) with token ids in
            # channel 0 — TODO confirm against the data pipeline.
            x_shifted = X[:, :, 1:, 0].reshape(-1)
            M = M.reshape(-1, M.shape[2])
            lm_losses = self.lm_criterion(lm_logits, x_shifted)
            lm_losses = lm_losses.reshape(
                X.shape[0] * X.shape[1], X.shape[2] - 1)
            # Mask out padding, then average per sequence over real tokens.
            lm_losses = lm_losses * M[:, 1:]
            lm_losses = F.sum(lm_losses, axis=1) / F.sum(M[:, 1:], axis=1)
        # Classification loss
        clf_losses = self.clf_criterion(clf_logits, Y)
        if only_return_losses:
            return (
                clf_losses,
                lm_losses) if lm_logits is not None else clf_losses

        # Combine: LM loss is an auxiliary objective weighted by lm_coef.
        if self.lm_coef > 0 and lm_logits is not None:
            train_loss = F.sum(clf_losses) + self.lm_coef * F.sum(lm_losses)
        else:
            train_loss = F.sum(clf_losses)

        # Clear stale gradients, backprop, step, then clear again so no
        # gradient state leaks into the next call.
        if self.opt is not None:
            self.opt.target.cleargrads()
        train_loss.backward()
        if self.opt is not None:
            self.opt.update()
            self.opt.target.cleargrads()
        return train_loss.array
Example 32
Project: explirefit   Author: codogogo   File: confusion_matrix.py    Apache License 2.0 5 votes vote down vote up
def compute_all_scores(self):
    """Populate per-class precision/recall/F1 plus micro/macro aggregates
    from the confusion matrix in self.matrix.

    Columns are summed for TP+FP (precision denominator) and rows for
    TP+FN (recall denominator).
    NOTE(review): a class with an empty row/column yields NaN here
    (0/0 under numpy float32) — confirm upstream guarantees this cannot occur.
    """
    self.class_performances = {}
    num_classes = len(self.labels)
    col_sums = np.sum(self.matrix, axis=0)
    row_sums = np.sum(self.matrix, axis=1)
    for i, label in enumerate(self.labels):
        tp = np.float32(self.matrix[i][i])
        precision = tp / np.float32(col_sums[i])
        recall = tp / np.float32(row_sums[i])
        f1 = 2 * precision * recall / (precision + recall)
        self.class_performances[label] = (precision, recall, f1)

    total = np.sum(self.matrix)
    self.microf1 = np.float32(np.trace(self.matrix)) / total
    scores = list(self.class_performances.values())
    self.macrof1 = float(sum(s[2] for s in scores)) / float(num_classes)
    self.macroP = float(sum(s[0] for s in scores)) / float(num_classes)
    self.macroR = float(sum(s[1] for s in scores)) / float(num_classes)
    self.accuracy = float(sum(self.matrix[i, i] for i in range(num_classes))) / float(total)
Example 33
Project: rhodonite   Author: nestauk   File: cumulative.py    MIT License 5 votes vote down vote up
def label_new_vertex(g, o_props, label_steps=False):
    '''label_new_vertex

    Flag, for each step, the vertices whose occurrence count becomes
    positive for the first time.

    Parameters
    ----------
    g : :obj:`graph_tool.Graph` 
        A graph.
    o_props : :obj:`dict` 
        A dictionary of vertex property maps containing the vertex occurrence 
        values at each step.
    label_steps : :obj:`bool` 
        If `True`, also returns a vertex property map that indicates the first
        step that each vertex appeared.

    Returns
    -------
        new_vertex_props : :obj:`dict`
        vertex_step_prop : :obj:`graph_tool.PropertyMap`
    '''
    # BUGFIX: `steps` was referenced below but never defined, raising a
    # NameError whenever label_steps=True. Iterate steps in sorted order so
    # "first appearance" is well defined regardless of dict ordering.
    steps = sorted(o_props)
    new_vertex_props = {}
    _vertex_tracker = g.new_vertex_property('bool')
    for step in steps:
        o_prop = o_props[step]
        new_vertex_prop = g.new_vertex_property('bool')
        # New at this step = occurs now and never flagged at an earlier step.
        new_vertex_prop.a = (o_prop.a > 0) & (_vertex_tracker.a == False)
        new_vertex_props[step] = new_vertex_prop
        _vertex_tracker.a = _vertex_tracker.a + new_vertex_prop.a
    if label_steps:
        vertex_step_prop = g.new_vertex_property('int')
        start = 0
        end = np.sum(new_vertex_props[steps[0]].a)
        for i, step in enumerate(steps):
            if i > 0:
                # BUGFIX: was `new_vertex_props[step - 1]`, which assumed
                # consecutive integer step keys; index the previous step by
                # position instead.
                start = int(start + np.sum(new_vertex_props[steps[i - 1]].a))
                end = int(end + np.sum(new_vertex_props[step].a))
            # Assumes vertices are ordered by first appearance — this mirrors
            # the original slicing logic; confirm against the graph builder.
            vertex_step_prop.a[start:end] = step
        return (new_vertex_props, vertex_step_prop)
    else:
        return new_vertex_props
Example 34
Project: autodmri   Author: samuelstjean   File: gamma.py    MIT License 5 votes vote down vote up
def maxlk_sigma(m, xold=None, eps=1e-8, max_iter=100):
    '''Maximum likelihood equation to estimate sigma from gamma distributed values'''
    K = m.size
    sum_m2 = np.sum(m**2)
    sum_log_m2 = np.sum(np.log(m**2))

    def score(sigma):
        # Derivative of the gamma log-likelihood w.r.t. sigma; zero at the MLE.
        return digamma(sum_m2 / (2 * K * sigma**2)) - sum_log_m2 / K + np.log(2 * sigma**2)

    def score_prime(sigma):
        return -sum_m2 * polygamma(1, sum_m2 / (2 * K * sigma**2)) / (K * sigma**3) + 2 / sigma

    # Newton-Raphson from the sample standard deviation (unless given).
    current = m.std() if xold is None else xold

    for _ in range(max_iter):
        proposed = current - score(current) / score_prime(current)
        if np.abs(current - proposed) < eps:
            return proposed
        current = proposed
    # Not converged within max_iter: return the last iterate anyway.
    return proposed
Example 35
Project: keras_mixnets   Author: titu1994   File: mixnets.py    MIT License 5 votes vote down vote up
def _split_channels(total_filters, num_groups):
    split = [total_filters // num_groups for _ in range(num_groups)]
    split[0] += total_filters - sum(split)
    return split


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/mixnet_model.py 
Example 36
Project: Att-ChemdNER   Author: lingluodlut   File: utils.py    Apache License 2.0 5 votes vote down vote up
def shared(shape, name):
#{{{
    """
    Create an initialized weight for `shape`.

    1-D shapes are treated as biases and zero-initialized as a Theano
    shared variable; higher-rank shapes use the Keras 'glorot_uniform'
    initializer.
    """
    init = initializations.get('glorot_uniform')
    if len(shape) == 1:
        # bias are initialized with zeros
        value = np.zeros(shape)
        return theano.shared(value=value.astype(theano.config.floatX), name=name)
    else:
        # NOTE: a manual uniform draw scaled by sqrt(6 / sum(shape)) used to
        # be computed here and then discarded; removed as dead code — the
        # glorot_uniform initializer implements the equivalent scheme.
        return init(shape=shape, name=name)
#}}}
Example 37
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: voc_eval.py    MIT License 5 votes vote down vote up
def voc_ap(rec, prec, use_07_metric=False):
  """ ap = voc_ap(rec, prec, [use_07_metric])
  Compute VOC AP given precision and recall.
  If use_07_metric is true, uses the
  VOC 07 11 point method (default:False).
  """
  if use_07_metric:
    # 11-point interpolation: average of max precision at recall >= t.
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
      above = rec >= t
      p = np.max(prec[above]) if np.sum(above) > 0 else 0
      ap += p / 11.
    return ap

  # Exact area under the interpolated PR curve.
  # Sentinels pin the curve at recall 0 and 1.
  mrec = np.concatenate(([0.], rec, [1.]))
  mpre = np.concatenate(([0.], prec, [0.]))

  # Precision envelope: make mpre monotonically non-increasing.
  for i in range(mpre.size - 1, 0, -1):
    mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

  # Sum rectangle areas at the points where recall actually changes.
  change = np.where(mrec[1:] != mrec[:-1])[0]
  return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
Example 38
Project: prediction-constrained-topic-models   Author: dtak   File: calc_N_d_K__vb_qpiDir_qzCat.py    MIT License 5 votes vote down vote up
def calc_elbo_for_single_doc__simplified_from_N_d_K(
        word_ct_d_Ud=None,
        log_lik_d_UdK=None,
        alpha_K=None,
        N_d_K=None):
    """Evaluate the simplified per-document ELBO given soft topic counts N_d_K."""
    # Posterior Dirichlet parameters and expected log topic weights.
    theta_d_K = N_d_K + alpha_K
    E_log_pi_d_K = digamma(theta_d_K) - digamma(np.sum(theta_d_K))
    # Unnormalized log responsibilities for each unique token / topic pair.
    log_resp_d_UK = log_lik_d_UdK + E_log_pi_d_K[np.newaxis, :]
    data_term = np.inner(word_ct_d_Ud, logsumexp(log_resp_d_UK, axis=1))
    kl_term = (c_Dir_1D(alpha_K) - c_Dir_1D(theta_d_K)
               + np.inner(alpha_K - theta_d_K, E_log_pi_d_K))
    return data_term + kl_term
Example 39
Project: prediction-constrained-topic-models   Author: dtak   File: calc_N_d_K__vb_qpiDir_qzCat.py    MIT License 5 votes vote down vote up
def c_Dir_1D(alpha_K):
    """Log normalization constant of a Dirichlet with parameter vector alpha_K."""
    log_gamma_of_total = gammaln(np.sum(alpha_K))
    total_log_gammas = np.sum(gammaln(alpha_K))
    return log_gamma_of_total - total_log_gammas
Example 40
Project: prediction-constrained-topic-models   Author: dtak   File: calc_coherence_metrics.py    MIT License 5 votes vote down vote up
def calc_pairwise_cooccurance_counts(
        x_csr_DV=None,
        dataset=None,
        ):
    """ Calculate word cooccurances across a corpus of D documents

    Returns
    -------
    ndocs_V : 1D array, size V
        entry v counts the number of documents that contain v at least once
    ndocs_csc_VV : 2D csc sparse matrix, V x V
        entry v,w counts the number of documents which contain
        the word pair (v, w) at least once

    Examples
    --------
    >>> x_DV = np.arange(6)[:,np.newaxis] * np.hstack([np.eye(6), np.zeros((6, 3))])
    >>> x_DV[:3, :3] += 1
    >>> x_DV[4, 5] += 17
    >>> ndocs_V, ndocs_csc_VV = calc_pairwise_cooccurance_counts(x_csr_DV=x_DV)
    >>> ndocs_V.astype(np.int32).tolist()
    [3, 3, 3, 1, 1, 2, 0, 0, 0]
    >>> ndocs_csc_VV.toarray()[:3, :3]
    array([[ 3.,  3.,  3.],
           [ 3.,  3.,  3.],
           [ 3.,  3.,  3.]])
    """
    if x_csr_DV is None:
        x_csr_DV = dataset['x_csr_DV']
    counts = scipy.sparse.csr_matrix(x_csr_DV, dtype=np.float64)

    # Binarize stored entries: we care whether a word appears, not how often.
    presence = counts.copy()
    presence.data[:] = 1.0

    # Per-word document frequency.
    ndocs_V = np.squeeze(np.asarray(presence.sum(axis=0)))

    # (V x V) document co-occurrence counts via one sparse matrix product.
    ndocs_csc_VV = (presence.T * presence).tocsc()
    return ndocs_V, ndocs_csc_VV
Example 41
Project: prediction-constrained-topic-models   Author: dtak   File: train_and_eval_sklearn_binary_classifier.py    MIT License 5 votes vote down vote up
def calcfrac(bmask):
    """Return the fraction of entries in the boolean mask that are True."""
    n_true = np.sum(bmask)
    return n_true / float(bmask.size)
Example 42
Project: prediction-constrained-topic-models   Author: dtak   File: calc_roc_auc_via_bootstrap.py    MIT License 5 votes vote down vote up
def verify_min_examples_per_label(y_NC, min_examples_per_label):
    '''
    Check whether every label column has enough positive and negative examples.

    NOTE(review): the negative-example check compares the *maximum* count of
    negatives across columns against the threshold, mirroring the original
    logic — confirm whether the minimum was intended.

    Examples
    --------
    >>> y_all_0 = np.zeros(10)
    >>> y_all_1 = np.ones(30)
    >>> verify_min_examples_per_label(y_all_0, 3)
    False
    >>> verify_min_examples_per_label(y_all_1, 2)
    False
    >>> verify_min_examples_per_label(np.hstack([y_all_0, y_all_1]), 10)
    True
    >>> verify_min_examples_per_label(np.eye(3), 2)
    False
    '''
    # Treat a 1-D label vector as a single-column matrix.
    if y_NC.ndim < 2:
        y_NC = np.atleast_2d(y_NC).T
    # Count finite (non-NaN) entries and positives per column.
    n_C = np.sum(np.isfinite(y_NC), axis=0)
    n_pos_C = n_C * np.nanmean(y_NC, axis=0)
    min_neg = np.max(n_C - n_pos_C)
    min_pos = np.min(n_pos_C)
    return bool(min_pos >= min_examples_per_label
                and min_neg >= min_examples_per_label)
Example 43
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: matrix_vae.py    MIT License 5 votes vote down vote up
def train_test_validation(self, M, train_idx, test_idx, valid_idx, n_steps=100000, result_path='result/'):
        """Train the model on a train/valid/test split of the rating matrix M.

        The split indices index into the flattened list of M's nonzero
        entries; three dense matrices are built holding only the respective
        entries. Trains for n_steps - 1 full-batch steps, tracks the test
        RMSE at the best validation RMSE, saves a checkpoint, and returns
        that best test RMSE.
        """
        nonzero_user_idx = M.nonzero()[0]
        nonzero_item_idx = M.nonzero()[1]

        # Dense matrices containing only the entries of each split; all
        # other cells stay 0 (treated as unobserved).
        trainM = np.zeros(M.shape)
        trainM[nonzero_user_idx[train_idx], nonzero_item_idx[train_idx]] = M[nonzero_user_idx[train_idx], nonzero_item_idx[train_idx]]

        validM = np.zeros(M.shape)
        validM[nonzero_user_idx[valid_idx], nonzero_item_idx[valid_idx]] = M[nonzero_user_idx[valid_idx], nonzero_item_idx[valid_idx]]

        testM = np.zeros(M.shape)
        testM[nonzero_user_idx[test_idx], nonzero_item_idx[test_idx]] = M[nonzero_user_idx[test_idx], nonzero_item_idx[test_idx]]

        # Drop eval entries for users with no training ratings — the model
        # has nothing to learn from for them.
        for i in range(self.num_user):
            if np.sum(trainM[i]) == 0:
                testM[i] = 0
                validM[i] = 0

        train_writer = tf.summary.FileWriter(
            result_path + '/train', graph=self.sess.graph)

        best_val_rmse = np.inf
        best_test_rmse = 0

        self.sess.run(tf.global_variables_initializer())
        # NOTE: range(1, n_steps) runs n_steps - 1 iterations, each a
        # full-batch update over the whole matrix.
        for step in range(1, n_steps):
            feed_dict = {self.user: trainM, self.valid_rating:validM, self.test_rating:testM}

            _, mse, mae, valid_rmse, test_rmse,  summary_str = self.sess.run(
                [self.train_step, self.MSE, self.MAE, self.valid_RMSE, self.test_RMSE, self.summary_op], feed_dict=feed_dict)
            train_writer.add_summary(summary_str, step)
            print("Iter {0} Train RMSE:{1}, Valid RMSE:{2}, Test RMSE:{3}".format(step, np.sqrt(mse), valid_rmse, test_rmse))

            # Early-stopping style bookkeeping: remember the test RMSE seen
            # at the best validation RMSE (no actual stopping happens).
            if best_val_rmse > valid_rmse:
                best_val_rmse = valid_rmse
                best_test_rmse = test_rmse

        self.saver.save(self.sess, result_path + "/model.ckpt")
        return best_test_rmse
Example 44
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def extract_quaternion(R):
    """Convert a 3x3 rotation matrix R to a quaternion [w, x, y, z].

    Uses the numerically stable two-branch scheme: when the trace is small
    (rotation near 180 degrees) the branch keyed on the largest diagonal
    element of R + R^T is used; otherwise the direct trace formula applies.
    """
    trace = np.sum(np.diagonal(R))
    if trace + 1 < 0.25:
        sym = R + R.T
        asym = R - R.T
        sym_diag = np.diagonal(sym)
        largest = np.argmax(sym_diag)
        q = np.empty(4)
        if largest == 0:
            q[1] = np.sqrt(sym_diag[0] - trace + 1) / 2
            inv = 1 / q[1]
            q[2] = sym[1, 0] / 4 * inv
            q[3] = sym[2, 0] / 4 * inv
            q[0] = asym[2, 1] / 4 * inv
        elif largest == 1:
            q[2] = np.sqrt(sym_diag[1] - trace + 1) / 2
            inv = 1 / q[2]
            q[1] = sym[1, 0] / 4 * inv
            q[3] = sym[2, 1] / 4 * inv
            q[0] = asym[0, 2] / 4 * inv
        else:
            q[3] = np.sqrt(sym_diag[2] - trace + 1) / 2
            inv = 1 / q[3]
            q[1] = sym[2, 0] / 4 * inv
            q[2] = sym[1, 2] / 4 * inv
            q[0] = asym[1, 0] / 4 * inv
        return q

    # Well-conditioned case: w = sqrt(1 + trace) / 2 dominates.
    r = np.sqrt(1 + trace)
    s = 0.5 / r
    return np.array([0.5 * r,
                     (R[2, 1] - R[1, 2]) * s,
                     (R[0, 2] - R[2, 0]) * s,
                     (R[1, 0] - R[0, 1]) * s])
Example 45
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def SID(gt, rc):
    """Mean symmetric spectral information divergence between ground truth
    and reconstruction, computed per spectrum along the first axis.
    The 1e-3 offset regularizes log10 against zeros."""
    n_spectra = gt.shape[0]
    divergences = np.zeros(n_spectra)
    for idx in range(n_spectra):
        forward = np.sum(rc[idx] * np.log10((rc[idx] + 1e-3) / (gt[idx] + 1e-3)))
        backward = np.sum(gt[idx] * np.log10((gt[idx] + 1e-3) / (rc[idx] + 1e-3)))
        divergences[idx] = abs(forward + backward)
    return divergences.mean()
Example 46
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def APPSA(gt, rc):
    """Average per-pixel spectral angle (radians) between gt and rc,
    with spectra along axis 0."""
    dot = np.sum(gt * rc, axis=0)
    norms = np.linalg.norm(gt, axis=0) * np.linalg.norm(rc, axis=0)

    # Regularize the denominator and clip the cosine at 1 so arccos
    # stays defined under round-off.
    ratio = dot / (norms + 1e-3)
    angles = np.arccos(np.where(ratio > 1, 1, ratio))

    return np.sum(angles) / (gt.shape[1] * gt.shape[0])
Example 47
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def SID(gt, rc):
    """Mean symmetric spectral information divergence per band (bands on
    the last axis), each normalized by the spatial size of the band.
    The 1e-3 offset regularizes log10 against zeros."""
    n_bands = gt.shape[2]
    divergences = np.zeros(n_bands)
    for b in range(n_bands):
        forward = np.sum(rc[:, :, b] * np.log10((rc[:, :, b] + 1e-3) / (gt[:, :, b] + 1e-3)))
        backward = np.sum(gt[:, :, b] * np.log10((gt[:, :, b] + 1e-3) / (rc[:, :, b] + 1e-3)))
        divergences[b] = abs(forward + backward)
    return np.mean(divergences / (gt.shape[1] * gt.shape[0]))
Example 48
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def APPSA(gt, rc):
    """Average per-pixel spectral angle (radians) between gt and rc,
    with spectra along axis 0."""
    dot = np.sum(gt * rc, axis=0)
    norms = np.linalg.norm(gt, axis=0) * np.linalg.norm(rc, axis=0)

    # Regularize the denominator and clip the cosine at 1 so arccos
    # stays defined under round-off.
    ratio = dot / (norms + 1e-3)
    angles = np.arccos(np.where(ratio > 1, 1, ratio))

    return np.sum(angles) / (gt.shape[1] * gt.shape[0])
Example 49
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License 5 votes vote down vote up
def possibilities_generator(
        prior, min_pos, max_start_pos, constraint_len, total_filled):
    """
    Given a row prior, a min_pos, max_start_pos, and constraint length,
    yield each potential row

    prior is an array of:
        -1 (unknown),
        0 (definitely empty),
        1 (definitely filled)

    For each allowed start position, a candidate row is built with the run
    of `constraint_len` filled cells placed there, the cells immediately
    before/after the run forced empty, and the prior's known cells merged
    in. Candidates that contradict the prior, overfill/underfill relative
    to `total_filled`, or change nothing are skipped.
    """
    # Boolean views of the prior's known-filled / known-empty cells.
    prior_filled = np.zeros(len(prior)).astype(bool)
    prior_filled[prior == 1] = True
    prior_empty = np.zeros(len(prior)).astype(bool)
    prior_empty[prior == 0] = True
    for start_pos in range(min_pos, max_start_pos + 1):
        # Start from all-unknown, then place the run of filled cells.
        possible = -1 * np.ones(len(prior))
        possible[start_pos:start_pos + constraint_len] = 1
        # Cells adjacent to the run must be empty so the run is maximal.
        if start_pos + constraint_len < len(possible):
            possible[start_pos + constraint_len] = 0
        if start_pos > 0:
            possible[start_pos - 1] = 0

        # add in the prior
        possible[np.logical_and(possible == -1, prior == 0)] = 0
        possible[np.logical_and(possible == -1, prior == 1)] = 1

        # if contradiction with prior, continue
        # 1. possible changes prior = 1 to something else
        # 2. possible changes prior = 0 to something else
        # 3. everything is assigned in possible but there are not
        #    enough filled in
        # 4. possible changes nothing about the prior
        if np.any(possible[np.where(prior == 1)[0]] != 1) or \
                np.any(possible[np.where(prior == 0)[0]] != 0) or \
                np.sum(possible == 1) > total_filled or \
                (np.all(possible >= 0) and np.sum(possible == 1) <
                    total_filled) or \
                np.all(prior == possible):
            continue
        yield possible
Example 50
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License 5 votes vote down vote up
def _puzzle_is_solved(self):
        if np.sum(self.puzzle_state == -1) == 0:
            return True
        return False