Python numpy.concatenate() Examples

The following code examples show how to use numpy.concatenate(). They are drawn from open source Python projects.
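
Before the project examples, a minimal self-contained sketch of the basic call may help (assuming only that NumPy is installed and imported as np): numpy.concatenate() joins a sequence of arrays along an existing axis, and the arrays must match in shape on every other axis.

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])

# Join along axis 0 (rows); shapes must agree on all remaining axes.
np.concatenate((a, b), axis=0)       # -> [[1, 2], [3, 4], [5, 6]]

# Join along axis 1 (columns); b is transposed to shape (2, 1) first.
np.concatenate((a, b.T), axis=1)     # -> [[1, 2, 5], [3, 4, 6]]

# axis=None flattens the inputs before joining them.
np.concatenate((a, b), axis=None)    # -> [1, 2, 3, 4, 5, 6]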

Example 1
Project: mmdetection   Author: open-mmlab   File: sampler.py    Apache License 2.0
def __iter__(self):
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            indice = np.where(self.flag == i)[0]
            assert len(indice) == size
            np.random.shuffle(indice)
            num_extra = int(np.ceil(size / self.samples_per_gpu)
                            ) * self.samples_per_gpu - len(indice)
            indice = np.concatenate(
                [indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        indices = [
            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(indices) // self.samples_per_gpu))
        ]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert len(indices) == self.num_samples
        return iter(indices) 
Example 2
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet.py    MIT License
def test(model, data):
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-'*50)
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([x_test[:50],x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-'*50)
    plt.imshow(plt.imread("real_and_recon.png"))
    plt.show() 
Example 3
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr 
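
The pattern above stacks each score type as a row and then transposes, giving one row per sample with columns [density, uncertainty]. A toy check (hypothetical numbers, assuming numpy is imported as np):

densities_neg = np.array([0.2, 0.5])   # hypothetical density scores
uncerts_neg = np.array([0.1, 0.3])     # hypothetical uncertainty scores
values_neg = np.concatenate(
    (densities_neg.reshape((1, -1)),
     uncerts_neg.reshape((1, -1))),
    axis=0).transpose([1, 0])
# values_neg -> [[0.2, 0.1], [0.5, 0.3]]  (one row per sample)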
Example 4
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License
def iter_apply(Xs, Ms, Ys):
    # fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
    logits = []
    cost = 0
    with chainer.using_config('train', False), \
            chainer.using_config('enable_backprop', False):
        for xmb, mmb, ymb in iter_data(
                Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
            n = len(xmb)
            XMB = model.xp.asarray(xmb)
            YMB = model.xp.asarray(ymb)
            MMB = model.xp.asarray(mmb)
            h = model(XMB)
            clf_logits = clf_head(h, XMB)
            clf_logits *= n
            clf_losses = compute_loss_fct(
                XMB, YMB, MMB, clf_logits, only_return_losses=True)
            clf_losses *= n
            logits.append(cuda.to_cpu(clf_logits.array))
            cost += cuda.to_cpu(F.sum(clf_losses).array)
        logits = np.concatenate(logits, 0)
    return logits, cost 
Example 5
Project: face-attendance-machine   Author: matiji66   File: encoding_images.py    Apache License 2.0
def load_encodings():
    """
    Load the previously saved face encoding vectors and the corresponding name vectors, and return them.
    :return:
    """
    # Generate the encoding files first if they do not exist yet.
    if not os.path.exists(KNOWN_FACE_NANE) or not os.path.exists(KNOWN_FACE_ENCODINGS):
        encoding_images(data_path)
    known_face_encodings = np.load(KNOWN_FACE_ENCODINGS)
    known_face_names = np.load(KNOWN_FACE_NANE)
    aa = [file for file in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, file)) and file.endswith("npy")]
    # ("known_face_encodings_") or file.startswith("known_face_name_"))
    for data in aa:
        if data.startswith('known_face_encodings_'):
            tmp_face_encodings = np.load(os.path.join(data_path,data))
            known_face_encodings = np.concatenate((known_face_encodings, tmp_face_encodings), axis=0)
            print("load ", data)
        elif data.startswith('known_face_name_'):
            tmp_face_name = np.load(os.path.join(data_path, data))
            known_face_names = np.concatenate((known_face_names, tmp_face_name), axis=0)
            print("load ", data)
        else:
            print('skip to load original ', data)
    return known_face_encodings,known_face_names 
Example 6
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def box2dtobox3d(boxes2d, z_translation=0.0, z_size=0.0, z_angle=0.0):
    """
    Transforms 2D boxes into 3D boxes.
    :param boxes2d: np array shaped N,4. box = [x1,y1,x2,y2] (1 = bottom-left corner, 2 = upper-right corner)
    :return: boxes3d np array shaped N,7. box = [t1,t2,t3,s1,s2,s3,z_angle]
    """
    ctr_x = np.mean(boxes2d[:, [0, 2]], axis=-1, keepdims=True)
    ctr_y = np.mean(boxes2d[:, [1, 3]], axis=-1, keepdims=True)
    ctr_z = np.full([boxes2d.shape[0], 1], z_translation)
    ctr = np.concatenate((ctr_x, ctr_y, ctr_z), -1)

    size_x = boxes2d[:, 2:3] - boxes2d[:, 0:1]
    size_y = boxes2d[:, 3:4] - boxes2d[:, 1:2]
    size_z = np.full([boxes2d.shape[0], 1], z_size)
    size = np.concatenate((size_x, size_y, size_z), -1)

    z_angle = np.full([boxes2d.shape[0], 1], z_angle)

    return np.concatenate((ctr, size, z_angle), -1) 
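
A quick check of what the three concatenations above produce (hypothetical numbers, assuming numpy is imported as np and box2dtobox3d is defined as shown):

boxes2d = np.array([[0., 0., 2., 4.]])   # one box: [x1, y1, x2, y2]
boxes3d = box2dtobox3d(boxes2d, z_translation=1.0, z_size=2.0)
# boxes3d -> [[1., 2., 1., 2., 4., 2., 0.]]  i.e. [t1, t2, t3, s1, s2, s3, z_angle]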
Example 7
Project: fbpconv_tf   Author: panakino   File: util.py    GNU General Public License v3.0
def combine_img_prediction(data, gt, pred):
    """
    Combines the data, ground truth and the prediction into one rgb image
    
    :param data: the data tensor
    :param gt: the ground truth tensor
    :param pred: the prediction tensor
    
    :returns img: the concatenated rgb image 
    """
    ny = pred.shape[2]
    ch = data.shape[3]
    img = np.concatenate((to_rgb(crop_to_shape(data, pred.shape).reshape(-1, ny, ch)), 
                          to_rgb(crop_to_shape(gt[..., 1], pred.shape).reshape(-1, ny, 1)), 
                          to_rgb(pred[..., 1].reshape(-1, ny, 1))), axis=1)
    return img 
Example 8
Project: good-semi-bad-gan   Author: christiancosgrove   File: cifar10_data.py    MIT License
def load(data_dir, subset='train'):
    maybe_download_and_extract(data_dir)
    if subset=='train':
        train_data = [unpickle(os.path.join(data_dir,'cifar-10-batches-py/data_batch_' + str(i))) for i in range(1,6)]
        trainx = np.concatenate([d['x'] for d in train_data],axis=0)
        trainy = np.concatenate([d['y'] for d in train_data],axis=0)
        return trainx, trainy
    elif subset=='test':
        test_data = unpickle(os.path.join(data_dir,'cifar-10-batches-py/test_batch'))
        testx = test_data['x']
        testy = test_data['y']
        return testx, testy
    else:
        raise NotImplementedError('subset should be either train or test')


# load cars 
Example 9
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Example 10
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def block_split(X, Y):
    """
    Split the data into 80% for training and 20% for testing
    in a block size of 100.
    :param X: 
    :param Y: 
    :return: 
    """
    print("Isolated split 80%, 20% for training and testing")
    num_samples = X.shape[0]
    partition = int(num_samples/3)
    X_adv, Y_adv = X[:partition], Y[:partition]
    X_norm, Y_norm = X[partition:2*partition], Y[partition:2*partition]
    X_noisy, Y_noisy = X[2*partition:], Y[2*partition:]

    num_train = int(partition * 0.008) * 100
    X_train = np.concatenate((X_adv[:num_train], X_norm[:num_train], X_noisy[:num_train]))
    Y_train = np.concatenate((Y_adv[:num_train], Y_norm[:num_train], Y_noisy[:num_train]))

    X_test = np.concatenate((X_adv[num_train:], X_norm[num_train:], X_noisy[num_train:]))
    Y_test = np.concatenate((Y_adv[num_train:], Y_norm[num_train:], Y_noisy[num_train:]))

    return X_train, Y_train, X_test, Y_test 
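
A shape check with made-up data (assuming numpy is imported as np and block_split is defined as above): with 3000 samples, each of the three equal partitions contributes blocks of 100 until roughly 80% of it goes to training.

X = np.zeros((3000, 4))
Y = np.zeros(3000)
X_train, Y_train, X_test, Y_test = block_split(X, Y)
# partition = 1000, num_train = int(1000 * 0.008) * 100 = 800
# X_train.shape -> (2400, 4), X_test.shape -> (600, 4)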
Example 11
Project: models   Author: kipoi   File: dataloader_m.py    MIT License
def _prepro_cpg(self, states, dists):
        """Preprocess the state and distance of neighboring CpG sites."""
        prepro_states = []
        prepro_dists = []
        for state, dist in zip(states, dists):
            nan = state == dat.CPG_NAN
            if np.any(nan):
                state[nan] = np.random.binomial(1, state[~nan].mean(),
                                                nan.sum())
                dist[nan] = self.cpg_max_dist
            dist = np.minimum(dist, self.cpg_max_dist) / self.cpg_max_dist
            prepro_states.append(np.expand_dims(state, 1))
            prepro_dists.append(np.expand_dims(dist, 1))
        prepro_states = np.concatenate(prepro_states, axis=1)
        prepro_dists = np.concatenate(prepro_dists, axis=1)
        if self.cpg_wlen:
            center = prepro_states.shape[2] // 2
            delta = self.cpg_wlen // 2
            tmp = slice(center - delta, center + delta)
            prepro_states = prepro_states[:, :, tmp]
            prepro_dists = prepro_dists[:, :, tmp]
        return (prepro_states, prepro_dists) 
Example 12
Project: SyNEThesia   Author: RunOrVeith   File: feature_creators.py    MIT License
def _split_into_chunks(signal, chunks_per_second=24):
    # TODO currently broken
    raise NotImplementedError("Splitting into chunks is currently broken.")
    window_length_ms = 1/chunks_per_second * 1000
    intervals = np.arange(window_length_ms, signal.shape[0], window_length_ms, dtype=np.int32)
    chunks = np.array_split(signal, intervals, axis=0)
    pad_to = _next_power_of_two(np.max([chunk.shape[0] for chunk in chunks]))
    padded_chunks = np.stack([np.concatenate([chunk, np.zeros((pad_to - chunk.shape[0],))]) for chunk in chunks])
    return padded_chunks 
Example 13
Project: SyNEThesia   Author: RunOrVeith   File: feature_creators.py    MIT License
def logfbank_features(signal, samplerate=44100, fps=24, num_filt=40, num_cepstra=40, nfft=8192, **kwargs):
    winstep = 2 / fps
    winlen = winstep * 2
    feat, energy = psf.fbank(signal=signal, samplerate=samplerate,
                             winlen=winlen, winstep=winstep, nfilt=num_filt,
                             nfft=nfft)
    feat = np.log(feat)
    feat = psf.dct(feat, type=2, axis=1, norm='ortho')[:, :num_cepstra]
    feat = psf.lifter(feat, L=22)
    feat = np.asarray(feat)

    energy = np.log(energy)
    energy = energy.reshape([energy.shape[0],1])

    if feat.shape[0] > 1:
        std = 0.5 * np.std(feat, axis=0)
        mat = (feat - np.mean(feat, axis=0)) / std
    else:
        mat = feat

    mat = np.concatenate((mat, energy), axis=1)

    duration = signal.shape[0] / samplerate
    expected_frames = fps * duration
    assert mat.shape[0] - expected_frames <= 1, "Produced feature count does not match the frame rate"
    return mat 
Example 14
Project: b2ac   Author: hbldh   File: reference.py    MIT License
def fit_improved_B2AC(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses float storage during calculations and performs the
    eigensolver on a float array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    points = np.array(points, 'float')
    S = _calculate_scatter_matrix_py(points[:, 0], points[:, 1])
    S3 = S[3:, 3:]
    S3 = np.array([S3[0, 0], S3[0, 1], S3[0, 2], S3[1, 1], S3[1, 2], S3[2, 2]])
    S3_inv = inverse_symmetric_3by3_double(S3).reshape((3, 3))
    S2 = S[:3, 3:]
    T = -np.dot(S3_inv, S2.T)
    M = S[:3, :3] + np.dot(S2, T)
    inv_mat = np.array([[0, 0, 0.5], [0, -1, 0], [0.5, 0, 0]], 'float')
    M = inv_mat.dot(M)

    e_vals, e_vect = np.linalg.eig(M)

    try:
        elliptical_solution_index = np.where(((4 * e_vect[0, :] * e_vect[2, :]) - ((e_vect[1, :] ** 2))) > 0)[0][0]
    except IndexError:
        # No eigenvector satisfied the ellipse condition. Fit was not an ellipse.
        raise ArithmeticError("No elliptical solution found.")

    a = e_vect[:, elliptical_solution_index]
    if a[0] < 0:
        a = -a
    return np.concatenate((a, np.dot(T, a))) 
Example 15
Project: b2ac   Author: hbldh   File: reference.py    MIT License
def fit_improved_B2AC_int(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses int64 storage during calculations and performs the
    eigensolver on an integer array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    S = _calculate_scatter_matrix_c(points[:, 0], points[:, 1])
    S1 = np.array([S[0, 0], S[0, 1], S[0, 2], S[1, 1], S[1, 2], S[2, 2]])
    S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])
    adj_S3, det_S3 = inverse_symmetric_3by3_int(S3)
    S2 = S[:3, 3:]
    T_no_det = - np.dot(np.array(adj_S3.reshape((3, 3)), 'int64'), np.array(S2.T, 'int64'))
    M_term2 = np.dot(np.array(S2, 'int64'), T_no_det) // det_S3
    M = add_symmetric_matrix(M_term2, S1)
    M[[0, 2], :] /= 2
    M[1, :] = -M[1, :]

    e_vals, e_vect = np.linalg.eig(M)

    try:
        elliptical_solution_index = np.where(((4 * e_vect[0, :] * e_vect[2, :]) - ((e_vect[1, :] ** 2))) > 0)[0][0]
    except IndexError:
        # No eigenvector satisfied the ellipse condition. Fit was not an ellipse.
        raise ArithmeticError("No elliptical solution found.")
    a = e_vect[:, elliptical_solution_index]
    return np.concatenate((a, np.dot(T_no_det, a) / det_S3)) 
Example 16
Project: b2ac   Author: hbldh   File: int.py    MIT License
def fit_improved_B2AC_int(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses int64 storage during calculations and performs the
    eigensolver on an integer array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section coefficients array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    e_conds = []
    M, T_no_det, determinant_S3 = _calculate_M_and_T_int64(points)

    e_vals = sorted(QR_algorithm_shift_Givens_int(M)[0])

    a = None
    for ev_ind in [1, 2, 0]:
        # Find the eigenvector that matches this eigenvalue.
        eigenvector, e_norm = inverse_iteration_for_eigenvector_int(M, e_vals[ev_ind])
        # See if that eigenvector yields an elliptical solution.
        elliptical_condition = (4 * eigenvector[0] * eigenvector[2]) - (eigenvector[1] ** 2)
        e_conds.append(elliptical_condition)
        if elliptical_condition > 0:
            a = eigenvector
            break

    if a is None:
        raise ArithmeticError("No elliptical solution found.")

    conic_coefficients = np.concatenate((a, np.dot(T_no_det, a) // determinant_S3))
    return conic_coefficients 
Example 17
Project: b2ac   Author: hbldh   File: double.py    MIT License
def fit_improved_B2AC_double(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses float storage during calculations and performs the
    eigensolver on a float array. It only uses `b2ac` package methods for fitting, to
    be as similar to the integer implementation as possible.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    e_conds = []
    points = np.array(points, 'float')

    M, T = _calculate_M_and_T_double(points)

    e_vals = sorted(qr.QR_algorithm_shift_Givens_double(M)[0])

    a = None
    for ev_ind in [1, 2, 0]:
        # Find the eigenvector that matches this eigenvalue.
        eigenvector = inv_iter.inverse_iteration_for_eigenvector_double(M, e_vals[ev_ind], 5)

        # See if that eigenvector yields an elliptical solution.
        elliptical_condition = (4 * eigenvector[0] * eigenvector[2]) - (eigenvector[1] ** 2)
        e_conds.append(elliptical_condition)
        if elliptical_condition > 0:
            a = eigenvector
            break

    if a is None:
        print("Eigenvalues = {0}".format(e_vals))
        print("Elliptical conditions = {0}".format(e_conds))
        raise ArithmeticError("No elliptical solution found.")

    conic_coefficients = np.concatenate((a, np.dot(T, a)))
    return conic_coefficients 
Example 18
Project: b2ac   Author: hbldh   File: polygon.py    MIT License
def get_closed_polygon(self):
        """Appends the first point to the end of point array, in order to "close" the polygon."""
        if not self.is_closed:
            return np.concatenate([self.polygon_points, [self.polygon_points[0, :]]])
        else:
            return self.polygon_points 
Example 19
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License
def iter_predict(Xs, Ms):
    logits = []
    with chainer.using_config('train', False), \
            chainer.using_config('enable_backprop', False):
        for xmb, mmb in iter_data(
                Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):
            n = len(xmb)
            XMB = model.xp.asarray(xmb)
            MMB = model.xp.asarray(mmb)
            h = model(XMB)
            clf_logits = clf_head(h, XMB)
            logits.append(cuda.to_cpu(clf_logits.array))
    logits = np.concatenate(logits, 0)
    return logits 
Example 20
Project: explirefit   Author: codogogo   File: text_embeddings.py    Apache License 2.0
def add_word(self, lang, word, vector = None):
		if word not in self.lang_vocabularies[lang]:
			self.lang_vocabularies[lang][word] = len(self.lang_vocabularies[lang])
			rvec = np.random.uniform(-1.0, 1.0, size = [self.emb_sizes[lang]]) if vector is None else vector
			rnrm = np.linalg.norm(rvec, 2)
			self.lang_embeddings[lang] = np.vstack((self.lang_embeddings[lang], rvec))
			self.lang_emb_norms[lang] = np.concatenate((self.lang_emb_norms[lang], [rnrm])) 
Example 21
Project: explirefit   Author: codogogo   File: text_embeddings.py    Apache License 2.0
def merge_embedding_spaces(self, languages, emb_size, merge_name = 'merge', lang_prefix_delimiter = '__', special_tokens = None):
		print("Merging embedding spaces...")
		merge_vocabulary = {}
		merge_embs = []
		merge_norms = []

		for lang in languages:
			print("For language: " + lang)
			norms =[]
			embs = []
			for word in self.lang_vocabularies[lang]:
				if special_tokens is None or word not in special_tokens:
					merge_vocabulary[lang + lang_prefix_delimiter + word] = len(merge_vocabulary)
				else:
					merge_vocabulary[word] = len(merge_vocabulary)
				embs.append(self.get_vector(lang, word))
				norms.append(self.get_norm(lang, word))
			merge_embs =  np.copy(embs) if len(merge_embs) == 0 else np.vstack((merge_embs, embs))
			merge_norms = np.copy(norms) if len(merge_norms) == 0 else np.concatenate((merge_norms, norms))

		#if padding_token is not None:
		#	merge_vocabulary[padding_token] = len(merge_vocabulary)
		#	rvec = np.random.uniform(-1.0, 1.0, size = [emb_size])
		#	rnrm = np.linalg.norm(rvec, 2)
		#	merge_embs = np.vstack((merge_embs, rvec))
		#	merge_norms = np.concatenate((merge_norms, [rnrm]))
			
		self.lang_vocabularies[merge_name] = merge_vocabulary
		self.lang_embeddings[merge_name] = merge_embs
		self.lang_emb_norms[merge_name] = merge_norms
		self.emb_sizes[merge_name] = emb_size 
Example 22
Project: explirefit   Author: codogogo   File: trainer.py    Apache License 2.0
def test(self, test_data, batch_size, eval_params = None, print_batches = False):
		epoch_loss = 0
		batches_eval = batcher.batch_iter(test_data, batch_size, 1, shuffle = False)
		eval_batch_counter = 1
				
		for batch_eval in batches_eval:
			if (len(batch_eval) == batch_size):
				feed_dict_eval, golds_batch_eval = self.feed_dict_function(self.model, batch_eval, None, predict = True)	
				preds_batch_eval = self.predict(feed_dict_eval)
				batch_eval_loss = self.model.loss.eval(session = self.session, feed_dict = feed_dict_eval)
				epoch_loss += batch_eval_loss

				if eval_batch_counter == 1:
					golds = golds_batch_eval
					preds = preds_batch_eval
				else:
					golds = np.concatenate((golds, golds_batch_eval), axis = 0)
					preds = np.concatenate((preds, preds_batch_eval), axis = 0)
				if print_batches:
					print(eval_batch_counter)
			eval_batch_counter += 1

		if self.eval_func is not None:
			score = self.eval_func(golds, preds, eval_params)
			return preds, epoch_loss, score
		else:
			return preds, epoch_loss 
Example 23
Project: explirefit   Author: codogogo   File: trainer.py    Apache License 2.0
def cross_validate(self, data, batch_size, max_num_epochs, num_folds = 5, num_devs_not_better_end = 5, batch_dev_perf = 100, print_batch_losses = True, dev_score_maximize = True, configuration = None, print_training = False, micro_performance = True, shuffle_data = True):
		folds = np.array_split(data, num_folds)
		results = {}

		for i in range(num_folds):
			train_data = []
			for j in range(num_folds):
				if j != i:
					train_data.extend(folds[j])
			dev_data = folds[i]

			print("Sizes: train " + str(len(train_data)) + "; dev " + str(len(dev_data)))
			print("Fold " + str(i+1) + ", creating model...")
			model, conf_str, session = self.config_func(configuration)
			self.model = model
			self.session = session
			print("Fold " + str(i+1) + ", training the model...")
			results[conf_str + "__fold-" + str(i+1)] = self.train_dev(train_data, dev_data, batch_size, max_num_epochs, num_devs_not_better_end, batch_dev_perf, print_batch_losses, dev_score_maximize, configuration, print_training, shuffle_data = shuffle_data)
			
			print("Closing session, reseting the default graph (freeing memory)")
			self.session.close()
			tf.reset_default_graph()
			print("Performance: " + str(results[conf_str + "__fold-" + str(i+1)][1]))
		
		if micro_performance:
			print("Concatenating fold predictions for micro-performance computation")
			cntr = 0
			for k in results:
				cntr += 1
				if cntr == 1:
					all_preds = results[k][2]
					all_golds = results[k][3]
				else:
					all_preds = np.concatenate((all_preds, results[k][2]), axis = 0)
					all_golds = np.concatenate((all_golds, results[k][3]), axis = 0)	
			micro_perf = self.eval_func(all_golds, all_preds, self.labels)
			return results, micro_perf
		else: 
			return results 
Example 24
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharatransform.py    BSD 3-Clause "New" or "Revised" License
def test_transform_unit_simple(self):
        """Class SpharaTransform, mode='unit', simple triangular mesh

        Determine the SPHARA forward and inverse transform with unit
        edge weight for a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA transform instance for the mesh
        st_unit_simple = st.SpharaTransform(testtrimesh, mode='unit')

        # the data to transform
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(st_unit_simple.basis()[0])])

        # SPHARA analysis
        coef_unit_simple = st_unit_simple.analysis(data)

        # SPHARA synthesis
        recon_unit_simple = st_unit_simple.synthesis(coef_unit_simple)

        self.assertTrue(
            np.allclose(
                np.absolute(coef_unit_simple),
                [[0.0, 0.0, 0.0],
                 [1.73205081, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
            and
            np.allclose(
                recon_unit_simple,
                data)) 
Example 25
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharatransform.py    BSD 3-Clause "New" or "Revised" License
def test_transform_ie_simple(self):
        """Class SpharaTransform, mode='inv_euclidean', simple triangular mesh

        Determine the SPHARA forward and inverse transform with
        inverse Euclidean edge weight for a simple triangular mesh, 3
        vertices, single triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA transform instance for the mesh
        st_ie_simple = st.SpharaTransform(testtrimesh, mode='inv_euclidean')

        # the data to transform
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(st_ie_simple.basis()[0])])

        # SPHARA analysis
        coef_ie_simple = st_ie_simple.analysis(data)

        # SPHARA synthesis
        recon_ie_simple = st_ie_simple.synthesis(coef_ie_simple)

        self.assertTrue(
            np.allclose(
                np.absolute(coef_ie_simple),
                [[0.0, 0.0, 0.0],
                 [1.73205081, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
            and
            np.allclose(
                recon_ie_simple,
                data)) 
Example 26
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharatransform.py    BSD 3-Clause "New" or "Revised" License
def test_transform_fem_simple(self):
        """Class SpharaTransform, mode='fem', simple triangular mesh

        Determine the SPHARA forward and inverse transform with fem
        discretisation for a simple triangular mesh, 3 vertices,
        single triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA transform instance for the mesh
        st_fem_simple = st.SpharaTransform(testtrimesh, mode='fem')

        # the data to transform
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(st_fem_simple.basis()[0])])

        # SPHARA analysis
        coef_fem_simple = st_fem_simple.analysis(data)

        # SPHARA synthesis
        recon_fem_simple = st_fem_simple.synthesis(coef_fem_simple)

        self.assertTrue(
            np.allclose(
                np.absolute(coef_fem_simple),
                [[0.0, 0.0, 0.0],
                 [1.87082868, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
            and
            np.allclose(
                recon_fem_simple,
                data)) 
Example 27
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_unit_allpass_simple(self):
        """Class SpharaFilter, mode='unit', allpass, simple mesh

        Apply a SPHARA spatial allpass filter with unit edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_unit_simple = sf.SpharaFilter(testtrimesh, mode='unit',
                                         specification=0)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_unit_simple.basis()[0])])

        # apply SPHARA based spatial allpass filter
        data_filt_unit_simple = sf_unit_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_unit_simple,
                data)) 
Example 28
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_unit_dc_simple(self):
        """Class SpharaFilter, mode='unit', dc-pass, simple mesh

        Apply a SPHARA spatial dc-pass filter with unit edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_unit_simple = sf.SpharaFilter(testtrimesh, mode='unit',
                                         specification=1)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_unit_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[3] = [0., 0., 0.]
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial dc-pass filter
        data_filt_unit_simple = sf_unit_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_unit_simple,
                data_filt_ref)) 
Example 29
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_ie_allpass_simple(self):
        """Class SpharaFilter, mode='inv_euclidean', allpass, simple mesh

        Apply a SPHARA spatial allpass filter with inv_euclidean edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_ie_simple = sf.SpharaFilter(testtrimesh, mode='inv_euclidean',
                                       specification=0)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_ie_simple.basis()[0])])

        # apply SPHARA based spatial allpass filter
        data_filt_ie_simple = sf_ie_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_ie_simple,
                data)) 
Example 30
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_ie_dc_simple(self):
        """Class SpharaFilter, mode='inv_euclidean', dc-pass, simple mesh

        Apply a SPHARA spatial dc-pass filter with inv_euclidean edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_ie_simple = sf.SpharaFilter(testtrimesh, mode='inv_euclidean',
                                       specification=1)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_ie_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[3] = [0., 0., 0.]
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial dc-pass filter
        data_filt_ie_simple = sf_ie_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_ie_simple,
                data_filt_ref)) 
Example 31
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_ie_low_simple(self):
        """Class SpharaFilter, mode='inv_euclidean', lowpass, simple mesh

        Apply a SPHARA spatial lowpass filter with inv_euclidean edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_ie_simple = sf.SpharaFilter(testtrimesh, mode='inv_euclidean',
                                         specification=[1., 1., 0.])

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_ie_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial lowpass filter
        data_filt_ie_simple = sf_ie_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_ie_simple,
                data_filt_ref)) 
Example 32
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_fem_allpass_simple(self):
        """Class SpharaFilter, mode='fem', allpass, simple mesh

        Apply a SPHARA spatial allpass filter with fem edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_fem_simple = sf.SpharaFilter(testtrimesh, mode='fem',
                                        specification=0)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_fem_simple.basis()[0])])

        # apply SPHARA based spatial allpass filter
        data_filt_fem_simple = sf_fem_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_fem_simple,
                data)) 
Example 33
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_fem_dc_simple(self):
        """Class SpharaFilter, mode='fem', dc-pass, simple mesh

        Apply a SPHARA spatial dc-pass filter with fem edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_fem_simple = sf.SpharaFilter(testtrimesh, mode='fem',
                                        specification=1)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_fem_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[3] = [0., 0., 0.]
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial dc-pass filter
        data_filt_fem_simple = sf_fem_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_fem_simple,
                data_filt_ref)) 
Example 34
Project: aospy   Author: spencerahill   File: test_utils_vertcoord.py    Apache License 2.0
def setUp(self):
        self.p_in_hpa = np.array([1000, 925, 850, 775, 700, 600, 500, 400, 300,
                                  200, 150, 100, 70, 50, 30, 20, 10],
                                 dtype=np.float64)
        self.p_in_pa = self.p_in_hpa*1e2
        self.p_top = 0
        self.p_bot = 1.1e5
        self.p_edges = 0.5*(self.p_in_pa[1:] + self.p_in_pa[:-1])
        self.phalf = np.concatenate(([self.p_bot], self.p_edges, [self.p_top])) 
Example 35
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: voc_eval.py    MIT License
def voc_ap(rec, prec, use_07_metric=False):
  """ ap = voc_ap(rec, prec, [use_07_metric])
  Compute VOC AP given precision and recall.
  If use_07_metric is true, uses the
  VOC 07 11 point method (default:False).
  """
  if use_07_metric:
    # 11 point metric
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
      if np.sum(rec >= t) == 0:
        p = 0
      else:
        p = np.max(prec[rec >= t])
      ap = ap + p / 11.
  else:
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
      mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
  return ap 
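
For orientation, a toy call with hypothetical precision/recall values (assuming numpy is imported as np and voc_ap is defined as above):

rec = np.array([0.1, 0.4, 0.7])    # monotonically increasing recall
prec = np.array([1.0, 0.8, 0.6])   # precision at each recall level
voc_ap(rec, prec)                  # -> 0.52, the area under the interpolated PR curve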
Example 36
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    MIT License
def getDataSets(self, training_paths, max_document_length, percent_dev, batch_size, is_char_based):
        if is_char_based:
            x1_text, x2_text, y=self.getTsvDataCharBased(training_paths)
        else:
            x1_text, x2_text, y=self.getTsvData(training_paths)
        # Build vocabulary
        print("Building vocabulary")
        vocab_processor = MyVocabularyProcessor(max_document_length,min_frequency=0,is_char_based=is_char_based)
        vocab_processor.fit_transform(np.concatenate((x2_text,x1_text),axis=0))
        print("Length of loaded vocabulary ={}".format( len(vocab_processor.vocabulary_)))
        i1=0
        train_set=[]
        dev_set=[]
        sum_no_of_batches = 0
        x1 = np.asarray(list(vocab_processor.transform(x1_text)))
        x2 = np.asarray(list(vocab_processor.transform(x2_text)))
        # Randomly shuffle data
        np.random.seed(131)
        shuffle_indices = np.random.permutation(np.arange(len(y)))
        x1_shuffled = x1[shuffle_indices]
        x2_shuffled = x2[shuffle_indices]
        y_shuffled = y[shuffle_indices]
        dev_idx = -1*len(y_shuffled)*percent_dev//100
        del x1
        del x2
        # Split train/test set
        self.dumpValidation(x1_text,x2_text,y,shuffle_indices,dev_idx,0)
        # TODO: This is very crude, should use cross-validation
        x1_train, x1_dev = x1_shuffled[:dev_idx], x1_shuffled[dev_idx:]
        x2_train, x2_dev = x2_shuffled[:dev_idx], x2_shuffled[dev_idx:]
        y_train, y_dev = y_shuffled[:dev_idx], y_shuffled[dev_idx:]
        print("Train/Dev split for {}: {:d}/{:d}".format(training_paths, len(y_train), len(y_dev)))
        sum_no_of_batches = sum_no_of_batches+(len(y_train)//batch_size)
        train_set=(x1_train,x2_train,y_train)
        dev_set=(x1_dev,x2_dev,y_dev)
        gc.collect()
        return train_set,dev_set,vocab_processor,sum_no_of_batches 
Example 37
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    MIT License
def Rmtx_ri(coef_ri, K, D, L):
    coef_ri = np.squeeze(coef_ri)
    coef_r = coef_ri[:K + 1]
    coef_i = coef_ri[K + 1:]
    R_r = linalg.toeplitz(np.concatenate((np.array([coef_r[-1]]),
                                          np.zeros(L - K - 1))),
                          np.concatenate((coef_r[::-1],
                                          np.zeros(L - K - 1)))
                          )
    R_i = linalg.toeplitz(np.concatenate((np.array([coef_i[-1]]),
                                          np.zeros(L - K - 1))),
                          np.concatenate((coef_i[::-1],
                                          np.zeros(L - K - 1)))
                          )
    return np.dot(np.vstack((np.hstack((R_r, -R_i)), np.hstack((R_i, R_r)))), D) 
Example 38
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    MIT License
def compute_b(G_lst, GtG_lst, beta_lst, Rc0, num_bands, a_ri):
    """
    Compute the uniform sinusoidal samples b from the updated annihilating
    filter coefficients.
    :param GtG_lst: list of G^H G for different subbands
    :param beta_lst: list of beta-s for different subbands
    :param Rc0: right-dual matrix, here it is the convolution matrix associated with c
    :param num_bands: number of bands
    :param L: size of b: L by 1
    :param a_ri: a 2D numpy array. each column corresponds to the measurements within a subband
    :return:
    """
    b_lst = []
    a_Gb_lst = []
    for loop in range(num_bands):
        GtG_loop = GtG_lst[loop]
        beta_loop = beta_lst[loop]
        b_loop = beta_loop - \
                 linalg.solve(GtG_loop,
                              np.dot(Rc0.T,
                                     linalg.solve(np.dot(Rc0, linalg.solve(GtG_loop, Rc0.T)),
                                                  np.dot(Rc0, beta_loop)))
                              )

        b_lst.append(b_loop)
        a_Gb_lst.append(a_ri[:, loop] - np.dot(G_lst[loop], b_loop))

    return np.column_stack(b_lst), linalg.norm(np.concatenate(a_Gb_lst)) 
Example 39
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: connected.py    MIT License
def recollect(self, val):
        w = val['weights']
        b = val['biases']
        if w is None: self.w = val; return
        if self.inp_idx is not None:
            w = np.take(w, self.inp_idx, 0)
            
        keep_b = np.take(b, self.keep)
        keep_w = np.take(w, self.keep, 1)
        train_b = b[self.train:]
        train_w = w[:, self.train:]
        self.w['biases'] = np.concatenate(
            (keep_b, train_b), axis = 0)
        self.w['weights'] = np.concatenate(
            (keep_w, train_w), axis = 1) 
Example 40
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: data.py    MIT License
def shuffle(self):
    batch = self.FLAGS.batch
    data = self.parse()
    size = len(data)

    print('Dataset of {} instance(s)'.format(size))
    if batch > size: self.FLAGS.batch = batch = size
    batch_per_epoch = int(size / batch)

    for i in range(self.FLAGS.epoch):
        shuffle_idx = perm(np.arange(size))
        for b in range(batch_per_epoch):
            # yield these
            x_batch = list()
            feed_batch = dict()

            for j in range(b*batch, b*batch+batch):
                train_instance = data[shuffle_idx[j]]
                try:
                    inp, new_feed = self._batch(train_instance)
                except ZeroDivisionError:
                    print("This image's width or height are zeros: ", train_instance[0])
                    print('train_instance:', train_instance)
                    print('Please remove or fix it then try again.')
                    raise

                if inp is None: continue
                x_batch += [np.expand_dims(inp, 0)]

                for key in new_feed:
                    new = new_feed[key]
                    old_feed = feed_batch.get(key, 
                        np.zeros((0,) + new.shape))
                    feed_batch[key] = np.concatenate([ 
                        old_feed, [new] 
                    ])      
            
            x_batch = np.concatenate(x_batch, 0)
            yield x_batch, feed_batch
        
        print('Finish {} epoch(es)'.format(i + 1)) 
Example 41
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def apply_transform_complete_pc(self, points):
        assert points.ndim == 2 or points.ndim == 1
        assert points.shape[-1] == 3 or points.shape[-1] == 4 or points.shape[-1] == 5
        pc = points[:, :3]
        trans_pc = np.matmul(pc, self._rotation.T) + self._translation[None, :]

        return np.concatenate((trans_pc, points[:, 3:]), axis=-1) 
Example 42
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: voc_eval.py    MIT License
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # 11 point metric
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap 
Example 43
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_cifar10(tfrecord_dir, cifar10_dir):
    print('Loading CIFAR-10 from "%s"' % cifar10_dir)
    import pickle
    images = []
    labels = []
    for batch in range(1, 6):
        with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file:
            data = pickle.load(file, encoding='latin1')
        images.append(data['data'].reshape(-1, 3, 32, 32))
        labels.append(data['labels'])
    images = np.concatenate(images)
    labels = np.concatenate(labels)
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example 44
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_svhn(tfrecord_dir, svhn_dir):
    print('Loading SVHN from "%s"' % svhn_dir)
    import pickle
    images = []
    labels = []
    for batch in range(1, 4):
        with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file:
            data = pickle.load(file, encoding='latin1')
        images.append(data[0])
        labels.append(data[1])
    images = np.concatenate(images)
    labels = np.concatenate(labels)
    assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (73257,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example 45
Project: Electrolyte_Analysis_FTIR   Author: Samuel-Buteau   File: Constant_run.py    MIT License
def get(self, n):
        """
        Will return an array of n random indices drawn from self.GetFresh_list
        - Samuel Buteau, October 2018
        """
        if n >= self.get_fresh_count:
            return numpy.concatenate((self.get(int(n/2)),self.get(n- int(n/2))))


        reshuffle_flag = False

        n_immediate_fulfill = min(n, self.get_fresh_count - self.get_fresh_pos)
        batch_of_indecies = numpy.empty([n], dtype=numpy.int32)
        for i in range(0, n_immediate_fulfill):
            batch_of_indecies[i] = self.GetFresh_list[i + self.get_fresh_pos]

        self.get_fresh_pos += n_immediate_fulfill
        if self.get_fresh_pos >= self.get_fresh_count:
            self.get_fresh_pos -= self.get_fresh_count
            reshuffle_flag = True

            # Now, the orders that needed to be satisfied are satisfied.
        n_delayed_fulfill = max(0, n - n_immediate_fulfill)
        if reshuffle_flag:
            numpy.random.shuffle(self.GetFresh_list)

        if n_delayed_fulfill > 0:
            for i in range(0, n_delayed_fulfill):
                batch_of_indecies[i + n_immediate_fulfill] = self.GetFresh_list[i]
            self.get_fresh_pos = n_delayed_fulfill

        return batch_of_indecies 
Example 46
Project: mmdetection   Author: open-mmlab   File: dataset_wrappers.py    Apache License 2.0
def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        if hasattr(datasets[0], 'flag'):
            flags = []
            for i in range(0, len(datasets)):
                flags.append(datasets[i].flag)
            self.flag = np.concatenate(flags) 
Example 47
Project: mmdetection   Author: open-mmlab   File: eval_hooks.py    Apache License 2.0 5 votes vote down vote up
def evaluate(self, runner, results):
        gt_bboxes = []
        gt_labels = []
        gt_ignore = []
        for i in range(len(self.dataset)):
            ann = self.dataset.get_ann_info(i)
            bboxes = ann['bboxes']
            labels = ann['labels']
            if 'bboxes_ignore' in ann:
                ignore = np.concatenate([
                    np.zeros(bboxes.shape[0], dtype=bool),
                    np.ones(ann['bboxes_ignore'].shape[0], dtype=bool)
                ])
                gt_ignore.append(ignore)
                bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
                labels = np.concatenate([labels, ann['labels_ignore']])
            gt_bboxes.append(bboxes)
            gt_labels.append(labels)
        if not gt_ignore:
            gt_ignore = None
        # If the dataset is VOC2007, then use 11 points mAP evaluation.
        if hasattr(self.dataset, 'year') and self.dataset.year == 2007:
            ds_name = 'voc07'
        else:
            ds_name = self.dataset.CLASSES
        mean_ap, eval_results = eval_map(
            results,
            gt_bboxes,
            gt_labels,
            gt_ignore=gt_ignore,
            scale_ranges=None,
            iou_thr=0.5,
            dataset=ds_name,
            print_summary=True)
        runner.log_buffer.output['mAP'] = mean_ap
        runner.log_buffer.ready = True 
Example 48
Project: mmdetection   Author: open-mmlab   File: voc_eval.py    Apache License 2.0
def voc_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    eval_map(
        det_results,
        gt_bboxes,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        print_summary=True) 
Example 49
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: utils.py    MIT License
def MinMaxBestBaseStacking(input_folder, best_base, output_path):
    sub_base = pd.read_csv(best_base)
    all_files = os.listdir(input_folder)

    # Read and concatenate submissions
    outs = [pd.read_csv(os.path.join(input_folder, f), index_col=0) for f in all_files]
    concat_sub = pd.concat(outs, axis=1)
    cols = list(map(lambda x: "is_iceberg_" + str(x), range(len(concat_sub.columns))))
    concat_sub.columns = cols
    concat_sub.reset_index(inplace=True)

    # get the data fields ready for stacking
    concat_sub['is_iceberg_max'] = concat_sub.iloc[:, 1:6].max(axis=1)
    concat_sub['is_iceberg_min'] = concat_sub.iloc[:, 1:6].min(axis=1)
    concat_sub['is_iceberg_mean'] = concat_sub.iloc[:, 1:6].mean(axis=1)
    concat_sub['is_iceberg_median'] = concat_sub.iloc[:, 1:6].median(axis=1)

    # set up cutoff thresholds for the lower and upper bounds; easy to tweak
    cutoff_lo = 0.67
    cutoff_hi = 0.33

    concat_sub['is_iceberg_base'] = sub_base['is_iceberg']
    concat_sub['is_iceberg'] = np.where(np.all(concat_sub.iloc[:, 1:6] > cutoff_lo, axis=1),
                                        concat_sub['is_iceberg_max'],
                                        np.where(np.all(concat_sub.iloc[:, 1:6] < cutoff_hi, axis=1),
                                                 concat_sub['is_iceberg_min'],
                                                 concat_sub['is_iceberg_base']))
    concat_sub[['id', 'is_iceberg']].to_csv(output_path,
                                            index=False, float_format='%.12f') 
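
The nested np.where above encodes a simple blending rule: when every base submission is confidently high take the row-wise max, when every one is confidently low take the min, and otherwise fall back to the best single submission. A toy version of the same rule in plain numpy (the probabilities are made up):

import numpy as np

preds = np.array([[0.9, 0.8, 0.95],   # all high  -> take the max
                  [0.1, 0.2, 0.05],   # all low   -> take the min
                  [0.4, 0.7, 0.6]])   # mixed     -> keep the base prediction
base = np.array([0.85, 0.15, 0.55])

upper, lower = 0.67, 0.33  # named cutoff_lo / cutoff_hi (in that order) in the example above
blended = np.where(np.all(preds > upper, axis=1), preds.max(axis=1),
                   np.where(np.all(preds < lower, axis=1), preds.min(axis=1),
                            base))
print(blended)  # [0.95 0.05 0.55]
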
Example 50
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: utils.py    MIT License 5 votes vote down vote up
def getStatoilTrainValLoaders(args):
    fixSeed(args)
    local_data = pd.read_json('/home/adodd202/train.json')

    local_data = shuffle(local_data)  # otherwise same validation set each time!
    local_data = local_data.reindex(np.random.permutation(local_data.index))

    local_data['band_1'] = local_data['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
    local_data['band_2'] = local_data['band_2'].apply(lambda x: np.array(x).reshape(75, 75))
    local_data['inc_angle'] = pd.to_numeric(local_data['inc_angle'], errors='coerce')
    local_data['inc_angle'].fillna(0, inplace=True)

    band_1 = np.concatenate([im for im in local_data['band_1']]).reshape(-1, 75, 75)
    band_2 = np.concatenate([im for im in local_data['band_2']]).reshape(-1, 75, 75)
    # band_3=(band_1+band_2)/2
    local_full_img = np.stack([band_1, band_2], axis=1)

    train_imgs = XnumpyToTensor(local_full_img, args)
    train_targets = YnumpyToTensor(local_data['is_iceberg'].values, args)
    dset_train = TensorDataset(train_imgs, train_targets)

    local_train_ds, local_val_ds = trainTestSplit(dset_train, args.validationRatio)
    local_train_loader = torch.utils.data.DataLoader(local_train_ds, batch_size=args.batch_size, shuffle=False,
                                                     num_workers=args.workers)
    local_val_loader = torch.utils.data.DataLoader(local_val_ds, batch_size=args.batch_size, shuffle=False,
                                                   num_workers=args.workers)
    return local_train_loader, local_val_loader, local_train_ds, local_val_ds 
Example 51
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet.py    MIT License 5 votes vote down vote up
def load_icebergs():

    #Import data
    train = pd.read_json('../train.json') #online is '../train.json'
    #train = pd.read_json('/Users/adodd202/Documents/GitHub/Statoil_Data/train.json')
    y_old=train['is_iceberg']

    y = np.zeros((y_old.size, 2))
    y[np.arange(y_old.size),y_old] = 1

    #Generate the training data
    x_band_1=-1*np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in train["band_1"]])
    # x_band_1= x_band_1/np.amax(x_band_1)
    x_band_2=-1*np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in train["band_2"]])
    # x_band_2= x_band_2/np.amax(x_band_2)
    x_band_3=(x_band_1+x_band_2)/2
    # x_band_3= x_band_3/np.amax(x_band_3)
    #X_band_3=np.array([np.full((75, 75), angel).astype(np.float32) for angel in train["inc_angle"]])
    x_data = np.concatenate([x_band_1[:, :, :, np.newaxis]
                          , x_band_2[:, :, :, np.newaxis]
                         , x_band_3[:, :, :, np.newaxis]], axis=-1)

    #lets multiply by -1, and divide by max value for each pic

    # print (type(y))
    # print (y.shape)
    # print (type(x_data))
    # print (x_data.shape)

    x_train,x_test,y_train,y_test = train_test_split(x_data,y,test_size=0.2)

    return (x_train, y_train), (x_test, y_test) 
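
The concatenation in load_icebergs turns three single-band images of shape (N, 75, 75) into one channels-last array of shape (N, 75, 75, 3) by giving each band a trailing axis first. A toy sketch of the same pattern (the band contents are made up):

import numpy as np

n = 4
band_1 = np.random.rand(n, 75, 75).astype(np.float32)
band_2 = np.random.rand(n, 75, 75).astype(np.float32)
band_3 = (band_1 + band_2) / 2

x = np.concatenate([band_1[:, :, :, np.newaxis],
                    band_2[:, :, :, np.newaxis],
                    band_3[:, :, :, np.newaxis]], axis=-1)
print(x.shape)  # (4, 75, 75, 3)

# np.stack expresses the same thing without the manual np.newaxis:
assert np.array_equal(x, np.stack([band_1, band_2, band_3], axis=-1))
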
Example 52
Project: subword-qac   Author: clovaai   File: metric.py    MIT License 5 votes vote down vote up
def mrl_summary(recover_lengths, seens, n_candidates):
    recover_lengths = np.array(recover_lengths)
    seens = np.array(seens)
    mrl = np.concatenate((recover_lengths[seens == 1].mean(0).reshape((1, -1)),
                          recover_lengths[seens == 0].mean(0).reshape((1, -1)),
                          recover_lengths.mean(0).reshape((1, -1))), 0)

    logs = []
    for i in range(1, n_candidates + 1):
        i_str = ' '.join(f"{mrl[s, i]:.4f} ({seen_str})" for s, seen_str in enumerate(['seen', 'unseen', 'all']))
        logs.append(f"mrl @{i:-2d}: {i_str}")
    logs.append(" ")
    return logs 
Example 53
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __init__(self, transform=None, target_transform=None, filename="adv_set_e_2.p", transp = False):
        """

        :param transform:
        :param target_transform:
        :param filename:
        :param transp: stored as self.shuff; set False for PGD-based attacks
        :return:
        """
        self.transform = transform
        self.target_transform = target_transform
        self.adv_dict={}
        self.adv_dict["adv_input"]=None
        self.adv_dict["adv_labels"]= None

        for i in range(16):
            if("Test" in filename):
                print('OK')
                new_adv_dict=pickle.load(open(filename.split(".")[0]+str(i)+"."+filename.split(".")[1],"rb"))
            else:
                new_adv_dict=pickle.load(open(filename.split(".")[0]+"_"+str(i)+"."+filename.split(".")[1],"rb"))

            if(self.adv_dict["adv_input"] is None):
                self.adv_dict["adv_input"] = (new_adv_dict["adv_input"])
                self.adv_dict["adv_labels"] = (new_adv_dict["adv_labels"])
            else:
                 self.adv_dict["adv_input"] = np.concatenate((new_adv_dict["adv_input"],self.adv_dict["adv_input"]))
                 self.adv_dict["adv_labels"] = np.concatenate((new_adv_dict["adv_labels"],self.adv_dict["adv_labels"]))

        self.adv_flat=self.adv_dict["adv_input"]
        self.num_adv=np.shape(self.adv_flat)[0]
        self.shuff = transp
        self.sample_num = 0 
Example 54
Project: neural-fingerprinting   Author: StephanZheng   File: gen_noisy.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def evaluate_checkpoint(sess,model):
    dataset = 'cifar'

    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples=[]
    adv_y_samples=[]
    for ibatch in range(num_batches):
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)

      x_batch = mnist.test.images[bstart:bend,:]
      y_batch = mnist.test.labels[bstart:bend]

      x_batch_adv = attack.perturb(x_batch, y_batch, sess)
      if(ibatch == 0):
          adv_x_samples = x_batch_adv
          adv_y_samples = y_batch
      else:
          adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis = 0)
          adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis = 0)
    if(args.attack == 'xent'):
      atck = 'pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif(args.attack == 'cw_pgd'):
      atck = 'cw_pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
      f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input":adv_x_samples,"adv_labels":adv_y_samples},f)
    f.close() 
Example 55
Project: neural-fingerprinting   Author: StephanZheng   File: gen_whitebox_adv.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def evaluate_checkpoint(sess,model):
    dataset = 'cifar'

    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples=[]
    adv_y_samples=[]
    for ibatch in range(num_batches):
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)

      x_batch = mnist.test.images[bstart:bend,:]
      y_batch = mnist.test.labels[bstart:bend]

      x_batch_adv = attack.perturb(x_batch, y_batch, sess)
      if(ibatch == 0):
          adv_x_samples = x_batch_adv
          adv_y_samples = y_batch
      else:
          adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis = 0)
          adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis = 0)
    if(args.attack == 'xent'):
      atck = 'pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif(args.attack == 'cw_pgd'):
      atck = 'cw_pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
      f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input":adv_x_samples,"adv_labels":adv_y_samples},f)
    f.close() 
Example 56
Project: neural-fingerprinting   Author: StephanZheng   File: gen_whitebox_adv.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def evaluate_checkpoint(sess,model):
    dataset = 'mnist'

    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples=[]
    adv_y_samples=[]
    for ibatch in range(num_batches):
      bstart = ibatch * eval_batch_size
      bend = min(bstart + eval_batch_size, num_eval_examples)

      x_batch = mnist.test.images[bstart:bend,:]
      y_batch = mnist.test.labels[bstart:bend]

      dict_nat = {model.x_input: x_batch,
                  model.y_input: y_batch}

      x_batch_adv = attack.perturb(x_batch, y_batch, sess)
      if(ibatch == 0):
          adv_x_samples = x_batch_adv
          adv_y_samples = y_batch
      else:
          adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis = 0)
          adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis = 0)
    if(args.attack == 'xent'):
      atck = 'pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif(args.attack == 'cw_pgd'):
      atck = 'cw_pgd'
      f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
      f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input":adv_x_samples,"adv_labels":adv_y_samples},f)
    f.close() 
Example 57
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def kmean_pca_batch(data, batch, k=10):
    data = np.asarray(data, dtype=np.float32)
    batch = np.asarray(batch, dtype=np.float32)
    a = np.zeros(batch.shape[0])
    for i in np.arange(batch.shape[0]):
        tmp = np.concatenate((data, [batch[i]]))
        tmp_pca = PCA(n_components=2).fit_transform(tmp)
        a[i] = mle_single(tmp_pca[:-1], tmp_pca[-1], k=k)
    return a 
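
Note how batch[i] is wrapped in a list above: data has shape (n, d) and batch[i] has shape (d,), so np.concatenate((data, [batch[i]])) treats the single sample as a (1, d) block and appends it as a new row. A minimal illustration (the shapes are made up):

import numpy as np

data = np.zeros((5, 3))
row = np.ones(3)

appended = np.concatenate((data, [row]))            # [row] has shape (1, 3)
print(appended.shape)                               # (6, 3)

# equivalent, and a bit more explicit:
appended2 = np.concatenate((data, row[np.newaxis, :]), axis=0)
print(np.array_equal(appended, appended2))          # True
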
Example 58
Project: neural-fingerprinting   Author: StephanZheng   File: utils_cifar.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def read_CIFAR100(data_folder):
    """ Reads and parses examples from CIFAR100 python data files """

    train_img = []
    train_label = []
    test_img = []
    test_label = []

    train_file_list = ["cifar-100-python/train"]
    test_file_list = ["cifar-100-python/test"]

    tmp_dict = unpickle(os.path.join(data_folder, train_file_list[0]))
    train_img.append(tmp_dict["data"])
    train_label.append(tmp_dict["fine_labels"])

    tmp_dict = unpickle(os.path.join(data_folder, test_file_list[0]))
    test_img.append(tmp_dict["data"])
    test_label.append(tmp_dict["fine_labels"])

    train_img = np.concatenate(train_img)
    train_label = np.concatenate(train_label)
    test_img = np.concatenate(test_img)
    test_label = np.concatenate(test_label)

    train_img = np.reshape(
        train_img, [NUM_TRAIN_IMG, NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
    test_img = np.reshape(
        test_img, [NUM_TEST_IMG, NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])

    # change format from [B, C, H, W] to [B, H, W, C] for feeding to Tensorflow
    train_img = np.transpose(train_img, [0, 2, 3, 1])
    test_img = np.transpose(test_img, [0, 2, 3, 1])
    mean_img = np.mean(np.concatenate([train_img, test_img]), axis=0)

    CIFAR100_data = {}
    CIFAR100_data["train_img"] = train_img - mean_img
    CIFAR100_data["test_img"] = test_img - mean_img
    CIFAR100_data["train_label"] = train_label
    CIFAR100_data["test_label"] = test_label

    return CIFAR100_data 
Example 59
Project: voice-recognition   Author: golabies   File: filters.py    MIT License 5 votes vote down vote up
def smooth(signal, wsz):
        out0 = np.convolve(signal, np.ones(wsz, dtype=float), 'valid') / wsz
        r = np.arange(1, wsz - 1, 2)
        start = np.cumsum(signal[:wsz - 1])[::2] / r
        stop = (np.cumsum(signal[:-wsz:-1])[::2] / r)[::-1]
        return np.concatenate((start, out0, stop)) 
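
The smoothing above is a centered moving average: np.convolve with mode 'valid' covers the interior, the two cumulative-sum expressions build shrinking-window averages for the left and right edges, and concatenating the three pieces returns an output the same length as the input (for an odd window size). A quick check, assuming the smooth function above is in scope (the signal and window size are made up):

import numpy as np

signal = np.sin(np.linspace(0, 4 * np.pi, 50)) + 0.1 * np.random.randn(50)
smoothed = smooth(signal, 5)   # odd window so the edge pieces line up

print(signal.shape, smoothed.shape)  # (50,) (50,)
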
Example 60
Project: tensorflow-DeepFM   Author: ChenglongChen   File: DeepFM.py    MIT License 5 votes vote down vote up
def predict(self, Xi, Xv):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :return: predicted probability of each sample
        """
        # dummy y
        dummy_y = [1] * len(Xi)
        batch_index = 0
        Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
        y_pred = None
        while len(Xi_batch) > 0:
            num_batch = len(y_batch)
            feed_dict = {self.feat_index: Xi_batch,
                         self.feat_value: Xv_batch,
                         self.label: y_batch,
                         self.dropout_keep_fm: [1.0] * len(self.dropout_fm),
                         self.dropout_keep_deep: [1.0] * len(self.dropout_deep),
                         self.train_phase: False}
            batch_out = self.sess.run(self.out, feed_dict=feed_dict)

            if batch_index == 0:
                y_pred = np.reshape(batch_out, (num_batch,))
            else:
                y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))

            batch_index += 1
            Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)

        return y_pred 
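
Growing y_pred with a fresh np.concatenate per batch works, but every call copies the whole accumulated array. A common alternative, shown here as a sketch with made-up batch outputs, is to collect the per-batch arrays in a list and concatenate once at the end:

import numpy as np

batch_outputs = [np.random.rand(32) for _ in range(10)]  # stand-ins for per-batch predictions

y_pred = np.concatenate(batch_outputs)  # one copy at the end instead of one per batch
print(y_pred.shape)  # (320,)
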
Example 61
Project: models   Author: kipoi   File: dataloader.py    MIT License 5 votes vote down vote up
def __next__(self):
        super_out = super().__next__()
        return {
            'inputs': np.concatenate([
                model.predict(super_out['inputs_mut']).values,
                model.predict(super_out['inputs']).values
            ]),
            'metadata': super_out['metadata']
        } 
Example 62
Project: models   Author: kipoi   File: model.py    MIT License 5 votes vote down vote up
def predict_on_batch(self, inputs):
        '''inputs shape (,10), corresponding to 5 module predictions of mut and wt'''
        X_alt, X_ref = inputs[:, 5:], inputs[:, :-5]
        X = transform(X_alt - X_ref, True)
        X = np.concatenate([X_ref, X_alt, X[:, -3:]], axis=-1)
        return LOGISTIC_MODEL.predict_proba(X) 
Example 63
Project: models   Author: kipoi   File: dataloader.py    MIT License 5 votes vote down vote up
def __next__(self):
        super_out = super().__next__()
        return {
            'inputs': np.concatenate([
                model.predict(super_out['inputs_mut']).values,
                model.predict(super_out['inputs']).values
            ]),
            'metadata': super_out['metadata']
        } 
Example 64
Project: models   Author: kipoi   File: dataloader.py    MIT License 5 votes vote down vote up
def __next__(self):
        super_out = super().__next__()
        return {
            'inputs': np.concatenate([
                model.predict(super_out['inputs_mut']).values,
                model.predict(super_out['inputs']).values
            ]),
            'metadata': super_out['metadata']
        } 
Example 65
Project: oceanmapper   Author: vtamsitt   File: oceanmapper.py    Apache License 2.0 4 votes vote down vote up
def topography3d(mode,topo_x=None,topo_y=None,topo_z=None,topo_limits=None,zscale=500.,topo_vmin=None,topo_vmax=None,topo_cmap='bone',topo_cmap_reverse=False,land_constant=False,land_color=(0.7,0.7,0.7),set_view=None):
    """
    mode: string; coordinate system of 3D projection. Options are 'rectangle' (default), 'sphere' or 'cylinder'
    topo_x, topo_y, topo_z: array_like, optional; input topography grid, default is the bundled ETOPO1 30-minute file
##TODO: need to define sign of topography 
    topo_limits: array_like, optional; longitude and latitude limits for 3d topography plot [lon min, lon max, lat min, lat max], longitudes range -180 to 180, latitude -90 to 90, default is entire globe
    zscale: scalar, optional; change vertical scaling for plotting, such that the vertical axis is scaled as topo_z/zscale (assumes topo_z units are m); default zscale is 500
    topo_cmap: string, optional; set colormap for topography, default is bone 
    topo_cmap_reverse: bool, optional; reverse topography colormap, default is False
    land_constant: bool, optional; if True, land is set to one colour, default is False
    land_color: color, optional; RGB triplet specifying land colour, default is grey
    set_view: array_like, optional; set the mayavi camera angle with input [azimuth, elevation, distance, focal point], default is None 
    """
        
    #load topo data
    if topo_x is not None and topo_y is not None and topo_z is not None:
        xraw = topo_x
        yraw = topo_y
        zraw = topo_z

    else:
        tfile = np.load('etopo1_30min.npz')
        xraw = tfile['x']
        yraw = tfile['y']
        zraw = np.swapaxes(tfile['z'][:,:],0,1)
    

    #create coordinate variables
    phi = (yraw[:]*np.pi*2)/360.+np.pi/2.
    theta = (xraw[:]*np.pi*2)/360.
    c = zraw
    theta=np.append(theta,theta[0])
    c = np.concatenate((c,np.expand_dims(c[0,:],axis=0)),axis=0)

    if topo_vmin is None:
        tvmin = 0
    else:
        tvmin = topo_vmin
    if topo_vmax is None:
        tvmax = 7000
    else:
        tvmax = topo_vmax
   
    if topo_limits is not None:
        phi_1 = topo_limits[2]
        phi_2 = topo_limits[3]
        theta_1 = topo_limits[0]
        theta_2 = topo_limits[1]

        phi_ind1 = np.argmin(np.abs(yraw-phi_1))
        phi_ind2 = np.argmin(np.abs(yraw-phi_2))
        theta_ind1 = np.argmin(np.abs(xraw-theta_1))
        theta_ind2 = np.argmin(np.abs(xraw-theta_2))

        #restrict topo extent
        phi=phi[phi_ind1:phi_ind2]
        theta=theta[theta_ind1:theta_ind2]
        c = c[theta_ind1:theta_ind2:,phi_ind1:phi_ind2] 
Example 66
Project: b2ac   Author: hbldh   File: reference.py    MIT License 4 votes vote down vote up
def fit_improved_B2AC_numpy(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting simply applies NumPy:s methods for calculating
    the conic section, modelled after the Matlab code in the paper:

    .. code-block::

        function a = fit_ellipse(x, y)

        D1 = [x .^ 2, x .* y, y .^ 2]; % quadratic part of the design matrix
        D2 = [x, y, ones(size(x))]; % linear part of the design matrix
        S1 = D1' * D1; % quadratic part of the scatter matrix
        S2 = D1' * D2; % combined part of the scatter matrix
        S3 = D2' * D2; % linear part of the scatter matrix
        T = - inv(S3) * S2'; % for getting a2 from a1
        M = S1 + S2 * T; % reduced scatter matrix
        M = [M(3, :) ./ 2; - M(2, :); M(1, :) ./ 2]; % premultiply by inv(C1)
        [evec, eval] = eig(M); % solve eigensystem
        cond = 4 * evec(1, :) .* evec(3, :) - evec(2, :) .^ 2; % evaluate a'Ca
        a1 = evec(:, find(cond > 0)); % eigenvector for min. pos. eigenvalue
        a = [a1; T * a1]; % ellipse coefficients

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    x = points[:, 0]
    y = points[:, 1]

    D1 = np.vstack([x ** 2, x * y, y ** 2]).T
    D2 = np.vstack([x, y, np.ones((len(x), ), dtype=x.dtype)]).T
    S1 = D1.T.dot(D1)
    S2 = D1.T.dot(D2)
    S3 = D2.T.dot(D2)
    T = -np.linalg.inv(S3).dot(S2.T)
    M = S1 + S2.dot(T)
    M = np.array([M[2, :] / 2, -M[1, :], M[0, :] / 2])
    eval, evec = np.linalg.eig(M)
    cond = (4 * evec[:, 0] * evec[:, 2]) - (evec[:, 1] ** 2)
    I = np.where(cond > 0)[0]
    a1 = evec[:, I[np.argmin(cond[I])]]
    return np.concatenate([a1, T.dot(a1)]) 
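
A hypothetical smoke test for the fitting routine above, assuming fit_improved_B2AC_numpy is in scope; the ellipse parameters are made up. The returned conic coefficients [A, B, C, D, E, F] should make A*x^2 + B*x*y + C*y^2 + D*x + E*y + F vanish (up to numerical error) on the sampled points:

import numpy as np

t = np.linspace(0, 2 * np.pi, 200, endpoint=False)
a, b, cx, cy, angle = 5.0, 2.0, 1.0, -3.0, np.deg2rad(30)
x = cx + a * np.cos(t) * np.cos(angle) - b * np.sin(t) * np.sin(angle)
y = cy + a * np.cos(t) * np.sin(angle) + b * np.sin(t) * np.cos(angle)
points = np.column_stack([x, y])

A, B, C, D, E, F = fit_improved_B2AC_numpy(points)
residual = A * x**2 + B * x * y + C * y**2 + D * x + E * y + F
print(np.max(np.abs(residual)))  # should be close to zero
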
Example 67
Project: b2ac   Author: hbldh   File: overlap_functions.py    MIT License 4 votes vote down vote up
def quickhull(sample):
    """Calculates the convex hull of an arbitrary 2D point cloud.

    This is a pure Python version of the Quick Hull algorithm.
    It's based on the version of ``literateprograms``, but fixes some
    old-style Numeric function calls.

    This version works with numpy version > 1.2.1

    References:

    * `Literateprograms <http://en.literateprograms.org/Quickhull_(Python,_arrays)>`_
    * `Wikipedia <http://en.wikipedia.org/wiki/QuickHull>`_

    Code adapted from:

    `<http://members.home.nl/wim.h.bakker/python/quickhull2d.py>`_

    :param sample: Points to which the convex hull is desired to be found.
    :type sample: :py:class:`numpy.ndarray`
    :return: The convex hull of the points.
    :rtype: :py:class:`numpy.ndarray`

    """

    def calculate_convex_hull(sample):
        link = lambda a, b: np.concatenate((a, b[1:]))
        edge = lambda a, b: np.concatenate(([a], [b]))

        def dome(sample, base):
            h, t = base
            dists = np.dot(sample-h, np.dot(((0, -1), (1, 0)), (t - h)))
            outer = np.repeat(sample, dists > 0, axis=0)

            if len(outer):
                pivot = sample[np.argmax(dists)]
                return link(dome(outer, edge(h, pivot)),
                            dome(outer, edge(pivot, t)))
            else:
                return base

        if len(sample) > 2:
            axis = sample[:, 0]
            base = np.take(sample, [np.argmin(axis), np.argmax(axis)], axis=0)
            return link(dome(sample, base),
                        dome(sample, base[::-1]))
        else:
            return sample

    # Perform a reversal of points here to get points ordered clockwise instead of
    # counter clockwise that the QuickHull above returns.
    return calculate_convex_hull(sample)[::-1, :] 
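
A small usage sketch for quickhull, assuming the function above is in scope; the point cloud is made up. The two np.concatenate lambdas (link and edge) are what stitch the upper and lower "domes" of the hull together:

import numpy as np

np.random.seed(0)
points = np.random.rand(100, 2)
hull = quickhull(points)

print(hull.shape)  # (k, 2) array of hull vertices, ordered clockwise
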
Example 68
Project: comet-commonsense   Author: atcbosselut   File: gpt.py    Apache License 2.0 4 votes vote down vote up
def load_openai_pretrained_model(model, n_ctx=-1, n_special=-1, n_transfer=12,
                                 n_embd=768, path='./model/', path_names='./'):
    # Load weights from TF model
    print("Loading weights...")
    names = json.load(open(path_names + 'parameters_names.json'))
    shapes = json.load(open(path + 'params_shapes.json'))
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
    init_params = [np.load(path + 'params_{}.npy'.format(n)) for n in range(10)]
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
    if n_ctx > 0:
        init_params[0] = init_params[0][:n_ctx]
    if n_special > 0:
        init_params[0] = np.concatenate(
            [init_params[1],
             (np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),
             init_params[0]
             ], 0)
    else:
        init_params[0] = np.concatenate(
            [init_params[1],
             init_params[0]
             ], 0)
    del init_params[1]
    if n_transfer == -1:
        n_transfer = 0
    else:
        n_transfer = 1 + n_transfer * 12
    init_params = [arr.squeeze() for arr in init_params]

    try:
        assert model.embed.weight.shape == init_params[0].shape
    except AssertionError as e:
        e.args += (model.embed.weight.shape, init_params[0].shape)
        raise

    model.embed.weight.data = torch.from_numpy(init_params[0])

    for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):
        name = name[6:]  # skip "model/"
        assert name[-2:] == ":0"
        name = name[:-2]
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == ip.shape
        except AssertionError as e:
            e.args += (pointer.shape, ip.shape)
            raise
        pointer.data = torch.from_numpy(ip) 
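
The shard loading above leans on a concatenate-then-split idiom: the params_{n}.npy shards are joined into one flat buffer, np.split cuts that buffer at the cumulative sizes of the published shapes, and each slice is reshaped back to its parameter shape. A toy version of the idiom (the shapes and shard boundaries are made up):

import numpy as np

shapes = [(2, 3), (4,), (5, 2)]                    # pretend parameter shapes
offsets = np.cumsum([np.prod(s) for s in shapes])  # [ 6 10 20]

flat = np.arange(20, dtype=np.float32)
shards = [flat[:8], flat[8:]]                      # arbitrary shard boundaries

params = np.split(np.concatenate(shards, 0), offsets)[:-1]
params = [p.reshape(s) for p, s in zip(params, shapes)]
print([p.shape for p in params])  # [(2, 3), (4,), (5, 2)]
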
Example 69
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: flow.py    MIT License 4 votes vote down vote up
def predict(self):
    inp_path = self.FLAGS.imgdir
    all_inps = os.listdir(inp_path)
    all_inps = [i for i in all_inps if self.framework.is_inp(i)]
    if not all_inps:
        msg = 'Failed to find any images in {} .'
        exit('Error: {}'.format(msg.format(inp_path)))

    batch = min(self.FLAGS.batch, len(all_inps))

    # predict in batches
    n_batch = int(math.ceil(len(all_inps) / batch))
    for j in range(n_batch):
        from_idx = j * batch
        to_idx = min(from_idx + batch, len(all_inps))

        # collect images input in the batch
        this_batch = all_inps[from_idx:to_idx]
        inp_feed = pool.map(lambda inp: (
            np.expand_dims(self.framework.preprocess(
                os.path.join(inp_path, inp)), 0)), this_batch)

        # Feed to the net
        feed_dict = {self.inp : np.concatenate(inp_feed, 0)}    
        self.say('Forwarding {} inputs ...'.format(len(inp_feed)))
        start = time.time()
        out = self.sess.run(self.out, feed_dict)
        stop = time.time(); last = stop - start
        self.say('Total time = {}s / {} inps = {} ips'.format(
            last, len(inp_feed), len(inp_feed) / last))

        # Post processing
        self.say('Post processing {} inputs ...'.format(len(inp_feed)))
        start = time.time()
        pool.map(lambda p: (lambda i, prediction:
            self.framework.postprocess(
               prediction, os.path.join(inp_path, this_batch[i])))(*p),
            enumerate(out))
        stop = time.time(); last = stop - start

        # Timing
        self.say('Total time = {}s / {} inps = {} ips'.format(
            last, len(inp_feed), len(inp_feed) / last)) 
Example 70
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: misc.py    MIT License 4 votes vote down vote up
def profile(self, net):
    pass
#     data = self.parse(exclusive = True)
#     size = len(data); batch = self.FLAGS.batch
#     all_inp_ = [x[0] for x in data]
#     net.say('Will cycle through {} examples {} times'.format(
#         len(all_inp_), net.FLAGS.epoch))

#     fetch = list(); mvave = list(); names = list();
#     this = net.top
#     conv_lay = ['convolutional', 'connected', 'local', 'conv-select']
#     while this.inp is not None:
#         if this.lay.type in conv_lay:
#             fetch = [this.out] + fetch
#             names = [this.lay.signature] + names
#             mvave = [None] + mvave 
#         this = this.inp
#     print(names)

#     total = int(); allofthem = len(all_inp_) * net.FLAGS.epoch
#     batch = min(net.FLAGS.batch, len(all_inp_))
#     for count in range(net.FLAGS.epoch):
#         net.say('EPOCH {}'.format(count))
#         for j in range(len(all_inp_)/batch):
#             inp_feed = list(); new_all = list()
#             all_inp = all_inp_[j*batch: (j*batch+batch)]
#             for inp in all_inp:
#                 new_all += [inp]
#                 this_inp = os.path.join(net.FLAGS.dataset, inp)
#                 this_inp = net.framework.preprocess(this_inp)
#                 expanded = np.expand_dims(this_inp, 0)
#                 inp_feed.append(expanded)
#             all_inp = new_all
#             feed_dict = {net.inp : np.concatenate(inp_feed, 0)}
#             out = net.sess.run(fetch, feed_dict)

#             for i, o in enumerate(out):
#                 oi = out[i];
#                 dim = len(oi.shape) - 1
#                 ai = mvave[i]; 
#                 mi = np.mean(oi, tuple(range(dim)))
#                 vi = np.var(oi, tuple(range(dim)))
#                 if ai is None: mvave[i] = [mi, vi]
#                 elif 'banana ninja yada yada':
#                     ai[0] = (1 - _MVA) * ai[0] + _MVA * mi
#                     ai[1] = (1 - _MVA) * ai[1] + _MVA * vi
#             total += len(inp_feed)
#             net.say('{} / {} = {}%'.format(
#                 total, allofthem, 100. * total / allofthem))

#         with open('profile', 'wb') as f:
#             pickle.dump([mvave], f, protocol = -1) 
Example 71
Project: mmdetection   Author: open-mmlab   File: inference.py    Apache License 2.0 4 votes vote down vote up
def show_result(img,
                result,
                class_names,
                score_thr=0.3,
                wait_time=0,
                show=True,
                out_file=None):
    """Visualize the detection results on the image.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str] or tuple[str]): A list of class names.
        score_thr (float): The threshold to visualize the bboxes and masks.
        wait_time (int): Value of waitKey param.
        show (bool, optional): Whether to show the image with opencv or not.
        out_file (str, optional): If specified, the visualization result will
            be written to the out file instead of shown in a window.

    Returns:
        np.ndarray or None: If neither `show` nor `out_file` is specified, the
            visualized image is returned, otherwise None is returned.
    """
    assert isinstance(class_names, (tuple, list))
    img = mmcv.imread(img)
    img = img.copy()
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(np.bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # draw bounding boxes
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    mmcv.imshow_det_bboxes(
        img,
        bboxes,
        labels,
        class_names=class_names,
        score_thr=score_thr,
        show=show,
        wait_time=wait_time,
        out_file=out_file)
    if not (show or out_file):
        return img 
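
For context, a hypothetical end-to-end call in the style of the mmdetection 1.x API that this helper belongs to; the config, checkpoint, and image paths below are placeholders, not files shipped with this example:

# hedged sketch; paths are placeholders
from mmdet.apis import init_detector, inference_detector

config_file = 'configs/faster_rcnn_r50_fpn_1x.py'
checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x.pth'

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')
show_result('demo/demo.jpg', result, model.CLASSES, score_thr=0.3,
            out_file='demo/demo_result.jpg')
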
Example 72
Project: mmdetection   Author: open-mmlab   File: base.py    Apache License 2.0 4 votes vote down vote up
def show_result(self, data, result, dataset=None, score_thr=0.3):
        if isinstance(result, tuple):
            bbox_result, segm_result = result
        else:
            bbox_result, segm_result = result, None

        img_tensor = data['img'][0]
        img_metas = data['img_meta'][0].data[0]
        imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
        assert len(imgs) == len(img_metas)

        if dataset is None:
            class_names = self.CLASSES
        elif isinstance(dataset, str):
            class_names = get_classes(dataset)
        elif isinstance(dataset, (list, tuple)):
            class_names = dataset
        else:
            raise TypeError(
                'dataset must be a valid dataset name or a sequence'
                ' of class names, not {}'.format(type(dataset)))

        for img, img_meta in zip(imgs, img_metas):
            h, w, _ = img_meta['img_shape']
            img_show = img[:h, :w, :]

            bboxes = np.vstack(bbox_result)
            # draw segmentation masks
            if segm_result is not None:
                segms = mmcv.concat_list(segm_result)
                inds = np.where(bboxes[:, -1] > score_thr)[0]
                for i in inds:
                    color_mask = np.random.randint(
                        0, 256, (1, 3), dtype=np.uint8)
                    mask = maskUtils.decode(segms[i]).astype(np.bool)
                    img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
            # draw bounding boxes
            labels = [
                np.full(bbox.shape[0], i, dtype=np.int32)
                for i, bbox in enumerate(bbox_result)
            ]
            labels = np.concatenate(labels)
            mmcv.imshow_det_bboxes(
                img_show,
                bboxes,
                labels,
                class_names=class_names,
                score_thr=score_thr) 
Example 73
Project: mmdetection   Author: open-mmlab   File: iou_balanced_neg_sampler.py    Apache License 2.0 4 votes vote down vote up
def _sample_neg(self, assign_result, num_expected, **kwargs):
        neg_inds = torch.nonzero(assign_result.gt_inds == 0)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        else:
            max_overlaps = assign_result.max_overlaps.cpu().numpy()
            # balance sampling for negative samples
            neg_set = set(neg_inds.cpu().numpy())

            if self.floor_thr > 0:
                floor_set = set(
                    np.where(
                        np.logical_and(max_overlaps >= 0,
                                       max_overlaps < self.floor_thr))[0])
                iou_sampling_set = set(
                    np.where(max_overlaps >= self.floor_thr)[0])
            elif self.floor_thr == 0:
                floor_set = set(np.where(max_overlaps == 0)[0])
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
            else:
                floor_set = set()
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
                # for sampling interval calculation
                self.floor_thr = 0

            floor_neg_inds = list(floor_set & neg_set)
            iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
            num_expected_iou_sampling = int(num_expected *
                                            (1 - self.floor_fraction))
            if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
                if self.num_bins >= 2:
                    iou_sampled_inds = self.sample_via_interval(
                        max_overlaps, set(iou_sampling_neg_inds),
                        num_expected_iou_sampling)
                else:
                    iou_sampled_inds = self.random_choice(
                        iou_sampling_neg_inds, num_expected_iou_sampling)
            else:
                iou_sampled_inds = np.array(
                    iou_sampling_neg_inds, dtype=np.int)
            num_expected_floor = num_expected - len(iou_sampled_inds)
            if len(floor_neg_inds) > num_expected_floor:
                sampled_floor_inds = self.random_choice(
                    floor_neg_inds, num_expected_floor)
            else:
                sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int)
            sampled_inds = np.concatenate(
                (sampled_floor_inds, iou_sampled_inds))
            if len(sampled_inds) < num_expected:
                num_extra = num_expected - len(sampled_inds)
                extra_inds = np.array(list(neg_set - set(sampled_inds)))
                if len(extra_inds) > num_extra:
                    extra_inds = self.random_choice(extra_inds, num_extra)
                sampled_inds = np.concatenate((sampled_inds, extra_inds))
            sampled_inds = torch.from_numpy(sampled_inds).long().to(
                assign_result.gt_inds.device)
            return sampled_inds 
Example 74
Project: mmdetection   Author: open-mmlab   File: test_robustness.py    Apache License 2.0 4 votes vote down vote up
def voc_eval_with_return(result_file,
                         dataset,
                         iou_thr=0.5,
                         print_summary=True,
                         only_ap=True):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    mean_ap, eval_results = eval_map(
        det_results,
        gt_bboxes,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        print_summary=print_summary)

    if only_ap:
        eval_results = [{
            'ap': eval_results[i]['ap']
        } for i in range(len(eval_results))]

    return mean_ap, eval_results 
Example 75
Project: mmdetection   Author: open-mmlab   File: analyze_logs.py    Apache License 2.0 4 votes vote down vote up
def plot_curve(log_dicts, args):
    if args.backend is not None:
        plt.switch_backend(args.backend)
    sns.set_style(args.style)
    # if legend is None, use {filename}_{key} as legend
    legend = args.legend
    if legend is None:
        legend = []
        for json_log in args.json_logs:
            for metric in args.keys:
                legend.append('{}_{}'.format(json_log, metric))
    assert len(legend) == (len(args.json_logs) * len(args.keys))
    metrics = args.keys

    num_metrics = len(metrics)
    for i, log_dict in enumerate(log_dicts):
        epochs = list(log_dict.keys())
        for j, metric in enumerate(metrics):
            print('plot curve of {}, metric is {}'.format(
                args.json_logs[i], metric))
            if metric not in log_dict[epochs[0]]:
                raise KeyError('{} does not contain metric {}'.format(
                    args.json_logs[i], metric))

            if 'mAP' in metric:
                xs = np.arange(1, max(epochs) + 1)
                ys = []
                for epoch in epochs:
                    ys += log_dict[epoch][metric]
                ax = plt.gca()
                ax.set_xticks(xs)
                plt.xlabel('epoch')
                plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
            else:
                xs = []
                ys = []
                num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1]
                for epoch in epochs:
                    iters = log_dict[epoch]['iter']
                    if log_dict[epoch]['mode'][-1] == 'val':
                        iters = iters[:-1]
                    xs.append(
                        np.array(iters) + (epoch - 1) * num_iters_per_epoch)
                    ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
                xs = np.concatenate(xs)
                ys = np.concatenate(ys)
                plt.xlabel('iter')
                plt.plot(
                    xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
            plt.legend()
        if args.title is not None:
            plt.title(args.title)
    if args.out is None:
        plt.show()
    else:
        print('save curve to: {}'.format(args.out))
        plt.savefig(args.out)
        plt.cla() 
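
For the iteration-based curves above, each epoch's iteration numbers are shifted by (epoch - 1) * iterations-per-epoch before everything is concatenated into one continuous x axis. A toy sketch of that bookkeeping (the log values are made up):

import numpy as np

num_iters_per_epoch = 100
log = {1: {'iter': [20, 40, 60, 80, 100], 'loss': [0.9, 0.8, 0.7, 0.65, 0.6]},
       2: {'iter': [20, 40, 60, 80, 100], 'loss': [0.55, 0.5, 0.48, 0.46, 0.45]}}

xs, ys = [], []
for epoch, d in log.items():
    xs.append(np.array(d['iter']) + (epoch - 1) * num_iters_per_epoch)
    ys.append(np.array(d['loss']))
xs = np.concatenate(xs)
ys = np.concatenate(ys)
print(xs)  # [ 20  40  60  80 100 120 140 160 180 200]
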
Example 76
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet-multi-gpu.py    MIT License 4 votes vote down vote up
def test(model, data):
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-'*50)
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([x_test[:50],x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-'*50)
    plt.imshow(plt.imread("real_and_recon.png", ))
    plt.show() 
Example 77
Project: neural-fingerprinting   Author: StephanZheng   File: utils_svhn.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def read_SVHN(data_folder):
    """ Reads and parses examples from SVHN data files """

    train_img = []
    train_label = []
    test_img = []
    test_label = []

    train_file_list = [
        'train_32x32.mat', 'extra_32x32.mat'
    ]
    test_file_list = ["test_32x32.mat"]

    for i in xrange(len(train_file_list)):
        tmp_dict = sio.loadmat(os.path.join(data_folder, train_file_list[i]))
        train_img.append(tmp_dict["X"])
        train_label.append(tmp_dict["y"])

    tmp_dict = sio.loadmat(
        os.path.join(data_folder, test_file_list[0]))
    test_img.append(tmp_dict["X"])
    test_label.append(tmp_dict["y"])

    train_img = np.concatenate(train_img, axis=-1)
    train_label = np.concatenate(train_label).flatten()
    test_img = np.concatenate(test_img, axis=-1)
    test_label = np.concatenate(test_label).flatten()

    # change format from [H, W, C, B] to [B, H, W, C] for feeding to Tensorflow
    train_img = np.transpose(train_img, [3, 0, 1, 2])
    test_img = np.transpose(test_img, [3, 0, 1, 2])

    mean_img = np.mean(np.concatenate([train_img, test_img]), axis=0)

    train_img = train_img - mean_img
    test_img = test_img - mean_img
    train_y = train_label - 1  # 0-based label
    test_y = test_label - 1    # 0-based label

    train_label = np.eye(10)[train_y]
    test_label = np.eye(10)[test_y]

    return train_img, train_label, test_img, test_label 
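
Two details above are easy to miss: the SVHN .mat files store images as [H, W, C, B], so the train shards are concatenated along the last (batch) axis before being transposed to [B, H, W, C], and the 1-based labels are shifted down before one-hot encoding with np.eye. A minimal sketch of both steps (array contents are made up):

import numpy as np

shard_a = np.zeros((32, 32, 3, 100), dtype=np.uint8)  # [H, W, C, B] as stored in the .mat files
shard_b = np.zeros((32, 32, 3, 50), dtype=np.uint8)

imgs = np.concatenate([shard_a, shard_b], axis=-1)     # join along the batch axis
imgs = np.transpose(imgs, [3, 0, 1, 2])                # -> [B, H, W, C]
print(imgs.shape)  # (150, 32, 32, 3)

labels = np.array([1, 5, 10])      # SVHN labels run 1..10
one_hot = np.eye(10)[labels - 1]   # shift to 0-based, then one-hot
print(one_hot.argmax(1))           # [0 4 9]
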
Example 78
Project: models   Author: kipoi   File: dataloader.py    MIT License 4 votes vote down vote up
def __getitem__(self, idx):
        if self.fasta_extractor is None:
            # Fasta
            self.fasta_extractor = FastaExtractor(self.fasta_file)
            # DNase
            self.dnase_extractor = BigwigExtractor(self.dnase_file)
            self.mappability_extractor = BigwigExtractor(self.mappability_file)

        # Get the interval
        interval = self.bt[idx]
        if interval.stop - interval.start != self.SEQ_WIDTH:
            center = (interval.start + interval.stop) // 2
            interval.start = center - self.SEQ_WIDTH // 2
            interval.end = center + self.SEQ_WIDTH // 2 + self.SEQ_WIDTH % 2
        # Get the gencode features
        gencode_counts = np.array([v[idx].count for k, v in self.overlap_beds],
                                  dtype=bool)

        # Run the fasta extractor
        seq = np.squeeze(self.fasta_extractor([interval]), axis=0)
        seq_rc = seq[::-1, ::-1]

        # Dnase
        dnase = np.squeeze(self.dnase_extractor([interval], axis=0))[:, np.newaxis]
        dnase[np.isnan(dnase)] = 0  # NA fill
        dnase_rc = dnase[::-1]

        bigwig_list = [seq]
        bigwig_rc_list = [seq_rc]
        mappability = np.squeeze(self.mappability_extractor([interval], axis=0))[:, np.newaxis]
        mappability[np.isnan(mappability)] = 0  # NA fill
        mappability_rc = mappability[::-1]
        bigwig_list.append(mappability)
        bigwig_rc_list.append(mappability_rc)
        bigwig_list.append(dnase)
        bigwig_rc_list.append(dnase_rc)

        ranges = GenomicRanges.from_interval(interval)
        ranges_rc = GenomicRanges.from_interval(interval)
        ranges_rc.strand = "-"

        return {
            "inputs": [
                np.concatenate(bigwig_list, axis=-1),  # stack along the last axis
                np.concatenate(bigwig_rc_list, axis=-1),  # RC version
                gencode_counts
            ],
            "targets": {},  # No Targets
            "metadata": {
                "ranges": ranges,
                "ranges_rc": ranges_rc
            }
        } 
Example 79
Project: models   Author: kipoi   File: dataloader.py    MIT License 4 votes vote down vote up
def __getitem__(self, idx):
        if self.fasta_extractor is None:
            # Fasta
            self.fasta_extractor = FastaExtractor(self.fasta_file)
            # DNase
            self.dnase_extractor = BigwigExtractor(self.dnase_file)
            self.mappability_extractor = BigwigExtractor(self.mappability_file)

        # Get the interval
        interval = self.bt[idx]
        if interval.stop - interval.start != self.SEQ_WIDTH:
            center = (interval.start + interval.stop) // 2
            interval.start = center - self.SEQ_WIDTH // 2
            interval.end = center + self.SEQ_WIDTH // 2 + self.SEQ_WIDTH % 2
        # Get the gencode features
        gencode_counts = np.array([v[idx].count for k, v in self.overlap_beds],
                                  dtype=bool)

        # Run the fasta extractor
        seq = np.squeeze(self.fasta_extractor([interval]), axis=0)
        seq_rc = seq[::-1, ::-1]

        # Dnase
        dnase = np.squeeze(self.dnase_extractor([interval], axis=0))[:, np.newaxis]
        dnase[np.isnan(dnase)] = 0  # NA fill
        dnase_rc = dnase[::-1]

        bigwig_list = [seq]
        bigwig_rc_list = [seq_rc]
        mappability = np.squeeze(self.mappability_extractor([interval], axis=0))[:, np.newaxis]
        mappability[np.isnan(mappability)] = 0  # NA fill
        mappability_rc = mappability[::-1]
        bigwig_list.append(mappability)
        bigwig_rc_list.append(mappability_rc)
        bigwig_list.append(dnase)
        bigwig_rc_list.append(dnase_rc)

        ranges = GenomicRanges.from_interval(interval)
        ranges_rc = GenomicRanges.from_interval(interval)
        ranges_rc.strand = "-"

        return {
            "inputs": [
                np.concatenate(bigwig_list, axis=-1),  # stack along the last axis
                np.concatenate(bigwig_rc_list, axis=-1),  # RC version
                gencode_counts
            ],
            "targets": {},  # No Targets
            "metadata": {
                "ranges": ranges,
                "ranges_rc": ranges_rc
            }
        } 
Example 80
Project: models   Author: kipoi   File: dataloader.py    MIT License 4 votes vote down vote up
def __getitem__(self, idx):
        if self.fasta_extractor is None:
            # Fasta
            self.fasta_extractor = FastaExtractor(self.fasta_file)
            # DNase
            self.dnase_extractor = BigwigExtractor(self.dnase_file)
            self.mappability_extractor = BigwigExtractor(self.mappability_file)

        # Get the interval
        interval = self.bt[idx]
        if interval.stop - interval.start != self.SEQ_WIDTH:
            center = (interval.start + interval.stop) // 2
            interval.start = center - self.SEQ_WIDTH // 2
            interval.end = center + self.SEQ_WIDTH // 2 + self.SEQ_WIDTH % 2

        # Run the fasta extractor
        seq = np.squeeze(self.fasta_extractor([interval]), axis=0)
        seq_rc = seq[::-1, ::-1]

        # Dnase
        dnase = np.squeeze(self.dnase_extractor([interval], axis=0))[:, np.newaxis]
        dnase[np.isnan(dnase)] = 0  # NA fill
        dnase_rc = dnase[::-1]

        bigwig_list = [seq]
        bigwig_rc_list = [seq_rc]
        mappability = np.squeeze(self.mappability_extractor([interval], axis=0))[:, np.newaxis]
        mappability[np.isnan(mappability)] = 0  # NA fill
        mappability_rc = mappability[::-1]
        bigwig_list.append(mappability)
        bigwig_rc_list.append(mappability_rc)
        bigwig_list.append(dnase)
        bigwig_rc_list.append(dnase_rc)

        ranges = GenomicRanges.from_interval(interval)
        ranges_rc = GenomicRanges.from_interval(interval)
        ranges_rc.strand = "-"

        return {
            "inputs": [
                np.concatenate(bigwig_list, axis=-1),  # stack along the last axis
                np.concatenate(bigwig_rc_list, axis=-1),  # RC version
            ],
            "targets": {},  # No Targets
            "metadata": {
                "ranges": ranges,
                "ranges_rc": ranges_rc
            }
        }
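
In the three dataloaders above, np.concatenate along the last axis merges what the squeeze/np.newaxis calls set up: a one-hot sequence of shape (L, 4) plus single-column DNase and mappability tracks of shape (L, 1), giving one (L, 6) input and its reverse-complement counterpart. A toy sketch of that channel merge (lengths and values are made up):

import numpy as np

L = 1000
seq = np.zeros((L, 4))           # one-hot sequence
dnase = np.zeros((L, 1))         # one signal track
mappability = np.zeros((L, 1))   # another signal track

inputs = np.concatenate([seq, mappability, dnase], axis=-1)
inputs_rc = np.concatenate([seq[::-1, ::-1], mappability[::-1], dnase[::-1]], axis=-1)
print(inputs.shape, inputs_rc.shape)  # (1000, 6) (1000, 6)
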