Python numpy.concatenate() Examples

The following code examples show how to use numpy.concatenate(). They are drawn from open source Python projects.
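
Before diving into the project examples, here is a minimal, self-contained sketch of the call patterns that recur below. The arrays and variable names are illustrative only and are not taken from any of the projects:

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])

# Default: join along axis 0 (rows); all other axes must match in size.
rows = np.concatenate((a, b), axis=0)        # shape (3, 2)

# Join along axis 1 (columns).
cols = np.concatenate((a, b.T), axis=1)      # shape (2, 3)

# A list of arrays works as well as a tuple, e.g. when accumulating batches.
batches = [np.full((2, 4), i) for i in range(3)]
stacked = np.concatenate(batches, axis=0)    # shape (6, 4)

# axis=None flattens every input before joining.
flat = np.concatenate((a, b), axis=None)     # shape (6,)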

Example 1
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License
def iter_apply(Xs, Ms, Ys):
    # fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
    logits = []
    cost = 0
    with chainer.using_config('train', False), \
            chainer.using_config('enable_backprop', False):
        for xmb, mmb, ymb in iter_data(
                Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
            n = len(xmb)
            XMB = model.xp.asarray(xmb)
            YMB = model.xp.asarray(ymb)
            MMB = model.xp.asarray(mmb)
            h = model(XMB)
            clf_logits = clf_head(h, XMB)
            clf_logits *= n
            clf_losses = compute_loss_fct(
                XMB, YMB, MMB, clf_logits, only_return_losses=True)
            clf_losses *= n
            logits.append(cuda.to_cpu(clf_logits.array))
            cost += cuda.to_cpu(F.sum(clf_losses).array)
        logits = np.concatenate(logits, 0)
    return logits, cost 
Example 2
Project: face-attendance-machine   Author: matiji66   File: encoding_images.py    Apache License 2.0
def load_encodings():
    """
    Load the previously saved face encoding vectors and name vectors, and return them.
    :return:
    """
    if not os.path.exists(KNOWN_FACE_NANE) or not os.path.exists(KNOWN_FACE_ENCODINGS):
        encoding_images(data_path)
    known_face_encodings = np.load(KNOWN_FACE_ENCODINGS)
    known_face_names = np.load(KNOWN_FACE_NANE)
    aa = [file for file in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, file)) and file.endswith("npy")]
    # ("known_face_encodings_") or file.startswith("known_face_name_"))
    for data in aa:
        if data.startswith('known_face_encodings_'):
            tmp_face_encodings = np.load(os.path.join(data_path,data))
            known_face_encodings = np.concatenate((known_face_encodings, tmp_face_encodings), axis=0)
            print("load ", data)
        elif data.startswith('known_face_name_'):
            tmp_face_name = np.load(os.path.join(data_path, data))
            known_face_names = np.concatenate((known_face_names, tmp_face_name), axis=0)
            print("load ", data)
        else:
            print('skip to load original ', data)
    return known_face_encodings,known_face_names 
Example 3
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def box2dtobox3d(boxes2d, z_translation=0.0, z_size=0.0, z_angle=0.0):
    """
    Transforms 2D boxes to 3D boxes.
    :param boxes2d: np array shaped N,4. box = [x1,y1,x2,y2] (1 = bottom left, 2 = upper right)
    :return: boxes3d np array shaped N,7. box = [t1,t2,t3,s1,s2,s3,z_angle]
    """
    ctr_x = np.mean(boxes2d[:, [0, 2]], axis=-1, keepdims=True)
    ctr_y = np.mean(boxes2d[:, [1, 3]], axis=-1, keepdims=True)
    ctr_z = np.full([boxes2d.shape[0], 1], z_translation)
    ctr = np.concatenate((ctr_x, ctr_y, ctr_z), -1)

    size_x = boxes2d[:, 2:3] - boxes2d[:, 0:1]
    size_y = boxes2d[:, 3:4] - boxes2d[:, 1:2]
    size_z = np.full([boxes2d.shape[0], 1], z_size)
    size = np.concatenate((size_x, size_y, size_z), -1)

    z_angle = np.full([boxes2d.shape[0], 1], z_angle)

    return np.concatenate((ctr, size, z_angle), -1) 
Example 4
Project: fbpconv_tf   Author: panakino   File: util.py    GNU General Public License v3.0
def combine_img_prediction(data, gt, pred):
    """
    Combines the data, ground truth and the prediction into one rgb image
    
    :param data: the data tensor
    :param gt: the ground truth tensor
    :param pred: the prediction tensor
    
    :returns img: the concatenated rgb image 
    """
    ny = pred.shape[2]
    ch = data.shape[3]
    img = np.concatenate((to_rgb(crop_to_shape(data, pred.shape).reshape(-1, ny, ch)), 
                          to_rgb(crop_to_shape(gt[..., 1], pred.shape).reshape(-1, ny, 1)), 
                          to_rgb(pred[..., 1].reshape(-1, ny, 1))), axis=1)
    return img 
Example 5
Project: good-semi-bad-gan   Author: christiancosgrove   File: cifar10_data.py    MIT License
def load(data_dir, subset='train'):
    maybe_download_and_extract(data_dir)
    if subset=='train':
        train_data = [unpickle(os.path.join(data_dir,'cifar-10-batches-py/data_batch_' + str(i))) for i in range(1,6)]
        trainx = np.concatenate([d['x'] for d in train_data],axis=0)
        trainy = np.concatenate([d['y'] for d in train_data],axis=0)
        return trainx, trainy
    elif subset=='test':
        test_data = unpickle(os.path.join(data_dir,'cifar-10-batches-py/test_batch'))
        testx = test_data['x']
        testy = test_data['y']
        return testx, testy
    else:
        raise NotImplementedError('subset should be either train or test')


# load cars 
Example 6
Project: mmdetection   Author: open-mmlab   File: sampler.py    Apache License 2.0
def __iter__(self):
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            indice = np.where(self.flag == i)[0]
            assert len(indice) == size
            np.random.shuffle(indice)
            num_extra = int(np.ceil(size / self.samples_per_gpu)
                            ) * self.samples_per_gpu - len(indice)
            indice = np.concatenate(
                [indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        indices = [
            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(indices) // self.samples_per_gpu))
        ]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert len(indices) == self.num_samples
        return iter(indices) 
Example 7
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet.py    MIT License
def test(model, data):
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-'*50)
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([x_test[:50],x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-'*50)
    plt.imshow(plt.imread("real_and_recon.png", ))
    plt.show() 
Example 8
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet-multi-gpu.py    MIT License
def test(model, data):
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-'*50)
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    img = combine_images(np.concatenate([x_test[:50],x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-'*50)
    plt.imshow(plt.imread("real_and_recon.png", ))
    plt.show() 
Example 9
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr 
Example 10
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Example 11
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def block_split(X, Y):
    """
    Split the data into 80% for training and 20% for testing
    in a block size of 100.
    :param X: 
    :param Y: 
    :return: 
    """
    print("Isolated split 80%, 20% for training and testing")
    num_samples = X.shape[0]
    partition = int(num_samples/3)
    X_adv, Y_adv = X[:partition], Y[:partition]
    X_norm, Y_norm = X[partition:2*partition], Y[partition:2*partition]
    X_noisy, Y_noisy = X[2*partition:], Y[2*partition:]

    num_train = int(partition * 0.008) * 100
    X_train = np.concatenate((X_adv[:num_train], X_norm[:num_train], X_noisy[:num_train]))
    Y_train = np.concatenate((Y_adv[:num_train], Y_norm[:num_train], Y_noisy[:num_train]))

    X_test = np.concatenate((X_adv[num_train:], X_norm[num_train:], X_noisy[num_train:]))
    Y_test = np.concatenate((Y_adv[num_train:], Y_norm[num_train:], Y_noisy[num_train:]))

    return X_train, Y_train, X_test, Y_test 
Example 12
Project: models   Author: kipoi   File: dataloader_m.py    MIT License
def _prepro_cpg(self, states, dists):
        """Preprocess the state and distance of neighboring CpG sites."""
        prepro_states = []
        prepro_dists = []
        for state, dist in zip(states, dists):
            nan = state == dat.CPG_NAN
            if np.any(nan):
                state[nan] = np.random.binomial(1, state[~nan].mean(),
                                                nan.sum())
                dist[nan] = self.cpg_max_dist
            dist = np.minimum(dist, self.cpg_max_dist) / self.cpg_max_dist
            prepro_states.append(np.expand_dims(state, 1))
            prepro_dists.append(np.expand_dims(dist, 1))
        prepro_states = np.concatenate(prepro_states, axis=1)
        prepro_dists = np.concatenate(prepro_dists, axis=1)
        if self.cpg_wlen:
            center = prepro_states.shape[2] // 2
            delta = self.cpg_wlen // 2
            tmp = slice(center - delta, center + delta)
            prepro_states = prepro_states[:, :, tmp]
            prepro_dists = prepro_dists[:, :, tmp]
        return (prepro_states, prepro_dists) 
Example 13
Project: SyNEThesia   Author: RunOrVeith   File: feature_creators.py    MIT License
def _split_into_chunks(signal, chunks_per_second=24):
    # TODO currently broken
    raise NotImplementedError("Splitting to chunks is currently broken.")
    window_length_ms = 1/chunks_per_second * 1000
    intervals = np.arange(window_length_ms, signal.shape[0], window_length_ms, dtype=np.int32)
    chunks = np.array_split(signal, intervals, axis=0)
    pad_to = _next_power_of_two(np.max([chunk.shape[0] for chunk in chunks]))
    padded_chunks = np.stack([np.concatenate([chunk, np.zeros((pad_to - chunk.shape[0],))]) for chunk in chunks])
    return padded_chunks 
Example 14
Project: SyNEThesia   Author: RunOrVeith   File: feature_creators.py    MIT License
def logfbank_features(signal, samplerate=44100, fps=24, num_filt=40, num_cepstra=40, nfft=8192, **kwargs):
    winstep = 2 / fps
    winlen = winstep * 2
    feat, energy = psf.fbank(signal=signal, samplerate=samplerate,
                             winlen=winlen, winstep=winstep, nfilt=num_filt,
                             nfft=nfft)
    feat = np.log(feat)
    feat = psf.dct(feat, type=2, axis=1, norm='ortho')[:, :num_cepstra]
    feat = psf.lifter(feat, L=22)
    feat = np.asarray(feat)

    energy = np.log(energy)
    energy = energy.reshape([energy.shape[0],1])

    if feat.shape[0] > 1:
        std = 0.5 * np.std(feat, axis=0)
        mat = (feat - np.mean(feat, axis=0)) / std
    else:
        mat = feat

    mat = np.concatenate((mat, energy), axis=1)

    duration = signal.shape[0] / samplerate
    expected_frames = fps * duration
    assert mat.shape[0] - expected_frames <= 1, "Produced feature count does not match frame rate"
    return mat 
Example 15
Project: b2ac   Author: hbldh   File: reference.py    MIT License
def fit_improved_B2AC(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses float storage during calculations and performs the
    eigensolver on a float array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    points = np.array(points, 'float')
    S = _calculate_scatter_matrix_py(points[:, 0], points[:, 1])
    S3 = S[3:, 3:]
    S3 = np.array([S3[0, 0], S3[0, 1], S3[0, 2], S3[1, 1], S3[1, 2], S3[2, 2]])
    S3_inv = inverse_symmetric_3by3_double(S3).reshape((3, 3))
    S2 = S[:3, 3:]
    T = -np.dot(S3_inv, S2.T)
    M = S[:3, :3] + np.dot(S2, T)
    inv_mat = np.array([[0, 0, 0.5], [0, -1, 0], [0.5, 0, 0]], 'float')
    M = inv_mat.dot(M)

    e_vals, e_vect = np.linalg.eig(M)

    try:
        elliptical_solution_index = np.where(((4 * e_vect[0, :] * e_vect[2, :]) - ((e_vect[1, :] ** 2))) > 0)[0][0]
    except:
        # No positive eigenvalues. Fit was not ellipse.
        raise ArithmeticError("No elliptical solution found.")

    a = e_vect[:, elliptical_solution_index]
    if a[0] < 0:
        a = -a
    return np.concatenate((a, np.dot(T, a))) 
Example 16
Project: b2ac   Author: hbldh   File: reference.py    MIT License
def fit_improved_B2AC_int(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses int64 storage during calculations and performs the
    eigensolver on an integer array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    S = _calculate_scatter_matrix_c(points[:, 0], points[:, 1])
    S1 = np.array([S[0, 0], S[0, 1], S[0, 2], S[1, 1], S[1, 2], S[2, 2]])
    S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])
    adj_S3, det_S3 = inverse_symmetric_3by3_int(S3)
    S2 = S[:3, 3:]
    T_no_det = - np.dot(np.array(adj_S3.reshape((3, 3)), 'int64'), np.array(S2.T, 'int64'))
    M_term2 = np.dot(np.array(S2, 'int64'), T_no_det) // det_S3
    M = add_symmetric_matrix(M_term2, S1)
    M[[0, 2], :] /= 2
    M[1, :] = -M[1, :]

    e_vals, e_vect = np.linalg.eig(M)

    try:
        elliptical_solution_index = np.where(((4 * e_vect[0, :] * e_vect[2, :]) - ((e_vect[1, :] ** 2))) > 0)[0][0]
    except:
        # No positive eigenvalues. Fit was not ellipse.
        raise ArithmeticError("No elliptical solution found.")
    a = e_vect[:, elliptical_solution_index]
    return np.concatenate((a, np.dot(T_no_det, a) / det_S3)) 
Example 17
Project: b2ac   Author: hbldh   File: int.py    MIT License
def fit_improved_B2AC_int(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses int64 storage during calculations and performs the
    eigensolver on an integer array.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section coefficients array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    e_conds = []
    M, T_no_det, determinant_S3 = _calculate_M_and_T_int64(points)

    e_vals = sorted(QR_algorithm_shift_Givens_int(M)[0])

    a = None
    for ev_ind in [1, 2, 0]:
        # Find the eigenvector that matches this eigenvalue.
        eigenvector, e_norm = inverse_iteration_for_eigenvector_int(M, e_vals[ev_ind])
        # See if that eigenvector yields an elliptical solution.
        elliptical_condition = (4 * eigenvector[0] * eigenvector[2]) - (eigenvector[1] ** 2)
        e_conds.append(elliptical_condition)
        if elliptical_condition > 0:
            a = eigenvector
            break

    if a is None:
        raise ArithmeticError("No elliptical solution found.")

    conic_coefficients = np.concatenate((a, np.dot(T_no_det, a) // determinant_S3))
    return conic_coefficients 
Example 18
Project: b2ac   Author: hbldh   File: double.py    MIT License
def fit_improved_B2AC_double(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting uses float storage during calculations and performs the
    eigensolver on a float array. It only uses `b2ac` package methods for fitting, to
    be as similar to the integer implementation as possible.

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    e_conds = []
    points = np.array(points, 'float')

    M, T = _calculate_M_and_T_double(points)

    e_vals = sorted(qr.QR_algorithm_shift_Givens_double(M)[0])

    a = None
    for ev_ind in [1, 2, 0]:
        # Find the eigenvector that matches this eigenvalue.
        eigenvector = inv_iter.inverse_iteration_for_eigenvector_double(M, e_vals[ev_ind], 5)

        # See if that eigenvector yields an elliptical solution.
        elliptical_condition = (4 * eigenvector[0] * eigenvector[2]) - (eigenvector[1] ** 2)
        e_conds.append(elliptical_condition)
        if elliptical_condition > 0:
            a = eigenvector
            break

    if a is None:
        print("Eigenvalues = {0}".format(e_vals))
        print("Elliptical conditions = {0}".format(e_conds))
        raise ArithmeticError("No elliptical solution found.")

    conic_coefficients = np.concatenate((a, np.dot(T, a)))
    return conic_coefficients 
Example 19
Project: b2ac   Author: hbldh   File: polygon.py    MIT License
def get_closed_polygon(self):
        """Appends the first point to the end of point array, in order to "close" the polygon."""
        if not self.is_closed:
            return np.concatenate([self.polygon_points, [self.polygon_points[0, :]]])
        else:
            return self.polygon_points 
Example 20
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License
def iter_predict(Xs, Ms):
    logits = []
    with chainer.using_config('train', False), \
            chainer.using_config('enable_backprop', False):
        for xmb, mmb in iter_data(
                Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):
            n = len(xmb)
            XMB = model.xp.asarray(xmb)
            MMB = model.xp.asarray(mmb)
            h = model(XMB)
            clf_logits = clf_head(h, XMB)
            logits.append(cuda.to_cpu(clf_logits.array))
    logits = np.concatenate(logits, 0)
    return logits 
Example 21
Project: explirefit   Author: codogogo   File: text_embeddings.py    Apache License 2.0
def add_word(self, lang, word, vector = None):
		if word not in self.lang_vocabularies[lang]:
			self.lang_vocabularies[lang][word] = len(self.lang_vocabularies[lang])
			rvec = np.random.uniform(-1.0, 1.0, size = [self.emb_sizes[lang]]) if vector is None else vector
			rnrm = np.linalg.norm(rvec, 2)
			self.lang_embeddings[lang] = np.vstack((self.lang_embeddings[lang], rvec))
			self.lang_emb_norms[lang] = np.concatenate((self.lang_emb_norms[lang], [rnrm])) 
Example 22
Project: explirefit   Author: codogogo   File: text_embeddings.py    Apache License 2.0
def merge_embedding_spaces(self, languages, emb_size, merge_name = 'merge', lang_prefix_delimiter = '__', special_tokens = None):
		print("Merging embedding spaces...")
		merge_vocabulary = {}
		merge_embs = []
		merge_norms = []

		for lang in languages:
			print("For language: " + lang)
			norms =[]
			embs = []
			for word in self.lang_vocabularies[lang]:
				if special_tokens is None or word not in special_tokens:
					merge_vocabulary[lang + lang_prefix_delimiter + word] = len(merge_vocabulary)
				else:
					merge_vocabulary[word] = len(merge_vocabulary)
				embs.append(self.get_vector(lang, word))
				norms.append(self.get_norm(lang, word))
			merge_embs =  np.copy(embs) if len(merge_embs) == 0 else np.vstack((merge_embs, embs))
			merge_norms = np.copy(norms) if len(merge_norms) == 0 else np.concatenate((merge_norms, norms))

		#if padding_token is not None:
		#	merge_vocabulary[padding_token] = len(merge_vocabulary)
		#	rvec = np.random.uniform(-1.0, 1.0, size = [emb_size])
		#	rnrm = np.linalg.norm(rvec, 2)
		#	merge_embs = np.vstack((merge_embs, rvec))
		#	merge_norms = np.concatenate((merge_norms, [rnrm]))
			
		self.lang_vocabularies[merge_name] = merge_vocabulary
		self.lang_embeddings[merge_name] = merge_embs
		self.lang_emb_norms[merge_name] = merge_norms
		self.emb_sizes[merge_name] = emb_size 
Example 23
Project: explirefit   Author: codogogo   File: trainer.py    Apache License 2.0
def test(self, test_data, batch_size, eval_params = None, print_batches = False):
		epoch_loss = 0
		batches_eval = batcher.batch_iter(test_data, batch_size, 1, shuffle = False)
		eval_batch_counter = 1
				
		for batch_eval in batches_eval:
			if (len(batch_eval) == batch_size):
				feed_dict_eval, golds_batch_eval = self.feed_dict_function(self.model, batch_eval, None, predict = True)	
				preds_batch_eval = self.predict(feed_dict_eval)
				batch_eval_loss = self.model.loss.eval(session = self.session, feed_dict = feed_dict_eval)
				epoch_loss += batch_eval_loss

				if eval_batch_counter == 1:
					golds = golds_batch_eval
					preds = preds_batch_eval
				else:
					golds = np.concatenate((golds, golds_batch_eval), axis = 0)
					preds = np.concatenate((preds, preds_batch_eval), axis = 0)
				if print_batches:
					print(eval_batch_counter)
			eval_batch_counter += 1

		if self.eval_func is not None:
			score = self.eval_func(golds, preds, eval_params)
			return preds, epoch_loss, score
		else:
			return preds, epoch_loss 
Example 24
Project: explirefit   Author: codogogo   File: trainer.py    Apache License 2.0
def cross_validate(self, data, batch_size, max_num_epochs, num_folds = 5, num_devs_not_better_end = 5, batch_dev_perf = 100, print_batch_losses = True, dev_score_maximize = True, configuration = None, print_training = False, micro_performance = True, shuffle_data = True):
		folds = np.array_split(data, num_folds)
		results = {}

		for i in range(num_folds):
			train_data = []
			for j in range(num_folds):
				if j != i:
					train_data.extend(folds[j])
			dev_data = folds[i]

			print("Sizes: train " + str(len(train_data)) + "; dev " + str(len(dev_data)))
			print("Fold " + str(i+1) + ", creating model...")
			model, conf_str, session = self.config_func(configuration)
			self.model = model
			self.session = session
			print("Fold " + str(i+1) + ", training the model...")
			results[conf_str + "__fold-" + str(i+1)] = self.train_dev(train_data, dev_data, batch_size, max_num_epochs, num_devs_not_better_end, batch_dev_perf, print_batch_losses, dev_score_maximize, configuration, print_training, shuffle_data = shuffle_data)
			
			print("Closing session, reseting the default graph (freeing memory)")
			self.session.close()
			tf.reset_default_graph()
			print("Performance: " + str(results[conf_str + "__fold-" + str(i+1)][1]))
		
		if micro_performance:
			print("Concatenating fold predictions for micro-performance computation")
			cntr = 0
			for k in results:
				cntr += 1
				if cntr == 1:
					all_preds = results[k][2]
					all_golds = results[k][3]
				else:
					all_preds = np.concatenate((all_preds, results[k][2]), axis = 0)
					all_golds = np.concatenate((all_golds, results[k][3]), axis = 0)	
			micro_perf = self.eval_func(all_golds, all_preds, self.labels)
			return results, micro_perf
		else: 
			return results 
Example 25
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharatransform.py    BSD 3-Clause "New" or "Revised" License
def test_transform_unit_simple(self):
        """Class SpharaTransform, mode='unit', simple triangular mesh

        Determine the SPHARA forward and inverse transform with unit
        edge weight for a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA transform instance for the mesh
        st_unit_simple = st.SpharaTransform(testtrimesh, mode='unit')

        # the data to transform
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(st_unit_simple.basis()[0])])

        # SPHARA analysis
        coef_unit_simple = st_unit_simple.analysis(data)

        # SPHARA synthesis
        recon_unit_simple = st_unit_simple.synthesis(coef_unit_simple)

        self.assertTrue(
            np.allclose(
                np.absolute(coef_unit_simple),
                [[0.0, 0.0, 0.0],
                 [1.73205081, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
            and
            np.allclose(
                recon_unit_simple,
                data)) 
Example 26
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharatransform.py    BSD 3-Clause "New" or "Revised" License
def test_transform_ie_simple(self):
        """Class SpharaTransform, mode='inv_euclidean', simple triangular mesh

        Determine the SPHARA forward and inverse transform with
        inverse Euclidean edge weight for a simple triangular mesh, 3
        vertices, single triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA transform instance for the mesh
        st_ie_simple = st.SpharaTransform(testtrimesh, mode='inv_euclidean')

        # the data to transform
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(st_ie_simple.basis()[0])])

        # SPHARA analysis
        coef_ie_simple = st_ie_simple.analysis(data)

        # SPHARA synthesis
        recon_ie_simple = st_ie_simple.synthesis(coef_ie_simple)

        self.assertTrue(
            np.allclose(
                np.absolute(coef_ie_simple),
                [[0.0, 0.0, 0.0],
                 [1.73205081, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
            and
            np.allclose(
                recon_ie_simple,
                data)) 
Example 27
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharatransform.py    BSD 3-Clause "New" or "Revised" License
def test_transform_fem_simple(self):
        """Class SpharaTransform, mode='fem', simple triangular mesh

        Determine the SPHARA forward and inverse transform with fem
        discretisation for a simple triangular mesh, 3 vertices,
        single triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA transform instance for the mesh
        st_fem_simple = st.SpharaTransform(testtrimesh, mode='fem')

        # the data to transform
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(st_fem_simple.basis()[0])])

        # SPHARA analysis
        coef_fem_simple = st_fem_simple.analysis(data)

        # SPHARA synthesis
        recon_fem_simple = st_fem_simple.synthesis(coef_fem_simple)

        self.assertTrue(
            np.allclose(
                np.absolute(coef_fem_simple),
                [[0.0, 0.0, 0.0],
                 [1.87082868, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
            and
            np.allclose(
                recon_fem_simple,
                data)) 
Example 28
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_unit_allpass_simple(self):
        """Class SpharaFilter, mode='unit', allpass, simple mesh

        Apply a SPHARA spatial allpass filter with unit edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_unit_simple = sf.SpharaFilter(testtrimesh, mode='unit',
                                         specification=0)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_unit_simple.basis()[0])])

        # apply SPHARA based spatial allpass filter
        data_filt_unit_simple = sf_unit_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_unit_simple,
                data)) 
Example 29
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_unit_dc_simple(self):
        """Class SpharaFilter, mode='unit', dc-pass, simple mesh

        Apply a SPHARA spatial dc-pass filter with unit edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_unit_simple = sf.SpharaFilter(testtrimesh, mode='unit',
                                         specification=1)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_unit_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[3] = [0., 0., 0.]
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial dc-pass filter
        data_filt_unit_simple = sf_unit_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_unit_simple,
                data_filt_ref)) 
Example 30
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_ie_allpass_simple(self):
        """Class SpharaFilter, mode='inv_euclidean', allpass, simple mesh

        Apply a SPHARA spatial allpass filter with inv_euclidean edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_ie_simple = sf.SpharaFilter(testtrimesh, mode='inv_euclidean',
                                       specification=0)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_ie_simple.basis()[0])])

        # apply SPHARA based spatial allpass filter
        data_filt_ie_simple = sf_ie_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_ie_simple,
                data)) 
Example 31
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_ie_dc_simple(self):
        """Class SpharaFilter, mode='inv_euclidean', dc-pass, simple mesh

        Apply a SPHARA spatial dc-pass filter with inv_euclidean edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_ie_simple = sf.SpharaFilter(testtrimesh, mode='inv_euclidean',
                                       specification=1)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_ie_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[3] = [0., 0., 0.]
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial dc-pass filter
        data_filt_ie_simple = sf_ie_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_ie_simple,
                data_filt_ref)) 
Example 32
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_ie_low_simple(self):
        """Class SpharaFilter, mode='inv_euclidean', lowpass, simple mesh

        Apply a SPHARA spatial lowpass filter with inv_euclidean edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_ie_simple = sf.SpharaFilter(testtrimesh, mode='inv_euclidean',
                                         specification=[1., 1., 0.])

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_ie_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial lowpass filter
        data_filt_ie_simple = sf_ie_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_ie_simple,
                data_filt_ref)) 
Example 33
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_fem_allpass_simple(self):
        """Class SpharaFilter, mode='fem', allpass, simple mesh

        Apply a SPHARA spatial allpass filter with fem edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_fem_simple = sf.SpharaFilter(testtrimesh, mode='fem',
                                        specification=0)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_fem_simple.basis()[0])])

        # apply SPHARA based spatial allpass filter
        data_filt_fem_simple = sf_fem_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_fem_simple,
                data)) 
Example 34
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_fem_dc_simple(self):
        """Class SpharaFilter, mode='fem', dc-pass, simple mesh

        Apply a SPHARA spatial dc-pass filter with fem edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_fem_simple = sf.SpharaFilter(testtrimesh, mode='fem',
                                        specification=1)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_fem_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[3] = [0., 0., 0.]
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial dc-pass filter
        data_filt_fem_simple = sf_fem_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_fem_simple,
                data_filt_ref)) 
Example 35
Project: aospy   Author: spencerahill   File: test_utils_vertcoord.py    Apache License 2.0
def setUp(self):
        self.p_in_hpa = np.array([1000, 925, 850, 775, 700, 600, 500, 400, 300,
                                  200, 150, 100, 70, 50, 30, 20, 10],
                                 dtype=np.float64)
        self.p_in_pa = self.p_in_hpa*1e2
        self.p_top = 0
        self.p_bot = 1.1e5
        self.p_edges = 0.5*(self.p_in_pa[1:] + 0.5*self.p_in_pa[:-1])
        self.phalf = np.concatenate(([self.p_bot], self.p_edges, [self.p_top])) 
Example 36
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: voc_eval.py    MIT License
def voc_ap(rec, prec, use_07_metric=False):
  """ ap = voc_ap(rec, prec, [use_07_metric])
  Compute VOC AP given precision and recall.
  If use_07_metric is true, uses the
  VOC 07 11 point method (default:False).
  """
  if use_07_metric:
    # 11 point metric
    ap = 0.
    for t in np.arange(0., 1.1, 0.1):
      if np.sum(rec >= t) == 0:
        p = 0
      else:
        p = np.max(prec[rec >= t])
      ap = ap + p / 11.
  else:
    # correct AP calculation
    # first append sentinel values at the end
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # compute the precision envelope
    for i in range(mpre.size - 1, 0, -1):
      mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # to calculate area under PR curve, look for points
    # where X axis (recall) changes value
    i = np.where(mrec[1:] != mrec[:-1])[0]

    # and sum (\Delta recall) * prec
    ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
  return ap 
Example 37
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    MIT License
def getDataSets(self, training_paths, max_document_length, percent_dev, batch_size, is_char_based):
        if is_char_based:
            x1_text, x2_text, y=self.getTsvDataCharBased(training_paths)
        else:
            x1_text, x2_text, y=self.getTsvData(training_paths)
        # Build vocabulary
        print("Building vocabulary")
        vocab_processor = MyVocabularyProcessor(max_document_length,min_frequency=0,is_char_based=is_char_based)
        vocab_processor.fit_transform(np.concatenate((x2_text,x1_text),axis=0))
        print("Length of loaded vocabulary ={}".format( len(vocab_processor.vocabulary_)))
        i1=0
        train_set=[]
        dev_set=[]
        sum_no_of_batches = 0
        x1 = np.asarray(list(vocab_processor.transform(x1_text)))
        x2 = np.asarray(list(vocab_processor.transform(x2_text)))
        # Randomly shuffle data
        np.random.seed(131)
        shuffle_indices = np.random.permutation(np.arange(len(y)))
        x1_shuffled = x1[shuffle_indices]
        x2_shuffled = x2[shuffle_indices]
        y_shuffled = y[shuffle_indices]
        dev_idx = -1*len(y_shuffled)*percent_dev//100
        del x1
        del x2
        # Split train/test set
        self.dumpValidation(x1_text,x2_text,y,shuffle_indices,dev_idx,0)
        # TODO: This is very crude, should use cross-validation
        x1_train, x1_dev = x1_shuffled[:dev_idx], x1_shuffled[dev_idx:]
        x2_train, x2_dev = x2_shuffled[:dev_idx], x2_shuffled[dev_idx:]
        y_train, y_dev = y_shuffled[:dev_idx], y_shuffled[dev_idx:]
        print("Train/Dev split for {}: {:d}/{:d}".format(training_paths, len(y_train), len(y_dev)))
        sum_no_of_batches = sum_no_of_batches+(len(y_train)//batch_size)
        train_set=(x1_train,x2_train,y_train)
        dev_set=(x1_dev,x2_dev,y_dev)
        gc.collect()
        return train_set,dev_set,vocab_processor,sum_no_of_batches 
Example 38
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    MIT License
def Rmtx_ri(coef_ri, K, D, L):
    coef_ri = np.squeeze(coef_ri)
    coef_r = coef_ri[:K + 1]
    coef_i = coef_ri[K + 1:]
    R_r = linalg.toeplitz(np.concatenate((np.array([coef_r[-1]]),
                                          np.zeros(L - K - 1))),
                          np.concatenate((coef_r[::-1],
                                          np.zeros(L - K - 1)))
                          )
    R_i = linalg.toeplitz(np.concatenate((np.array([coef_i[-1]]),
                                          np.zeros(L - K - 1))),
                          np.concatenate((coef_i[::-1],
                                          np.zeros(L - K - 1)))
                          )
    return np.dot(np.vstack((np.hstack((R_r, -R_i)), np.hstack((R_i, R_r)))), D) 
Example 39
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    MIT License
def compute_b(G_lst, GtG_lst, beta_lst, Rc0, num_bands, a_ri):
    """
    Compute the uniform sinusoidal samples b from the updated annihilating
    filter coefficients.
    :param G_lst: list of G matrices for different subbands
    :param GtG_lst: list of G^H G for different subbands
    :param beta_lst: list of beta-s for different subbands
    :param Rc0: right-dual matrix, here it is the convolution matrix associated with c
    :param num_bands: number of bands
    :param a_ri: a 2D numpy array, each column corresponds to the measurements within a subband
    :return:
    """
    b_lst = []
    a_Gb_lst = []
    for loop in range(num_bands):
        GtG_loop = GtG_lst[loop]
        beta_loop = beta_lst[loop]
        b_loop = beta_loop - \
                 linalg.solve(GtG_loop,
                              np.dot(Rc0.T,
                                     linalg.solve(np.dot(Rc0, linalg.solve(GtG_loop, Rc0.T)),
                                                  np.dot(Rc0, beta_loop)))
                              )

        b_lst.append(b_loop)
        a_Gb_lst.append(a_ri[:, loop] - np.dot(G_lst[loop], b_loop))

    return np.column_stack(b_lst), linalg.norm(np.concatenate(a_Gb_lst)) 
Example 40
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: connected.py    MIT License
def recollect(self, val):
        w = val['weights']
        b = val['biases']
        if w is None: self.w = val; return
        if self.inp_idx is not None:
            w = np.take(w, self.inp_idx, 0)
            
        keep_b = np.take(b, self.keep)
        keep_w = np.take(w, self.keep, 1)
        train_b = b[self.train:]
        train_w = w[:, self.train:]
        self.w['biases'] = np.concatenate(
            (keep_b, train_b), axis = 0)
        self.w['weights'] = np.concatenate(
            (keep_w, train_w), axis = 1) 
Example 41
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: data.py    MIT License
def shuffle(self):
    batch = self.FLAGS.batch
    data = self.parse()
    size = len(data)

    print('Dataset of {} instance(s)'.format(size))
    if batch > size: self.FLAGS.batch = batch = size
    batch_per_epoch = int(size / batch)

    for i in range(self.FLAGS.epoch):
        shuffle_idx = perm(np.arange(size))
        for b in range(batch_per_epoch):
            # yield these
            x_batch = list()
            feed_batch = dict()

            for j in range(b*batch, b*batch+batch):
                train_instance = data[shuffle_idx[j]]
                try:
                    inp, new_feed = self._batch(train_instance)
                except ZeroDivisionError:
                    print("This image's width or height are zeros: ", train_instance[0])
                    print('train_instance:', train_instance)
                    print('Please remove or fix it then try again.')
                    raise

                if inp is None: continue
                x_batch += [np.expand_dims(inp, 0)]

                for key in new_feed:
                    new = new_feed[key]
                    old_feed = feed_batch.get(key, 
                        np.zeros((0,) + new.shape))
                    feed_batch[key] = np.concatenate([ 
                        old_feed, [new] 
                    ])      
            
            x_batch = np.concatenate(x_batch, 0)
            yield x_batch, feed_batch
        
        print('Finish {} epoch(es)'.format(i + 1)) 
Example 42
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def apply_transform_complete_pc(self, points):
        assert points.ndim == 2 or points.ndim == 1
        assert points.shape[-1] == 3 or points.shape[-1] == 4 or points.shape[-1] == 5
        pc = points[:, :3]
        trans_pc = np.matmul(pc, self._rotation.T) + self._translation[None, :]

        return np.concatenate((trans_pc, points[:, 3:]), axis=-1) 
Example 43
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: voc_eval.py    MIT License
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # 11 point metric
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap 
Example 44
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_cifar10(tfrecord_dir, cifar10_dir):
    print('Loading CIFAR-10 from "%s"' % cifar10_dir)
    import pickle
    images = []
    labels = []
    for batch in range(1, 6):
        with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file:
            data = pickle.load(file, encoding='latin1')
        images.append(data['data'].reshape(-1, 3, 32, 32))
        labels.append(data['labels'])
    images = np.concatenate(images)
    labels = np.concatenate(labels)
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example 45
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    MIT License
def create_svhn(tfrecord_dir, svhn_dir):
    print('Loading SVHN from "%s"' % svhn_dir)
    import pickle
    images = []
    labels = []
    for batch in range(1, 4):
        with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file:
            data = pickle.load(file, encoding='latin1')
        images.append(data[0])
        labels.append(data[1])
    images = np.concatenate(images)
    labels = np.concatenate(labels)
    assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (73257,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example 46
Project: Electrolyte_Analysis_FTIR   Author: Samuel-Buteau   File: Constant_run.py    MIT License
def get(self, n):
        """
        will return a list of n random numbers in self.GetFresh_list
        - Samuel Buteau, October 2018
        """
        if n >= self.get_fresh_count:
            return numpy.concatenate((self.get(int(n/2)),self.get(n- int(n/2))))


        reshuffle_flag = False

        n_immediate_fulfill = min(n, self.get_fresh_count - self.get_fresh_pos)
        batch_of_indecies = numpy.empty([n], dtype=numpy.int32)
        for i in range(0, n_immediate_fulfill):
            batch_of_indecies[i] = self.GetFresh_list[i + self.get_fresh_pos]

        self.get_fresh_pos += n_immediate_fulfill
        if self.get_fresh_pos >= self.get_fresh_count:
            self.get_fresh_pos -= self.get_fresh_count
            reshuffle_flag = True

            # Now, the orders that needed to be satisfied are satisfied.
        n_delayed_fulfill = max(0, n - n_immediate_fulfill)
        if reshuffle_flag:
            numpy.random.shuffle(self.GetFresh_list)

        if n_delayed_fulfill > 0:
            for i in range(0, n_delayed_fulfill):
                batch_of_indecies[i + n_immediate_fulfill] = self.GetFresh_list[i]
            self.get_fresh_pos = n_delayed_fulfill

        return batch_of_indecies 
Example 47
Project: mmdetection   Author: open-mmlab   File: dataset_wrappers.py    Apache License 2.0
def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        if hasattr(datasets[0], 'flag'):
            flags = []
            for i in range(0, len(datasets)):
                flags.append(datasets[i].flag)
            self.flag = np.concatenate(flags) 
Example 48
Project: mmdetection   Author: open-mmlab   File: eval_hooks.py    Apache License 2.0
def evaluate(self, runner, results):
        gt_bboxes = []
        gt_labels = []
        gt_ignore = []
        for i in range(len(self.dataset)):
            ann = self.dataset.get_ann_info(i)
            bboxes = ann['bboxes']
            labels = ann['labels']
            if 'bboxes_ignore' in ann:
                ignore = np.concatenate([
                    np.zeros(bboxes.shape[0], dtype=np.bool),
                    np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
                ])
                gt_ignore.append(ignore)
                bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
                labels = np.concatenate([labels, ann['labels_ignore']])
            gt_bboxes.append(bboxes)
            gt_labels.append(labels)
        if not gt_ignore:
            gt_ignore = None
        # If the dataset is VOC2007, then use 11 points mAP evaluation.
        if hasattr(self.dataset, 'year') and self.dataset.year == 2007:
            ds_name = 'voc07'
        else:
            ds_name = self.dataset.CLASSES
        mean_ap, eval_results = eval_map(
            results,
            gt_bboxes,
            gt_labels,
            gt_ignore=gt_ignore,
            scale_ranges=None,
            iou_thr=0.5,
            dataset=ds_name,
            print_summary=True)
        runner.log_buffer.output['mAP'] = mean_ap
        runner.log_buffer.ready = True 
Example 49
Project: mmdetection   Author: open-mmlab   File: voc_eval.py    Apache License 2.0
def voc_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    eval_map(
        det_results,
        gt_bboxes,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        print_summary=True) 
Example 50
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: utils.py    MIT License
def MinMaxBestBaseStacking(input_folder, best_base, output_path):
    sub_base = pd.read_csv(best_base)
    all_files = os.listdir(input_folder)

    # Read and concatenate submissions
    outs = [pd.read_csv(os.path.join(input_folder, f), index_col=0) for f in all_files]
    concat_sub = pd.concat(outs, axis=1)
    cols = list(map(lambda x: "is_iceberg_" + str(x), range(len(concat_sub.columns))))
    concat_sub.columns = cols
    concat_sub.reset_index(inplace=True)

    # get the data fields ready for stacking
    concat_sub['is_iceberg_max'] = concat_sub.iloc[:, 1:6].max(axis=1)
    concat_sub['is_iceberg_min'] = concat_sub.iloc[:, 1:6].min(axis=1)
    concat_sub['is_iceberg_mean'] = concat_sub.iloc[:, 1:6].mean(axis=1)
    concat_sub['is_iceberg_median'] = concat_sub.iloc[:, 1:6].median(axis=1)

    # set up cutoff threshold for lower and upper bounds, easy to twist
    cutoff_lo = 0.67
    cutoff_hi = 0.33

    concat_sub['is_iceberg_base'] = sub_base['is_iceberg']
    concat_sub['is_iceberg'] = np.where(np.all(concat_sub.iloc[:, 1:6] > cutoff_lo, axis=1),
                                        concat_sub['is_iceberg_max'],
                                        np.where(np.all(concat_sub.iloc[:, 1:6] < cutoff_hi, axis=1),
                                                 concat_sub['is_iceberg_min'],
                                                 concat_sub['is_iceberg_base']))
    concat_sub[['id', 'is_iceberg']].to_csv(output_path,
                                            index=False, float_format='%.12f')