Python numpy.linalg.norm() Examples

The following are 30 code examples of numpy.linalg.norm(), drawn from open-source projects; the source file, project, and license for each example are listed above it. You may also want to check out the other functions and classes available in the numpy.linalg module.
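For orientation, here is a minimal sketch of the most common call patterns (plain NumPy only; the values in the comments follow from the small arrays defined here):

import numpy as np
from numpy.linalg import norm

v = np.array([3.0, 4.0])
A = np.array([[1.0, 2.0], [3.0, 4.0]])

norm(v)                         # Euclidean (L2) norm of a vector -> 5.0
norm(v, ord=1)                  # L1 norm -> 7.0
norm(v, ord=np.inf)             # max-abs norm -> 4.0
norm(A)                         # Frobenius norm of a matrix -> sqrt(30)
norm(A, ord=2)                  # spectral norm (largest singular value)
norm(A, axis=0)                 # per-column vector norms, shape (2,)
norm(A, axis=1, keepdims=True)  # per-row norms, shape (2, 1)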
Example #1
Source File: predict.py    From License-Plate-Recognition with MIT License
def preprocess_hog(digits):
	samples = []
	for img in digits:
		gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
		gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
		mag, ang = cv2.cartToPolar(gx, gy)
		bin_n = 16
		bin = np.int32(bin_n*ang/(2*np.pi))
		bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
		mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
		hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
		hist = np.hstack(hists)
		
		# transform to Hellinger kernel
		eps = 1e-7
		hist /= hist.sum() + eps
		hist = np.sqrt(hist)
		hist /= norm(hist) + eps
		
		samples.append(hist)
	return np.float32(samples)
# Not guaranteed to cover all provinces
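The "transform to Hellinger kernel" step above L1-normalizes the histogram, takes the element-wise square root, and then L2-normalizes, so that an ordinary dot product between two transformed histograms approximates the Bhattacharyya coefficient. A standalone sketch of the same idea (the helper name is ours, not part of the project):

import numpy as np
from numpy.linalg import norm

def hellinger_normalize(hist, eps=1e-7):
    # L1-normalize, take the square root, then L2-normalize; the square root
    # of an L1-normalized vector already has unit L2 norm, so the last step
    # mainly guards against the eps perturbation and all-zero inputs.
    hist = hist / (hist.sum() + eps)
    hist = np.sqrt(hist)
    return hist / (norm(hist) + eps)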
Example #2
Source File: RealSenseVideo.py    From laplacian-meshes with GNU General Public License v3.0
def doAmplification(self, W, alpha):
        self.rawAmplification = False
#        #Step 1: Get the right singular vectors
#        Mu = tde_mean(self.origDeltaCoords, W)
#        (Y, S) = tde_rightsvd(self.origDeltaCoords, W, Mu)
#        
#        #Step 2: Choose which components to amplify by a visual inspection
#        chooser = PCChooser(Y, (alpha, DEFAULT_NPCs))
#        Alpha = chooser.getAlphaVec(alpha)
        
        #Step 3: Perform the amplification
        #Add the amplified delta coordinates back to the original delta coordinates
        self.ampDeltaCoords = self.origDeltaCoords + subspace_tde_amplification(
            self.origDeltaCoords, self.origDeltaCoords.shape, W,
            alpha * np.ones((1, DEFAULT_NPCs)),
            self.origDeltaCoords.shape[1] // 2)  # integer division (the Python 2 source used /)
        
#        self.ampDeltaCoords = self.origDeltaCoords + tde_amplifyPCs(self.origDeltaCoords, W, Mu, Y, Alpha)
#        print 'normalized error:',(linalg.norm(self.ampDeltaCoords-other_delta_coords)/linalg.norm(self.ampDeltaCoords))
#        print "Finished Amplifying"

    #Perform an amplification on the raw XYZ coordinates 
Example #3
Source File: test_mtl.py    From celer with BSD 3-Clause "New" or "Revised" License
def test_GroupLasso_Lasso_equivalence(sparse_X, fit_intercept, normalize):
    """Check that GroupLasso with groups of size 1 gives Lasso."""
    n_features = 1000
    X, y = build_dataset(
        n_samples=100, n_features=n_features, sparse_X=sparse_X)
    alpha_max = norm(X.T @ y, ord=np.inf) / len(y)
    alpha = alpha_max / 10
    clf = Lasso(alpha, tol=1e-12, fit_intercept=fit_intercept,
                normalize=normalize, verbose=0)
    clf.fit(X, y)
    # take groups of size 1:
    clf1 = GroupLasso(alpha=alpha, groups=1, tol=1e-12,
                      fit_intercept=fit_intercept, normalize=normalize,
                      verbose=0)
    clf1.fit(X, y)

    np.testing.assert_allclose(clf1.coef_, clf.coef_, atol=1e-6)
    np.testing.assert_allclose(clf1.intercept_, clf.intercept_, rtol=1e-4) 
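The quantity norm(X.T @ y, ord=np.inf) / len(y) used here is the smallest penalty at which the (intercept-free) Lasso solution is identically zero; the alphas in these tests are taken as fractions of it. A quick check of that property, assuming scikit-learn is available:

import numpy as np
from numpy.linalg import norm
from sklearn.linear_model import Lasso

rng = np.random.RandomState(0)
X, y = rng.randn(100, 30), rng.randn(100)

alpha_max = norm(X.T @ y, ord=np.inf) / len(y)
clf = Lasso(alpha=alpha_max, fit_intercept=False).fit(X, y)
assert not np.any(clf.coef_)   # at alpha >= alpha_max all coefficients are zero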
Example #4
Source File: test_mtl.py    From celer with BSD 3-Clause "New" or "Revised" License
def test_MultiTaskLasso(fit_intercept):
    """Test that our MultiTaskLasso behaves as sklearn's."""
    X, Y = build_dataset(n_samples=20, n_features=30, n_targets=10)
    alpha_max = np.max(norm(X.T.dot(Y), axis=1)) / X.shape[0]

    alpha = alpha_max / 2.
    params = dict(alpha=alpha, fit_intercept=fit_intercept, tol=1e-10,
                  normalize=True)
    clf = MultiTaskLasso(**params)
    clf.verbose = 2
    clf.fit(X, Y)

    clf2 = sklearn_MultiTaskLasso(**params)
    clf2.fit(X, Y)
    np.testing.assert_allclose(clf.coef_, clf2.coef_, rtol=1e-5)
    if fit_intercept:
        np.testing.assert_allclose(clf.intercept_, clf2.intercept_)

    clf.tol = 1e-7
    check_estimator(clf) 
Example #5
Source File: test_lasso.py    From celer with BSD 3-Clause "New" or "Revised" License
def test_celer_path_logreg():
    X, y = build_dataset(
        n_samples=50, n_features=100, sparse_X=True)
    y = np.sign(y)
    alpha_max = norm(X.T.dot(y), ord=np.inf) / 2
    alphas = alpha_max * np.geomspace(1, 1e-2, 10)

    tol = 1e-8
    coefs, Cs, n_iters = _logistic_regression_path(
        X, y, Cs=1. / alphas, fit_intercept=False, penalty='l1',
        solver='liblinear', tol=tol)

    _, coefs_c, gaps = celer_path(
        X, y, "logreg", alphas=alphas, tol=tol, verbose=2)

    np.testing.assert_array_less(gaps, tol)
    np.testing.assert_allclose(coefs != 0, coefs_c.T != 0)
    np.testing.assert_allclose(coefs, coefs_c.T, atol=1e-5, rtol=1e-3) 
Example #6
Source File: test_lasso.py    From celer with BSD 3-Clause "New" or "Revised" License
def test_Lasso(sparse_X, fit_intercept, positive):
    """Test that our Lasso class behaves as sklearn's Lasso."""
    X, y = build_dataset(n_samples=20, n_features=30, sparse_X=sparse_X)
    if not positive:
        alpha_max = norm(X.T.dot(y), ord=np.inf) / X.shape[0]
    else:
        alpha_max = X.T.dot(y).max() / X.shape[0]

    alpha = alpha_max / 2.
    params = dict(alpha=alpha, fit_intercept=fit_intercept, tol=1e-10,
                  normalize=True, positive=positive)
    clf = Lasso(**params)
    clf.fit(X, y)

    clf2 = sklearn_Lasso(**params)
    clf2.fit(X, y)
    np.testing.assert_allclose(clf.coef_, clf2.coef_, rtol=1e-5)
    if fit_intercept:
        np.testing.assert_allclose(clf.intercept_, clf2.intercept_)

    check_estimator(Lasso) 
Example #7
Source File: test_linalg.py    From recruit with Apache License 2.0
def test_matrix_3x3(self):
        # This test has been added because the 2x2 example
        # happened to have equal nuclear norm and induced 1-norm.
        # The 1/10 scaling factor accommodates the absolute tolerance
        # used in assert_almost_equal.
        A = (1 / 10) * \
            self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
        assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
        assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
        assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
        assert_almost_equal(norm(A, inf), 1.1)
        assert_almost_equal(norm(A, -inf), 0.6)
        assert_almost_equal(norm(A, 1), 1.0)
        assert_almost_equal(norm(A, -1), 0.4)
        assert_almost_equal(norm(A, 2), 0.88722940323461277)
        assert_almost_equal(norm(A, -2), 0.19456584790481812) 
Example #8
Source File: test_linalg.py    From recruit with Apache License 2.0
def test_bad_args(self):
        # Check that bad arguments raise the appropriate exceptions.

        A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
        B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)

        # Using `axis=<integer>` or passing in a 1-D array implies vector
        # norms are being computed, so also using `ord='fro'`
        # or `ord='nuc'` raises a ValueError.
        assert_raises(ValueError, norm, A, 'fro', 0)
        assert_raises(ValueError, norm, A, 'nuc', 0)
        assert_raises(ValueError, norm, [3, 4], 'fro', None)
        assert_raises(ValueError, norm, [3, 4], 'nuc', None)

        # Similarly, norm should raise an exception when ord is any finite
        # number other than 1, 2, -1 or -2 when computing matrix norms.
        for order in [0, 3]:
            assert_raises(ValueError, norm, A, order, None)
            assert_raises(ValueError, norm, A, order, (0, 1))
            assert_raises(ValueError, norm, B, order, (1, 2))

        # Invalid axis
        assert_raises(np.AxisError, norm, B, None, 3)
        assert_raises(np.AxisError, norm, B, None, (2, 3))
        assert_raises(ValueError, norm, B, None, (0, 1, 2)) 
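The rules exercised by test_bad_args boil down to: 'fro' and 'nuc' are matrix norms, so they require either a 2-D input with no axis or a 2-tuple axis; an integer axis (or a 1-D input) selects vector norms, where extra orders such as 0 or 3 are allowed; and finite matrix-norm orders are limited to 1, 2, -1 and -2 (plus ±inf). A small sketch of valid counterparts to the failing calls above:

import numpy as np
from numpy.linalg import norm

A = np.array([[1., 2., 3.], [4., 5., 6.]])
B = np.arange(1., 25.).reshape(2, 3, 4)

norm(A, 'fro')                    # Frobenius norm of the whole 2-D array
norm(B, 'nuc', axis=(1, 2))       # nuclear norm of each 3x4 slice, shape (2,)
norm(A, ord=3, axis=0)            # vector 3-norms of the columns (ord=3 is valid here)
norm(B, ord=np.inf, axis=(0, 2))  # matrix inf-norm over an explicit axis pair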
Example #9
Source File: test_linalg.py    From lambda-packs with MIT License
def test_matrix_3x3(self):
        # This test has been added because the 2x2 example
        # happened to have equal nuclear norm and induced 1-norm.
        # The 1/10 scaling factor accommodates the absolute tolerance
        # used in assert_almost_equal.
        A = (1 / 10) * \
            np.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
        assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
        assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
        assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
        assert_almost_equal(norm(A, inf), 1.1)
        assert_almost_equal(norm(A, -inf), 0.6)
        assert_almost_equal(norm(A, 1), 1.0)
        assert_almost_equal(norm(A, -1), 0.4)
        assert_almost_equal(norm(A, 2), 0.88722940323461277)
        assert_almost_equal(norm(A, -2), 0.19456584790481812) 
Example #10
Source File: test_linalg.py    From lambda-packs with MIT License
def test_bad_args(self):
        # Check that bad arguments raise the appropriate exceptions.

        A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
        B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)

        # Using `axis=<integer>` or passing in a 1-D array implies vector
        # norms are being computed, so also using `ord='fro'`
        # or `ord='nuc'` raises a ValueError.
        assert_raises(ValueError, norm, A, 'fro', 0)
        assert_raises(ValueError, norm, A, 'nuc', 0)
        assert_raises(ValueError, norm, [3, 4], 'fro', None)
        assert_raises(ValueError, norm, [3, 4], 'nuc', None)

        # Similarly, norm should raise an exception when ord is any finite
        # number other than 1, 2, -1 or -2 when computing matrix norms.
        for order in [0, 3]:
            assert_raises(ValueError, norm, A, order, None)
            assert_raises(ValueError, norm, A, order, (0, 1))
            assert_raises(ValueError, norm, B, order, (1, 2))

        # Invalid axis
        assert_raises(ValueError, norm, B, None, 3)
        assert_raises(ValueError, norm, B, None, (2, 3))
        assert_raises(ValueError, norm, B, None, (0, 1, 2)) 
Example #11
Source File: digits.py    From OpenCV-Python-Tutorial with MIT License
def preprocess_hog(digits):
    samples = []
    for img in digits:
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        bin_n = 16
        bin = np.int32(bin_n*ang/(2*np.pi))
        bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
        mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)

        # transform to Hellinger kernel
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps

        samples.append(hist)
    return np.float32(samples) 
Example #12
Source File: test_ndarray.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_norm(ctx=default_context()):
    try:
        import scipy
        assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
        from scipy.linalg import norm as sp_norm
    except (AssertionError, ImportError):
        print("Could not import scipy.linalg.norm or scipy is too old. "
              "Falling back to numpy.linalg.norm which is not numerically stable.")
        from numpy.linalg import norm as sp_norm

    def l1norm(input_data, axis=0, keepdims=False):
        return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
    def l2norm(input_data, axis=0, keepdims=False):
        return sp_norm(input_data, axis=axis, keepdims=keepdims)

    in_data_dim = random_sample([4,5,6], 1)[0]
    for force_reduce_dim1 in [True, False]:
        in_data_shape = rand_shape_nd(in_data_dim)
        if force_reduce_dim1:
            in_data_shape = in_data_shape[:3] + (1, ) + in_data_shape[4:]
        np_arr = np.random.uniform(-1, 1, in_data_shape).astype(np.float32)
        mx_arr = mx.nd.array(np_arr, ctx=ctx)
        for ord in [1, 2]:
            for keep_dims in [True, False]:
                for i in range(4):
                    npy_out = l1norm(np_arr, i, keep_dims) if ord == 1 else l2norm(
                        np_arr, i, keep_dims)
                    mx_out = mx.nd.norm(mx_arr, ord=ord, axis=i, keepdims=keep_dims)
                    assert npy_out.shape == mx_out.shape
                    mx.test_utils.assert_almost_equal(npy_out, mx_out.asnumpy())
                    if (i < 3):
                        npy_out = l1norm(np_arr, (i, i + 1), keep_dims) if ord == 1 else l2norm(
                            np_arr, (i, i + 1), keep_dims)
                        mx_out = mx.nd.norm(mx_arr, ord=ord, axis=(i, i + 1), keepdims=keep_dims)
                        assert npy_out.shape == mx_out.shape
                        mx.test_utils.assert_almost_equal(npy_out, mx_out.asnumpy()) 
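The l1norm/l2norm helpers above match numpy.linalg.norm with ord=1 or ord=2 along a single axis (keepdims included); a minimal check in plain NumPy, independent of MXNet or SciPy:

import numpy as np
from numpy.linalg import norm

x = np.random.uniform(-1, 1, (4, 5, 6)).astype(np.float32)

assert np.allclose(norm(x, ord=1, axis=1, keepdims=True),
                   np.sum(np.abs(x), axis=1, keepdims=True))
assert np.allclose(norm(x, ord=2, axis=2, keepdims=True),
                   np.sqrt(np.sum(x * x, axis=2, keepdims=True)),
                   atol=1e-6)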
Example #13
Source File: distance.py    From DensityPeakCluster with MIT License
def distance(self, vec1, vec2):
    """
    Compute the cosine distance between two vectors.
    """
    super(ConsineDistance, self).distance(vec1, vec2)      #super method
    num = np.dot(vec1, vec2)
    denom = linalg.norm(vec1) * linalg.norm(vec2)
    if num == 0:
      return 1
    return - num / denom
#end ConsineDistance 
Example #14
Source File: mood.py    From weiboanalysis with Apache License 2.0
def _ecl_sim(inA, inB):
    return 1.0 / (1.0 + la.norm(inA - inB))


# Pearson correlation coefficient, range -1 to +1; larger means more similar
Example #15
Source File: mood.py    From weiboanalysis with Apache License 2.0
def _cos_sim(inA, inB):
    num = float(inB * inA.T)
    de_nom = la.norm(inA) * la.norm(inB)
    return 0.5 + 0.5 * (num / de_nom) 
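_cos_sim above relies on np.matrix semantics (inB * inA.T is a matrix product yielding a 1x1 result); with ordinary 1-D ndarrays the same quantity is usually written with np.dot. A sketch under that assumption (inputs are 1-D float arrays with nonzero norm):

import numpy as np
from numpy.linalg import norm

def cos_sim(a, b):
    # cosine similarity rescaled to [0, 1], matching _cos_sim above
    return 0.5 + 0.5 * np.dot(a, b) / (norm(a) * norm(b))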
Example #16
Source File: test_unified_information_explainer.py    From interpret-text with MIT License
def test_explain_model_BERT_seq_classification(self):
        device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
        mnli_test_dataset = get_mnli_test_dataset("train")
        mnli_test_dataset = mnli_test_dataset["sentence1"]
        text = "rare bird has more than enough charm to make it memorable."
        model = get_bert_model()
        model.to(device)
        interpreter_unified = UnifiedInformationExplainer(
            model=model,
            train_dataset=list(mnli_test_dataset),
            device=device,
            target_layer=14,
        )
        explanation_unified = interpreter_unified.explain_local(text)
        valid_imp_vals = np.array(
            [
                0.16004231572151184,
                0.17308972775936127,
                0.18205846846103668,
                0.26146841049194336,
                0.25957807898521423,
                0.3549807369709015,
                0.23873654007911682,
                0.2826242744922638,
                0.2700383961200714,
                0.3673151433467865,
                0.3899800479412079,
                0.20173774659633636
            ]
        )
        print(explanation_unified.local_importance_values)
        local_importance_values = np.array(explanation_unified.local_importance_values)
        cos_sim = dot(valid_imp_vals, local_importance_values) / (
            norm(valid_imp_vals) * norm(local_importance_values)
        )
        assert cos_sim >= 0.80 
Example #17
Source File: coords.py    From pyberny with Mozilla Public License 2.0
def eval(self, coords, grad=False):
        v = (coords[self.i] - coords[self.j]) * angstrom
        r = norm(v)
        if not grad:
            return r
        return r, [v / r, -v / r] 
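The gradient returned above relies on the identity d||v||/dv = v / ||v|| (with the opposite sign for the second endpoint). A quick finite-difference check of that identity, independent of pyberny:

import numpy as np
from numpy.linalg import norm

v = np.array([0.3, -1.2, 0.7])
analytic = v / norm(v)
eps = 1e-6
numeric = np.array([(norm(v + eps * e) - norm(v - eps * e)) / (2 * eps)
                    for e in np.eye(3)])
assert np.allclose(analytic, numeric, atol=1e-6)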
Example #18
Source File: coords.py    From pyberny with Mozilla Public License 2.0
def eval(self, coords, grad=False):
        v1 = (coords[self.i] - coords[self.j]) * angstrom
        v2 = (coords[self.k] - coords[self.j]) * angstrom
        dot_product = np.dot(v1, v2) / (norm(v1) * norm(v2))
        if dot_product < -1:
            dot_product = -1
        elif dot_product > 1:
            dot_product = 1
        phi = np.arccos(dot_product)
        if not grad:
            return phi
        if abs(phi) > pi - 1e-6:
            grad = [
                (pi - phi) / (2 * norm(v1) ** 2) * v1,
                (1 / norm(v1) - 1 / norm(v2)) * (pi - phi) / (2 * norm(v1)) * v1,
                (pi - phi) / (2 * norm(v2) ** 2) * v2,
            ]
        else:
            grad = [
                1 / np.tan(phi) * v1 / norm(v1) ** 2
                - v2 / (norm(v1) * norm(v2) * np.sin(phi)),
                (v1 + v2) / (norm(v1) * norm(v2) * np.sin(phi))
                - 1 / np.tan(phi) * (v1 / norm(v1) ** 2 + v2 / norm(v2) ** 2),
                1 / np.tan(phi) * v2 / norm(v2) ** 2
                - v1 / (norm(v1) * norm(v2) * np.sin(phi)),
            ]
        return phi, grad 
Example #19
Source File: berny.py    From pyberny with Mozilla Public License 2.0
def update_trust(trust, dE, dE_predicted, dq, log=no_log):
    if dE != 0:
        r = dE / dE_predicted  # Fletcher's parameter
    else:
        r = 1.0
    log("Trust update: Fletcher's parameter: {:.3}".format(r))
    if r < 0.25:
        return norm(dq) / 4
    elif r > 0.75 and abs(norm(dq) - trust) < 1e-10:
        return 2 * trust
    else:
        return trust 
Example #20
Source File: berny.py    From pyberny with Mozilla Public License 2.0
def quadratic_step(g, H, w, trust, log=no_log):
    ev = np.linalg.eigvalsh((H + H.T) / 2)
    rfo = np.vstack((np.hstack((H, g[:, None])), np.hstack((g, 0))[None, :]))
    D, V = np.linalg.eigh((rfo + rfo.T) / 2)
    dq = V[:-1, 0] / V[-1, 0]
    l = D[0]
    if norm(dq) <= trust:
        log('Pure RFO step was performed:')
        on_sphere = False
    else:

        def steplength(l):
            return norm(np.linalg.solve(l * eye(H.shape[0]) - H, g)) - trust

        l = Math.findroot(steplength, ev[0])  # minimization on sphere
        dq = np.linalg.solve(l * eye(H.shape[0]) - H, g)
        on_sphere = True
        log('Minimization on sphere was performed:')
    dE = dot(g, dq) + 0.5 * dq.dot(H).dot(dq)  # predicted energy change
    log('* Trust radius: {:.2}'.format(trust))
    log('* Number of negative eigenvalues: {}'.format((ev < 0).sum()))
    log('* Lowest eigenvalue: {:.3}'.format(ev[0]))
    log('* lambda: {:.3}'.format(l))
    log('Quadratic step: RMS: {:.3}, max: {:.3}'.format(Math.rms(dq), max(abs(dq))))
    log('* Predicted energy change: {:.3}'.format(dE))
    return dq, dE, on_sphere 
Example #21
Source File: face_recognition.py    From insightface with MIT License
def compute_sim(self, img1, img2):
        emb1 = self.get_embedding(img1).flatten()
        emb2 = self.get_embedding(img2).flatten()
        from numpy.linalg import norm
        sim = np.dot(emb1, emb2)/(norm(emb1)*norm(emb2))
        return sim 
Example #22
Source File: face_analysis.py    From insightface with MIT License
def get(self, img, det_thresh = 0.8, det_scale = 1.0, max_num = 0):
        bboxes, landmarks = self.det_model.detect(img, threshold=det_thresh, scale = det_scale)
        if bboxes.shape[0]==0:
            return []
        if max_num>0 and bboxes.shape[0]>max_num:
            area = (bboxes[:,2]-bboxes[:,0])*(bboxes[:,3]-bboxes[:,1])
            img_center = img.shape[0]//2, img.shape[1]//2
            offsets = np.vstack([ (bboxes[:,0]+bboxes[:,2])/2-img_center[1], (bboxes[:,1]+bboxes[:,3])/2-img_center[0] ])
            offset_dist_squared = np.sum(np.power(offsets,2.0),0)
            values = area-offset_dist_squared*2.0 # some extra weight on the centering
            bindex = np.argsort(values)[::-1] # some extra weight on the centering
            bindex = bindex[0:max_num]
            bboxes = bboxes[bindex, :]
            landmarks = landmarks[bindex, :]
        ret = []
        for i in range(bboxes.shape[0]):
            bbox = bboxes[i, 0:4]
            det_score = bboxes[i,4]
            landmark = landmarks[i]
            _img = face_align.norm_crop(img, landmark = landmark)
            embedding = None
            embedding_norm = None
            normed_embedding = None
            gender = None
            age = None
            if self.rec_model is not None:
                embedding = self.rec_model.get_embedding(_img).flatten()
                embedding_norm = norm(embedding)
                normed_embedding = embedding / embedding_norm
            if self.ga_model is not None:
                gender, age = self.ga_model.get(_img)
            face = Face(bbox = bbox, landmark = landmark, det_score = det_score, embedding = embedding, gender = gender, age = age
                    , normed_embedding=normed_embedding, embedding_norm = embedding_norm)
            ret.append(face)
        return ret 
Example #23
Source File: matrixforwardsim.py    From pyGSTi with Apache License 2.0
def _check(self, evalTree, prMxToFill=None, dprMxToFill=None, hprMxToFill=None, clipTo=None):
        # compare with older slower version that should do the same thing (for debugging)
        master_circuit_list = evalTree.generate_circuit_list(permute=False)  # raw operation sequences

        for spamTuple, (fInds, gInds) in evalTree.spamtuple_indices.items():
            circuit_list = master_circuit_list[gInds]

            if prMxToFill is not None:
                check_vp = _np.array([self.prs(spamTuple[0], [spamTuple[1]], circuit, clipTo, False)[0]
                                      for circuit in circuit_list])
                if _nla.norm(prMxToFill[fInds] - check_vp) > 1e-6:
                    _warnings.warn("norm(vp-check_vp) = %g - %g = %g" %
                                   (_nla.norm(prMxToFill[fInds]),
                                    _nla.norm(check_vp),
                                    _nla.norm(prMxToFill[fInds] - check_vp)))  # pragma: no cover

            if dprMxToFill is not None:
                check_vdp = _np.concatenate(
                    [self.dpr(spamTuple, circuit, False, clipTo)
                     for circuit in circuit_list], axis=0)
                if _nla.norm(dprMxToFill[fInds] - check_vdp) > 1e-6:
                    _warnings.warn("norm(vdp-check_vdp) = %g - %g = %g" %
                                   (_nla.norm(dprMxToFill[fInds]),
                                    _nla.norm(check_vdp),
                                    _nla.norm(dprMxToFill[fInds] - check_vdp)))  # pragma: no cover

            if hprMxToFill is not None:
                check_vhp = _np.concatenate(
                    [self.hpr(spamTuple, circuit, False, False, clipTo)
                     for circuit in circuit_list], axis=0)
                if _nla.norm(hprMxToFill[fInds][0] - check_vhp[0]) > 1e-6:
                    _warnings.warn("norm(vhp-check_vhp) = %g - %g = %g" %
                                   (_nla.norm(hprMxToFill[fInds]),
                                    _nla.norm(check_vhp),
                                    _nla.norm(hprMxToFill[fInds] - check_vhp)))  # pragma: no cover 
Example #24
Source File: germselection.py    From pyGSTi with Apache License 2.0
def _SuperOpForPerfectTwirl(wrt, eps):
    """Return super operator for doing a perfect twirl with respect to wrt.
    """
    assert wrt.shape[0] == wrt.shape[1]  # only square matrices allowed
    dim = wrt.shape[0]
    SuperOp = _np.zeros((dim**2, dim**2), 'complex')

    # Get spectrum and eigenvectors of wrt
    wrtEvals, wrtEvecs = _np.linalg.eig(wrt)
    wrtEvecsInv = _np.linalg.inv(wrtEvecs)

    # We want to project  X -> M * (Proj_i * (Minv * X * M) * Proj_i) * Minv,
    # where M = wrtEvecs. So A = B = M * Proj_i * Minv and so
    # superop = A tensor B^T == A tensor A^T
    # NOTE: this == (A^T tensor A)^T while *Maple* germ functions seem to just
    # use A^T tensor A -> ^T difference
    for i in range(dim):
        # Create projector onto i-th eigenspace (spanned by i-th eigenvector
        # and other degenerate eigenvectors)
        Proj_i = _np.diag([(1 if (abs(wrtEvals[i] - wrtEvals[j]) <= eps)
                            else 0) for j in range(dim)])
        A = _np.dot(wrtEvecs, _np.dot(Proj_i, wrtEvecsInv))
        #if _np.linalg.norm(A.imag) > 1e-6:
        #    print("DB: imag = ",_np.linalg.norm(A.imag))
        #assert(_np.linalg.norm(A.imag) < 1e-6)
        #A = _np.real(A)
        # Need to normalize, because we are overcounting projectors onto
        # subspaces of dimension d > 1, giving us d * Proj_i tensor Proj_i^T.
        # We can fix this with a division by tr(Proj_i) = d.
        SuperOp += _np.kron(A, A.T) / _np.trace(Proj_i)
        # SuperOp += _np.kron(A.T,A) # Mimic Maple version (but I think this is
        # wrong... or it doesn't matter?)
    return SuperOp  # a op_dim^2 x op_dim^2 matrix 
Example #25
Source File: test_mtl.py    From celer with BSD 3-Clause "New" or "Revised" License
def test_GroupLasso_MultitaskLasso_equivalence():
    "GroupLasso and MultitaskLasso equivalence."""
    n_samples, n_features = 30, 50
    X_, Y_ = build_dataset(n_samples, n_features, n_targets=3)
    y = Y_.reshape(-1, order='F')
    X = np.zeros([3 * n_samples, 3 * n_features], order='F')

    # block filling new design
    for i in range(3):
        X[i * n_samples:(i + 1) * n_samples, i *
          n_features:(i + 1) * n_features] = X_

    grp_indices = np.arange(
        3 * n_features).reshape(3, -1).reshape(-1, order='F').astype(np.int32)
    grp_ptr = 3 * np.arange(n_features + 1).astype(np.int32)

    alpha_max = np.max(norm(X_.T @ Y_, axis=1)) / len(Y_)

    X_data = np.empty([1], dtype=X.dtype)
    X_indices = np.empty([1], dtype=np.int32)
    X_indptr = np.empty([1], dtype=np.int32)
    other = dnorm_grp(
        False, y, grp_ptr, grp_indices, X, X_data,
        X_indices, X_indptr, X_data, len(grp_ptr) - 1,
        np.zeros(1, dtype=np.int32), False)
    np.testing.assert_allclose(alpha_max, other / len(Y_))

    alpha = alpha_max / 10
    clf = MultiTaskLasso(alpha, fit_intercept=False, tol=1e-8, verbose=2)
    clf.fit(X_, Y_)

    groups = [grp.tolist() for grp in grp_indices.reshape(50, 3)]
    clf1 = GroupLasso(alpha=alpha / 3, groups=groups,
                      fit_intercept=False, tol=1e-8, verbose=2)
    clf1.fit(X, y)

    np.testing.assert_allclose(clf1.coef_, clf.coef_.reshape(-1), atol=1e-4) 
Example #26
Source File: test_mtl.py    From celer with BSD 3-Clause "New" or "Revised" License
def test_mtl_path():
    X, Y = build_dataset(n_targets=3)
    tol = 1e-10
    params = dict(eps=0.01, tol=tol, n_alphas=10)
    alphas, coefs, gaps = mtl_path(X, Y, **params)
    np.testing.assert_array_less(gaps, tol)

    sk_alphas, sk_coefs, sk_gaps = lasso_path(X, Y, **params, max_iter=10000)
    np.testing.assert_array_less(sk_gaps, tol * np.linalg.norm(Y, 'fro')**2)
    np.testing.assert_array_almost_equal(coefs, sk_coefs, decimal=5)
    np.testing.assert_allclose(alphas, sk_alphas) 
Example #27
Source File: test_lasso.py    From celer with BSD 3-Clause "New" or "Revised" License
def test_celer_single_alpha(sparse_X, pb):
    X, y = build_dataset(n_samples=20, n_features=100, sparse_X=sparse_X)
    if pb == "logreg":
        y = np.sign(y)
    alpha_max = norm(X.T.dot(y), ord=np.inf) / X.shape[0]

    tol = 1e-6
    _, coefs, gaps = celer_path(X, y, pb, alphas=[alpha_max / 10.], tol=tol)
    np.testing.assert_array_less(gaps, tol) 
Example #28
Source File: test_lasso.py    From celer with BSD 3-Clause "New" or "Revised" License
def test_zero_column(sparse_X):
    X, y = build_dataset(n_samples=60, n_features=50, sparse_X=sparse_X)
    n_zero_columns = 20
    if sparse_X:
        X.data[:X.indptr[n_zero_columns]].fill(0.)
    else:
        X[:, :n_zero_columns].fill(0.)
    alpha_max = norm(X.T.dot(y), ord=np.inf) / X.shape[0]
    tol = 1e-6
    _, coefs, gaps = celer_path(
        X, y, "lasso", alphas=[alpha_max / 10.], tol=tol, p0=50, prune=0)
    w = coefs.T[0]
    np.testing.assert_array_less(gaps, tol)
    np.testing.assert_equal(w.shape[0], X.shape[1]) 
Example #29
Source File: test_linalg.py    From recruit with Apache License 2.0
def test_empty(self):
        assert_equal(norm([]), 0.0)
        assert_equal(norm(array([], dtype=self.dt)), 0.0)
        assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) 
Example #30
Source File: test_linalg.py    From recruit with Apache License 2.0
def test_vector_return_type(self):
        a = np.array([1, 0, 1])

        exact_types = np.typecodes['AllInteger']
        inexact_types = np.typecodes['AllFloat']

        all_types = exact_types + inexact_types

        for each_inexact_types in all_types:
            at = a.astype(each_inexact_types)

            an = norm(at, -np.inf)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 0.0)

            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "divide by zero encountered")
                an = norm(at, -1)
                assert_(issubclass(an.dtype.type, np.floating))
                assert_almost_equal(an, 0.0)

            an = norm(at, 0)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 2)

            an = norm(at, 1)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 2.0)

            an = norm(at, 2)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0))

            an = norm(at, 4)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0))

            an = norm(at, np.inf)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 1.0)