Python scipy.linalg.norm() Examples

The following are 30 code examples of scipy.linalg.norm(), collected from open-source projects. Each example lists its source file, project, and license. You may also want to check out the other available functions and classes of the scipy.linalg module.
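Before the project examples, here is a minimal sketch of scipy.linalg.norm() itself: by default it returns the Euclidean norm of a vector and the Frobenius norm of a matrix, and the ord argument selects other norms.

import numpy as np
from scipy.linalg import norm

v = np.array([3.0, 4.0])
print(norm(v))       # 5.0 -- Euclidean (2-) norm, the default for vectors
print(norm(v, 1))    # 7.0 -- sum of absolute values

A = np.array([[1.0, 2.0],
              [3.0, 4.0]])
print(norm(A))       # Frobenius norm, the default for matrices
print(norm(A, 2))    # spectral norm (largest singular value)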
Example #1
Source File: point_cloud.py    From FRIDA with MIT License
def doa(self, receiver, source):
        ''' Computes the direction of arrival wrt a source and receiver '''

        s_ind = self.key2ind(source)
        r_ind = self.key2ind(receiver)

        # vector from receiver to source
        v = self.X[:,s_ind] - self.X[:,r_ind]

        azimuth = np.arctan2(v[1], v[0])
        elevation = np.arctan2(v[2], la.norm(v[:2]))

        azimuth = azimuth + 2*np.pi if azimuth < 0. else azimuth
        elevation = elevation + 2*np.pi if elevation < 0. else elevation

        return np.array([azimuth, elevation]) 
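The angle arithmetic above can be tried in isolation; the vector v below is a hypothetical stand-in for self.X[:, s_ind] - self.X[:, r_ind].

import numpy as np
from scipy import linalg as la

v = np.array([1.0, 1.0, np.sqrt(2.0)])        # hypothetical receiver-to-source vector
azimuth = np.arctan2(v[1], v[0])              # angle in the x-y plane
elevation = np.arctan2(v[2], la.norm(v[:2]))  # angle above the x-y plane
print(np.degrees([azimuth, elevation]))       # [45. 45.]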
Example #2
Source File: skipthoughts.py    From text-to-image with MIT License
def nn(model, text, vectors, query, k=5):
	"""
	Return the nearest neighbour sentences to query
	text: list of sentences
	vectors: the corresponding representations for text
	query: a string to search
	"""
	qf = encode(model, [query])
	qf /= norm(qf)
	scores = numpy.dot(qf, vectors.T).flatten()
	sorted_args = numpy.argsort(scores)[::-1]
	sentences = [text[a] for a in sorted_args[:k]]
	print(('QUERY: ' + query))
	print('NEAREST: ')
	for i, s in enumerate(sentences):
		print((s, sorted_args[i])) 
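The heart of nn() is cosine similarity: scale the query to unit length with norm(), then rank the sentence vectors by dot product. A self-contained sketch on toy vectors (the hypothetical qf below replaces the encode() call):

import numpy as np
from scipy.linalg import norm

text = ['east', 'north-east', 'north']
vectors = np.array([[1.0, 0.0], [0.7, 0.7], [0.0, 1.0]])  # toy sentence vectors
qf = np.array([0.9, 0.1])                                 # hypothetical encoded query
qf = qf / norm(qf)                                        # unit-length query
scores = np.dot(vectors, qf)
for a in np.argsort(scores)[::-1][:2]:
    print(text[a], scores[a])                             # 'east' ranks first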
Example #3
Source File: skipthoughts.py    From StackGAN with MIT License
def nn(model, text, vectors, query, k=5):
    """
    Return the nearest neighbour sentences to query
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    qf = encode(model, [query])
    qf /= norm(qf)
    scores = numpy.dot(qf, vectors.T).flatten()
    sorted_args = numpy.argsort(scores)[::-1]
    sentences = [text[a] for a in sorted_args[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for i, s in enumerate(sentences):
        print(s, sorted_args[i])
Example #4
Source File: skipthoughts.py    From text-to-image with MIT License
def nn(model, text, vectors, query, k=5):
    """
    Return the nearest neighbour sentences to query
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    qf = encode(model, [query])
    qf /= norm(qf)
    scores = numpy.dot(qf, vectors.T).flatten()
    sorted_args = numpy.argsort(scores)[::-1]
    sentences = [text[a] for a in sorted_args[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for i, s in enumerate(sentences):
        print(s, sorted_args[i])
Example #5
Source File: hrf.py    From pybids with MIT License
def _orthogonalize(X):
    """Orthogonalize every column of design `X` w.r.t preceding columns

    Parameters
    ----------
    X : array of shape(n, p)
       the data to be orthogonalized

    Returns
    -------
    X : array of shape(n, p)
       the data after orthogonalization

    Notes
    -----
    X is changed in place. The columns are not normalized.
    """
    if X.size == X.shape[0]:
        return X
    from scipy.linalg import pinv, norm
    for i in range(1, X.shape[1]):
        X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
        # X[:, i] /= norm(X[:, i])
    return X 
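A quick way to check the projection step: after _orthogonalize(), the Gram matrix of the columns should be diagonal (this sketch assumes the function above is in scope).

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
Xo = _orthogonalize(X.copy())
gram = Xo.T @ Xo                                 # cross products of the columns
off_diag = gram - np.diag(np.diag(gram))
print(np.allclose(off_diag, 0.0, atol=1e-10))    # True: columns are mutually orthogonal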
Example #6
Source File: bicluster.py    From Mastering-Elasticsearch-7.0 with MIT License
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
        """Find the ``n_best`` vectors that are best approximated by piecewise
        constant vectors.

        The piecewise vectors are found by k-means; the best is chosen
        according to Euclidean distance.

        """
        def make_piecewise(v):
            centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
            return centroid[labels].ravel()
        piecewise_vectors = np.apply_along_axis(make_piecewise,
                                                axis=1, arr=vectors)
        dists = np.apply_along_axis(norm, axis=1,
                                    arr=(vectors - piecewise_vectors))
        result = vectors[np.argsort(dists)[:n_best]]
        return result 
Example #7
Source File: util.py    From sfa-numpy with MIT License
def norm_of_columns(A, p=2):
    """Vector p-norm of each column of a matrix.

    Parameters
    ----------
    A : array_like
        Input matrix.
    p : int, optional
        p-th norm.

    Returns
    -------
    array_like
        p-norm of each column of A.
    """
    _, N = A.shape
    return np.asarray([linalg.norm(A[:, j], ord=p) for j in range(N)]) 
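A usage sketch; note that recent SciPy releases can compute the same column norms directly via the axis argument of linalg.norm (assumes norm_of_columns above is in scope).

import numpy as np
from scipy import linalg

A = np.array([[3.0, 1.0],
              [4.0, -1.0]])
print(norm_of_columns(A))              # [5.0, 1.414...] -- Euclidean column norms
print(norm_of_columns(A, p=1))         # [7.0, 2.0] -- 1-norms of the columns
print(linalg.norm(A, ord=2, axis=0))   # same Euclidean norms, vectorized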
Example #8
Source File: util.py    From sfa-numpy with MIT License
def coherence_of_columns(A):
    """Mutual coherence of columns of A.

    Parameters
    ----------
    A : array_like
        Input matrix.

    Returns
    -------
    float
        Mutual coherence of the columns of A.
    """
    A = np.asmatrix(A)
    _, N = A.shape
    A = A * np.asmatrix(np.diag(1/norm_of_columns(A)))
    Gram_A = A.H*A
    for j in range(N):
        Gram_A[j, j] = 0
    return np.max(np.abs(Gram_A)) 
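Mutual coherence is the largest absolute inner product between distinct normalized columns: 0 for orthogonal columns, approaching 1 for nearly parallel ones. A small check (assumes both helpers above are in scope):

import numpy as np

print(coherence_of_columns(np.eye(3)))      # 0.0 -- orthonormal columns

A = np.array([[1.0, 1.0],
              [0.0, 0.1]])
print(coherence_of_columns(A))              # ~0.995 -- nearly parallel columns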
Example #9
Source File: test_ksvd.py    From ksvd with Apache License 2.0
def test_size():
    np.random.seed(0)
    N = 50
    L = 12
    n_features = 16
    D = np.random.randn(L, n_features)
    B = np.array(sp.sparse.random(N, L, density=0.5).todense())
    X = np.dot(B, D)
    dico1 = ApproximateKSVD(n_components=L, transform_n_nonzero_coefs=L)
    dico1.fit(X)
    gamma1 = dico1.transform(X)
    e1 = norm(X - gamma1.dot(dico1.components_))

    dico2 = DictionaryLearning(n_components=L, transform_n_nonzero_coefs=L)
    dico2.fit(X)
    gamma2 = dico2.transform(X)
    e2 = norm(X - gamma2.dot(dico2.components_))

    assert dico1.components_.shape == dico2.components_.shape
    assert gamma1.shape == gamma2.shape
    assert e1 < e2 
Example #10
Source File: _channel_state_test.py    From OpenFermion with Apache License 2.0
def test_amplitude_damping(self):
        """Test amplitude damping on a simple qubit state"""

        # With probability 0
        test_density_matrix = (
            amplitude_damping_channel(self.density_matrix, 0, 1))
        self.assertAlmostEquals(norm(self.density_matrix -
                                     test_density_matrix), 0.0)

        test_density_matrix = (
            amplitude_damping_channel(self.density_matrix, 0, 1,
                                      transpose=True))
        self.assertAlmostEquals(norm(self.density_matrix -
                                     test_density_matrix), 0.0)

        # With probability 1
        correct_density_matrix = zeros((4, 4), dtype=complex)
        correct_density_matrix[2, 2] = 1

        test_density_matrix = (
            amplitude_damping_channel(self.density_matrix, 1, 1))

        self.assertAlmostEquals(norm(correct_density_matrix -
                                     test_density_matrix), 0.0) 
Example #11
Source File: _sparse_tools_test.py    From OpenFermion with Apache License 2.0
def test_jw_number_restrict_state(self):
        n_qubits = numpy.random.randint(1, 12)
        n_particles = numpy.random.randint(0, n_qubits)

        number_indices = jw_number_indices(n_particles, n_qubits)
        subspace_dimension = len(number_indices)

        # Create a vector that has entry 1 for every coordinate with
        # the specified particle number, and 0 everywhere else
        vector = numpy.zeros(2**n_qubits, dtype=float)
        vector[number_indices] = 1

        # Restrict the vector
        restricted_vector = jw_number_restrict_state(vector, n_particles)

        # Check that it has the correct shape
        self.assertEqual(restricted_vector.shape[0], subspace_dimension)

        # Check that it has the same norm as the original vector
        self.assertAlmostEqual(inner_product(vector, vector),
                               inner_product(restricted_vector,
                                             restricted_vector)) 
Example #12
Source File: test_linsolve.py    From Computable with MIT License
def test_twodiags(self):
        A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
        b = array([1, 2, 3, 4, 5])

        # condition number of A
        cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)

        for t in ['f','d','F','D']:
            eps = finfo(t).eps  # floating point epsilon
            b = b.astype(t)

            for format in ['csc','csr']:
                Asp = A.astype(t).asformat(format)

                x = spsolve(Asp,b)

                assert_(norm(b - Asp*x) < 10 * cond_A * eps) 
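The cond_A line above is the textbook 2-norm condition number, norm(A, 2) * norm(inv(A), 2); for a dense matrix it agrees with numpy.linalg.cond. A quick sketch:

import numpy as np
from scipy.linalg import norm, inv

M = np.array([[4.0, 1.0],
              [2.0, 3.0]])
cond_manual = norm(M, 2) * norm(inv(M), 2)
print(np.isclose(cond_manual, np.linalg.cond(M, 2)))   # True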
Example #13
Source File: test_iterative.py    From Computable with MIT License
def check_maxiter(solver, case):
    A = case.A
    tol = 1e-12

    b = arange(A.shape[0], dtype=float)
    x0 = 0*b

    residuals = []

    def callback(x):
        residuals.append(norm(b - case.A*x))

    x, info = solver(A, b, x0=x0, tol=tol, maxiter=3, callback=callback)

    assert_equal(len(residuals), 3)
    assert_equal(info, 3) 
Example #14
Source File: nonlin.py    From Computable with MIT License
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 iter=None, norm=maxnorm):

        if f_tol is None:
            f_tol = np.finfo(np.float_).eps ** (1./3)
        if f_rtol is None:
            f_rtol = np.inf
        if x_tol is None:
            x_tol = np.inf
        if x_rtol is None:
            x_rtol = np.inf

        self.x_tol = x_tol
        self.x_rtol = x_rtol
        self.f_tol = f_tol
        self.f_rtol = f_rtol

        self.norm = norm
        self.iter = iter

        self.f0_norm = None
        self.iteration = 0 
Example #15
Source File: test_locally_linear.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_barycenter_kneighbors_graph():
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    A = barycenter_kneighbors_graph(X, 1)
    assert_array_almost_equal(
        A.toarray(),
        [[0.,  1.,  0.],
         [1.,  0.,  0.],
         [0.,  1.,  0.]])

    A = barycenter_kneighbors_graph(X, 2)
    # check that columns sum to one
    assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
    pred = np.dot(A.toarray(), X)
    assert_less(linalg.norm(pred - X) / X.shape[0], 1)


# ----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds. 
Example #16
Source File: _sparse_tools_test.py    From OpenFermion with Apache License 2.0
def test_jw_restrict_operator(self):
        """Test the scheme for restricting JW encoded operators to number"""
        # Make a Hamiltonian that cares mostly about number of electrons
        n_qubits = 6
        target_electrons = 3
        penalty_const = 100.
        number_sparse = jordan_wigner_sparse(number_operator(n_qubits))
        bias_sparse = jordan_wigner_sparse(
            sum([FermionOperator(((i, 1), (i, 0)), 1.0) for i
                 in range(n_qubits)], FermionOperator()))
        hamiltonian_sparse = penalty_const * (
            number_sparse - target_electrons *
            scipy.sparse.identity(2**n_qubits)).dot(
            number_sparse - target_electrons *
            scipy.sparse.identity(2**n_qubits)) + bias_sparse

        restricted_hamiltonian = jw_number_restrict_operator(
            hamiltonian_sparse, target_electrons, n_qubits)
        true_eigvals, _ = eigh(hamiltonian_sparse.A)
        test_eigvals, _ = eigh(restricted_hamiltonian.A)

        self.assertAlmostEqual(norm(true_eigvals[:20] - test_eigvals[:20]),
                               0.0) 
Example #17
Source File: test_least_angle.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_rank_deficient_design():
    # consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with n_features < rank) in the same way
    # as coordinate descent Lasso
    y = [5, 0, 5]
    for X in (
              [[5, 0],
               [0, 5],
               [10, 10]],
              [[10, 10, 0],
               [1e-32, 0, 0],
               [0, 0, 1]]
             ):
        # To be able to use the coefs to compute the objective function,
        # we need to turn off normalization
        lars = linear_model.LassoLars(.1, normalize=False)
        coef_lars_ = lars.fit(X, y).coef_
        obj_lars = (1. / (2. * 3.)
                    * linalg.norm(y - np.dot(X, coef_lars_)) ** 2
                    + .1 * linalg.norm(coef_lars_, 1))
        coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
        coef_cd_ = coord_descent.fit(X, y).coef_
        obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
                  + .1 * linalg.norm(coef_cd_, 1))
        assert_less(obj_lars, obj_cd * (1. + 1e-8)) 
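The objective being compared above is the standard Lasso loss, 1/(2*n_samples) * ||y - Xw||^2 + alpha * ||w||_1, written with linalg.norm. A small helper spelling it out (lasso_objective is our name, not part of scikit-learn):

import numpy as np
from scipy import linalg

def lasso_objective(X, y, coef, alpha):
    """1 / (2 * n_samples) * ||y - X @ coef||_2^2 + alpha * ||coef||_1."""
    X, y = np.asarray(X, dtype=float), np.asarray(y, dtype=float)
    n_samples = X.shape[0]
    return (linalg.norm(y - X @ coef) ** 2 / (2.0 * n_samples)
            + alpha * linalg.norm(coef, 1))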
Example #18
Source File: test_least_angle.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_lasso_lars_vs_lasso_cd_early_stopping():
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when early stopping is used.
    # (test : before, in the middle, and in the last part of the path)
    alphas_min = [10, 0.9, 1e-4]

    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)

    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
                                      tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01) 
Example #19
Source File: bicluster.py    From Mastering-Elasticsearch-7.0 with MIT License
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
    """Normalize rows and columns of ``X`` simultaneously so that all
    rows sum to one constant and all columns sum to a different
    constant.

    """
    # According to paper, this can also be done more efficiently with
    # deviation reduction and balancing algorithms.
    X = make_nonnegative(X)
    X_scaled = X
    for _ in range(max_iter):
        X_new, _, _ = _scale_normalize(X_scaled)
        if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
        else:
            dist = norm(X_scaled - X_new)
        X_scaled = X_new
        if dist is not None and dist < tol:
            break
    return X_scaled 
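The helper above relies on scikit-learn internals (_scale_normalize, make_nonnegative). The underlying idea, alternately rescaling rows and columns until the change measured by norm() drops below tol, can be sketched self-containedly; sinkhorn_normalize below is a hypothetical stand-in, not the scikit-learn implementation.

import numpy as np
from scipy.linalg import norm

def sinkhorn_normalize(X, max_iter=1000, tol=1e-5):
    """Alternately rescale rows and columns of a nonnegative matrix until
    the iterate stops changing (row and column sums become nearly constant)."""
    X = np.asarray(X, dtype=float)
    for _ in range(max_iter):
        X_new = X / X.sum(axis=1, keepdims=True)          # rows sum to 1
        X_new = X_new / X_new.sum(axis=0, keepdims=True)  # columns sum to 1
        if norm(X_new - X) < tol:                         # same convergence test as above
            return X_new
        X = X_new
    return X

B = sinkhorn_normalize(np.random.rand(4, 3) + 0.1)
print(B.sum(axis=0))   # all columns sum to the same constant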
Example #20
Source File: test_lgmres.py    From Computable with MIT License
def do_solve(**kw):
    count[0] = 0
    x0, flag = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=6, tol=1e-14, **kw)
    count_0 = count[0]
    assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
    return x0, count_0 
Example #21
Source File: _channel_state_test.py    From OpenFermion with Apache License 2.0
def test_dephasing(self):
        """Test dephasing on a simple qubit state"""

        # Check for identity on |11> state
        test_density_matrix = (
            dephasing_channel(self.density_matrix, 1, 1))
        self.assertAlmostEquals(norm(self.density_matrix -
                                     test_density_matrix), 0.0)

        test_density_matrix = (
            dephasing_channel(self.density_matrix, 1, 1,
                              transpose=True))

        correct_matrix = array([[0., 0., 0., 0.],
                                [0., 0., 0., 0.],
                                [0., 0., 0.5, -0.5],
                                [0., 0., -0.5, 1.]])
        self.assertAlmostEquals(norm(correct_matrix -
                                     test_density_matrix), 0.0)

        # Check for correct action on cat state
        # With probability = 0
        test_density_matrix = (
            dephasing_channel(self.cat_matrix, 0, 1))
        self.assertAlmostEquals(norm(self.cat_matrix -
                                     test_density_matrix), 0.0)
        # With probability = 1

        correct_matrix = array([[0.50, 0.25, 0.00, 0.00],
                                [0.25, 0.25, 0.00, -0.25],
                                [0.00, 0.00, 0.00, 0.00],
                                [0.00, -0.25, 0.00, 0.50]])
        test_density_matrix = (
            dephasing_channel(self.cat_matrix, 1, 1))
        self.assertAlmostEquals(norm(correct_matrix -
                                     test_density_matrix), 0.0) 
Example #22
Source File: transformations.py    From PyFEM with GNU General Public License v3.0
def getRotationMatrix ( el_coords ):

  #Check the dimension of physical space
  if el_coords.shape[1] != 2:
    raise NotImplementedError('Rotation matrix only implemented for 2D situation')

  #Compute the (undeformed) element length
  l0 = norm( el_coords[1]-el_coords[0] )

  #Set up the rotation matrix to rotate a global
  #coordinate to an element coordinate (see Ch 1.3)
  sinalpha = (el_coords[1,1]-el_coords[0,1])/l0
  cosalpha = (el_coords[1,0]-el_coords[0,0])/l0

  return array([[cosalpha,sinalpha],[-sinalpha,cosalpha]]) 
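A quick check of the 2-D rotation: for an element inclined 45 degrees to the global x-axis, the returned matrix maps the element axis onto the local x-direction (this sketch assumes getRotationMatrix and its numpy/scipy imports are in scope).

import numpy as np

el_coords = np.array([[0.0, 0.0],
                      [1.0, 1.0]])        # element at 45 degrees
R = getRotationMatrix(el_coords)
print(R @ (el_coords[1] - el_coords[0]))  # ~[1.414, 0.]: aligned with the local x-axis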
Example #23
Source File: utils.py    From Pix2Pose with MIT License
def compute_rotation_from_vertex(vertex):
    """Compute rotation matrix from viewpoint vertex """
    up = [0, 0, 1]
    if vertex[0] == 0 and vertex[1] == 0 and vertex[2] != 0:
        up = [-1, 0, 0]
    rot = np.zeros((3, 3))
    rot[:, 2] = -vertex / norm(vertex)  # View direction towards origin
    rot[:, 0] = np.cross(rot[:, 2], up)
    rot[:, 0] /= norm(rot[:, 0])
    rot[:, 1] = np.cross(rot[:, 0], -rot[:, 2])
    return rot.T 
Example #24
Source File: plotting.py    From UnsupervisedGeometryAwareRepresentationLearning with GNU General Public License v3.0
def plot3Dcylinder(ax, p0, p1, radius=5, color=(0.5, 0.5, 0.5)):
    num_samples = 8
    origin = np.array([0, 0, 0])
    #vector in direction of axis
    v = p1 - p0
    mag = la.norm(v)
    if mag==0: # prevent division by 0 for bones of length 0
        return np.zeros((0,0)),np.zeros((0,0)),np.zeros((0,0)),np.zeros((0,0))
    #unit vector in direction of axis
    v = v / mag
    #make some vector not in the same direction as v
    not_v = np.array([1, 0, 0])
    eps = 0.00001
    if la.norm(v-not_v)<eps:
        not_v = np.array([0, 1, 0])
    #make vector perpendicular to v
    n1 = np.cross(v, not_v)
    n1 /= eps+la.norm(n1)
    #make unit vector perpendicular to v and n1
    n2 = np.cross(v, n1)
    #surface ranges over t from 0 to length of axis and 0 to 2*pi
    t = np.linspace(0, mag, 2)
    theta = np.linspace(0, 2 * np.pi, num_samples)
    #use meshgrid to make 2d arrays
    t, theta = np.meshgrid(t, theta)
    #generate coordinates for surface
    X, Y, Z = [p0[i] + v[i] * t + radius * np.sin(theta) * n1[i] + radius * np.cos(theta) * n2[i] for i in [0, 1, 2]]
    #ax.plot_surface(X, Y, Z, color=color, alpha=0.25, shade=True)
    c = np.ones( (list(X.shape)+[4]) )
    c[:,:] = color #(1,1,1,0) #color
    return X, Y, Z, c 
Example #25
Source File: test_iterative.py    From Computable with MIT License
def assert_normclose(a, b, tol=1e-8):
    residual = norm(a - b)
    tolerance = tol*norm(b)
    msg = "residual (%g) not smaller than tolerance %g" % (residual, tolerance)
    assert_(residual < tolerance, msg=msg) 
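assert_normclose is a relative check: the residual norm(a - b) must stay below tol * norm(b). For example, with the helper above in scope:

import numpy as np

b = np.ones(100)
a = b + 1e-10
assert_normclose(a, b, tol=1e-8)   # passes: ||a - b|| = 1e-9 < 1e-8 * ||b|| = 1e-7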
Example #26
Source File: test_ndarray.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_norm(ctx=default_context()):
    try:
        import scipy
        assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
        from scipy.linalg import norm as sp_norm
    except (AssertionError, ImportError):
        print("Could not import scipy.linalg.norm or scipy is too old. "
              "Falling back to numpy.linalg.norm which is not numerically stable.")
        from numpy.linalg import norm as sp_norm

    def l1norm(input_data, axis=0, keepdims=False):
        return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
    def l2norm(input_data, axis=0, keepdims=False):
        return sp_norm(input_data, axis=axis, keepdims=keepdims)

    in_data_dim = random_sample([4,5,6], 1)[0]
    for force_reduce_dim1 in [True, False]:
        in_data_shape = rand_shape_nd(in_data_dim)
        if force_reduce_dim1:
            in_data_shape = in_data_shape[:3] + (1, ) + in_data_shape[4:]
        np_arr = np.random.uniform(-1, 1, in_data_shape).astype(np.float32)
        mx_arr = mx.nd.array(np_arr, ctx=ctx)
        for ord in [1, 2]:
            for keep_dims in [True, False]:
                for i in range(4):
                    npy_out = l1norm(np_arr, i, keep_dims) if ord == 1 else l2norm(
                        np_arr, i, keep_dims)
                    mx_out = mx.nd.norm(mx_arr, ord=ord, axis=i, keepdims=keep_dims)
                    assert npy_out.shape == mx_out.shape
                    mx.test_utils.assert_almost_equal(npy_out, mx_out.asnumpy())
                    if (i < 3):
                        npy_out = l1norm(np_arr, (i, i + 1), keep_dims) if ord == 1 else l2norm(
                            np_arr, (i, i + 1), keep_dims)
                        mx_out = mx.nd.norm(mx_arr, ord=ord, axis=(i, i + 1), keepdims=keep_dims)
                        assert npy_out.shape == mx_out.shape
                        mx.test_utils.assert_almost_equal(npy_out, mx_out.asnumpy()) 
Example #27
Source File: nonlin.py    From Computable with MIT License
def check(self, f, x, dx):
        self.iteration += 1
        f_norm = self.norm(f)
        x_norm = self.norm(x)
        dx_norm = self.norm(dx)

        if self.f0_norm is None:
            self.f0_norm = f_norm

        if f_norm == 0:
            return 1

        if self.iter is not None:
            # backwards compatibility with Scipy 0.6.0
            return 2 * (self.iteration > self.iter)

        # NB: condition must succeed for rtol=inf even if norm == 0
        return int((f_norm <= self.f_tol
                    and f_norm/self.f_rtol <= self.f0_norm)
                   and (dx_norm <= self.x_tol
                        and dx_norm/self.x_rtol <= x_norm))


#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------ 
Example #28
Source File: nonlin.py    From Computable with MIT License
def setup(self, x0, f0, func):
        Jacobian.setup(self, x0, f0, func)
        self.last_f = f0
        self.last_x = x0

        if hasattr(self, 'alpha') and self.alpha is None:
            # autoscale the initial Jacobian parameter
            self.alpha = 0.5*max(norm(x0), 1) / norm(f0) 
Example #29
Source File: nonlin.py    From Computable with MIT License
def update(self, x, f):
        df = f - self.last_f
        dx = x - self.last_x
        self._update(x, f, dx, df, norm(dx), norm(df))
        self.last_f = f
        self.last_x = x 
Example #30
Source File: minres.py    From Computable with MIT License
def cb(x):
        residuals.append(norm(b - A*x))

    # A = poisson((10,),format='csr')