Python scipy.optimize.least_squares() Examples

The following are 30 code examples of scipy.optimize.least_squares(), drawn from open-source projects. Each example is preceded by its original project and source file. You may also want to check out all available functions/classes of the module scipy.optimize.
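Before the project examples, here is a minimal, self-contained sketch of the basic call pattern. The model, data, and parameter names are illustrative, not taken from any of the projects below:

import numpy as np
from scipy.optimize import least_squares

# Fit y = a * exp(-b * t) to noisy samples: least_squares minimizes
# 0.5 * sum(residuals**2) over the parameter vector.
rng = np.random.default_rng(0)
t = np.linspace(0, 3, 50)
y = 2.5 * np.exp(-1.3 * t) + 0.05 * rng.standard_normal(t.size)

def residuals(params, t, y):
    a, b = params
    return a * np.exp(-b * t) - y

res = least_squares(residuals, x0=[1.0, 1.0], args=(t, y), bounds=(0, np.inf))
print(res.x)  # close to [2.5, 1.3]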
Example #1
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_robustness(self):
        for noise in [0.1, 1.0]:
            p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)

            for jac in ['2-point', '3-point', 'cs', p.jac]:
                res_lsq = least_squares(p.fun, p.p0, jac=jac,
                                        method=self.method)
                assert_allclose(res_lsq.optimality, 0, atol=1e-2)
                for loss in LOSSES:
                    if loss == 'linear':
                        continue
                    res_robust = least_squares(
                        p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
                        method=self.method)
                    assert_allclose(res_robust.optimality, 0, atol=1e-2)
                    assert_(norm(res_robust.x - p.p_opt) <
                            norm(res_lsq.x - p.p_opt)) 
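In user code, the robust-loss path exercised by this test looks roughly like the following sketch (synthetic data; f_scale sets the residual size at which the loss starts downweighting, playing the role of f_scale=noise in the test):

import numpy as np
from scipy.optimize import least_squares

rng = np.random.default_rng(0)
t = np.linspace(0, 3, 50)
y = 2.5 * np.exp(-1.3 * t) + 0.05 * rng.standard_normal(t.size)
y[::10] += 1.0  # inject outliers that a robust loss should downweight

def residuals(params):
    a, b = params
    return a * np.exp(-b * t) - y

res = least_squares(residuals, [1.0, 1.0], loss='soft_l1', f_scale=0.1)
print(res.x)  # less biased by the outliers than loss='linear'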
Example #2
Source File: solver.py    From pastas with MIT License
def __init__(self, ml, pcov=None, nfev=None, **kwargs):
        """Solver based on Scipy's least_squares method [scipy_ref]_.

        Notes
        -----
        This class is the default solver used by the pastas Model.solve
        method. All kwargs provided to Model.solve() are forwarded to this
        solver and, from there, to Scipy's least_squares solver.

        Examples
        --------

        >>> ml.solve(solver=ps.LeastSquares)

        References
        ----------
        .. [scipy_ref] https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html

        """
        BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs) 
Example #3
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_with_bounds(self):
        p = BroydenTridiagonal()
        for jac, jac_sparsity in product(
                [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
            res_1 = least_squares(
                p.fun, p.x0, jac, bounds=(p.lb, np.inf),
                method=self.method, jac_sparsity=jac_sparsity)
            res_2 = least_squares(
                p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
                method=self.method, jac_sparsity=jac_sparsity)
            res_3 = least_squares(
                p.fun, p.x0, jac, bounds=(p.lb, p.ub),
                method=self.method, jac_sparsity=jac_sparsity)
            assert_allclose(res_1.optimality, 0, atol=1e-10)
            assert_allclose(res_2.optimality, 0, atol=1e-10)
            assert_allclose(res_3.optimality, 0, atol=1e-10) 
Example #4
Source File: dynamo_fitting.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def fit_beta_lsq(t, l, bounds=(0, np.inf), fix_l0=False, beta_0=None):
    tau = t - np.min(t)
    l0 = np.mean(l[:, tau == 0])
    if beta_0 is None:
        beta_0 = 1

    if fix_l0:
        f_lsq = lambda b: (sol_u(tau, l0, 0, b) - l).flatten()
        ret = least_squares(f_lsq, beta_0, bounds=bounds)
        beta = ret.x
    else:
        f_lsq = lambda p: (sol_u(tau, p[1], 0, p[0]) - l).flatten()
        ret = least_squares(f_lsq, np.array([beta_0, l0]), bounds=bounds)
        beta = ret.x[0]
        l0 = ret.x[1]
    return beta, l0 
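The dynamo examples on this page (Examples #4, #10, #13, and #15) call a helper sol_u that is not shown here. Judging from the call sites (sol_u(tau, u0, alpha, beta)), it presumably evaluates the solution of the one-species kinetics du/dt = alpha - beta * u; a sketch under that assumption, not the dynamo source:

import numpy as np

def sol_u(t, u0, alpha, beta):
    # Assumed form: exponential relaxation from u0 toward the steady
    # state alpha / beta. With alpha = 0 (as in the degradation fits
    # above) this reduces to pure exponential decay u0 * exp(-beta * t).
    return u0 * np.exp(-beta * t) + alpha / beta * (1 - np.exp(-beta * t))

Example #19's sol_s would analogously solve the spliced equation ds/dt = beta * u - gamma * s.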
Example #5
Source File: datasets_imgnet.py    From 3DOD_thesis with MIT License
def solve(self):
        x0 = self._initial_guess()

        ls_results = []
        costs = []
        for rot_y in [-2, -1, 0, 1]:
            x0[6] = rot_y*np.pi/2

            ls_result = least_squares(self._residuals, x0, jac="3-point")
            ls_results.append(ls_result)
            costs.append(ls_result.cost)

        self.result = ls_results[np.argmin(costs)]
        params = self.result.x

        return params 
Example #6
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_args_kwargs(self):
        # Test that args and kwargs are passed correctly to the functions.
        a = 3.0
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
                                    method=self.method)
                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
                                    method=self.method)

            assert_allclose(res.x, a, rtol=1e-4)
            assert_allclose(res1.x, a, rtol=1e-4)

            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          args=(3, 4,), method=self.method)
            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          kwargs={'kaboom': 3}, method=self.method) 
Example #7
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_full_result_single_fev(self):
        # MINPACK checks the number of nfev after the iteration,
        # so it's hard to tell in advance what it is going to compute.
        if self.method == 'lm':
            return

        res = least_squares(fun_trivial, 2.0, method=self.method,
                            max_nfev=1)
        assert_equal(res.x, np.array([2]))
        assert_equal(res.cost, 40.5)
        assert_equal(res.fun, np.array([9]))
        assert_equal(res.jac, np.array([[4]]))
        assert_equal(res.grad, np.array([36]))
        assert_equal(res.optimality, 36)
        assert_equal(res.active_mask, np.array([0]))
        assert_equal(res.nfev, 1)
        assert_equal(res.njev, 1)
        assert_equal(res.status, 0)
        assert_equal(res.success, 0) 
Example #8
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_full_result(self):
        # MINPACK doesn't work very well with factor=100 on this problem,
        # hence the loose 'atol'.
        res = least_squares(fun_trivial, 2.0, method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.cost, 12.5)
        assert_allclose(res.fun, 5)
        assert_allclose(res.jac, 0, atol=1e-4)
        assert_allclose(res.grad, 0, atol=1e-2)
        assert_allclose(res.optimality, 0, atol=1e-2)
        assert_equal(res.active_mask, 0)
        if self.method == 'lm':
            assert_(res.nfev < 30)
            assert_(res.njev is None)
        else:
            assert_(res.nfev < 10)
            assert_(res.njev < 10)
        assert_(res.status > 0)
        assert_(res.success) 
Example #9
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_rosenbrock_bounds(self):
        x0_1 = np.array([-2.0, 1.0])
        x0_2 = np.array([2.0, 2.0])
        x0_3 = np.array([-2.0, 2.0])
        x0_4 = np.array([0.0, 2.0])
        x0_5 = np.array([-1.2, 1.0])
        problems = [
            (x0_1, ([-np.inf, -1.5], np.inf)),
            (x0_2, ([-np.inf, 1.5], np.inf)),
            (x0_3, ([-np.inf, 1.5], np.inf)),
            (x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
            (x0_2, ([1.0, 1.5], [3.0, 3.0])),
            (x0_5, ([-50.0, 0.0], [0.5, 100]))
        ]
        for x0, bounds in problems:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock],
                    [1.0, [1.0, 0.5], 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(fun_rosenbrock, x0, jac, bounds,
                                    x_scale=x_scale, tr_solver=tr_solver,
                                    method=self.method)
                assert_allclose(res.optimality, 0.0, atol=1e-5) 
Example #10
Source File: utils_velocity.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def fit_alpha_degradation(t, u, beta, intercept=False):
    """Estimate alpha with degradation data using linear regression. This is a lsq version of the following function that
    constrains u0 to be larger than 0

    Arguments
    ---------
    t: :class:`~numpy.ndarray`
        A vector of time points.
    u: :class:`~numpy.ndarray` or sparse `csr_matrix`
        A matrix of unspliced mRNA counts. Dimension: cells x time points.
    beta: float
        The value of beta.
    intercept: bool
        If using steady state assumption for fitting, then:
        True -- the linear regression is performed with an unfixed intercept;
        False -- the linear regression is performed with a fixed zero intercept.

    Returns
    -------
    alpha: float
        The estimated value for alpha.
    u0: float
        The initial unspliced mRNA count.
    r2: float
        Coefficient of determination (R squared).
    """
    x = u.A if issparse(u) else u

    tau = t - np.min(t)

    f_lsq = lambda p: sol_u(tau, p[0], p[1], beta) - x
    ret = least_squares(f_lsq, np.array([1, 1]), bounds=(0, np.inf))
    alpha, u0 = ret.x[0], ret.x[1]

    # calculate r-squared
    SS_tot_n = np.var(x)
    SS_res_n = np.mean((f_lsq([alpha, u0])) ** 2)
    r2 = 1 - SS_res_n / SS_tot_n

    return alpha, u0, r2 
Example #11
Source File: stack_registration_base.py    From NiftyMIC with BSD 3-Clause "New" or "Revised" License
def get_optimizer(self):
        return self._optimizer

    ##
    # Set maximum number of iterations for optimizer.
    #
    # least_squares: Corresponds to maximum number of function evaluations
    # L-BFGS-B: Corresponds to maximum number of iterations
    # \date       2016-11-10 19:24:35+0000
    #
    # \param      self                The object
    # \param      optimizer_iter_max  The nfev maximum
    #
    # \see        https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares
    # 
Example #12
Source File: raw_gnss.py    From laika with MIT License
def calc_pos_fix(measurements, x0=[0, 0, 0, 0, 0], no_weight=False, signal='C1C'):
  '''
  Calculate a GPS position fix with a WLS optimizer.

  Returns a tuple of:
  0 -> estimated state (position and clock terms)
  1 -> pseudorange errors
  '''
  n = len(measurements)
  if n < 6:
      return []

  Fx_pos = pr_residual(measurements, signal=signal, no_weight=no_weight, no_nans=True)
  opt_pos = opt.least_squares(Fx_pos, x0).x
  return opt_pos, Fx_pos(opt_pos, no_weight=True) 
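pr_residual is defined elsewhere in laika and returns a residual closure over the receiver state. A heavily simplified, hypothetical sketch of what such a closure computes under the usual pseudorange model (ignoring weighting, atmospheric terms, and the extra state entries):

import numpy as np

def pr_residual_sketch(sat_positions, pseudoranges):
    # x = [rx, ry, rz, clock_bias]: the residual per satellite is the
    # geometric range plus receiver clock bias minus the measurement.
    def residual(x):
        ranges = np.linalg.norm(sat_positions - x[:3], axis=1)
        return ranges + x[3] - pseudoranges
    return residual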
Example #13
Source File: utils_velocity.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def fit_first_order_deg_lsq(t, l, bounds=(0, np.inf), fix_l0=False, beta_0=1):
    """Estimate beta with degradation data using least squares method.

    Arguments
    ---------
    t: :class:`~numpy.ndarray`
        A vector of time points.
    l: :class:`~numpy.ndarray` or sparse `csr_matrix`
        A vector of unspliced, labeled mRNA counts for each time point.
    bounds: tuple
        The bound for beta. The default is beta > 0.
    fix_l0: bool
        True: l0 will be calculated by averaging the first column of l;
        False: l0 is a parameter that will be estimated together with beta using least squares.
    beta_0: float
        Initial guess for beta.

    Returns
    -------
    beta: float
        The estimated value for beta.
    l0: float
        The estimated value for the initial unspliced, labeled mRNA count.
    """
    l = l.A.flatten() if issparse(l) else l

    tau = t - np.min(t)
    l0 = np.nanmean(l[tau == 0])

    if fix_l0:
        f_lsq = lambda b: sol_u(tau, l0, 0, b) - l
        ret = least_squares(f_lsq, beta_0, bounds=bounds)
        beta = ret.x
    else:
        f_lsq = lambda p: sol_u(tau, p[1], 0, p[0]) - l
        ret = least_squares(f_lsq, np.array([beta_0, l0]), bounds=bounds)
        beta = ret.x[0]
        l0 = ret.x[1]
    return beta, l0 
Example #14
Source File: raw_gnss.py    From laika with MIT License
def calc_vel_fix(measurements, est_pos, v0=[0, 0, 0, 0], no_weight=False, signal='D1C'):
  '''
  Calculate a GPS velocity fix with a WLS optimizer.

  Returns a tuple of:
  0 -> estimated state (velocity and clock-drift terms)
  1 -> pseudorange-rate errors
  '''
  n = len(measurements)
  if n < 6:
      return []

  Fx_vel = prr_residual(measurements, est_pos, no_weight=no_weight, no_nans=True)
  opt_vel = opt.least_squares(Fx_vel, v0).x
  return opt_vel, Fx_vel(opt_vel, no_weight=True) 
Example #15
Source File: utils_velocity.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def fit_alpha_beta_synthesis(t, l, bounds=(0, np.inf), alpha_0=1, beta_0=1):
    """Estimate alpha and beta with synthesis data using least square method.
    It is assumed that u(0) = 0.

    Arguments
    ---------
    t: :class:`~numpy.ndarray`
        A vector of time points.
    l: :class:`~numpy.ndarray` or sparse `csr_matrix`
        A matrix of labeled mRNA counts. Dimension: cells x time points.
    bounds: tuple
        The bounds for alpha and beta. The default constrains both alpha > 0 and beta > 0.
    alpha_0: float
        Initial guess for alpha.
    beta_0: float
        Initial guess for beta.

    Returns
    -------
    alpha: float
        The estimated value for alpha.
    beta: float
        The estimated value for beta.
    """
    l = l.A if issparse(l) else l

    tau = np.hstack((0, t))
    x = np.hstack((0, l))

    f_lsq = lambda p: sol_u(tau, 0, p[0], p[1]) - x
    ret = least_squares(f_lsq, np.array([alpha_0, beta_0]), bounds=bounds)
    return ret.x[0], ret.x[1] 
Example #16
Source File: utils_velocity.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def fit_k_negative_binomial(n, r, var, phi=None, k0=None, return_k0=False):
    k0 = fit_linreg(r, n, intercept=False)[0] if k0 is None else k0
    phi = compute_dispersion(r, var) if phi is None else phi

    # Moment conditions for the scaling factor k: g1 matches the mean
    # (k * r = n); g2 matches the negative-binomial variance
    # (var = mu + phi * mu**2 with mu = n / k, multiplied through by k**2).
    g1 = lambda k: k * r - n
    g2 = lambda k: k * k * var - k * n - phi * n * n
    # Normalize each condition by its spread at k0 so neither dominates.
    g = lambda k: np.hstack((g1(k) / max(g1(k0).std(0), 1e-3), g2(k) / max(g2(k0).std(0), 1e-3)))
    ret = least_squares(g, k0, bounds=(0, np.inf))
    if return_k0:
        return ret.x[0], k0
    else:
        return ret.x[0] 
Example #17
Source File: starshot.py    From pylinac with MIT License
def _find_wobble_minimize(self):
        """Find the minimum distance wobble location and radius to all radiation lines.

        The minimum is found using a scipy minimization function.
        """
        # starting point
        sp = copy.copy(self.circle_profile.center)

        def distance(p, lines):
            """Calculate the maximum distance to any line from the given point."""
            return max(line.distance_to(Point(p[0], p[1])) for line in lines)

        res = optimize.minimize(distance, sp.as_array(), args=(self.lines,), method='Nelder-Mead', options={'ftol': 0.001})
        # res = optimize.least_squares(distance, sp.as_array(), args=(self.lines,), ftol=0.001)

        self.wobble.radius = res.fun
        self.wobble.radius_mm = res.fun / self.image.dpmm
        self.wobble.center = Point(res.x[0], res.x[1]) 
Example #18
Source File: bazin.py    From kaggle-plasticc with MIT License
def fit_scipy(time, low_passband, flux, flux_err):
    time -= time[0]
    sn = np.power(flux / flux_err, 2)
    start_point = (sn * flux).argmax()

    t0_init = time[start_point] - time[0]
    amp_init = flux[start_point]
    weights = 1 / (1 + flux_err)
    weights = weights / weights.sum()
    guess = [0, amp_init, t0_init, 40, -5, 0.5]

    result = least_squares(errfunc, guess, args=(time, low_passband, flux, weights), method='lm')
    result.t_shift = t0_init - result.x[2]

    return result 
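errfunc comes from elsewhere in that repository. The guess vector (baseline, amplitude, t0, fall and rise timescales, plus a passband term) suggests the Bazin et al. (2009) light-curve parameterization; a hypothetical sketch of a weighted residual in that spirit, offered as an assumption rather than the repository's actual errfunc:

import numpy as np

def bazin(t, a, b, t0, tau_fall, tau_rise):
    # Bazin et al. (2009) transient light-curve model: a rising and
    # falling exponential around t0 on top of a constant baseline b.
    return a * np.exp(-(t - t0) / tau_fall) / (1 + np.exp(-(t - t0) / tau_rise)) + b

def errfunc_sketch(params, time, flux, weights):
    # The real errfunc also uses low_passband and a sixth parameter;
    # this sketch keeps only the five Bazin parameters.
    b, a, t0, tau_fall, tau_rise = params[:5]
    return weights * (bazin(time, a, b, t0, tau_fall, tau_rise) - flux)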
Example #19
Source File: dynamo_bk.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def fit_gamma_splicing(t, s, beta, u0, bounds=(0, np.inf)):
    tau = t - np.min(t)
    s0 = np.mean(s[tau == 0])
    g0 = beta * u0 / s0

    f_lsq = lambda g: sol_s(tau, u0, s0, 0, beta, g) - s
    ret = least_squares(f_lsq, g0, bounds=bounds)
    return ret.x, s0 
Example #20
Source File: stack_registration_base.py    From NiftyMIC with BSD 3-Clause "New" or "Revised" License
def get_optimizer_loss(self):
        return self._optimizer_loss

    ##
    #       Sets the optimizer method for the least_squares optimizer
    # \date       2016-11-17 16:08:37+0000
    #
    # \param      self    The object
    # \param      optimizer_method  The optimizer_method in ["trf", "lm", "dogbox"]
    #
    # \see        https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares
    # 
Example #21
Source File: solver.py    From pastas with MIT License
def solve(self, noise=True, weights=None, callback=None, **kwargs):
        self.vary = self.ml.parameters.vary.values.astype(bool)
        self.initial = self.ml.parameters.initial.values.copy()
        parameters = self.ml.parameters.loc[self.vary]

        # Set the boundaries
        bounds = (np.where(parameters.pmin.isnull(), -np.inf, parameters.pmin),
                  np.where(parameters.pmax.isnull(), np.inf, parameters.pmax))

        self.result = least_squares(self.objfunction, bounds=bounds,
                                    x0=parameters.initial.values,
                                    args=(noise, weights, callback), **kwargs)

        self.pcov = DataFrame(self.get_covariances(self.result.jac,
                                                   self.result.cost),
                              index=parameters.index, columns=parameters.index)
        self.pcor = self.get_correlations(self.pcov)
        self.nfev = self.result.nfev

        # Prepare return values
        success = self.result.success
        optimal = self.initial
        optimal[self.vary] = self.result.x
        stderr = np.zeros(len(optimal)) * np.nan
        stderr[self.vary] = np.sqrt(np.diag(self.pcov))

        return success, optimal, stderr 
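get_covariances is implemented elsewhere in pastas. The textbook way to turn a least_squares result into a parameter covariance uses the Jacobian at the optimum; a sketch of that standard recipe, offered as an assumption about what get_covariances computes:

import numpy as np

def covariance_from_jacobian(jac, cost):
    # cost is 0.5 * sum(residuals**2), so the residual variance
    # estimate is s2 = 2 * cost / (n - p), and the linearized
    # covariance is s2 * inv(J^T J). pinv guards against a
    # rank-deficient Jacobian.
    n, p = jac.shape
    s2 = 2.0 * cost / (n - p)
    return s2 * np.linalg.pinv(jac.T @ jac)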
Example #22
Source File: trends.py    From astrobase with MIT License
def _epd_residual2(coeffs,
                   times, mags, errs,
                   fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd):
    '''This is the residual function to minimize using
    scipy.optimize.least_squares.

    This variant is for :py:func:`.epd_magseries_extparams`.

    '''

    f = _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd)
    residual = mags - f
    return residual 
Example #23
Source File: uncalibrated_rec.py    From py3DRec with MIT License
def projective_pose_estimation(self,feat_2D,P,points3D):
		'''
		Add views to an initial 3D structure, i.e. compute the projection matrices for all additional views
		(the first two were already estimated in previous steps).
		Args: 
			feat_2D: 2D feature coordinates for all images
			P: projection matrices
			points3D: 3D point cloud
		Returns: 
			P: projection matrices for all views
		'''
		number_of_features=feat_2D.shape[2]

		AA=np.zeros(shape=[2*number_of_features,12])

		for i in range(2,self._sequence_length): 
			for j in range(0,number_of_features):
				AA[2*j,0:4]=points3D[j]
				AA[2*j,8:12]=-feat_2D[i,0,j]*points3D[j]
				AA[2*j+1,4:8]=points3D[j]
				AA[2*j+1,8:12]=-feat_2D[i,1,j]*points3D[j]

			U, s, Vh = svd(AA)
			V=np.transpose(Vh)

			VV=V[0:12,11]
			VV=VV/VV[10]
			VV=np.delete(VV,10)

			#refine the estimate for the i-th projection matrix
			result=least_squares(self._eg_utils.refine_projection_matrix,VV, args=(points3D,feat_2D[i,:,:]))
			VV=result.x

			Pr=np.zeros(shape=[3,4])
			Pr[0,:]=VV[0:4]
			Pr[1,:]=VV[4:8]
			Pr[2,:]=np.append(np.append(VV[8:10],1),VV[10])
			P[:,:,i]=Pr

		return P 
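refine_projection_matrix lives in the project's epipolar-geometry utilities. Given how the 11-vector VV is packed and unpacked above, it presumably rebuilds the 3x4 projection matrix and returns reprojection residuals; a sketch under that assumption (names hypothetical):

import numpy as np

def refine_projection_matrix_sketch(VV, points3D, feat_2D):
    # Rebuild the projection matrix from the 11-vector; fixing
    # P[2, 2] = 1 removes the projective scale ambiguity.
    P = np.zeros((3, 4))
    P[0, :] = VV[0:4]
    P[1, :] = VV[4:8]
    P[2, :] = np.append(np.append(VV[8:10], 1), VV[10])
    proj = P @ points3D.T            # 3 x n homogeneous projections
    proj = proj[:2, :] / proj[2, :]  # normalize to image coordinates
    return (proj - feat_2D).ravel()  # stacked 2D reprojection errors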
Example #24
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_basic():
    # test that 'method' arg is really optional
    res = least_squares(fun_trivial, 2.0)
    assert_allclose(res.x, 0, atol=1e-10) 
Example #25
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_loss(self):
        res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
        assert_allclose(res.x, 0.0, atol=1e-4)

        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      method='lm', loss='huber') 
Example #26
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_LinearOperator_not_supported(self):
        p = BroydenTridiagonal(mode="operator")
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method='lm') 
Example #27
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_jac_sparsity_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      jac_sparsity=[1], method='lm') 
Example #28
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_sparse_not_supported(self):
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method='lm') 
Example #29
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_bounds_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, bounds=(-3.0, 3.0), method='lm') 
Example #30
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def test_grad(self):
        # Test that res.grad is the true gradient of the loss function at
        # the returned point. Use max_nfev=1 to avoid reaching the minimum.
        x = np.array([2.0])  # res.x will be this.

        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x * (x**2 + 5))

        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x)

        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)

        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))

        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))

        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))
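The expected values above all follow from one identity: for the objective 0.5 * sum(rho(f_i**2)) the gradient is J^T (rho'(f**2) * f), and scipy's fun_trivial is (x - a)**2 + 5 with Jacobian 2 * (x - a). Plugging each loss's rho' in at x = 2 reproduces the asserted expressions; a quick numerical check of the 'soft_l1' case:

# soft_l1: rho(z) = 2 * ((1 + z)**0.5 - 1), so rho'(z) = (1 + z)**-0.5.
x = 2.0
f, J = x**2 + 5, 2 * x
grad = f * J / (1 + f**2) ** 0.5
assert abs(grad - 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2) ** 0.5) < 1e-12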