Python scipy.optimize.brute() Examples

The following are 16 code examples of scipy.optimize.brute(), collected from open-source projects. You can go to the original project or source file by following the link above each example, or browse all available functions and classes of the scipy.optimize module.
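All of the examples share the same basic call: brute() evaluates the objective on a grid spanned by ranges (Ns points per dimension) and, unless finish=None is passed, polishes the best grid point with a local optimizer (scipy.optimize.fmin by default). A minimal sketch with an invented toy objective:

from scipy.optimize import brute

def f(x):
    # convex bowl with its minimum at (2, -1)
    return (x[0] - 2) ** 2 + (x[1] + 1) ** 2

# 20x20 grid over the two ranges, then the default fmin polish
x_min, f_min, grid, f_grid = brute(f, ranges=((-5, 5), (-5, 5)), Ns=20,
                                   full_output=True)
print(x_min, f_min)  # approximately [2. -1.] and 0.0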
Example #1
Source File: brute2fine.py    From dmipy with MIT License
def objective_function(self, parameter_vector, data):
        "The objective function for brute-force and gradient-based optimizer."
        if self.model.N_models > 1:
            nested_fractions = parameter_vector[-(self.model.N_models - 1):]
            normalized_fractions = nested_to_normalized_fractions(
                nested_fractions)
            parameter_vector_ = np.r_[
                parameter_vector[:-(self.model.N_models - 1)],
                normalized_fractions]
        else:
            parameter_vector_ = parameter_vector
        parameter_vector_ = (
            parameter_vector_ * self.model.scales_for_optimization)
        parameters = {}
        parameters.update(
            self.model.parameter_vector_to_parameters(parameter_vector_)
        )
        E_model = self.model(self.acquisition_scheme, **parameters)
        E_diff = E_model - data
        objective = np.dot(E_diff, E_diff) / len(data)
        return objective 
Example #2
Source File: mlae.py    From qiskit-aqua with Apache License 2.0
def _compute_mle_safe(self):
        """Compute the MLE via a grid-search.

        This is a stable approach if sufficient gridpoints are used.
        """
        one_hits, all_hits = self._get_hits()

        # search range
        eps = 1e-15  # to avoid invalid value in log
        search_range = [0 + eps, np.pi / 2 - eps]

        def loglikelihood(theta):
            # logL contains the first `it` terms of the full loglikelihood
            logL = 0
            for i, k in enumerate(self._evaluation_schedule):
                logL += np.log(np.sin((2 * k + 1) * theta) ** 2) * one_hits[i]
                logL += np.log(np.cos((2 * k + 1) * theta) ** 2) * (all_hits[i] - one_hits[i])
            return -logL

        est_theta = brute(loglikelihood, [search_range], Ns=self._likelihood_evals)[0]
        return est_theta 
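The same grid search works as a standalone script; a self-contained sketch with made-up hit counts (the evaluation schedule, one_hits and all_hits below are illustrative placeholders, not Qiskit data):

import numpy as np
from scipy.optimize import brute

evaluation_schedule = [0, 1, 2, 4]               # made-up powers
one_hits = np.array([55.0, 62.0, 48.0, 51.0])    # made-up counts
all_hits = np.array([100.0, 100.0, 100.0, 100.0])

eps = 1e-15  # to avoid invalid value in log
search_range = [0 + eps, np.pi / 2 - eps]

def neg_loglikelihood(theta):
    logL = 0
    for i, k in enumerate(evaluation_schedule):
        logL += np.log(np.sin((2 * k + 1) * theta) ** 2) * one_hits[i]
        logL += np.log(np.cos((2 * k + 1) * theta) ** 2) * (all_hits[i] - one_hits[i])
    return -logL

# a single 1-D range; the default fmin polish returns an array, hence the [0]
est_theta = brute(neg_loglikelihood, [search_range], Ns=1000)[0]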
Example #3
Source File: fdesign.py    From empymod with Apache License 2.0
def _ls2ar(inp, strinp):
    r"""Convert float or linspace-input to arange/slice-input for brute."""

    # Check if input is 1 or 3 elements (float, list, tuple, array)
    if np.size(inp) == 1:
        start = np.squeeze(inp)
        stop = start+1
        num = 1
    elif np.size(inp) == 3:
        start = inp[0]
        stop = inp[1]
        num = inp[2]
    else:
        raise ValueError(f"<{strinp}> must be a float or a tuple of 3 elements"
                         f" (start, stop, num); <{strinp} provided: {inp}")

    # Re-arrange it to be compatible with np.arange/slice for brute
    if num < 2 or start == stop:
        stop = start
        step = 1
    else:
        step = (stop-start)/(num-1)
    return (start, stop+step/2, step) 
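A worked call (inputs invented) makes the conversion concrete; the half step added to the stop value guarantees that np.arange keeps the endpoint:

_ls2ar([0, 10, 5], 'example')  # -> (0, 11.25, 2.5)
np.arange(0, 11.25, 2.5)       # -> array([ 0. ,  2.5,  5. ,  7.5, 10. ])
_ls2ar(3.5, 'example')         # -> (3.5, 4.0, 1), a single-point range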
Example #4
Source File: ABuUmpMainBase.py    From abu with GNU General Public License v3.0
def min_func_improved(self, l_pmr):
        """
        包装min_func具体现实函数,在brust_min函数中使用:
        sco.brute(self.min_func_improved, bnds, full_output=False, finish=None)
        计算最优参数组合,因为在self.min_func中计算的是提高improved值,要想得到最大
        提高参数组合,即self.min_func返回值最大的组合,使用最优sco.brute的目标是
        最小值,所以使用 -self.min_func(l_pmr)[0],找到最小值,结果的参数组合即为
        最大提高improved值的参数组合
        :param l_pmr: 可迭代序列,eg:(-0.45, -0.11, 0.77), 由三个float值组成的序列
                      l_pmr[0]: 切取self.cprs使用的分类簇交易获利和值,即lps阀值:self.cprs['lps'] <= l_pmr[0]
                      l_pmr[1]: 切取self.cprs使用的分类簇交易获利平均值,即lms阀值:self.cprs['lms'] <= l_pmr[1]
                      l_pmr[2]: 切取self.cprs使用的分类簇交易失败比例,即lrs阀值:self.cprs['lrs'] >= l_pmr[2]
        :return: -self.min_func(l_pmr)[0],即[improved, effect_num][0], 即improved值,float
        """
        self.brust_progress.show()
        return -self.min_func(l_pmr)[0] 
Example #5
Source File: c6.py    From abu with GNU General Public License v3.0
def sample_624():
    """
    6.2.4 Global optimization: how to live the happiest life
    :return:
    """
    import scipy.optimize as sco

    def minimize_happiness_global(weights):
        if np.sum(weights) != 1:
            # filter out weight combinations whose sum is not 1
            return 0
        # optimizers always search for a minimum, so to find the weights with the
        # highest happiness index we return -my_life; the smallest result then
        # corresponds to the weight allocation with the largest happiness index
        return -my_life(weights)[1]

    opt_global = sco.brute(minimize_happiness_global,
                           ((0, 1.1, 0.1), (0, 1.1, 0.1), (0, 1.1, 0.1)))
    print(opt_global)

    living_day, happiness, wealth, fame = my_life(opt_global)
    print('lived {} years, happiness {}, accumulated wealth {}, fame and power {}'
          .format(living_day, happiness, wealth, fame))


# noinspection PyTypeChecker 
Example #6
Source File: fit.py    From tenpy with GNU General Public License v3.0
def alg_decay_fit(x, y, npts=5, power_range=(0.01, 4.), power_mesh=[60, 10]):
    """Fit y to the form ``a*x**(-b) + c``.

    Returns a triplet [a, b, c].

    npts specifies the maximum number of points to fit.  If npts < len(x), then alg_decay_fit() will only fit to the last npts points.
    power_range is a tuple that restricts the possible range for b.
    power_mesh is a list of numbers which specifies how finely to search for the optimal b.
    E.g., if power_mesh = [60, 10], the power_range is first divided into 60 intervals, and those intervals are then each divided by 10.
    """
    x = np.array(x)
    y = np.array(y)
    assert x.ndim == 1 and y.ndim == 1
    assert len(x) == len(y)
    if npts < 3:
        raise ValueError
    if len(x) > npts:
        x = x[-npts:]
        y = y[-npts:]
    global_log_power_range = (np.log(power_range[0]), np.log(power_range[1]))
    log_power_range = global_log_power_range
    for i in range(len(power_mesh)):
        # number of points inclusive
        brute_Ns = (power_mesh[i] if i == 0 else 2 * power_mesh[i]) + 1
        log_power_step = (log_power_range[1] - log_power_range[0]) / float(brute_Ns - 1)
        brute_fit = optimize.brute(alg_decay_fit_res, [log_power_range], (x, y),
                                   Ns=brute_Ns,
                                   finish=None)
        if brute_fit <= global_log_power_range[0] + 1e-6:
            return [0., 0., y[-1]]  # brute fit stuck at the lower boundary; fall back to a constant
        log_power_range = (brute_fit - log_power_step, brute_fit + log_power_step)
    l_fit = linear_fit(x**(-np.exp(brute_fit)), y)
    return [l_fit[0], np.exp(brute_fit), l_fit[1]] 
Example #7
Source File: three_tissue_response.py    From dmipy with MIT License
def optimal_threshold(data):
    """Optimal image threshold based on pearson correlation [1]_. The idea is
    that an 'optimal' mask of some arbitrary image data should be found by
    thresholding at a value that maximizes the pearson correlation between the
    original image and the mask, i.e:

    T* = argmax_T (\rho(data, data>T))
       = argmin_T -(\rho(data, data>T))

    This function estimates T* based on the second equation on arbitrary input
    arrays.

    Parameters
    ----------
    data: 1D array,
        scalar array to estimate an 'optimal' threshold on.

    Returns
    -------
    optimal_threshold: float,
        optimal threshold value that maximizes correlation between the original
        and masked data.

    References
    ----------
    .. [1] Ridgway, Gerard R., et al. "Issues with threshold masking in
        voxel-based morphometry of atrophied brains." Neuroimage 44.1 (2009):
        99-111.
    """
    min_bound = data.min()
    max_bound = data.max()
    eps = 1e-10
    optimal_threshold = brute(
        func=_cost_function,
        Ns=100,
        args=(data,),
        ranges=([min_bound + eps, max_bound - eps],))[0]
    return optimal_threshold 
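The _cost_function used above is not part of this excerpt; a plausible sketch, assuming it simply returns the negative Pearson correlation between the data and its thresholded mask (the name and signature are inferred from the call, not confirmed by the dmipy source):

import numpy as np

def _cost_function(threshold, data):
    # brute passes a length-1 array for 1-D problems; broadcasting handles it.
    # Negating the correlation turns brute's minimization into maximization.
    mask = data > threshold
    return -np.corrcoef(data, mask)[0, 1]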
Example #8
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License
def test_brute(self):
        # test fmin
        resbrute = optimize.brute(self.func, self.rranges, args=self.params,
                                  full_output=True, finish=optimize.fmin)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)
        assert_allclose(resbrute[1], self.func(self.solution, *self.params),
                        atol=1e-3)

        # test minimize
        resbrute = optimize.brute(self.func, self.rranges, args=self.params,
                                  full_output=True,
                                  finish=optimize.minimize)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)
        assert_allclose(resbrute[1], self.func(self.solution, *self.params),
                        atol=1e-3) 
Example #9
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License
def test_1D(self):
        # test that for a 1D problem the test function is passed an array,
        # not a scalar.
        def f(x):
            assert_(len(x.shape) == 1)
            assert_(x.shape[0] == 1)
            return x ** 2

        optimize.brute(f, [(-1, 1)], Ns=3, finish=None) 
Example #10
Source File: elastic.py    From elate with MIT License
def minimize(func, dim):
  if dim == 2:
    r = ((0, np.pi), (0, np.pi))
    n = 25
  elif dim == 3:
    r = ((0, np.pi), (0, np.pi), (0, np.pi))
    n = 10

  # TODO -- try basin hopping or annealing
  return optimize.brute(func, r, Ns=n, full_output=True, finish=optimize.fmin)[0:2]
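For the TODO above, a hedged sketch of what basin hopping could look like here (the starting point, iteration count and local method are arbitrary choices, not from the elate source):

import numpy as np
from scipy import optimize

def minimize_bh(func, dim):
    # random perturbations followed by local Nelder-Mead minimization
    x0 = np.full(dim, np.pi / 2)  # arbitrary start in the middle of (0, pi)
    res = optimize.basinhopping(func, x0, niter=100,
                                minimizer_kwargs={"method": "Nelder-Mead"})
    return res.x, res.fun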
Example #11
Source File: fdesign.py    From empymod with Apache License 2.0
def _print_count(log):
    r"""Print run-count information."""
    log['cnt2'] += 1                   # Current number
    cp = log['cnt2']/log['totnr']*100  # Percentage

    if log['cnt2'] == 0:  # Not sure about this; brute seems to call the
        pass              # function with the first arguments twice...

    elif log['cnt2'] > log['totnr']:  # fmin-status
        print(f"   fmin  fct calls : {log['cnt2']-log['totnr']}", end='\r')

    elif int(cp) > log['cnt1'] or cp < 1 or log['cnt2'] == log['totnr']:
        # Get seconds since start
        sec = int(default_timer() - log['time'])

        # Get estimate of remaining time, as string
        tleft = str(timedelta(seconds=int(100*sec/cp - sec)))

        # Print progress
        pstr = f"   brute fct calls : {log['cnt2']}/{log['totnr']}"
        if log['totnr'] > 100:
            pstr += f" ({int(cp)} %); est: {tleft}        "
        print(pstr, end='\r')

        if log['cnt2'] == log['totnr']:
            # Empty previous line
            print(" "*len(pstr), end='\r')

            # Print final brute-message
            print(f"   brute fct calls : {log['totnr']}")

        # Update percentage cnt1
        log['cnt1'] = cp

    return log 
Example #12
Source File: Occupancy.py    From NucleoATAC with MIT License
def modelNFR(self, boundaries = (35,115)):
        """Model NFR distribution with gamma distribution"""
        b = np.where(self.fragmentsizes.get(self.lower,boundaries[1]) == max(self.fragmentsizes.get(self.lower,boundaries[1])))[0][0] + self.lower
        boundaries = (min(boundaries[0],b), boundaries[1])
        x = np.arange(boundaries[0],boundaries[1])        
        y = self.fragmentsizes.get(boundaries[0],boundaries[1]) 
        def gamma_fit(X,o,p):
            k = p[0]
            theta = p[1]
            a = p[2]
            x_mod = X-o
            res = np.zeros(len(x_mod))
            if k>=1:
                nz = x_mod >= 0
            else:
                nz = x_mod > 0
            res[nz] = a * x_mod[nz]**(k-1) * np.exp(-x_mod[nz]/theta) / (theta **k * gamma(k))
            return res 
        res_score = np.ones(boundaries[0]+1)*float('inf')
        res_param = [0 for i in range(boundaries[0]+1)]
        pranges = ((0.01,10),(0.01,150),(0.01,1))
        for i in range(15,boundaries[0]+1):
            f = lambda p: np.sum((gamma_fit(x,i,p) - y)**2)
            tmpres = optimize.brute(f, pranges,  full_output=True,
                              finish=optimize.fmin)
            res_score[i] = tmpres[1]
            res_param[i] = tmpres[0]
        whichres = np.argmin(res_score)
        res = res_param[whichres]
        self.nfr_fit0 = FragmentSizes(self.lower,self.upper, vals = gamma_fit(np.arange(self.lower,self.upper),whichres,res_param[whichres]))
        nfr = np.concatenate((self.fragmentsizes.get(self.lower,boundaries[1]), self.nfr_fit0.get(boundaries[1],self.upper))) 
        nfr[nfr==0] = min(nfr[nfr!=0])*0.01
        self.nfr_fit = FragmentSizes(self.lower,self.upper, vals = nfr)
        nuc = np.concatenate((np.zeros(boundaries[1]-self.lower),
                            self.fragmentsizes.get(boundaries[1],self.upper) -
                            self.nfr_fit.get(boundaries[1],self.upper)))
        nuc[nuc<=0]=min(min(nfr)*0.1,min(nuc[nuc>0])*0.001)
        self.nuc_fit = FragmentSizes(self.lower, self.upper, vals = nuc) 
Example #13
Source File: c6.py    From abu with GNU General Public License v3.0
def sample_625():
    """
    6.2.5 Non-convex optimization: how to live the happiest life
    :return:
    """
    import scipy.optimize as sco

    method = 'SLSQP'
    # provide a function that constrains the parameters: np.sum(weights) = 1 -> np.sum(weights) - 1 = 0
    constraints = ({'type': 'eq', 'fun': lambda p_x: np.sum(p_x) - 1})
    # bounds chosen for each parameter
    bounds = tuple((0, 0.9) for _ in range(3))
    print('bounds:', bounds)

    def minimize_happiness_local(weights):
        # print(weights)
        return -my_life(weights)[1]

    # initial guess for the optimal parameters; here the global optimum computed by brute is used as the guess
    guess = [0.5, 0.2, 0.3]
    opt_local = sco.minimize(minimize_happiness_local, guess,
                             method=method, bounds=bounds,
                             constraints=constraints)
    print('opt_local:', opt_local)


# noinspection PyShadowingNames 
Example #14
Source File: mechanical.py    From CO2MPAS-TA with European Union Public License 1.1
def _correct_gear_shifts(
        times, ratios, gears, velocity_speed_ratios, shift_window=4.0):
    import scipy.optimize as sci_opt
    from . import calculate_gear_shifts
    shifts = calculate_gear_shifts(gears)
    vsr = np.vectorize(lambda v: velocity_speed_ratios.get(v, 0))
    s = len(gears)

    def _err(v, r):
        v = int(v) or 1
        return np.float32(co2_utl.mae(ratios[slice(v - 1, v + 1, 1)], r))

    k = 0
    new_gears = np.zeros_like(gears)
    dt = shift_window / 2
    for i in np.arange(s)[shifts]:
        g = gears[slice(i - 1, i + 1, 1)]
        j = int(i)
        if g[0] != 0 and g[-1] != 0:
            t = times[i]
            n = max(i - (((t - dt) <= times) & (times <= t)).sum(), min(i, k))
            m = min(i + ((t <= times) & (times <= (t + dt))).sum(), s)
            if n + 1 > m:
                # noinspection PyTypeChecker
                j = int(sci_opt.brute(
                    _err, (slice(n, m, 1),), args=(vsr(g),), finish=None)
                )
        x = slice(j - 1, j + 1, 1)
        new_gears[x] = g
        new_gears[k:x.start] = g[0]
        k = x.stop

    new_gears[k:] = new_gears[k - 1]

    return new_gears 
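Note the search range above is a slice object: brute() accepts slices directly, which turns the grid search into a search over integer candidate indices, and finish=None keeps the answer on that grid. A stripped-down illustration with a toy objective:

from scipy import optimize

def err(v):
    # toy objective over integer candidates, minimized at v = 7
    return (int(v[0]) - 7) ** 2

best = optimize.brute(err, (slice(0, 20, 1),), finish=None)
print(int(best))  # 7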
Example #15
Source File: simulations.py    From MatchingMarkets.py with BSD 3-Clause "New" or "Revised" License
def brute_search(self, weights, metaParamNames=list(),
                     objective=lambda x: x.loss,
                     negative_objective=False,
                     stochastic_objective=True,
                     stochastic_samples=25,
                     stochastic_precision=0.01,
                     ):
        """
        Uses BFS to find optimal simulation hyperparameters
        Note:
            You want runs passed in the solver to have __no randomness__
            Solving a stochastic simulation will cause problems with solver

        Arguments
        ---------
        Weights: list<floats>
                 weights to be optimized on
        metaParamName: list<string>
                 list of names of arguments to be optimized on
                 the index respects the weights argument above
                 the strings should be the same as in a simulation run input
                 Weights and metaparamNames together would form the
                 algoParams dict in a normal simulation
        objective: function(Market) -> float
            objective function
            by default number of matches
            can be changed to loss, for example, by
                "objective=lambda x: x.loss"
        returns
        -------
        np.array of weights where function is minimized
                if negative_objective=True, where maximized
        """
        def this_run(w):
            # note - sign here
            # TODO fix negative sign
            sign = 1 if negative_objective else -1
            if stochastic_objective:
                result = 0
                # If objective stochastic, make montecarlo draws & average
                for i in range(stochastic_samples):
                    result += sign * \
                            self.single_run(w,
                                            metaParamNames=metaParamNames,
                                            objective=objective)
                # Average montecarlo draws
                result = result/stochastic_samples
                # Tune precision for convergence
                result = int(result/stochastic_precision)*stochastic_precision
                return result
            else:
                return sign*self.single_run(w,
                                            metaParamNames=metaParamNames,
                                            objective=objective)
        res = optimize.brute(this_run, weights, full_output=True, disp=True,
                             finish=optimize.fmin
                             )
        return res[0] 
Example #16
Source File: functional.py    From vnpy_crypto with MIT License
def _min_max_band(args):
    """Min and max values at `idx`.

    Global optimization to find the extrema per component.

    Parameters
    ----------
    args: sequence
        A pair of an index and the other arguments as a tuple:
            idx : int
                Index value of the components to compute.
        The tuple contains:
            band : list of float
                PDF values `[min_pdf, max_pdf]` to be within.
            pca : statsmodels Principal Component Analysis instance
                The PCA object to use.
            bounds : sequence
                ``(min, max)`` pair for each components
            ks_gaussian : KDEMultivariate instance

    Returns
    -------
    band : tuple of float
        ``(max, min)`` curve values at `idx`

    """
    idx, (band, pca, bounds, ks_gaussian) = args
    if have_de_optim:
        max_ = differential_evolution(_curve_constrained, bounds=bounds,
                                      args=(idx, -1, band, pca, ks_gaussian),
                                      maxiter=7).x
        min_ = differential_evolution(_curve_constrained, bounds=bounds,
                                      args=(idx, 1, band, pca, ks_gaussian),
                                      maxiter=7).x
    else:
        max_ = brute(_curve_constrained, ranges=bounds, finish=fmin,
                     args=(idx, -1, band, pca, ks_gaussian))

        min_ = brute(_curve_constrained, ranges=bounds, finish=fmin,
                     args=(idx, 1, band, pca, ks_gaussian))

    band = (_inverse_transform(pca, max_)[0][idx],
            _inverse_transform(pca, min_)[0][idx])
    return band