Python scipy.optimize.bisect() Examples

The following are 17 code examples of scipy.optimize.bisect(), drawn from open-source projects. The project, source file, and license are noted above each example. You may also want to check out the other functions and classes available in the scipy.optimize module.
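Before the project examples, here is a minimal, self-contained sketch of how scipy.optimize.bisect is called: you pass a continuous function together with an interval [a, b] on which the function changes sign, and it returns a root inside that interval.

from scipy import optimize

def f(x):
    # f(0) = -2 and f(2) = 2, so the interval [0, 2] brackets the root sqrt(2).
    return x * x - 2

root = optimize.bisect(f, 0, 2, xtol=1e-10)
print(root)  # approximately 1.4142135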
Example #1
Source File: pnutils.py    From pycbc with GNU General Public License v3.0
def _dtdv_cutoff_velocity(m1, m2, chi1, chi2):
    _, dtdv2, dtdv3, dtdv4, dtdv5, dtdv6, dtdv6log, dtdv7 = _dtdv_coeffs(m1, m2, chi1, chi2)

    def dtdv_func(v):
        x = dtdv7
        x = v * x + dtdv6 + dtdv6log * 3. * numpy.log(v)
        x = v * x + dtdv5
        x = v * x + dtdv4
        x = v * x + dtdv3
        x = v * x + dtdv2
        return v * v * x + 1.

    if dtdv_func(1.0) < 0.:
        return bisect(dtdv_func, 0.05, 1.0)
    else:
        return 1.0 
Example #2
Source File: allocation.py    From pyrb with MIT License
def solve(self):
        try:
            lambda_star = optimize.bisect(
                self._sum_to_one_constraint,
                0,
                BISECTION_UPPER_BOUND,
                maxiter=MAXITER_BISECTION,
            )
            self.lambda_star = lambda_star
            self._x = self._lambda_solve(lambda_star)
        except Exception as e:
            if e.args[0] == "f(a) and f(b) must have different signs":
                logging.exception(
                    "Bisection failed: "
                    + str(e)
                    + ". If you are using expected returns the parameter 'c' need to be correctly scaled (see remark 1 in the paper). Otherwise please check the constraints or increase the bisection upper bound."
                )
            else:
                logging.exception("Problem not solved: " + str(e)) 
Example #3
Source File: m_fermi_energy.py    From pyscf with Apache License 2.0
def fermi_energy(ee, nelec, telec):
  """ Determine Fermi energy """
  from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations
  
  d = len(ee.shape)
  if d==1 : # assuming state2energy
    fe = bisect(func1, ee.min(), ee.max(), args=(ee, nelec, telec, 2.0))
  elif d==2: # assuming spin_state2energy
    nspin = ee.shape[-2]
    assert nspin==1 or nspin==2
    fe = bisect(func1, ee.min(), ee.max(), args=(ee, nelec, telec, (3.0-nspin)))
  elif d==3: # assuming kpoint_spin_state2energy
    nspin = ee.shape[-2]
    nkpts = ee.shape[0]
    assert nspin==1 or nspin==2
    fe = bisect(func1, ee.min(), ee.max(), args=(ee, nelec, telec, (3.0-nspin)/nkpts))
  else: # how to interpret this ?
    raise RuntimeError('!impl?')

  return fe 
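func1 is defined elsewhere in the pyscf module and is not shown here. Judging from how it is called, it plausibly returns the difference between the electron count implied by a trial Fermi energy and the target nelec, with the last argument carrying the spin/k-point degeneracy weight. A hypothetical sketch of such a function (the Fermi-Dirac form, units, and the name are assumptions, not pyscf's actual code):

import numpy as np

def func1(fermi_energy, ee, nelec, telec, norm):
    """Hypothetical occupation-count residual: positive once the trial Fermi
    energy yields more than nelec electrons, negative below it."""
    kT = telec  # assumed: electronic temperature in the same energy units as ee
    occ = norm / (1.0 + np.exp((ee - fermi_energy) / kT))
    return occ.sum() - nelec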
Example #4
Source File: dispatch_budget.py    From EnergyPATHWAYS with MIT License
def solve_for_load_cutoff(load, energy_budget, pmin=0, pmax=None):
    lowest = min(load) - pmax - 1
    highest = max(load) + pmax + 1
    if lowest == highest:
        return lowest
    else:
        load_cutoff = optimize.bisect(residual_energy, lowest, highest, args=(load, energy_budget, pmin, pmax))
        return load_cutoff 
Example #5
Source File: pnutils.py    From pycbc with GNU General Public License v3.0
def meco2(m1, m2, s1z=0, s2z=0, phase_order=-1, spin_order=-1):
    ecof = energy_coefficients(m1, m2, s1z, s2z, phase_order, spin_order)

    def test(v):
        de = 0
        for i in numpy.arange(0, len(ecof), 1):
            de += v**(i+1.0)* ecof[i] * (i + 2)

        return de

    return bisect(test, 0.001, 1.0) 
Example #6
Source File: pnutils.py    From pycbc with GNU General Public License v3.0
def meco_velocity(m1, m2, chi1, chi2):
    """
    Returns the velocity of the minimum energy cutoff for 3.5pN (2.5pN spin)

    Parameters
    ----------
    m1 : float
        First component mass in solar masses
    m2 : float
        Second component mass in solar masses
    chi1 : float
        First component dimensionless spin S_1/m_1^2 projected onto L
    chi2 : float
        Second component dimensionless spin S_2/m_2^2 projected onto L

    Returns
    -------
    v : float
        Velocity (dimensionless)
    """
    _, energy2, energy3, energy4, energy5, energy6 = \
        _energy_coeffs(m1, m2, chi1, chi2)
    def eprime(v):
        return 2. + v * v * (4.*energy2 + v * (5.*energy3 \
                + v * (6.*energy4
                + v * (7.*energy5 + 8.*energy6 * v))))
    return bisect(eprime, 0.05, 1.0) 
Example #7
Source File: autotune.py    From dm_control with Apache License 2.0
def tune_stud_radius(desired_force,
                     min_radius=0.0045,
                     max_radius=0.005,
                     desired_places=6,
                     side='closest',
                     **duplo_kwargs):
  """Find a stud size that gives the desired separation force."""

  @_KeepBracketingSolutions
  def func(radius):
    radius = round(radius, desired_places)  # Round radius for aesthetics (!)
    return (get_separation_force_for_radius(radius=radius, **duplo_kwargs)
            - desired_force)

  # Ensure that the min and max radii bracket the solution.
  while func(min_radius) > 0:
    min_radius = max(1e-3, min_radius - (max_radius - min_radius))
  while func(max_radius) < 0:
    max_radius += (max_radius - min_radius)

  tolerance = 10**-(desired_places)

  # Use bisection to refine the bounds on the optimal radius. Note: this assumes
  # that separation force is monotonic w.r.t. stud radius, but this isn't
  # exactly true in all cases.
  optimize.bisect(func, a=min_radius, b=max_radius, xtol=tolerance, disp=True)

  if side == 'below':
    solution = func.below
  elif side == 'above':
    solution = func.above
  else:
    solution = func.closest

  radius = round(solution.x, desired_places)
  force = get_separation_force_for_radius(radius, **duplo_kwargs)

  return radius, force 
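_KeepBracketingSolutions and get_separation_force_for_radius are defined elsewhere in the dm_control source and are not shown. Judging from how func.below, func.above and func.closest are read after the bisection, the decorator most likely records the tightest evaluations on either side of zero; a hypothetical sketch (names and structure are assumptions):

import collections

_Solution = collections.namedtuple('_Solution', ['x', 'value'])

class _KeepBracketingSolutions:
  """Hypothetical: wrap a scalar function and remember the evaluations that
  bracket its root most tightly."""

  def __init__(self, func):
    self._func = func
    self.below = None  # tightest evaluation with func(x) <= 0
    self.above = None  # tightest evaluation with func(x) > 0

  def __call__(self, x):
    value = self._func(x)
    if value <= 0 and (self.below is None or value > self.below.value):
      self.below = _Solution(x, value)
    elif value > 0 and (self.above is None or value < self.above.value):
      self.above = _Solution(x, value)
    return value

  @property
  def closest(self):
    candidates = [s for s in (self.below, self.above) if s is not None]
    return min(candidates, key=lambda s: abs(s.value))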
Example #8
Source File: pricingmodels.py    From Quantsbin with MIT License
def imply_volatility(self, premium):
        def objective_func(vol_guess):
            self.volatility = vol_guess
            val = self.valuation()
            return val - premium

        try:
            return optimize.bisect(objective_func, a=0.001, b=0.999, xtol=0.00005)
        except ValueError:
            raise ValueError("Unable to converge to implied volatility") 
Example #9
Source File: pricingmodels.py    From Quantsbin with MIT License
def imply_volatility(self, premium):
        def objective_func(vol_guess):
            self.volatility = vol_guess
            val = self.valuation()
            return val - premium

        try:
            return optimize.bisect(objective_func, a=0.001, b=0.999, xtol=0.00005)
        except ValueError:
            raise ValueError("Unable to converge to implied volatility") 
Example #10
Source File: max_value_entropy_search.py    From emukit with Apache License 2.0
def update_parameters(self):
        # apply gumbel sampling to obtain samples from y*
        # we approximate Pr(y*^hat<y) by Gumbel(alpha,beta)
        # generate grid
        N = self.model.model.X.shape[0]

        random_design = RandomDesign(self.space)
        grid = random_design.get_samples(self.grid_size)
        fmean, fvar = self.model.model.predict(np.vstack([self.model.model.X, grid]), include_likelihood=False)
        fsd = np.sqrt(fvar)
        idx = np.argmin(fmean[:N])

        # scaling so that gumbel scale is proportional to IQ range of cdf Pr(y*<z)
        # find quantiles Pr(y*<y1)=r1 and Pr(y*<y2)=r2
        right = fmean[idx].flatten()
        left = right
        probf = lambda x: np.exp(np.sum(norm.logcdf(-(x - fmean) / fsd), axis=0))
        i = 0
        while probf(left) < 0.75:
            left = 2. ** i * np.min(fmean - 5. * fsd) + (1. - 2. ** i) * right
            i += 1
        i = 0
        while probf(right) > 0.25:
            right = -2. ** i * np.min(fmean - 5. * fsd) + (1. + 2. ** i) * fmean[idx].flatten()
            i += 1

        # Binary search for 3 percentiles
        q1, med, q2 = map(lambda val: bisect(lambda x: probf(x) - val, left, right, maxiter=10000, xtol=0.00001),
                            [0.25, 0.5, 0.75])

        # solve for gumbel params
        beta = (q1 - q2) / (np.log(np.log(4. / 3.)) - np.log(np.log(4.)))
        alpha = med + beta * np.log(np.log(2.))

        # sample K length vector from unif([0,1])
        # return K Y* samples
        self.mins = -np.log(-np.log(np.random.rand(self.num_samples))) * beta + alpha 
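The three bisect calls here invert the monotone function probf at fixed probability levels. Isolated from the Gumbel-fitting details, the pattern looks like this (the toy survival function below is illustrative only):

import numpy as np
from scipy.optimize import bisect
from scipy.stats import norm

# A toy, strictly decreasing survival function standing in for probf above.
survival = lambda x: norm.sf(x)

# Find the points at which the survival function equals 0.25, 0.5 and 0.75,
# i.e. the 75th, 50th and 25th percentiles of the underlying distribution.
q1, med, q2 = map(lambda val: bisect(lambda x: survival(x) - val, -10.0, 10.0, xtol=1e-6),
                  [0.25, 0.5, 0.75])
print(q1, med, q2)  # approximately 0.674, 0.0, -0.674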
Example #11
Source File: itu530.py    From ITU-Rpy with MIT License
def inverse_rain_attenuation(
            self, lat, lon, d, f, el, Ap, tau=45, R001=None):
        """ Implementation of 'inverse_rain_attenuation' method for
        recommendation ITU-P R.530-16. See documentation for function
        'ITUR530.inverse_rain_attenuation'
        """
        # Step 1: Obtain the rain rate R0.01 exceeded for 0.01% of the time
        # (with an integration time of 1 min).
        if R001 is None:
            R001 = rainfall_rate(lat, lon, 0.01).value

        # Step 2: Compute the specific attenuation, gammar (dB/km) for the
        # frequency, polarization and rain rate of interest using
        # Recommendation ITU-R P.838
        gammar = rain_specific_attenuation(R001, f, el, tau).value
        _, alpha = rain_specific_attenuation_coefficients(f, el, tau).value

        # Step 3: Compute the effective path length, deff, of the link by
        # multiplying the actual path length d by a distance factor r
        r = 1 / (0.477 * d ** 0.633 * R001 ** (0.073 * alpha) *
                 f**(0.123) - 10.579 * (1 - np.exp(-0.024 * d)))
        deff = np.minimum(r, 2.5) * d

        # Step 4: An estimate of the path attenuation exceeded for 0.01% of
        # the time is given by:
        A001 = gammar * deff

        # Step 5: The attenuation exceeded for other percentages of time p in
        # the range 0.001% to 1% may be deduced from the following power law
        C0 = np.where(f >= 10, 0.12 * 0.4 * (np.log10(f / 10)**0.8), 0.12)
        C1 = (0.07**C0) * (0.12**(1 - C0))
        C2 = 0.855 * C0 + 0.546 * (1 - C0)
        C3 = 0.139 * C0 + 0.043 * (1 - C0)

        def func_bisect(p):
            return A001 * C1 * p ** (- (C2 + C3 * np.log10(p))) - Ap

        return bisect(func_bisect, 0, 100) 
Example #12
Source File: mes.py    From GPflowOpt with Apache License 2.0
def _setup(self):
        super(MinValueEntropySearch, self)._setup()

        # Apply Gumbel sampling
        m = self.models[0]
        valid = self.feasible_data_index()

        # Work with feasible data
        X = self.data[0][valid, :]
        N = np.shape(X)[0]
        Xrand = RandomDesign(self.gridsize, self._domain).generate()
        fmean, fvar = m.predict_f(np.vstack((X, Xrand)))
        idx = np.argmin(fmean[:N])
        right = fmean[idx].flatten()# + 2*np.sqrt(fvar[idx]).flatten()
        left = right
        probf = lambda x: np.exp(np.sum(norm.logcdf(-(x - fmean) / np.sqrt(fvar)), axis=0))

        i = 0
        while probf(left) < 0.75:
            left = 2. ** i * np.min(fmean - 5. * np.sqrt(fvar)) + (1. - 2. ** i) * right
            i += 1

        # Binary search for 3 percentiles
        q1, med, q2 = map(lambda val: bisect(lambda x: probf(x) - val, left, right, maxiter=10000, xtol=0.01),
                          [0.25, 0.5, 0.75])
        beta = (q1 - q2) / (np.log(np.log(4. / 3.)) - np.log(np.log(4.)))
        alpha = med + beta * np.log(np.log(2.))

        # obtain samples from y*
        mins = -np.log(-np.log(np.random.rand(self.num_samples).astype(np_float_type))) * beta + alpha
        self.samples.set_data(mins) 
Example #13
Source File: rfsm.py    From pysheds with GNU General Public License v3.0
def volume_to_level(self, node, waterlevel):
        if node.current_vol > 0:
            maxelev = node.parent.elev
            if node.elev:
                minelev = node.elev
            else:
                # TODO: This bound could be a lot better
                minelev = np.nanmin(self.dem)
            target_vol = node.current_vol
            elev = optimize.bisect(self.compute_vol, minelev, maxelev,
                                   args=(node, target_vol))
            if node.name:
                mask = self.ws[node.level] == node.name
            else:
                leaves = []
                self.enumerate_leaves(node, level=node.level, stack=leaves)
                mask = np.isin(self.ws[node.level], leaves)
                boundary = list(chain.from_iterable([self.b[node.level].setdefault(pair, [])
                                                     for pair in combinations(leaves, 2)]))
                mask.flat[boundary] = True
            mask = np.flatnonzero(mask & (self.dem < elev))
            waterlevel.flat[mask] = elev
        else:
            if node.l:
                self.volume_to_level(node.l, waterlevel)
            if node.r:
                self.volume_to_level(node.r, waterlevel) 
Example #14
Source File: model.py    From QuantEcon.lectures.code with BSD 3-Clause "New" or "Revised" License
def find_steady_state(self, a, b, method='brentq', **kwargs):
        """
        Compute the equilibrium value of capital stock (per unit
        effective labor).

        Parameters
        ----------
        a : float
            One end of the bracketing interval [a,b].
        b : float
            The other end of the bracketing interval [a,b]
        method : str (default=`brentq`)
            Method to use when computing the steady state. Supported
            methods are `bisect`, `brenth`, `brentq`, `ridder`. See
            `scipy.optimize` for more details (including references).
        kwargs : optional
            Additional keyword arguments. Keyword arguments are method
            specific see `scipy.optimize` for details.

        Returns
        -------
        x0 : float
            Zero of `f` between `a` and `b`.
        r : RootResults (present if ``full_output = True``)
            Object containing information about the convergence. In
            particular, ``r.converged`` is True if the routine
            converged.

        """
        if method == 'bisect':
            result = optimize.bisect(self.evaluate_k_dot, a, b, **kwargs)
        elif method == 'brenth':
            result = optimize.brenth(self.evaluate_k_dot, a, b, **kwargs)
        elif method == 'brentq':
            result = optimize.brentq(self.evaluate_k_dot, a, b, **kwargs)
        elif method == 'ridder':
            result = optimize.ridder(self.evaluate_k_dot, a, b, **kwargs)
        else:
            mesg = ("Method must be one of : 'bisect', 'brenth', 'brentq', " +
                    "or 'ridder'.")
            raise ValueError(mesg)

        return result 
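As a usage sketch, with the model class stripped away: for a Cobb-Douglas technology the net investment function has a single positive root, and any of the four supported bracketing solvers will find it (the parameter values and functional form below are illustrative, not taken from the QuantEcon code):

from scipy import optimize

# Illustrative parameters: savings rate, capital share, technology growth,
# population growth and depreciation.
s, alpha, g, n, delta = 0.15, 0.33, 0.02, 0.01, 0.05

def evaluate_k_dot(k):
    # Change in capital per unit effective labor at capital stock k.
    return s * k**alpha - (g + n + delta) * k

k_star = optimize.bisect(evaluate_k_dot, 1e-6, 100.0, xtol=1e-12)
print(k_star)  # analytically (s / (g + n + delta))**(1 / (1 - alpha)) ~ 2.56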
Example #15
Source File: utils.py    From dm_control with Apache License 2.0
def measure_separation_force(top_brick,
                             bottom_brick,
                             min_force=0.,
                             max_force=20.,
                             tolerance=0.01,
                             time_limit=0.5,
                             height_threshold=1e-3):
  """Utility for measuring the separation force for a pair of Duplo bricks.

  Args:
    top_brick: An instance of `Duplo` representing the top brick.
    bottom_brick: An instance of `Duplo` representing the bottom brick.
    min_force: A force that should be insufficient to separate the bricks (N).
    max_force: A force that should be sufficient to separate the bricks (N).
    tolerance: The desired precision of the solution (N).
    time_limit: The maximum simulation time (s) over which to apply force on
      each iteration. Increasing this value will result in smaller estimates
      of the separation force, since given sufficient time the bricks may slip
      apart gradually under a smaller force. This is due to MuJoCo's soft
      contact model (see http://mujoco.org/book/index.html#Soft).
    height_threshold: The distance (m) that the upper brick must move in the
      z-axis for the bricks to count as separated.

  Returns:
    A float, the measured separation force (N).
  """
  arena, attachment_frame = stack_bricks(top_brick, bottom_brick)
  physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
  bound_attachment_frame = physics.bind(attachment_frame)

  def func(force):
    """Returns +1 if the bricks separate under this force, and -1 otherwise."""
    with physics.model.disable('gravity'):
      # Reset the simulation.
      physics.reset()
      # Get the initial height.
      initial_height = bound_attachment_frame.xpos[2]
      # Apply an upward force to the attachment frame.
      bound_attachment_frame.xfrc_applied[2] = force
      # Advance the simulation until either the height threshold or time limit
      # is reached.
      while physics.time() < time_limit:
        physics.step()
        distance_lifted = bound_attachment_frame.xpos[2] - initial_height
        if distance_lifted > height_threshold:
          return 1.0
    return -1.0

  # Ensure that the min and max forces bracket the true separation force.
  while func(min_force) > 0:
    min_force *= 0.5
  while func(max_force) < 0:
    max_force *= 2

  return optimize.bisect(func, a=min_force, b=max_force, xtol=tolerance,
                         disp=True) 
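Note that func only ever returns +1.0 or -1.0, so bisect is being used purely to shrink the bracketing interval around the unknown separation force down to xtol; it never needs a continuous function, only a sign change between the endpoints. A toy version of that trick (the threshold value is made up):

from scipy import optimize

THRESHOLD = 7.3  # the unknown quantity to be localized

def indicator(force):
    # +1 if the force exceeds the threshold, -1 otherwise, like func above.
    return 1.0 if force > THRESHOLD else -1.0

estimate = optimize.bisect(indicator, a=0.0, b=20.0, xtol=0.01)
print(estimate)  # within 0.01 of 7.3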
Example #16
Source File: itu837.py    From ITU-Rpy with MIT License
def rainfall_rate(self, lat_d, lon_d, p):
        """
        """
        if p == 0.01:
            return self.R001(lat_d, lon_d)

        lat_f = lat_d.flatten()
        lon_f = lon_d.flatten()

        Nii = np.array([[31, 28.25, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]])

        # Step 2: For each month, determine the monthly mean surface
        # temperature
        Tii = surface_month_mean_temperature(lat_f, lon_f, self.months).value.T

        # Step 3: For each month, determine the monthly mean total rainfall
        MTii = np.array([self.Mt(lat_f, lon_f, m) for m in self.months]).T

        # Step 4: For each month, determine the monthly mean total rainfall
        tii = Tii - 273.15

        # Step 5: For each month number, calculate rii
        rii = np.where(tii >= 0, 0.5874 * np.exp(0.0883 * tii), 0.5874)

        # Step 6a For each month number, calculate the probability of rain:
        P0ii = 100 * MTii / (24 * Nii * rii)

        # Step 7b:
        rii = np.where(P0ii > 70, 100/70. * MTii / (24 * Nii), rii)
        P0ii = np.where(P0ii > 70, 70, P0ii)

        # Step 7: Calculate the annual probability of rain, P0anual
        P0anual = np.sum(Nii * P0ii, axis=-1) / 365.25

        # Step 8: Compute the rainfall rate exceeded for p
        def _ret_fcn(P0):
            if p > P0:
                return 0
            else:
                # Use a bisection method to determine
                def f_Rp(Rref):
                    P_r_ge_Rii = P0ii * stats.norm.sf(
                            (np.log(Rref) + 0.7938 - np.log(rii)) / 1.26)
                    P_r_ge_R = np.sum(Nii * P_r_ge_Rii) / 365.25
                    return 100 * (P_r_ge_R / p - 1)

                return bisect(f_Rp, 1e-10, 1000, xtol=1e-5)

        fcn = np.vectorize(_ret_fcn)
        return fcn(P0anual).reshape(lat_d.shape) 
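bisect is a scalar routine, so the example wraps the per-point solve in np.vectorize in order to apply it across the whole latitude/longitude grid. A stripped-down illustration of that pattern:

import numpy as np
from scipy.optimize import bisect

targets = np.array([1.0, 2.0, 5.0])

def solve_one(t):
    # Solve x**3 = t for a single scalar target t.
    return bisect(lambda x: x**3 - t, 0.0, 10.0, xtol=1e-9)

roots = np.vectorize(solve_one)(targets)
print(roots)  # approximately [1.0, 1.26, 1.71]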
Example #17
Source File: model.py    From solowPy with MIT License
def find_steady_state(self, a, b, method='brentq', **kwargs):
        """
        Compute the equilibrium value of capital stock (per unit effective
        labor).

        Parameters
        ----------
        a : float
            One end of the bracketing interval [a,b].
        b : float
            The other end of the bracketing interval [a,b]
        method : str (default=`brentq`)
            Method to use when computing the steady state. Supported methods
            are `bisect`, `brenth`, `brentq`, `ridder`. See `scipy.optimize`
            for more details (including references).
        kwargs : optional
            Additional keyword arguments. Keyword arguments are method specific
            see `scipy.optimize` for details.

        Returns
        -------
        x0 : float
            Zero of `f` between `a` and `b`.
        r : RootResults (present if ``full_output = True``)
            Object containing information about the convergence. In particular,
            ``r.converged`` is True if the routine converged.

        """
        if method == 'bisect':
            result = optimize.bisect(self.evaluate_k_dot, a, b, **kwargs)
        elif method == 'brenth':
            result = optimize.brenth(self.evaluate_k_dot, a, b, **kwargs)
        elif method == 'brentq':
            result = optimize.brentq(self.evaluate_k_dot, a, b, **kwargs)
        elif method == 'ridder':
            result = optimize.ridder(self.evaluate_k_dot, a, b, **kwargs)
        else:
            mesg = ("Method must be one of : 'bisect', 'brenth', 'brentq', " +
                    "or 'ridder'.")
            raise ValueError(mesg)

        return result