Python scipy.optimize Examples

The following are 19 code examples of the scipy.optimize module, collected from open-source projects. The source file, project, and license are listed above each example.
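For orientation, here is a minimal, self-contained sketch of the module's most common entry point, scipy.optimize.minimize, using the built-in rosen test function in place of a real objective:

import numpy as np
import scipy.optimize

# Minimize the Rosenbrock test function starting from a fixed point.
result = scipy.optimize.minimize(scipy.optimize.rosen, x0=np.array([1.3, 0.7]))
print(result.x, result.fun, result.nfev)  # argmin, objective value, evaluations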
Example #1
Source File: local_penalization_calculator.py    From emukit with Apache License 2.0
def __init__(self, acquisition: Acquisition, acquisition_optimizer: AcquisitionOptimizerBase,
                 model: IDifferentiable, parameter_space: ParameterSpace, batch_size: int):
        """
        :param acquisition: Base acquisition function to use without any penalization applied; this acquisition should
                            output positive values only.
        :param acquisition_optimizer: AcquisitionOptimizer object to optimize the penalized acquisition
        :param model: Model object, used to compute the parameters of the local penalization
        :param parameter_space: Parameter space describing input domain
        :param batch_size: Number of points to collect in each batch
        """
        if not isinstance(model, IDifferentiable):
            raise ValueError('Model must implement ' + str(IDifferentiable) +
                             ' for use with Local Penalization batch method.')

        self.acquisition = acquisition
        self.acquisition_optimizer = acquisition_optimizer
        self.batch_size = batch_size
        self.model = model
        self.parameter_space = parameter_space 
Example #2
Source File: optimizers.py    From DOTA_models with Apache License 2.0
def optimize(self, sess, feed_dict):
    reg_input, reg_weight, old_values, targets = sess.run(
        [self.inputs, self.regression_weight, self.values, self.targets],
        feed_dict=feed_dict)

    intended_values = targets * self.mix_frac + old_values * (1 - self.mix_frac)

    # taken from rllab
    reg_coeff = 1e-5
    for _ in range(5):
      best_fit_weight = np.linalg.lstsq(
          reg_input.T.dot(reg_input) +
          reg_coeff * np.identity(reg_input.shape[1]),
          reg_input.T.dot(intended_values))[0]
      if not np.any(np.isnan(best_fit_weight)):
        break
      reg_coeff *= 10

    if len(best_fit_weight.shape) == 1:
      best_fit_weight = np.expand_dims(best_fit_weight, -1)

    sess.run(self.update_regression_weight,
             feed_dict={self.new_regression_weight: best_fit_weight}) 
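The heart of this example is a ridge-regularized least-squares solve that retries with a tenfold larger regularization coefficient whenever the solution contains NaNs. A standalone sketch of that loop, with synthetic arrays standing in for the values fetched from the TensorFlow session:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))                 # stand-in for reg_input
y = X @ rng.normal(size=5)                    # stand-in for intended_values

reg_coeff = 1e-5
for _ in range(5):
    # Solve the ridge-regularized normal equations (X'X + c*I) w = X'y.
    best_fit_weight = np.linalg.lstsq(
        X.T @ X + reg_coeff * np.identity(X.shape[1]),
        X.T @ y, rcond=None)[0]
    if not np.any(np.isnan(best_fit_weight)):
        break
    reg_coeff *= 10  # NaNs signal an ill-conditioned solve; regularize harder
print(best_fit_weight)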
Example #3
Source File: optimizer.py    From Robotic_Manipulation with MIT License
def optimize(self, x0, target):
        """Calculate an optimum argument of an objective function."""
        x = x0
        for i in range(self.maxiter):
            g = self.g(x, target)
            h = self.h(x, target)
            if i == 0:
                alpha = 0
                m = g
            else:
                alpha = - np.dot(m, np.dot(h, g)) / np.dot(m, np.dot(h, m))
                m = g + np.dot(alpha, m)
            t = - np.dot(m, g) / np.dot(m, np.dot(h, m))
            delta = np.dot(t, m)
            x = x + delta
            if np.linalg.norm(delta) < self.tol:
                break
        return x 
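This is a conjugate-gradient update with an exact line search: alpha makes the new direction m conjugate to the previous one, and t minimizes the objective along m. A sketch that runs the same update on a simple quadratic, with grad and hess as hypothetical stand-ins for the self.g and self.h callables:

import numpy as np

A = np.array([[3.0, 1.0], [1.0, 2.0]])  # symmetric positive-definite Hessian
b = np.array([1.0, 1.0])
grad = lambda x: A @ x - b               # gradient of 0.5 * x @ A @ x - b @ x
hess = lambda x: A

x = np.zeros(2)
for i in range(10):
    g, h = grad(x), hess(x)
    if i == 0:
        m = g
    else:
        alpha = -(m @ (h @ g)) / (m @ (h @ m))
        m = g + alpha * m                # direction conjugate to the previous m
    t = -(m @ g) / (m @ (h @ m))         # exact step length along m
    delta = t * m
    x = x + delta
    if np.linalg.norm(delta) < 1e-10:
        break
print(x, np.linalg.solve(A, b))          # the two should agree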
Example #4
Source File: fourier_fitting.py    From westpa with MIT License
def optimize(self, data, weight, w0, t0):
        ncenters = data.shape[0]
        self.w0 = w0
        self.t0 = t0
        if weight is None:
            weight = np.ones_like(t0)

        for iiter in range(self.maxiters):
            self.pp.append(self.calc_string(self.w0, self.t0, data))
            if iiter > 0:
                err = np.sum((self.pp[-1] - self.pp[-2])**2) / ncenters
                print('{} -- {}'.format(iiter, err))
                if err < self.tol:
                    break
            else:
                print(iiter)
            # Optimize tk
            for ci in range(ncenters):
                self.t0[ci] = scipy.optimize.leastsq(self._optimize_dist, self.t0[ci], args=(data, self.w0, ci))[0]

            # Optimize wij
            for k in range(self.ndims):
                self.w0[k, :] = scipy.optimize.leastsq(self._optimize_w, self.w0[k, :], args=(data, self.t0, k, weight))[0]
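scipy.optimize.leastsq minimizes the sum of squares of a residual function, with extra data passed through args exactly as in the two inner loops above. A minimal sketch with a hypothetical straight-line model:

import numpy as np
import scipy.optimize

def residuals(p, x, y):
    # leastsq minimizes sum(residuals(p)**2) over p.
    a, b = p
    return y - (a * x + b)

x = np.linspace(0, 1, 50)
y = 2.0 * x + 0.5 + 0.01 * np.random.default_rng(1).normal(size=x.size)
p_opt = scipy.optimize.leastsq(residuals, [1.0, 0.0], args=(x, y))[0]
print(p_opt)  # close to (2.0, 0.5)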
Example #5
Source File: projection.py    From skymapper with MIT License
def _optimize(proj_cls, x0, lon_type, lon, lat, crit, bounds=None):
    """Determine parameters for `proj_cls` that minimize `crit` over `lon, lat`.

    Args:
        proj_cls: projection class
        x0: arguments for projection class `__init__`
        lon_type: type of longitude, "lon" or "ra" (see `BaseProjection`)
        lon: list of right ascensions
        lat: list of declinations
        crit: optimization criterion
            must be a function of the semi-major and semi-minor axes of the Tissot indicatrix
        bounds: list of upper and lower bounds on each parameter in `x0`

    Returns:
        optimized projection of class `proj_cls`
    """
    print ("optimizing parameters of %s to minimize %s" % (proj_cls.__name__, crit.__name__))
    x, fmin, d = scipy.optimize.fmin_l_bfgs_b(_optimize_objective, x0, args=(proj_cls, lon_type, lon, lat, crit), bounds=bounds, approx_grad=True)
    res = proj_cls(*x, lon_type=lon_type)
    print ("best objective %.6f at %r" % (fmin, res))
    return res 
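Because `crit` has no analytic gradient, the call above sets approx_grad=True so that L-BFGS-B estimates the gradient by finite differences. A minimal sketch of that calling pattern on a toy objective:

import numpy as np
import scipy.optimize

def objective(x):
    return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2

# approx_grad=True makes L-BFGS-B estimate the gradient numerically,
# since the objective provides no analytic derivative.
x, fmin, info = scipy.optimize.fmin_l_bfgs_b(
    objective, np.zeros(2), approx_grad=True, bounds=[(-5, 5), (-5, 5)])
print(x, fmin, info['funcalls'])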
Example #6
Source File: scipy.py    From OpenFermion-Cirq with Apache License 2.0
def __init__(self,
                 options: Optional[Dict]=None,
                 kwargs: Optional[Dict]=None,
                 uses_bounds: bool=True) -> None:
        """
        Args:
            options: The `options` dictionary passed to scipy.optimize.minimize.
            kwargs: Other keyword arguments passed to scipy.optimize.minimize.
                This should NOT include the `bounds` or `options` keyword
                arguments.
            uses_bounds: Whether the algorithm uses bounds on the input
                variables. Set this to False to prevent scipy.optimize.minimize
                from raising a warning if the chosen method does not use bounds.
        """
        self.kwargs = kwargs or {}
        self.uses_bounds = uses_bounds
        super().__init__(options) 
Example #7
Source File: scipy.py    From OpenFermion-Cirq with Apache License 2.0
def optimize(self,
                 black_box: BlackBox,
                 initial_guess: Optional[numpy.ndarray]=None,
                 initial_guess_array: Optional[numpy.ndarray]=None
                 ) -> OptimizationResult:
        if initial_guess is None:
            raise ValueError('The chosen optimization algorithm requires an '
                             'initial guess.')
        bounds = black_box.bounds if self.uses_bounds else None
        result = scipy.optimize.minimize(black_box.evaluate,
                                         initial_guess,
                                         bounds=bounds,
                                         options=self.options,
                                         **self.kwargs)
        return OptimizationResult(optimal_value=result.fun,
                                  optimal_parameters=result.x,
                                  num_evaluations=result.nfev,
                                  status=result.status,
                                  message=result.message) 
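The returned scipy result exposes the fields used above: fun (best objective value), x (argmin), nfev (number of evaluations), status, and message. A small sketch that collects them, with a plain dict standing in for the OptimizationResult container:

import numpy as np
import scipy.optimize

result = scipy.optimize.minimize(lambda x: np.sum(x ** 2), np.ones(3),
                                 bounds=[(-1, 1)] * 3)
summary = dict(optimal_value=result.fun,     # best objective value found
               optimal_parameters=result.x,  # argmin
               num_evaluations=result.nfev,  # objective evaluations used
               status=result.status,
               message=result.message)
print(summary)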
Example #8
Source File: optimizers.py    From yolo_v2 with Apache License 2.0
def optimize(self, sess, feed_dict):
    old_theta = sess.run(self.flat_vars)

    old_values, targets = sess.run([self.values, self.targets], feed_dict=feed_dict)
    intended_values = targets * self.mix_frac + old_values * (1 - self.mix_frac)
    feed_dict = dict(feed_dict)
    feed_dict[self.intended_values] = intended_values

    def calc_loss_and_grad(theta):
      sess.run(self.set_vars, feed_dict={self.flat_theta: theta})
      loss, grad = sess.run([self.raw_loss, self.loss_flat_gradient],
                            feed_dict=feed_dict)
      grad = grad.astype('float64')
      return loss, grad

    theta, _, _ = scipy.optimize.fmin_l_bfgs_b(
        calc_loss_and_grad, old_theta, maxiter=self.max_iter)
    sess.run(self.set_vars, feed_dict={self.flat_theta: theta}) 
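fmin_l_bfgs_b also accepts a callable that returns a (loss, gradient) pair, which is why calc_loss_and_grad bundles both; the gradient is cast to float64 as the Fortran backend expects. A standalone sketch of that pattern:

import numpy as np
import scipy.optimize

def loss_and_grad(theta):
    # Returning a (loss, gradient) pair avoids a second callable for the
    # derivative; the gradient is cast to float64 for L-BFGS-B.
    loss = np.sum((theta - 3.0) ** 2)
    grad = 2.0 * (theta - 3.0)
    return loss, grad.astype('float64')

theta, fmin, info = scipy.optimize.fmin_l_bfgs_b(loss_and_grad, np.zeros(4), maxiter=100)
print(theta)  # approximately all 3.0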
Example #9
Source File: SPM.py    From pySPM with Apache License 2.0
def get_tik_tf(Img, mu, tukey=0, source_tukey=0, debug=False, d=200, real=np.real):
    import scipy
    def fit(x, a, A, bg, x0):
        return bg+(A-bg)*np.exp(-abs(x-x0)/a)
    
    x = np.arange(Img.shape[1])
    y = np.arange(Img.shape[0])
    X, Y = np.meshgrid(x, y)
    x0 = Img.shape[1]/2
    y0 = Img.shape[0]/2
    R = np.sqrt((X-x0)**2+(Y-y0)**2)
    
    Z = beam_profile(Img, Img, mu=mu, tukey=tukey, source_tukey=source_tukey, real=real)
    zoom = zoom_center(Z, d)
    P = zoom[zoom.shape[0]//2, :]
    p0 = (1, np.max(zoom), 0, len(P)/2)
    popt, pcov = scipy.optimize.curve_fit(fit, np.arange(len(P)), P, p0, bounds=((0,0,-np.inf,0),np.inf))
    bg = popt[2]
    a = popt[0]
    if debug:
        return bg+np.exp(-np.abs(R)/a), Z, p0, popt
    return bg+np.exp(-np.abs(R)/a) 
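scipy.optimize.curve_fit takes the model function, the data, an initial guess p0, and optional bounds; the call above bounds a, A, and x0 below by zero while leaving bg free. A self-contained sketch of the same pattern with synthetic data:

import numpy as np
import scipy.optimize

def model(x, a, A, bg, x0):
    return bg + (A - bg) * np.exp(-abs(x - x0) / a)

xdata = np.arange(100.0)
ydata = model(xdata, 5.0, 1.0, 0.1, 50.0)
ydata += 0.01 * np.random.default_rng(2).normal(size=xdata.size)

# Initial guess p0 plus per-parameter bounds: a, A, x0 >= 0, bg unbounded below.
popt, pcov = scipy.optimize.curve_fit(
    model, xdata, ydata, p0=(1, ydata.max(), 0, len(ydata) / 2),
    bounds=((0, 0, -np.inf, 0), np.inf))
print(popt)  # close to (5.0, 1.0, 0.1, 50.0)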
Example #10
Source File: projection.py    From skymapper with MIT License
def optimize(cls, lon, lat, crit=meanDistortion, lon_type="ra"):
        """Optimize the parameters of projection to minimize `crit` over `lon,lat`

        Args:
            lon: list of longitude
            lat: list of latitude
            crit: optimization criterion
                must be a function of the semi-major and semi-minor axes of the Tissot indicatrix
            lon_type: type of longitude, "lon" or "ra" (see `BaseProjection`)

        Returns:
            optimized projection
        """
        lon_ = np.array(lon)
        # go into standard frame, right or left-handed is irrelevant here
        lon_[lon_ > 180] -= 360
        lon_[lon_ < -180] += 360
        bounds = ((-180,180),)
        x0 = np.array((lon_.mean(),))
        return _optimize(cls, x0, lon_type, lon, lat, crit, bounds=bounds) 
Example #11
Source File: optimizer.py    From tinyik with MIT License
def optimize(self, angles0, target):
        """Calculate an optimum argument of an objective function."""
        def new_objective(angles):
            a = angles - angles0
            if isinstance(self.smooth_factor, (np.ndarray, list)):
                if len(a) == len(self.smooth_factor):
                    return (self.f(angles, target) +
                            np.sum(self.smooth_factor * np.power(a, 2)))
                else:
                    raise ValueError('len(smooth_factor) != number of joints')
            else:
                return (self.f(angles, target) +
                        self.smooth_factor * np.sum(np.power(a, 2)))

        return scipy.optimize.minimize(
            new_objective,
            angles0,
            **self.optimizer_opt).x 
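The smoothness term is a quadratic penalty on the deviation from the starting angles, which biases the solution toward the current pose. A compact sketch with a hypothetical task error f in place of the class attribute:

import numpy as np
import scipy.optimize

def f(angles, target):
    # Hypothetical task error standing in for the class attribute self.f.
    return np.sum((np.cos(angles) - target) ** 2)

angles0 = np.array([0.1, 0.2, 0.3])
target = np.array([0.9, 0.8, 0.7])
smooth_factor = 0.05

def new_objective(angles):
    # Quadratic penalty keeps the solution close to the starting pose angles0.
    return f(angles, target) + smooth_factor * np.sum((angles - angles0) ** 2)

print(scipy.optimize.minimize(new_objective, angles0).x)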
Example #12
Source File: simple_gp_model.py    From emukit with Apache License 2.0
def optimize(self) -> None:
        """
        Optimize the three hyperparameters of the model, namely the kernel variance, kernel lengthscale and likelihood
        variance
        """
        def optimize_fcn(log_hyper_parameters):
            # take exponential to ensure positive values
            hyper_parameters = np.exp(log_hyper_parameters)
            self.lengthscale = hyper_parameters[0]
            self.kernel_variance = hyper_parameters[1]
            self.likelihood_variance = hyper_parameters[2]
            return self._negative_marginal_log_likelihood()

        lower_bound = np.log(1e-6)
        upper_bound = np.log(1e8)

        bounds = [(lower_bound, upper_bound) for _ in range(3)]
        scipy.optimize.minimize(optimize_fcn, np.log(np.array([self.lengthscale,
                                                               self.kernel_variance,
                                                               self.likelihood_variance])), bounds=bounds) 
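Optimizing the logarithms of the hyperparameters and exponentiating inside the objective is a standard trick to keep them positive. A self-contained sketch of the same pattern, with a toy stand-in for the negative marginal log-likelihood:

import numpy as np
import scipy.optimize

def neg_log_likelihood(params):
    # Toy stand-in for _negative_marginal_log_likelihood().
    lengthscale, variance = params
    return (np.log(lengthscale) - 1.0) ** 2 + (variance - 2.0) ** 2

def optimize_fcn(log_params):
    # Exponentiating the optimizer's variables keeps the real values positive.
    return neg_log_likelihood(np.exp(log_params))

bounds = [(np.log(1e-6), np.log(1e8))] * 2
res = scipy.optimize.minimize(optimize_fcn, np.log([1.0, 1.0]), bounds=bounds)
print(np.exp(res.x))  # optimized positive hyperparameters, roughly (e, 2.0)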
Example #13
Source File: optimize.py    From flavio with MIT License
def minimize_migrad(fun, x0, args=(), dx0=None, **kwargs):
    """Minimization function using MINUIT's MIGRAD minimizer."""
    import iminuit
    mfun = MinuitFunction(f=fun, dim=len(x0), args=args)
    # bring the parameters in a suitable form
    par = iminuit.util.describe(mfun)
    x0_dict = {par[i]: x0i for i, x0i in enumerate(x0)}
    if dx0 is None:
        dx0 = np.ones(len(x0))
    dx0_dict = {'error_' + par[i]: dx0i for i, dx0i in enumerate(dx0)}
    # run
    minuit_args = {'errordef': 1}
    minuit_args.update(kwargs)
    minuit = iminuit.Minuit(mfun, **x0_dict, **dx0_dict, **minuit_args)
    fmin, param = minuit.migrad()
    # cast migrad result in terms of scipy-like result object
    res = scipy.optimize.OptimizeResult()
    res.success = fmin['is_valid']
    res.fun = fmin['fval']
    res.x = np.array([p['value'] for p in param])
    res.nfev = fmin['nfcn']
    return res 
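scipy.optimize.OptimizeResult is a dict subclass with attribute access, so results from a foreign optimizer such as MIGRAD can be repackaged in the familiar scipy shape. A minimal sketch:

import numpy as np
import scipy.optimize

# OptimizeResult is a dict subclass with attribute access, so the fields
# can simply be assigned one by one.
res = scipy.optimize.OptimizeResult()
res.success = True
res.fun = 0.25
res.x = np.array([1.0, -2.0])
res.nfev = 42
print(res.success, res.fun, res.x, res.nfev)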
Example #14
Source File: scipy.py    From molecular-design-toolkit with Apache License 2.0
def _force_constraint_convergence(self, result):
        """ Make sure that all constraints are satisfied, ramp up the constraint functions if not

        Note - if additional iterations are necessary, this will destroy the scipy optimize results
        object stored at self.traj.info. Not sure what to do about that
        """
        import scipy.optimize

        for i in range(5):
            for constraint in self.mol.constraints:
                if not constraint.satisfied():
                    break
            else:
                return result

            print('Constraints not satisfied; raising penalties ...')

            self._constraint_multiplier *= 10.0
            result = scipy.optimize.minimize(self.objective,
                                             self._coords_to_vector(self.mol.positions),
                                             jac=self.grad if self.gradtype=='analytical' else None,
                                             callback=self.callback,
                                             constraints=self._make_constraints(),
                                             **self._optimize_kwargs)
        return result 
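The same ramp-the-penalty idea can be written without the surrounding class: minimize, check the constraint, and multiply the penalty weight by ten until the violation is small. A sketch with a hypothetical equality constraint g(x) = 0:

import numpy as np
import scipy.optimize

# Hypothetical equality constraint g(x) = 0, enforced by a quadratic penalty
# whose weight is raised tenfold whenever the constraint is still violated.
g = lambda x: x[0] + x[1] - 1.0
base = lambda x: np.sum(x ** 2)

multiplier, x0 = 1.0, np.zeros(2)
for _ in range(5):
    res = scipy.optimize.minimize(lambda x: base(x) + multiplier * g(x) ** 2, x0)
    if abs(g(res.x)) < 1e-4:
        break
    multiplier *= 10.0
    x0 = res.x            # warm-start the next, more heavily penalized solve
print(res.x, g(res.x))    # approaches (0.5, 0.5) with a tiny residual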
Example #15
Source File: optimization.py    From pysaliency with MIT License
def extract_parameters(self, x, return_list=False):
        """Return dictionary of optimization parameters from vector x.
           The non-optimization parameters will be taken from the initial values.
           if return_list==True, return a list instead of an dictionary"""
        params = self.param_values.copy()
        index = 0
        for param_name in self.optimize:
            if not isinstance(self.param_values[param_name], np.ndarray) or len(self.param_values[param_name].shape) == 0:
                # Only scalar value
                params[param_name] = x[index]
                index += 1
            else:
                shape = self.param_values[param_name].shape
                if len(shape) > 1:
                    raise ValueError('Arrays with more than one dimension are not yet supported!')
                params[param_name] = x[index:index+shape[0]]
                index += shape[0]
        if return_list:
            return [params[key] for key in self.parameters]
        else:
            return params 
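The slot bookkeeping above is easy to exercise on its own: scalars consume one entry of x, 1-d arrays a contiguous block of entries. A standalone sketch with hypothetical parameter names:

import numpy as np

# Hypothetical mirror of extract_parameters.
param_values = {'alpha': 0.5, 'weights': np.array([1.0, 2.0, 3.0]), 'beta': 7.0}
optimize = ['alpha', 'weights']        # parameters exposed to the optimizer

x = np.array([0.9, 10.0, 20.0, 30.0])  # flat vector coming from the optimizer
params, index = dict(param_values), 0
for name in optimize:
    value = param_values[name]
    if not isinstance(value, np.ndarray) or value.ndim == 0:
        params[name] = x[index]        # scalar: one slot
        index += 1
    else:
        params[name] = x[index:index + value.shape[0]]
        index += value.shape[0]
print(params)  # alpha and weights replaced; beta keeps its initial value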
Example #16
Source File: optimization.py    From pyblp with MIT License
def __str__(self) -> str:
        """Format the configuration as a string."""
        description = f"{self._description} {'with' if self._compute_gradient else 'without'} analytic gradients"
        return f"Configured to optimize using {description} and options {format_options(self._method_options)}." 
Example #17
Source File: optimizer.py    From tinyik with MIT License
def optimize(self, angles0, target):
        """Calculate an optimum argument of an objective function."""
        def new_objective(angles):
            return self.f(angles, target)

        return scipy.optimize.minimize(
            new_objective,
            angles0,
            **self.optimizer_opt).x 
Example #18
Source File: optimizers.py    From DOTA_models with Apache License 2.0
def optimize(self, sess, feed_dict):
    old_values, targets = sess.run([self.values, self.targets], feed_dict=feed_dict)
    intended_values = targets * self.mix_frac + old_values * (1 - self.mix_frac)

    feed_dict = dict(feed_dict)
    feed_dict[self.intended_values] = intended_values

    for _ in range(self.max_iter):
      sess.run(self.gradient_ops, feed_dict=feed_dict) 
Example #19
Source File: scipy_optimizer.py    From spins-b with GNU General Public License v3.0
def optimize(self, callback=None):
        """ Run scipy.optimize.minimize.

        Args:
            callback: Callback to pass to scipy.optimize.minimize.
        """
        return self(self.objective, self.param, callback)