Python scipy.optimize.fmin_l_bfgs_b() Examples

The following are 30 code examples of scipy.optimize.fmin_l_bfgs_b(), taken from open-source projects. The original source file and project are noted above each example. You may also want to check out all available functions and classes of the scipy.optimize module.
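Before the project examples, here is a minimal self-contained sketch (the objective and numbers below are purely illustrative, not from any of the projects) showing the basic call pattern: the first return value is the minimizer, the second the minimum, and the third a dict with convergence information.

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

# Minimize a bound-constrained quadratic f(x) = ||x - c||^2.
c = np.array([1.5, -0.5])

def objective(x):
    return float(np.sum((x - c) ** 2))

def gradient(x):
    return 2.0 * (x - c)

x0 = np.zeros(2)
bounds = [(0.0, 1.0), (-1.0, 1.0)]   # one (lower, upper) pair per variable

x_opt, f_opt, info = fmin_l_bfgs_b(objective, x0, fprime=gradient, bounds=bounds)
print(x_opt, f_opt, info['warnflag'])  # warnflag == 0 means successful convergence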
Example #1
Source File: sparse_gp.py    From D-VAE with MIT License
def global_optimization(grid, lower, upper, function_grid, function_scalar, function_scalar_gradient):

    grid_values = function_grid(grid)
    best = grid_values.argmin()
    
    # We solve the optimization problem

    X_initial = grid[ best : (best + 1), : ]
    def objective(X):
        X = casting(X)
        X = X.reshape((1, grid.shape[ 1 ]))
        value = function_scalar(X)
        gradient_value = function_scalar_gradient(X).flatten()
        return float(value), gradient_value.astype(np.float64)  # np.float was removed from NumPy; the builtin float / np.float64 behave the same

    lbfgs_bounds = zip(lower.tolist(), upper.tolist())
    x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, bounds = list(lbfgs_bounds), iprint = 0, maxiter = 150)
    x_optimal = x_optimal.reshape((1, grid.shape[ 1 ]))

    return x_optimal, y_opt 
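Because no fprime is given and approx_grad is left at its default False, fmin_l_bfgs_b treats objective above as returning a (value, gradient) pair; the gradient must be a flat array the same length as X_initial. A self-contained sketch of the same pattern, with a toy objective standing in for the GP surrogate (all names here are illustrative):

import numpy as np
import scipy.optimize as spo

def objective(x):
    # Return (value, gradient) together; no separate fprime is needed.
    value = float(np.sum(x ** 2))
    gradient = 2.0 * x
    return value, gradient

x0 = np.array([0.8, -0.3])
bounds = list(zip([-1.0, -1.0], [1.0, 1.0]))  # materialize the zip into a list on Python 3
x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, x0, bounds=bounds, maxiter=150)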
Example #2
Source File: sparse_gp.py    From icml18-jtnn with MIT License
def global_optimization(grid, lower, upper, function_grid, function_scalar, function_scalar_gradient):

    grid_values = function_grid(grid)
    best = grid_values.argmin()
    
    # We solve the optimization problem

    X_initial = grid[ best : (best + 1), : ]
    def objective(X):
        X = casting(X)
        X = X.reshape((1, grid.shape[ 1 ]))
        value = function_scalar(X)
        gradient_value = function_scalar_gradient(X).flatten()
        return float(value), gradient_value.astype(np.float64)

    lbfgs_bounds = list(zip(lower.tolist(), upper.tolist()))  # materialize for Python 3, where zip is lazy
    x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, bounds = lbfgs_bounds, iprint = 0, maxiter = 150)
    x_optimal = x_optimal.reshape((1, grid.shape[ 1 ]))

    return x_optimal, y_opt 
Example #3
Source File: sparse_gp.py    From sdvae with MIT License
def global_optimization(grid, lower, upper, function_grid, function_scalar, function_scalar_gradient):

    grid_values = function_grid(grid)
    best = grid_values.argmin()
    
    # We solve the optimization problem

    X_initial = grid[ best : (best + 1), : ]
    def objective(X):
        X = casting(X)
        X = X.reshape((1, grid.shape[ 1 ]))
        value = function_scalar(X)
        gradient_value = function_scalar_gradient(X).flatten()
        return float(value), gradient_value.astype(np.float64)

    lbfgs_bounds = list(zip(lower.tolist(), upper.tolist()))
    x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, bounds = lbfgs_bounds, iprint = 0, maxiter = 150)
    x_optimal = x_optimal.reshape((1, grid.shape[ 1 ]))

    return x_optimal, y_opt 
Example #4
Source File: sparse_gp.py    From sdvae with MIT License
def global_optimization(grid, lower, upper, function_grid, function_scalar, function_scalar_gradient):

    grid_values = function_grid(grid)
    best = grid_values.argmin()
    
    # We solve the optimization problem

    X_initial = grid[ best : (best + 1), : ]
    def objective(X):
        X = casting(X)
        X = X.reshape((1, grid.shape[ 1 ]))
        value = function_scalar(X)
        gradient_value = function_scalar_gradient(X).flatten()
        return float(value), gradient_value.astype(np.float64)

    lbfgs_bounds = list(zip(lower.tolist(), upper.tolist()))
    x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, bounds = lbfgs_bounds, iprint = 0, maxiter = 150)
    x_optimal = x_optimal.reshape((1, grid.shape[ 1 ]))

    return x_optimal, y_opt 
Example #5
Source File: api.py    From dl2 with MIT License
def lbfgsb(variables, bounds, loss_fn, zero_grad_fn):
    x, shapes, shapes_flat = vars_to_x(variables)
    bounds_list = []
    for var in variables:
        lower, upper = bounds[var]
        lower = lower.ravel()
        upper = upper.ravel()
        for i in range(lower.size):
            bounds_list.append((lower[i], upper[i]))

    def f(x):
        x_to_vars(x, variables, shapes_flat, shapes)
        loss = loss_fn()
        zero_grad_fn()
        loss.backward()
        with torch.no_grad():
            f = loss.detach().cpu().numpy().astype(np.float64)
            g = np.concatenate([var.tensor.grad.detach().cpu().numpy().ravel() for var in variables]).astype(np.float64)  # concatenate so the gradient is flat and matches x
        return f, g
    x, f, d = spo.fmin_l_bfgs_b(f, x, bounds=bounds_list)
    x_to_vars(x, variables, shapes_flat, shapes) 
Example #6
Source File: arima_model.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _fit_start_params(self, order, method, start_ar_lags=None):
        if method != 'css-mle':  # use Hannan-Rissanen to get start params
            start_params = self._fit_start_params_hr(order, start_ar_lags)
        else:  # use CSS to get start params
            func = lambda params: -self.loglike_css(params)
            #start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
            start_params = self._fit_start_params_hr(order, start_ar_lags)
            if self.transparams:
                start_params = self._invtransparams(start_params)
            bounds = [(None,)*2]*sum(order)
            mlefit = optimize.fmin_l_bfgs_b(func, start_params,
                                            approx_grad=True, m=12,
                                            pgtol=1e-7, factr=1e3,
                                            bounds=bounds, iprint=-1)
            start_params = self._transparams(mlefit[0])
        return start_params 
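This snippet has no analytic gradient, so approx_grad=True makes L-BFGS-B estimate gradients by finite differences; m, pgtol and factr tune the memory size and stopping tolerances. A stripped-down sketch of the same pattern, with a stand-in objective replacing the CSS log-likelihood:

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def neg_loglike(params):
    # Stand-in for -self.loglike_css(params); any smooth scalar objective works here.
    return float(np.sum((params - 0.3) ** 2))

start_params = np.zeros(3)
bounds = [(None, None)] * 3           # effectively unbounded, as in the ARIMA code
mlefit = fmin_l_bfgs_b(neg_loglike, start_params,
                       approx_grad=True,   # gradient estimated by finite differences
                       m=12, pgtol=1e-7, factr=1e3,
                       bounds=bounds, iprint=-1)
start_params = mlefit[0]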
Example #7
Source File: sparse_gp.py    From sdvae with MIT License
def global_optimization(grid, lower, upper, function_grid, function_scalar, function_scalar_gradient):

    grid_values = function_grid(grid)
    best = grid_values.argmin()
    
    # We solve the optimization problem

    X_initial = grid[ best : (best + 1), : ]
    def objective(X):
        X = casting(X)
        X = X.reshape((1, grid.shape[ 1 ]))
        value = function_scalar(X)
        gradient_value = function_scalar_gradient(X).flatten()
        return float(value), gradient_value.astype(np.float64)

    lbfgs_bounds = list(zip(lower.tolist(), upper.tolist()))
    x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, bounds = lbfgs_bounds, iprint = 0, maxiter = 150)
    x_optimal = x_optimal.reshape((1, grid.shape[ 1 ]))

    return x_optimal, y_opt 
Example #8
Source File: 105_Style_Transfer.py    From Tensorflow-Computer-Vision-Tutorial with MIT License
def styling(self, content_image, style_image, n_iter):
        content = Image.open(content_image).resize((self.width, self.height))
        self.content = np.expand_dims(content, axis=0).astype(np.float32)   # [1, height, width, 3]
        style = Image.open(style_image).resize((self.width, self.height))
        self.style = np.expand_dims(style, axis=0).astype(np.float32)       # [1, height, width, 3]

        x = np.copy(self.content)      # initialize styled image from content
        
        # repeat backpropagating to styled image 
        for i in range(n_iter):
            x, min_val, info = fmin_l_bfgs_b(self._get_loss, x.flatten(), fprime=lambda x: self.flat_grads, maxfun=20)
            x = x.clip(0., 255.)
            print('(%i/%i) loss: %.1f' % (i+1, n_iter, min_val))

        x = x.reshape((self.height, self.width, 3))
        for i in range(1, 4):
            x[:, :, -i] += self.vgg_mean[i - 1]
        return x, self.content, self.style 
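The example computes the loss and its gradient in a single pass (_get_loss stores self.flat_grads), hands the cached gradient to fmin_l_bfgs_b through fprime, and restarts the optimizer with a small maxfun on each outer iteration. A minimal sketch of that caching pattern, with a toy quadratic standing in for the VGG losses (class and names invented for illustration):

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

class Evaluator:
    """Compute loss and gradient together; cache the gradient for fprime."""
    def __init__(self, target):
        self.target = target
        self.flat_grads = None

    def loss(self, x):
        diff = x - self.target
        self.flat_grads = 2.0 * diff      # gradient cached for the next grads() call
        return float(np.sum(diff ** 2))

    def grads(self, x):
        return self.flat_grads            # reuse the gradient from the last loss() call

evaluator = Evaluator(target=np.linspace(0.0, 1.0, 5))
x = np.zeros(5)
for i in range(3):                        # several short L-BFGS-B runs, as in the example
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x, fprime=evaluator.grads, maxfun=20)
    print('(%i/3) loss: %.4f' % (i + 1, min_val))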
Example #9
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License
def test_l_bfgs_b(self):
        # limited-memory bound-constrained BFGS algorithm
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        self.grad, args=(),
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls == 5, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[0., -0.52489628, 0.48753042],
                         [0., -0.52489628, 0.48753042]],
                        atol=1e-14, rtol=1e-7) 
Example #10
Source File: sparse_gp.py    From sdvae with MIT License
def global_optimization(grid, lower, upper, function_grid, function_scalar, function_scalar_gradient):

    grid_values = function_grid(grid)
    best = grid_values.argmin()
    
    # We solve the optimization problem

    X_initial = grid[ best : (best + 1), : ]
    def objective(X):
        X = casting(X)
        X = X.reshape((1, grid.shape[ 1 ]))
        value = function_scalar(X)
        gradient_value = function_scalar_gradient(X).flatten()
        return float(value), gradient_value.astype(np.float64)

    lbfgs_bounds = list(zip(lower.tolist(), upper.tolist()))
    x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, bounds = lbfgs_bounds, iprint = 0, maxiter = 150)
    x_optimal = x_optimal.reshape((1, grid.shape[ 1 ]))

    return x_optimal, y_opt 
Example #11
Source File: test_optimize.py    From Computable with MIT License
def test_l_bfgs_b(self):
        """ limited-memory bound-constrained BFGS algorithm
        """
        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
                                        self.grad, args=(),
                                        maxiter=self.maxiter)

        (params, fopt, d) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls == 5, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[0., -0.52489628, 0.48753042],
                         [0., -0.52489628, 0.48753042]],
                        atol=1e-14, rtol=1e-7) 
Example #12
Source File: dmr.py    From dmr with MIT License
def bfgs(self):
        def ll(x):
            x = x.reshape((self.K, self.L))
            return self._ll(x)

        def dll(x):
            x = x.reshape((self.K, self.L))
            result = self._dll(x)
            result = result.reshape(self.K * self.L)
            return result

        Lambda = np.random.multivariate_normal(np.zeros(self.L), 
            (self.sigma ** 2) * np.identity(self.L), size=self.K)
        Lambda = Lambda.reshape(self.K * self.L)

        newLambda, fmin, res = optimize.fmin_l_bfgs_b(ll, Lambda, dll)
        self.Lambda = newLambda.reshape((self.K, self.L)) 
Example #13
Source File: arima_model.py    From vnpy_crypto with MIT License
def _fit_start_params(self, order, method, start_ar_lags=None):
        if method != 'css-mle':  # use Hannan-Rissanen to get start params
            start_params = self._fit_start_params_hr(order, start_ar_lags)
        else:  # use CSS to get start params
            func = lambda params: -self.loglike_css(params)
            #start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
            start_params = self._fit_start_params_hr(order, start_ar_lags)
            if self.transparams:
                start_params = self._invtransparams(start_params)
            bounds = [(None,)*2]*sum(order)
            mlefit = optimize.fmin_l_bfgs_b(func, start_params,
                                            approx_grad=True, m=12,
                                            pgtol=1e-7, factr=1e3,
                                            bounds=bounds, iprint=-1)
            start_params = mlefit[0]
            if self.transparams:
                start_params = self._transparams(start_params)
        return start_params 
Example #14
Source File: l_bfgs_b.py    From qiskit-aqua with Apache License 2.0
def optimize(self, num_vars, objective_function, gradient_function=None,
                 variable_bounds=None, initial_point=None):
        super().optimize(num_vars, objective_function, gradient_function,
                         variable_bounds, initial_point)

        if gradient_function is None and self._max_evals_grouped > 1:
            epsilon = self._options['epsilon']
            gradient_function = Optimizer.wrap_function(Optimizer.gradient_num_diff,
                                                        (objective_function,
                                                         epsilon, self._max_evals_grouped))

        approx_grad = bool(gradient_function is None)
        sol, opt, info = sciopt.fmin_l_bfgs_b(objective_function,
                                              initial_point, bounds=variable_bounds,
                                              fprime=gradient_function,
                                              approx_grad=approx_grad, **self._options)

        return sol, opt, info['funcalls'] 
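The third return value is a dict with convergence details; the wrapper above reads 'funcalls' from it. A quick sketch of inspecting that dict on a toy problem (keys as documented by SciPy):

import numpy as np
from scipy.optimize import fmin_l_bfgs_b, rosen, rosen_der

sol, opt, info = fmin_l_bfgs_b(rosen, np.zeros(4), fprime=rosen_der)
print(info['funcalls'])   # number of objective evaluations
print(info['nit'])        # number of L-BFGS-B iterations
print(info['warnflag'])   # 0: converged, 1: hit maxfun/maxiter, 2: other stop (see info['task'])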
Example #15
Source File: test_optimize.py    From GraphicDesignPatternByPython with MIT License
def test_minimize_l_bfgs_b_maxfun_interruption(self):
        # gh-6162
        f = optimize.rosen
        g = optimize.rosen_der
        values = []
        x0 = np.ones(7) * 1000

        def objfun(x):
            value = f(x)
            values.append(value)
            return value

        # Look for an interesting test case.
        # Request a maxfun that stops at a particularly bad function
        # evaluation somewhere between 100 and 300 evaluations.
        low, medium, high = 30, 100, 300
        optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
        v, k = max((y, i) for i, y in enumerate(values[medium:]))
        maxfun = medium + k
        # If the minimization strategy is reasonable,
        # the minimize() result should not be worse than the best
        # of the first 30 function evaluations.
        target = min(values[:low])
        xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
        assert_array_less(fmin, target) 
Example #16
Source File: MATS_LRT.py    From rMATS-DVR with GNU General Public License v3.0
def MLE_iteration(i1,i2,s1,s2,effective_inclusion_length,effective_skipping_length):
	psi1=vec2psi(i1,s1,effective_inclusion_length,effective_skipping_length);psi2=vec2psi(i2,s2,effective_inclusion_length,effective_skipping_length);
	iter_cutoff=1;iter_maxrun=100;count=0;previous_sum=0;
	while((iter_cutoff>0.01)&(count<=iter_maxrun)):
		count+=1;
		#iteration of beta
		beta_0=sum(psi1)/len(psi1);
		beta_1=sum(psi2)/len(psi2);
		var1=0;var2=0;
		current_sum=0;likelihood_sum=0;
		new_psi1=[];new_psi2=[];
		#Debug;print('unconstrain_1xopt');
		for i in range(len(psi1)):
			xopt = fmin_l_bfgs_b(myfunc_individual,[psi1[i],psi2[i]],myfunc_individual_der,args=[[i1[i],i2[i]],[s1[i],s2[i]],[beta_0,beta_1],var1,effective_inclusion_length,effective_skipping_length],bounds=[[0.01,0.99],[0.01,0.99]],iprint=-1);
			new_psi1.append(float(xopt[0][0]));current_sum+=float(xopt[1]);
			new_psi2.append(float(xopt[0][1]));
			#Debug;print(xopt);
			likelihood_sum+=myfunc_likelihood([new_psi1[i],new_psi2[i]],[[i1[i],i2[i]],[s1[i],s2[i]],[beta_0,beta_1],var1]);
		psi1=new_psi1;psi2=new_psi2;
		#Debug;print('count');print(count);print('previous_sum');print(previous_sum);print('current_sum');print(current_sum);
		if count>1:
			iter_cutoff=abs(previous_sum-current_sum)/abs(previous_sum);
		previous_sum=current_sum;
	if count>iter_maxrun:
		return([current_sum,[psi1,psi2,0,0,var1,var2]]);
	#print('unconstrain');print(xopt);
	return([current_sum,[psi1,psi2,beta_0,beta_1,var1,var2]]);

#Random Sampling Function 
Example #17
Source File: art.py    From neural-art with MIT License
def go(self, maxiter=512):
        """
        This is where the magic happens.

        Return the image resulting from gradient descent for maxiter
        iterations
        """
        # Init random noise image
        debug_print("Running go")
        if args.init == 'rand':
            img = self.random_image()
        else:
            default = caffe.io.load_image(self.args.content_image)
            scaled = self.resize_image(default)
            self.resize_caffes(scaled)
            img = self.transformer.preprocess('data', scaled)

        # Compute bounds for gradient descent, borrowed from
        # fzliu/style-transfer
        data_min = -self.transformer.mean["data"][:, 0, 0]
        data_max = data_min + self.transformer.raw_scale["data"]
        data_bounds = [(data_min[0], data_max[0])] * (img.size // 3) + \
                      [(data_min[1], data_max[1])] * (img.size // 3) + \
                      [(data_min[2], data_max[2])] * (img.size // 3)

        debug_print("Starting grad descent")

        x, f, d = optimize.fmin_l_bfgs_b(
            self.loss_and_gradient,
            img.flatten(),
            bounds=data_bounds,
            fprime=None,  # We'll use loss_and_gradient
            maxiter=maxiter,
            callback=self.print_prog,
        )

        x = np.reshape(x, self.net.blobs['data'].data[0].shape)

        return self.transformer.deprocess('data', x) 
Example #18
Source File: bo_algorithm_components.py    From autogluon with Apache License 2.0
def optimize(self, candidate: Candidate,
                 model: Optional[SurrogateModel] = None) -> Candidate:
        # Before local minimization, the model for this state_id should have been fitted.
        if model is None:
            model = self.model
        state = self.state
        acquisition_function = self.acquisition_function_class(model)

        x0 = state.hp_ranges.to_ndarray(candidate)
        bounds = state.hp_ranges.get_ndarray_bounds()
        n_evaluations = [0]  # wrapped in list to allow access from function

        # unwrap 2d arrays
        def f_df(x):
            n_evaluations[0] += 1
            f, df = acquisition_function.compute_acq_with_gradients(x)
            assert len(f) == 1
            assert len(df) == 1
            return f[0], df[0]

        res = fmin_l_bfgs_b(f_df, x0=x0, bounds=bounds, maxiter=1000)
        self.num_evaluations = n_evaluations[0]
        if res[2]['task'] == b'ABNORMAL_TERMINATION_IN_LNSRCH':
            # this condition was copied from the old GPyOpt code
            # this condition was silently ignored in the old code
            logger.warning(
                f"ABNORMAL_TERMINATION_IN_LNSRCH in lbfgs after {n_evaluations[0]} evaluations, "
                "returning original candidate"
            )
            return candidate  # returning original candidate
        else:
            # Clip to avoid situation where result is small epsilon out of bounds
            a_min, a_max = zip(*bounds)
            optimized_x = np.clip(res[0], a_min, a_max)
            # Make sure the above clipping does really just fix numerical rounding issues in LBFGS
            # if any bigger change was made there is a bug and we want to throw an exception
            assert np.linalg.norm(res[0] - optimized_x) < 1e-6, (res[0], optimized_x, bounds)
            result = state.hp_ranges.from_ndarray(optimized_x.flatten())
            return result 
Example #19
Source File: gpc.py    From twitter-stock-recommendation with MIT License
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
                 n_restarts_optimizer=0, max_iter_predict=100,
                 warm_start=False, copy_X_train=True, random_state=None):
        self.kernel = kernel
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.max_iter_predict = max_iter_predict
        self.warm_start = warm_start
        self.copy_X_train = copy_X_train
        self.random_state = random_state 
Example #20
Source File: gpc.py    From twitter-stock-recommendation with MIT License
def _constrained_optimization(self, obj_func, initial_theta, bounds):
        if self.optimizer == "fmin_l_bfgs_b":
            theta_opt, func_min, convergence_dict = \
                fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
            if convergence_dict["warnflag"] != 0:
                warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
                              " state: %s" % convergence_dict)
        elif callable(self.optimizer):
            theta_opt, func_min = \
                self.optimizer(obj_func, initial_theta, bounds=bounds)
        else:
            raise ValueError("Unknown optimizer %s." % self.optimizer)

        return theta_opt, func_min 
Example #21
Source File: holtwinters.py    From lore with MIT License
def linear(x, fc, alpha = None, beta = None):

  Y = x[:]

  if alpha is None or beta is None:

    initial_values = array([0.3, 0.1])
    boundaries = [(0, 1), (0, 1)]
    type = 'linear'

    parameters = fmin_l_bfgs_b(RMSE, x0 = initial_values, args = (Y, type), bounds = boundaries, approx_grad = True)
    alpha, beta = parameters[0]

  a = [Y[0]]
  b = [Y[1] - Y[0]]
  y = [a[0] + b[0]]
  rmse = 0

  for i in range(len(Y) + fc):

    if i == len(Y):
      Y.append(a[-1] + b[-1])

    a.append(alpha * Y[i] + (1 - alpha) * (a[i] + b[i]))
    b.append(beta * (a[i + 1] - a[i]) + (1 - beta) * b[i])
    y.append(a[i + 1] + b[i + 1])

  rmse = sqrt(sum([(m - n) ** 2 for m, n in zip(Y[:-fc], y[:-fc - 1])]) / len(Y[:-fc]))

  return Y[-fc:], alpha, beta, rmse 
Example #22
Source File: gpr.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _constrained_optimization(self, obj_func, initial_theta, bounds):
        if self.optimizer == "fmin_l_bfgs_b":
            theta_opt, func_min, convergence_dict = \
                fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
            if convergence_dict["warnflag"] != 0:
                warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
                              " state: %s" % convergence_dict)
        elif callable(self.optimizer):
            theta_opt, func_min = \
                self.optimizer(obj_func, initial_theta, bounds=bounds)
        else:
            raise ValueError("Unknown optimizer %s." % self.optimizer)

        return theta_opt, func_min 
Example #23
Source File: gpr.py    From Splunking-Crime with GNU Affero General Public License v3.0
def __init__(self, kernel=None, alpha=1e-10,
                 optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
                 normalize_y=False, copy_X_train=True, random_state=None):
        self.kernel = kernel
        self.alpha = alpha
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.normalize_y = normalize_y
        self.copy_X_train = copy_X_train
        self.random_state = random_state 
Example #24
Source File: gpc.py    From Splunking-Crime with GNU Affero General Public License v3.0
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
                 n_restarts_optimizer=0, max_iter_predict=100,
                 warm_start=False, copy_X_train=True, random_state=None,
                 multi_class="one_vs_rest", n_jobs=1):
        self.kernel = kernel
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.max_iter_predict = max_iter_predict
        self.warm_start = warm_start
        self.copy_X_train = copy_X_train
        self.random_state = random_state
        self.multi_class = multi_class
        self.n_jobs = n_jobs 
Example #25
Source File: gpc.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _constrained_optimization(self, obj_func, initial_theta, bounds):
        if self.optimizer == "fmin_l_bfgs_b":
            theta_opt, func_min, convergence_dict = \
                fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
            if convergence_dict["warnflag"] != 0:
                warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
                              " state: %s" % convergence_dict)
        elif callable(self.optimizer):
            theta_opt, func_min = \
                self.optimizer(obj_func, initial_theta, bounds=bounds)
        else:
            raise ValueError("Unknown optimizer %s." % self.optimizer)

        return theta_opt, func_min 
Example #26
Source File: gpc.py    From Splunking-Crime with GNU Affero General Public License v3.0
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
                 n_restarts_optimizer=0, max_iter_predict=100,
                 warm_start=False, copy_X_train=True, random_state=None):
        self.kernel = kernel
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.max_iter_predict = max_iter_predict
        self.warm_start = warm_start
        self.copy_X_train = copy_X_train
        self.random_state = random_state 
Example #27
Source File: classif.py    From JDOT with MIT License
def fit(self,K,y):  
        # beware Y is a binary matrix to allow for more general solvers (see JDOT)
        if self.bias:
            K1=np.hstack((K,np.ones((K.shape[0],1))))
            self.w=np.zeros((K1.shape[1],y.shape[1]))
            self.w,self.f,self.log=spo.fmin_l_bfgs_b(lambda w: hinge_squared_reg_bias(w,X=K1,Y=y,lambd=self.lambd),self.w,maxiter=1000,maxfun=1000)            
            self.b=self.w.reshape((K1.shape[1],y.shape[1]))[-1,:]
            self.w=self.w.reshape((K1.shape[1],y.shape[1]))[:-1,:]

        else:
            self.w=np.zeros((K.shape[1],y.shape[1]))
            self.w,self.f,self.log=spo.fmin_l_bfgs_b(lambda w: hinge_squared_reg(w,X=K,Y=y,lambd=self.lambd),self.w,maxiter=1000,maxfun=1000)            
            self.w=self.w.reshape((K.shape[1],y.shape[1])) 
Example #28
Source File: holtwinters.py    From lore with MIT License
def multiplicative(x, m, fc, alpha = None, beta = None, gamma = None):

  Y = x[:]

  if alpha is None or beta is None or gamma is None:

    initial_values = array([0.0, 1.0, 0.0])
    boundaries = [(0, 1), (0, 1), (0, 1)]
    type = 'multiplicative'

    parameters = fmin_l_bfgs_b(RMSE, x0 = initial_values, args = (Y, type, m), bounds = boundaries, approx_grad = True)
    alpha, beta, gamma = parameters[0]

  a = [sum(Y[0:m]) / float(m)]
  b = [(sum(Y[m:2 * m]) - sum(Y[0:m])) / m ** 2]
  s = [Y[i] / a[0] for i in range(m)]
  y = [(a[0] + b[0]) * s[0]]
  rmse = 0

  for i in range(len(Y) + fc):

    if i == len(Y):
      Y.append((a[-1] + b[-1]) * s[-m])

    a.append(alpha * (Y[i] / s[i]) + (1 - alpha) * (a[i] + b[i]))
    b.append(beta * (a[i + 1] - a[i]) + (1 - beta) * b[i])
    s.append(gamma * (Y[i] / (a[i] + b[i])) + (1 - gamma) * s[i])
    y.append((a[i + 1] + b[i + 1]) * s[i + 1])

  rmse = sqrt(sum([(m - n) ** 2 for m, n in zip(Y[:-fc], y[:-fc - 1])]) / len(Y[:-fc]))

  return Y[-fc:], alpha, beta, gamma, rmse 
Example #29
Source File: holtwinters.py    From lore with MIT License
def additive(x, m, fc, alpha = None, beta = None, gamma = None):

  Y = x[:]

  if alpha is None or beta is None or gamma is None:

    initial_values = array([0.3, 0.1, 0.1])
    boundaries = [(0, 1), (0, 1), (0, 1)]
    type = 'additive'

    parameters = fmin_l_bfgs_b(RMSE, x0 = initial_values, args = (Y, type, m), bounds = boundaries, approx_grad = True)
    alpha, beta, gamma = parameters[0]

  a = [sum(Y[0:m]) / float(m)]
  b = [(sum(Y[m:2 * m]) - sum(Y[0:m])) / m ** 2]
  s = [Y[i] - a[0] for i in range(m)]
  y = [a[0] + b[0] + s[0]]
  rmse = 0

  for i in range(len(Y) + fc):

    if i == len(Y):
      Y.append(a[-1] + b[-1] + s[-m])

    a.append(alpha * (Y[i] - s[i]) + (1 - alpha) * (a[i] + b[i]))
    b.append(beta * (a[i + 1] - a[i]) + (1 - beta) * b[i])
    s.append(gamma * (Y[i] - a[i] - b[i]) + (1 - gamma) * s[i])
    y.append(a[i + 1] + b[i + 1] + s[i + 1])

  rmse = sqrt(sum([(m - n) ** 2 for m, n in zip(Y[:-fc], y[:-fc - 1])]) / len(Y[:-fc]))

  return Y[-fc:], alpha, beta, gamma, rmse 
Example #30
Source File: fitting.py    From airfoil-opt-gan with MIT License
def parsec_airfoil(airfoil):
    
    n_points = airfoil.shape[0]
    func = lambda x: np.linalg.norm(sythesize(x, n_points) - airfoil)
    bounds = [(0.001, 0.1), # rle
              (1e-4, 0.5), # x_pre
              (-0.1, 0.0), # y_pre
              (-0.5, 0.5), # d2ydx2_pre
              (-10, 10), # th_pre
              (1e-4, 0.5), # x_suc
              (0.0, 0.1), # y_suc
              (-0.5, 0.5), # d2ydx2_suc
              (-10, 10) # th_suc
              ]
    bounds = np.array(bounds)
    n_restarts = 10
    opt_x = None
    opt_f = np.inf
    x0s = np.random.uniform(bounds[:,0], bounds[:,1], size=(n_restarts, bounds.shape[0]))
    for x0 in x0s:
        x, f, _ = fmin_l_bfgs_b(func, x0, approx_grad=1, bounds=bounds, disp=1)
        if f < opt_f:
            opt_x = x
            opt_f = f
#    res = differential_evolution(func, bounds=bounds, disp=1)
#    opt_x = res.x
#    opt_f = res.fun
            
    print(opt_x)
    print(opt_f)
    
    return opt_x