Python cvxopt.solvers.options() Examples

The following are 19 code examples of cvxopt.solvers.options(), each taken from an open-source project; the source file, project, and license are noted above each snippet. You may also want to check out all available functions and classes of the cvxopt.solvers module, or try the search function.
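Before the project examples, here is a minimal self-contained sketch of the usual pattern: entries of the solvers.options dictionary are set before a solver call, for instance to silence the iteration log or cap the number of iterations. The tiny QP below is a hypothetical illustration, not taken from any of the listed projects.

from cvxopt import matrix, solvers

# hypothetical 2-variable QP: minimize x1^2 + x2^2  subject to  x >= 0, x1 + x2 = 1
P = matrix([[2.0, 0.0], [0.0, 2.0]])    # nested lists are columns in cvxopt
q = matrix([0.0, 0.0])
G = matrix([[-1.0, 0.0], [0.0, -1.0]])  # -x <= 0, i.e. x >= 0
h = matrix([0.0, 0.0])
A = matrix([[1.0], [1.0]])              # 1x2 equality constraint: x1 + x2 = 1
b = matrix([1.0])

solvers.options['show_progress'] = False  # silence the iteration log
solvers.options['maxiters'] = 200         # optional: cap the iteration count
sol = solvers.qp(P, q, G, h, A, b)
print(sol['status'], list(sol['x']))      # 'optimal', roughly [0.5, 0.5]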
Example #1
Source File: evaluate.py    From MKLpy with GNU General Public License v3.0    7 votes
def radius(K):
    """evaluate the radius of the MEB (Minimum Enclosing Ball) of examples in
    feature space.

    Parameters
    ----------
    K : (n,n) ndarray,
        the kernel that represents the data.

    Returns
    -------
    r : np.float64,
        the radius of the minimum enclosing ball of examples in feature space.
    """
    K = validation.check_K(K).numpy()
    n = K.shape[0]
    P = 2 * matrix(K)
    p = -matrix(K.diagonal())
    G = -spdiag([1.0] * n)
    h = matrix([0.0] * n)
    A = matrix([1.0] * n).T
    b = matrix([1.0])
    solvers.options['show_progress']=False
    sol = solvers.qp(P,p,G,h,A,b)
    return abs(sol['primal objective'])**.5 
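A quick check of the QP above on a toy kernel, bypassing MKLpy's check_K so that only numpy and cvxopt are needed: for K equal to the identity, the n points are orthonormal in feature space and the MEB radius is sqrt(1 - 1/n).

import numpy as np
from cvxopt import matrix, spdiag, solvers

n = 4
K = np.eye(n)                             # orthonormal points in feature space
P = 2 * matrix(K)
p = -matrix(K.diagonal().copy())
G = -spdiag([1.0] * n)
h = matrix([0.0] * n)
A = matrix([1.0] * n).T
b = matrix([1.0])
solvers.options['show_progress'] = False
sol = solvers.qp(P, p, G, h, A, b)
print(abs(sol['primal objective']) ** .5, (1 - 1 / n) ** .5)  # both about 0.866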
Example #2
Source File: spikes.py    From sima with GNU General Public License v2.0    6 votes
def default_psd_opts():
    """
    Return default options for psd method


    Returns
    -------
    dict : dictionary
        Default options for psd method

    """
    return {  # Default option values
        'method': 'cvx',  # solution method (no other currently supported)
        'bas_nonneg': True,  # baseline strictly non-negative
        'noise_range': (.25, .5),  # frequency range for averaging noise PSD
        'noise_method': 'logmexp',  # method of averaging noise PSD
        'lags': 5,  # number of lags for estimating time constants
        'resparse': 0,  # times to resparse original solution (not supported)
        'fudge_factor': 1,  # fudge factor for reducing time constant bias
        'verbosity': False,  # display optimization details
    } 
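A small usage sketch (the psd_opts wrapper below is hypothetical, not part of sima): defaults returned by this function are typically copied and selectively overridden by the caller.

def psd_opts(**overrides):
    # hypothetical convenience wrapper around the defaults above
    opts = default_psd_opts()
    opts.update(overrides)
    return opts

opts = psd_opts(lags=10, verbosity=True)   # keep the defaults, tweak two entries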
Example #3
Source File: mdp.py    From pymdptoolbox with BSD 3-Clause "New" or "Revised" License    6 votes
def __init__(self, transitions, reward, discount, skip_check=False):
        # Initialise a linear programming MDP.
        # import some functions from cvxopt and set them as object methods
        try:
            from cvxopt import matrix, solvers
            self._linprog = solvers.lp
            self._cvxmat = matrix
        except ImportError:
            raise ImportError("The python module cvxopt is required to use "
                              "linear programming functionality.")
        # initialise the MDP. epsilon and max_iter are not needed
        MDP.__init__(self, transitions, reward, discount, None, None,
                     skip_check=skip_check)
        # Set the cvxopt solver to be quiet by default, but ...
        # this doesn't do what I want it to do c.f. issue #3
        if not self.verbose:
            solvers.options['show_progress'] = False 
Example #4
Source File: apriori.py    From cryptotrader with MIT License    5 votes
def loss(self, w, alpha, Z, x):
        # minimize allocation risk
        gamma = self.estimate_gamma(alpha, Z, w)
        # if the experts' mean returns are low and you have no options, you can choose fiat
        return self.rc * gamma + w[-1] * ((x.mean()) * x.var()) ** 2 
Example #5
Source File: abundance.py    From hypers with BSD 3-Clause "New" or "Revised" License    5 votes
def calculate(self, x_fit: np.ndarray) -> np.ndarray:
        if x_fit.ndim == 1:
            x_fit = x_fit.reshape(x_fit.shape[0], 1)
        solvers.options['show_progress'] = False

        M = self.X.collapse()

        N, p1 = M.shape
        nvars, p2 = x_fit.T.shape
        C = _numpy_to_cvxopt_matrix(x_fit)
        Q = C.T * C

        lb_A = -np.eye(nvars)
        lb = np.repeat(0, nvars)
        A = _numpy_None_vstack(None, lb_A)
        b = _numpy_None_concatenate(None, -lb)
        A = _numpy_to_cvxopt_matrix(A)
        b = _numpy_to_cvxopt_matrix(b)

        Aeq = _numpy_to_cvxopt_matrix(np.ones((1, nvars)))
        beq = _numpy_to_cvxopt_matrix(np.ones(1))

        M = np.array(M, dtype=np.float64)
        self.map = np.zeros((N, nvars), dtype=np.float32)
        for n1 in range(N):
            d = matrix(M[n1], (p1, 1), 'd')
            q = - d.T * C
            sol = solvers.qp(Q, q.T, A, b, Aeq, beq, None, None)['x']
            self.map[n1] = np.array(sol).squeeze()
        self.map = self.map.reshape(self.X.shape[:-1] + (x_fit.shape[-1],))

        return self.map 
Example #6
Source File: mpe.py    From SU_Classification with MIT License    5 votes
def find_nearest_valid_distribution(u_alpha, kernel, initial=None, reg=0):
    """ (solution,distance_sqd)=find_nearest_valid_distribution(u_alpha,kernel):
    Given a n-vector u_alpha summing to 1, with negative terms, 
    finds the distance (squared) to the nearest n-vector summing to 1, 
    with non-neg terms. Distance calculated using nxn matrix kernel. 
    Regularization parameter reg -- 

    min_v (u_alpha - v)^\top K (u_alpha - v) + reg* v^\top v"""

    P = matrix(2 * kernel)
    n = kernel.shape[0]
    q = matrix(np.dot(-2 * kernel, u_alpha))
    A = matrix(np.ones((1, n)))
    b = matrix(1.)
    G = spmatrix(-1., range(n), range(n))
    h = matrix(np.zeros(n))
    dims = {'l': n, 'q': [], 's': []}
    solvers.options['show_progress'] = False
    solution = solvers.coneqp(
        P,
        q,
        G,
        h,
        dims,
        A,
        b,
        initvals=initial
        )
    distance_sqd = solution['primal objective'] + np.dot(u_alpha.T,
            np.dot(kernel, u_alpha))[0, 0]
    return (solution, distance_sqd) 
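A usage sketch for the function above, assuming numpy is imported as np and cvxopt is available: with an identity kernel the QP reduces to a plain Euclidean projection onto the probability simplex.

import numpy as np

u_alpha = np.array([[0.7], [0.6], [-0.3]])   # sums to 1 but has a negative entry
kernel = np.eye(3)                           # identity kernel -> Euclidean projection
solution, dist_sqd = find_nearest_valid_distribution(u_alpha, kernel)
v = np.array(solution['x'])                  # roughly [0.55, 0.45, 0.0]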
Example #7
Source File: su_learning.py    From SU_Classification with MIT License    5 votes
def fit(self, x, y):
        from cvxopt import matrix, solvers
        solvers.options['show_progress'] = False

        check_classification_targets(y)
        x, y = check_X_y(x, y)
        x_s, x_u = x[y == +1, :], x[y == 0, :]
        n_s, n_u = len(x_s), len(x_u)

        p_p = self.prior
        p_n = 1 - self.prior
        p_s = p_p ** 2 + p_n ** 2
        k_s = self._basis(x_s)
        k_u = self._basis(x_u)
        d = k_u.shape[1]

        P = np.zeros((d + 2 * n_u, d + 2 * n_u))
        P[:d, :d] = self.lam * np.eye(d)
        q = np.vstack((
            -p_s / (n_s * (p_p - p_n)) * k_s.T.dot(np.ones((n_s, 1))),
            -p_n / (n_u * (p_p - p_n)) * np.ones((n_u, 1)),
            -p_p / (n_u * (p_p - p_n)) * np.ones((n_u, 1))
        ))
        G = np.vstack((
            np.hstack((np.zeros((n_u, d)), -np.eye(n_u), np.zeros((n_u, n_u)))),
            np.hstack((0.5 * k_u, -np.eye(n_u), np.zeros((n_u, n_u)))),
            np.hstack((k_u, -np.eye(n_u), np.zeros((n_u, n_u)))),
            np.hstack((np.zeros((n_u, d)), np.zeros((n_u, n_u)), -np.eye(n_u))),
            np.hstack((-0.5 * k_u, np.zeros((n_u, n_u)), -np.eye(n_u))),
            np.hstack((-k_u, np.zeros((n_u, n_u)), -np.eye(n_u)))
        ))
        h = np.vstack((
            np.zeros((n_u, 1)),
            -0.5 * np.ones((n_u, 1)),
            np.zeros((n_u, 1)),
            np.zeros((n_u, 1)),
            -0.5 * np.ones((n_u, 1)),
            np.zeros((n_u, 1))
        ))
        sol = solvers.qp(matrix(P), matrix(q), matrix(G), matrix(h))
        self.coef_ = np.array(sol['x'])[:d] 
Example #8
Source File: apriori.py    From cryptotrader with MIT License    5 votes
def update(self, b, x):
        last_x = x[-1, :]

        R, Z = risk.polar_returns(-x, self.k)
        alpha = self.estimate_alpha(R)

        self.r_hat = self.beta * self.r_hat + (1 - self.beta) * last_x

        cons = self.cons + [{'type': 'eq', 'fun': lambda w:
            np.dot(w, self.r_hat) - np.clip(0.001, 0.0, self.r_hat.max() / np.sqrt(2))}]

        b = minimize(
            self.loss,
            b,
            args=(alpha, Z, b),
            constraints=cons,
            options={'maxiter': 3333},
            tol=1e-7,
            bounds=tuple((0,1) for _ in range(b.shape[0]))
        )

        # Log variables
        self.log['r_hat'] = "%.4f, %.4f, %.4f" % (self.r_hat.min(), self.r_hat.mean(), self.r_hat.max())
        self.log['alpha'] = "%.2f" % alpha
        self.log['gamma'] = "%.8f" % b['fun']
        self.log['CC'] = "%.2f" % np.power(b['x'], 2).sum() ** -1
        self.log['nit'] = "%d" % b['nit']
        self.log['k'] = "%.2f" % self.k
        self.log['mpc'] = "%.2f" % self.mpc
        self.log['beta'] = "%.4f" % self.beta

        return b['x'] # Truncate small errors 
Example #9
Source File: apriori.py    From cryptotrader with MIT License    5 votes
def update(self, b, x, x2):

        # Update portfolio with no regret
        last_x = x[-1, :]
        leader = np.zeros_like(last_x)
        leader[np.argmax(last_x)] = -1

        b = simplex_proj(self.opt.optimize(leader, b))

        # Manage allocation risk
        b = minimize(
            self.loss,
            b,
            args=(*risk.polar_returns(x2, self.k), last_x),
            constraints=self.cons,
            options={'maxiter': 300},
            tol=1e-6,
            bounds=tuple((0,1) for _ in range(b.shape[0]))
        )

        # Log variables
        self.log['lr'] = "%.4f" % self.opt.lr
        self.log['mpc'] = "%.4f" % self.mpc
        self.log['risk'] = "%.6f" % b['fun']

        # Return best portfolio
        return b['x'] 
Example #10
Source File: komd.py    From MKLpy with GNU General Public License v3.0    5 votes
def _fit(self,X,Y):    
        self.X = X
        values = np.unique(Y)
        Y = [1 if l==values[1] else -1 for l in Y]
        self.Y = Y
        npos = len([1.0 for l in Y if l == 1])
        nneg = len([1.0 for l in Y if l == -1])
        gamma_unif = matrix([1.0/npos if l == 1 else 1.0/nneg for l in Y])
        YY = matrix(np.diag(list(matrix(Y))))

        Kf = self.__kernel_definition__()
        ker_matrix = matrix(Kf(X,X).astype(np.double))
        #KLL = (1.0 / (gamma_unif.T * YY * ker_matrix * YY * gamma_unif)[0])*(1.0-self.lam)*YY*ker_matrix*YY
        KLL = (1.0-self.lam)*YY*ker_matrix*YY
        LID = matrix(np.diag([self.lam * (npos * nneg / (npos+nneg))]*len(Y)))
        Q = 2*(KLL+LID)
        p = matrix([0.0]*len(Y))
        G = -matrix(np.diag([1.0]*len(Y)))
        h = matrix([0.0]*len(Y),(len(Y),1))
        A = matrix([[1.0 if lab==+1 else 0 for lab in Y],[1.0 if lab2==-1 else 0 for lab2 in Y]]).T
        b = matrix([[1.0],[1.0]],(2,1))
        
        solvers.options['show_progress'] = False
        solvers.options['maxiters'] = self.max_iter
        sol = solvers.qp(Q,p,G,h,A,b)
        self.gamma = sol['x']
        if self.verbose:
            print ('[KOMD]')
            print ('optimization finished, #iter = %d' % sol['iterations'])
            print ('status of the solution: %s' % sol['status'])
            print ('objval: %.5f' % sol['primal objective'])
            
        bias = 0.5 * self.gamma.T * ker_matrix * YY * self.gamma
        self.bias = bias
        self.is_fitted = True
        self.ker_matrix = ker_matrix
        return self 
Example #11
Source File: MEMO.py    From MKLpy with GNU General Public License v3.0    5 votes
def opt_margin(K,YY,init_sol=None):
	'''optimized margin evaluation'''
	n = K.shape[0]
	P = 2 * (YY * matrix(K) * YY)
	p = matrix([0.0]*n)
	G = -spdiag([1.0]*n)
	h = matrix([0.0]*n)
	A = matrix([[1.0 if YY[i,i]==+1 else 0 for i in range(n)],
				[1.0 if YY[j,j]==-1 else 0 for j in range(n)]]).T
	b = matrix([[1.0],[1.0]],(2,1))
	solvers.options['show_progress']=False
	sol = solvers.qp(P,p,G,h,A,b,initvals=init_sol)	
	margin2 = sol['primal objective']
	return margin2, sol['x'], sol 
Example #12
Source File: GRAM.py    From MKLpy with GNU General Public License v3.0    5 votes
def opt_margin(K, YY, init_sol=None):
    '''optimized margin evaluation'''
    n = K.shape[0]
    P = 2 * (YY * matrix(K.numpy()) * YY)
    p = matrix([0.0]*n)
    G = -spdiag([1.0]*n)
    h = matrix([0.0]*n)
    A = matrix([[1.0 if YY[i,i]==+1 else 0 for i in range(n)],
                [1.0 if YY[j,j]==-1 else 0 for j in range(n)]]).T
    b = matrix([[1.0],[1.0]],(2,1))
    solvers.options['show_progress']=False
    sol = solvers.qp(P,p,G,h,A,b,initvals=init_sol) 
    margin2 = sol['primal objective']
    return sol, margin2 
Example #13
Source File: GRAM.py    From MKLpy with GNU General Public License v3.0    5 votes
def opt_radius(K, init_sol=None): 
    n = K.shape[0]
    K = matrix(K.numpy())
    P = 2 * K
    p = -matrix([K[i,i] for i in range(n)])
    G = -spdiag([1.0] * n)
    h = matrix([0.0] * n)
    A = matrix([1.0] * n).T
    b = matrix([1.0])
    solvers.options['show_progress']=False
    sol = solvers.qp(P,p,G,h,A,b,initvals=init_sol)
    radius2 = (-p.T * sol['x'])[0] - (sol['x'].T * K * sol['x'])[0]
    return sol, radius2 
Example #14
Source File: evaluate.py    From MKLpy with GNU General Public License v3.0    5 votes
def margin(K,Y):
    """evaluate the margin in a classification problem of examples in feature space.
    If the classes are not linearly separable in feature space, then the
    margin obtained is 0.

    Note that it works only for binary tasks.

    Parameters
    ----------
    K : (n,n) ndarray,
        the kernel that represents the data.
    Y : (n) array_like,
        the labels vector.
    """
    K, Y = validation.check_K_Y(K, Y, binary=True)
    n = Y.size()[0]
    Y = [1 if y==Y[0] else -1 for y in Y]
    YY = spdiag(Y)
    P = 2*(YY*matrix(K.numpy())*YY)
    p = matrix([0.0]*n)
    G = -spdiag([1.0]*n)
    h = matrix([0.0]*n)
    A = matrix([[1.0 if Y[i]==+1 else 0 for i in range(n)],
                [1.0 if Y[j]==-1 else 0 for j in range(n)]]).T
    b = matrix([[1.0],[1.0]],(2,1))
    solvers.options['show_progress']=False
    sol = solvers.qp(P,p,G,h,A,b)
    return sol['primal objective']**.5 
Example #15
Source File: _knockoff.py    From vnpy_crypto with MIT License    4 votes
def _design_knockoff_sdp(exog):
    """
    Use semidefinite programming to construct a knockoff design
    matrix.

    Requires cvxopt to be installed.
    """

    try:
        from cvxopt import solvers, matrix
    except ImportError:
        raise ValueError("SDP knockoff designs require installation of cvxopt")

    nobs, nvar = exog.shape

    # Standardize exog
    xnm = np.sum(exog**2, 0)
    xnm = np.sqrt(xnm)
    exog /= xnm

    Sigma = np.dot(exog.T, exog)

    c = matrix(-np.ones(nvar))

    h0 = np.concatenate((np.zeros(nvar), np.ones(nvar)))
    h0 = matrix(h0)
    G0 = np.concatenate((-np.eye(nvar), np.eye(nvar)), axis=0)
    G0 = matrix(G0)

    h1 = 2 * Sigma
    h1 = matrix(h1)
    i, j = np.diag_indices(nvar)
    G1 = np.zeros((nvar*nvar, nvar))
    G1[i*nvar + j, i] = 1
    G1 = matrix(G1)

    solvers.options['show_progress'] = False
    sol = solvers.sdp(c, G0, h0, [G1], [h1])
    sl = np.asarray(sol['x']).ravel()

    xcov = np.dot(exog.T, exog)
    exogn = _get_knmat(exog, xcov, sl)

    return exog, exogn, sl 
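The heart of this routine is the SDP that selects a vector s with 0 <= s_j <= 1 and 2*Sigma - diag(s) positive semidefinite; below is a self-contained sketch of just that piece on a toy 2x2 correlation matrix (the _get_knmat step, defined elsewhere in the module, is omitted).

import numpy as np
from cvxopt import matrix, solvers

Sigma = np.array([[1.0, 0.3], [0.3, 1.0]])
nvar = Sigma.shape[0]

c = matrix(-np.ones(nvar))                                    # maximize sum(s)
G0 = matrix(np.concatenate((-np.eye(nvar), np.eye(nvar)), axis=0))
h0 = matrix(np.concatenate((np.zeros(nvar), np.ones(nvar))))  # 0 <= s <= 1
i, j = np.diag_indices(nvar)
G1 = np.zeros((nvar * nvar, nvar))
G1[i * nvar + j, i] = 1                                       # s enters the LMI on the diagonal
solvers.options['show_progress'] = False
sol = solvers.sdp(c, G0, h0, [matrix(G1)], [matrix(2 * Sigma)])
s = np.asarray(sol['x']).ravel()                              # close to [1, 1] for this Sigma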
Example #16
Source File: apriori.py    From cryptotrader with MIT License    4 votes
def update(self, cov_mat, exp_rets):
        """
         Note: As the Sharpe ratio is not invariant with respect
         to leverage, it is not possible to construct non-trivial
         market neutral tangency portfolios. This is because for
         a positive initial Sharpe ratio, the Sharpe ratio grows unbounded
         with increasing leverage.

         Parameters
         ----------
         cov_mat: pandas.DataFrame
             Covariance matrix of asset returns.
         exp_rets: pandas.Series
             Expected asset returns (often historical returns).
         allow_short: bool, optional
             If 'False' construct a long-only portfolio.
             If 'True' allow shorting, i.e. negative weights.

         Returns
         -------
         weights: pandas.Series
             Optimal asset weights.
         """
        if not isinstance(cov_mat, pd.DataFrame):
            raise ValueError("Covariance matrix is not a DataFrame")

        if not isinstance(exp_rets, pd.Series):
            raise ValueError("Expected returns is not a Series")

        if not cov_mat.index.equals(exp_rets.index):
            raise ValueError("Indices do not match")

        n = len(cov_mat)

        P = opt.matrix(cov_mat.values)
        q = opt.matrix(0.0, (n, 1))

        # Constraints Gx <= h
        # exp_rets*x >= 1 and x >= 0
        G = opt.matrix(np.vstack((-exp_rets.values,
                                  -np.identity(n))))
        h = opt.matrix(np.vstack((-1.0,
                                  np.zeros((n, 1)))))

        # Solve
        optsolvers.options['show_progress'] = False
        sol = optsolvers.qp(P, q, G, h)

        if sol['status'] != 'optimal':
            warnings.warn("Convergence problem")

        weights = np.append(np.squeeze(sol['x']), [0.0])

        # Rescale weights, so that sum(weights) = 1
        weights /= weights.sum()
        return weights 
Example #17
Source File: _knockoff.py    From ibllib with MIT License    4 votes
def _design_knockoff_sdp(exog):
    """
    Use semidefinite programming to construct a knockoff design
    matrix.
    Requires cvxopt to be installed.
    """

    try:
        from cvxopt import solvers, matrix
    except ImportError:
        raise ValueError("SDP knockoff designs require installation of cvxopt")

    nobs, nvar = exog.shape

    # Standardize exog
    xnm = np.sum(exog**2, 0)
    xnm = np.sqrt(xnm)
    exog = exog / xnm

    Sigma = np.dot(exog.T, exog)

    c = matrix(-np.ones(nvar))

    h0 = np.concatenate((np.zeros(nvar), np.ones(nvar)))
    h0 = matrix(h0)
    G0 = np.concatenate((-np.eye(nvar), np.eye(nvar)), axis=0)
    G0 = matrix(G0)

    h1 = 2 * Sigma
    h1 = matrix(h1)
    i, j = np.diag_indices(nvar)
    G1 = np.zeros((nvar * nvar, nvar))
    G1[i * nvar + j, i] = 1
    G1 = matrix(G1)

    solvers.options['show_progress'] = False
    sol = solvers.sdp(c, G0, h0, [G1], [h1])
    sl = np.asarray(sol['x']).ravel()

    xcov = np.dot(exog.T, exog)
    exogn = _get_knmat(exog, xcov, sl)

    return exog, exogn, sl 
Example #18
Source File: multirate.py    From pyroomacoustics with MIT License    4 votes
def frac_delay(delta, N, w_max=0.9, C=4):
    '''
    Compute an optimal fractional delay filter according to

    Design of Fractional Delay Filters Using Convex Optimization
    William Putnam and Julius Smith

    Parameters
    ----------
    delta: 
        delay of filter in (fractional) samples
    N: 
        number of taps
    w_max: 
        Bandwidth of the filter (in fraction of pi) (default 0.9)
    C: 
        sets the number of constraints to C*N (default 4)
    '''

    # constraints
    N_C = int(C*N)
    w = np.linspace(0, w_max*np.pi, N_C)[:,np.newaxis]
    
    n = np.arange(N)

    try:
        from cvxopt import solvers, matrix
    except ImportError:
        raise ValueError('To use the frac_delay function, the cvxopt module is necessary.')

    f = np.concatenate((np.zeros(N), np.ones(1)))

    A = []
    b = []
    for i in range(N_C):
        Anp = np.concatenate(([np.cos(w[i]*n), -np.sin(w[i]*n)], [[0],[0]]), axis=1)
        Anp = np.concatenate(([-f], Anp), axis=0)
        A.append(matrix(Anp))
        b.append(matrix(np.concatenate(([0], np.cos(w[i]*delta), -np.sin(w[i]*delta)))))

    solvers.options['show_progress'] = False
    sol = solvers.socp(matrix(f), Gq=A, hq=b)

    h = np.array(sol['x'])[:-1,0]

    '''
    import matplotlib.pyplot as plt
    w = np.linspace(0, np.pi, 2*N_C)
    F = np.exp(-1j*w[:,np.newaxis]*n)
    Hd = np.exp(-1j*delta*w)
    plt.figure()
    plt.subplot(3,1,1)
    plt.plot(np.abs(np.dot(F,h) - Hd))
    plt.subplot(3,1,2)
    plt.plot(np.diff(np.angle(np.dot(F,h))))
    plt.subplot(3,1,3)
    plt.plot(h)
    '''

    return h 
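A usage sketch for the filter above, assuming numpy is imported as np: design 16 taps for a 7.3-sample delay and check the frequency response against the ideal delay inside the design band.

import numpy as np

h = frac_delay(7.3, 16)                            # 16 taps, 7.3-sample delay, 0.9*pi band
n = np.arange(16)
w = np.linspace(0, 0.9 * np.pi, 256)
H = np.exp(-1j * w[:, None] * n) @ h               # response of the designed filter
err = np.max(np.abs(H - np.exp(-1j * 7.3 * w)))    # deviation from the ideal fractional delay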
Example #19
Source File: portfolioopt.py    From portfolioopt with MIT License    4 votes
def min_var_portfolio(cov_mat, allow_short=False):
    """
    Computes the minimum variance portfolio.

    Note: As the variance is not invariant with respect
    to leverage, it is not possible to construct non-trivial
    market neutral minimum variance portfolios. This is because
    the variance approaches zero with decreasing leverage,
    i.e. the market neutral portfolio with minimum variance
    is not invested at all.
    
    Parameters
    ----------
    cov_mat: pandas.DataFrame
        Covariance matrix of asset returns.
    allow_short: bool, optional
        If 'False' construct a long-only portfolio.
        If 'True' allow shorting, i.e. negative weights.

    Returns
    -------
    weights: pandas.Series
        Optimal asset weights.
    """
    if not isinstance(cov_mat, pd.DataFrame):
        raise ValueError("Covariance matrix is not a DataFrame")

    n = len(cov_mat)

    P = opt.matrix(cov_mat.values)
    q = opt.matrix(0.0, (n, 1))

    # Constraints Gx <= h
    if not allow_short:
        # x >= 0
        G = opt.matrix(-np.identity(n))
        h = opt.matrix(0.0, (n, 1))
    else:
        G = None
        h = None

    # Constraints Ax = b
    # sum(x) = 1
    A = opt.matrix(1.0, (1, n))
    b = opt.matrix(1.0)

    # Solve
    optsolvers.options['show_progress'] = False
    sol = optsolvers.qp(P, q, G, h, A, b)

    if sol['status'] != 'optimal':
        warnings.warn("Convergence problem")

    # Put weights into a labeled series
    weights = pd.Series(sol['x'], index=cov_mat.index)
    return weights
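A usage sketch, assuming the module-level imports of portfolioopt (cvxopt aliased as opt and its solvers as optsolvers, plus numpy and pandas) are in scope: with a diagonal covariance matrix the long-only minimum variance weights come out inversely proportional to the asset variances.

import numpy as np
import pandas as pd

assets = ['A', 'B', 'C']
cov_mat = pd.DataFrame(np.diag([0.04, 0.09, 0.16]), index=assets, columns=assets)
weights = min_var_portfolio(cov_mat)
print(weights.round(3))   # non-negative weights summing to 1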