Java Code Examples for org.apache.mahout.math.Vector#norm()

The following examples show how to use org.apache.mahout.math.Vector#norm(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: CRFElasticNetLinearRegOptimizer.java    From pyramid with Apache License 2.0 4 votes vote down vote up
/**
 * Computes the elastic-net penalty of the model's weight vector:
 * regularization * ((1 - l1Ratio) * 0.5 * ||w||_2^2 + l1Ratio * ||w||_1).
 *
 * @param linearRegression model whose weights are penalized
 * @return the scaled elastic-net penalty value
 */
private double penalty(CRFLinearRegression linearRegression){
    Vector weights = linearRegression.getWeights().getWeights();
    // ridge (squared L2) part, weighted by the complement of the L1 ratio
    double ridge = (1-l1Ratio)*0.5*Math.pow(weights.norm(2),2);
    // lasso (L1) part
    double lasso = l1Ratio*weights.norm(1);
    return regularization * (ridge + lasso);
}
 
Example 2
Source File: BlockwiseCD.java    From pyramid with Apache License 2.0 4 votes vote down vote up
/**
 * Elastic-net penalty over all CRF weights:
 * regularization * ((1 - l1Ratio) * 0.5 * ||w||_2^2 + l1Ratio * ||w||_1).
 *
 * @return the scaled elastic-net penalty value
 */
private double getPenalty() {
    Vector allWeights = cmlcrf.getWeights().getAllWeights();
    // squared-L2 (ridge) component
    double l2Component = (1-l1Ratio)*0.5*Math.pow(allWeights.norm(2),2);
    // L1 (lasso) component
    double l1Component = l1Ratio*allWeights.norm(1);
    return regularization * (l2Component + l1Component);
}
 
Example 3
Source File: CMLCRFElasticNet.java    From pyramid with Apache License 2.0 4 votes vote down vote up
/**
 * Elastic-net penalty over all CRF weights:
 * regularization * ((1 - l1Ratio) * 0.5 * ||w||_2^2 + l1Ratio * ||w||_1).
 *
 * @return the scaled elastic-net penalty value
 */
private double getPenalty() {
    Vector w = cmlcrf.getWeights().getAllWeights();
    double twoNorm = w.norm(2);
    // combine ridge (squared L2) and lasso (L1) terms per the l1Ratio mix
    double combined = (1-l1Ratio)*0.5*Math.pow(twoNorm,2) + l1Ratio*w.norm(1);
    return regularization * combined;
}
 
Example 4
Source File: TrustRegionNewtonOptimizer.java    From pyramid with Apache License 2.0 4 votes vote down vote up
/**
 * Trust Region Newton method (TRON), following the LIBLINEAR implementation.
 * Resets {@code w} to the zero vector and iteratively minimizes {@code loss},
 * writing the optimized weights back into {@code w} in place.
 *
 * Fix: the two warning messages used printf-style {@code %n} with
 * {@code System.out.println}, which printed the literal characters "%n"
 * instead of a newline; they now use {@code System.out.printf}.
 *
 * @param w weight vector; overwritten with the optimized solution
 */
void tron(Vector w) {

        int numColumns = loss.getNumColumns();
        double delta, snorm, one = 1.0;
        double alpha, f, fnew, prered, actred, gs;
        int search = 1, iter = 1;

        Vector w_new = new DenseVector(numColumns);
        Vector g = new DenseVector(numColumns);

        // start the optimization from the zero vector
        for (int i = 0; i < numColumns; i++)
            w.set(i,0);

        f = loss.fun(w);
        loss.grad(w, g);
        // initial trust region radius = initial gradient norm
        delta = g.norm(2);
        double gnorm1 = delta;
        double gnorm = gnorm1;

        // already converged at the starting point (only if gnorm1 == 0 here)
        if (gnorm <= eps * gnorm1) search = 0;

        iter = 1;

        while (iter <= maxIter && search != 0) {

            // solve the trust region subproblem with truncated conjugate gradient
            Pair<Vector,Vector> result = trcg(delta, g);
            Vector s = result.getFirst();
            Vector r = result.getSecond();

            // candidate point: w_new = w + s
            for (int j=0;j<w.size();j++){
                w_new.set(j,w.get(j));
            }
            daxpy(one, s, w_new);

            gs = g.dot(s);
            // predicted reduction of the quadratic model
            prered = -0.5 * (gs - s.dot(r));
            fnew = loss.fun(w_new);

            // Compute the actual reduction.
            actred = f - fnew;

            // On the first iteration, adjust the initial step bound.
            snorm = s.norm(2);
            if (iter == 1) delta = Math.min(delta, snorm);

            // Compute prediction alpha*snorm of the step.
            if (fnew - f - gs <= 0)
                alpha = SIGMA3;
            else
                alpha = Math.max(SIGMA1, -0.5 * (gs / (fnew - f - gs)));

            // Update the trust region bound according to the ratio of actual to
            // predicted reduction.
            if (actred < ETA0 * prered)
                delta = Math.min(Math.max(alpha, SIGMA1) * snorm, SIGMA2 * delta);
            else if (actred < ETA1 * prered)
                delta = Math.max(SIGMA1 * delta, Math.min(alpha * snorm, SIGMA2 * delta));
            else if (actred < ETA2 * prered)
                delta = Math.max(SIGMA1 * delta, Math.min(alpha * snorm, SIGMA3 * delta));
            else
                delta = Math.max(delta, Math.min(alpha * snorm, SIGMA3 * delta));

            System.out.println("f = "+f);

            // accept the step only when the actual reduction is large enough
            if (actred > ETA0 * prered) {
                iter++;
                for (int j=0;j<w.size();j++){
                    w.set(j,w_new.get(j));
                }
                f = fnew;
                loss.grad(w, g);

                gnorm = g.norm(2);
                if (gnorm <= eps * gnorm1) break;
            }
            // objective diverging to -infinity; give up
            if (f < -1.0e+32) {

                break;
            }
            if (Math.abs(actred) <= 0 && prered <= 0) {
                // printf so that %n renders as a platform newline
                System.out.printf("WARNING: actred and prered <= 0%n");
                break;
            }
            if (Math.abs(actred) <= 1.0e-12 * Math.abs(f) && Math.abs(prered) <= 1.0e-12 * Math.abs(f)) {
                // printf so that %n renders as a platform newline
                System.out.printf("WARNING: actred and prered too small%n");
                break;
            }
        }
    }
 
Example 5
Source File: TrustRegionNewtonOptimizer.java    From pyramid with Apache License 2.0 4 votes vote down vote up
/**
 * Truncated conjugate gradient solver for the trust region subproblem
 * (LIBLINEAR's trcg): approximately minimizes the quadratic model
 * g's + 0.5 s'Hs subject to ||s||_2 &lt;= delta, where H is the Hessian of the
 * loss (applied implicitly via {@code loss.Hv}).
 *
 * @param delta trust region radius (input)
 * @param g gradient at the current point (input)
 * @return pair (s, r): the computed step s and the final residual r = -g - Hs
 */
private Pair<Vector,Vector> trcg(double delta, Vector g) {
    int numColumns = loss.getNumColumns();
    double one = 1;
    Vector d = new DenseVector(numColumns);   // CG search direction
    Vector Hd = new DenseVector(numColumns);  // Hessian-vector product H*d
    double rTr, rnewTrnew, cgtol;
    Vector s = new DenseVector(numColumns);   // accumulated step
    Vector r = new DenseVector(numColumns);   // residual
    Pair<Vector,Vector> result = new Pair<>();
    // initialize: s = 0, r = -g, d = r
    for (int i = 0; i < numColumns; i++) {
        s.set(i,0);
        r.set(i,-g.get(i));
        d.set(i,r.get(i));
    }
    // stop CG once the residual is small relative to the gradient norm
    cgtol = 0.1 * g.norm(2);

    rTr = r.dot(r);

    while (true) {
        if (r.norm(2) <= cgtol) {
            break;
        }
        loss.Hv(d, Hd);  // Hd = H * d

        double alpha = rTr / d.dot(Hd);
        daxpy(alpha, d, s);  // tentative step: s += alpha * d
        if (s.norm(2) > delta) {
            // stepped outside the trust region: undo the step ...
            alpha = -alpha;
            daxpy(alpha, d, s);

            // ... then solve ||s + alpha*d||_2 = delta for alpha > 0, using a
            // cancellation-avoiding form of the quadratic formula chosen by
            // the sign of s.d
            double std = s.dot(d);
            double sts = s.dot(s);
            double dtd = d.dot(d);
            double dsq = delta * delta;
            double rad = Math.sqrt(std * std + dtd * (dsq - sts));
            if (std >= 0)
                alpha = (dsq - sts) / (std + rad);
            else
                alpha = (rad - std) / dtd;
            daxpy(alpha, d, s);   // move s exactly onto the boundary
            alpha = -alpha;
            daxpy(alpha, Hd, r);  // keep residual consistent with truncated step
            break;
        }
        alpha = -alpha;
        daxpy(alpha, Hd, r);  // r -= alpha * Hd
        rnewTrnew = r.dot(r);
        double beta = rnewTrnew / rTr;
        scale(beta, d);       // d = beta*d + r : next conjugate direction
        daxpy(one, r, d);
        rTr = rnewTrnew;
    }

    result.setFirst(s);
    result.setSecond(r);
    return result;
}
 
Example 6
Source File: ElasticNetLogisticTrainer.java    From pyramid with Apache License 2.0 4 votes vote down vote up
/**
 * Elastic-net penalty for class {@code k}'s weights (bias excluded):
 * regularization * ((1 - l1Ratio) * 0.5 * ||w||_2^2 + l1Ratio * ||w||_1).
 *
 * @param k class index
 * @return the scaled elastic-net penalty for that class
 */
private double penalty(int k) {
    Vector classWeights = logisticRegression.getWeights().getWeightsWithoutBiasForClass(k);
    // ridge (squared L2) term
    double ridge = (1-l1Ratio)*0.5*Math.pow(classWeights.norm(2),2);
    // lasso (L1) term
    double lasso = l1Ratio*classWeights.norm(1);
    return regularization * (ridge + lasso);
}
 
Example 7
Source File: LogisticLoss.java    From pyramid with Apache License 2.0 4 votes vote down vote up
/**
 * Elastic-net penalty for the given class's weights (bias excluded):
 * regularization * ((1 - l1Ratio) * 0.5 * ||w||_2^2 + l1Ratio * ||w||_1).
 *
 * @param classIndex class whose weights are penalized
 * @return the scaled elastic-net penalty for that class
 */
private double penaltyValueEL(int classIndex) {
    Vector w = logisticRegression.getWeights().getWeightsWithoutBiasForClass(classIndex);
    double mixed = (1-l1Ratio)*0.5*Math.pow(w.norm(2),2)
            + l1Ratio*w.norm(1);
    return regularization * mixed;
}
 
Example 8
Source File: Vectors.java    From pyramid with Apache License 2.0 4 votes vote down vote up
/**
 * Cosine similarity between two vectors: dot(v1, v2) / (||v1|| * ||v2||).
 * NOTE: if either vector has zero L2 norm the result is NaN (division by zero),
 * matching the existing behavior.
 *
 * @param vector1 first vector
 * @param vector2 second vector
 * @return cosine similarity in [-1, 1] (NaN for zero-norm inputs)
 */
public static double cosine(Vector vector1, Vector vector2){
    double denominator = vector1.norm(2) * vector2.norm(2);
    return vector1.dot(vector2) / denominator;
}
 
Example 9
Source File: ElasticNetLinearRegOptimizer.java    From pyramid with Apache License 2.0 4 votes vote down vote up
/**
 * Elastic-net penalty of the model's weights (bias excluded):
 * regularization * ((1 - l1Ratio) * 0.5 * ||w||_2^2 + l1Ratio * ||w||_1).
 *
 * @param linearRegression model whose weights are penalized
 * @return the scaled elastic-net penalty value
 */
private double penalty(LinearRegression linearRegression){
    Vector w = linearRegression.getWeights().getWeightsWithoutBias();
    // blend squared-L2 (ridge) and L1 (lasso) norms according to l1Ratio
    double blended = (1-l1Ratio)*0.5*Math.pow(w.norm(2),2)
            + l1Ratio*w.norm(1);
    return regularization * blended;
}