Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#sub()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#sub(), ND4J's element-wise subtraction method. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
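
Before the examples, here is a minimal, self-contained sketch of the method itself: sub() performs element-wise subtraction and returns a new array, subi() is the in-place variant, and subRowVector() broadcasts a vector across the rows of a matrix. The class name and values below are purely illustrative.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class SubBasics {
    public static void main(String[] args) {
        INDArray a = Nd4j.create(new double[] {4, 9, 16, 25}, new long[] {2, 2});
        INDArray b = Nd4j.create(new double[] {1, 2, 3, 4}, new long[] {2, 2});

        INDArray diff = a.sub(b);           // element-wise a - b; a and b are unchanged
        INDArray shifted = a.sub(10);       // subtract a scalar from every element
        a.subi(b);                          // in-place variant: a itself is overwritten

        INDArray row = Nd4j.create(new double[] {1, 2}, new long[] {1, 2});
        INDArray centered = b.subRowVector(row); // broadcast: subtract row from each row of b

        System.out.println(diff);
        System.out.println(shifted);
        System.out.println(centered);
    }
}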
Example 1
Source File: TwoPointApproximation.java    From nd4j with Apache License 2.0
/**
 * Adjusts the final finite difference scheme to the presence of bounds.
 *
 * Returns (in this order):
 * the adjusted step sizes, and an int mask array indicating where a one-sided scheme should be used
 * @param x the point at which to estimate the derivative
 * @param h the finite difference steps
 * @param numSteps Number of h steps in one direction
 *                 used to implement the finite difference scheme
 *
 * @param lowerBound Lower bounds for the independent variable
 * @param upperBound Upper bounds for the independent variable
 * @return the adjusted step sizes and the one-sided mask
 */
public static INDArray[] adjustSchemeToBounds(INDArray x,INDArray h,int numSteps,INDArray lowerBound,INDArray upperBound) {
    INDArray oneSided = Nd4j.onesLike(h);
    if(and(lowerBound.eq(Double.NEGATIVE_INFINITY),upperBound.eq(Double.POSITIVE_INFINITY)).sumNumber().doubleValue() > 0) {
        return new INDArray[] {h,oneSided};
    }
    INDArray hTotal = h.mul(numSteps);
    INDArray hAdjusted = h.dup();
    INDArray lowerDist = x.sub(lowerBound);
    INDArray upperBound2 = upperBound.sub(x);

    INDArray central = and(greaterThanOrEqual(lowerDist,hTotal),greaterThanOrEqual(upperBound2,hTotal));
    INDArray forward = and(greaterThanOrEqual(upperBound,lowerDist),not(central));
    hAdjusted.put(forward,min(h.get(forward),upperBound2.get(forward).mul(0.5).divi(numSteps)));
    oneSided.put(forward,Nd4j.scalar(1.0));

    INDArray backward = and(upperBound2.lt(lowerBound),not(central));
    hAdjusted.put(backward,min(h.get(backward),lowerDist.get(backward).mul(0.5).divi(numSteps)));
    oneSided.put(backward,Nd4j.scalar(1.0));

    INDArray minDist = min(upperBound2,lowerDist).divi(numSteps);
    INDArray adjustedCentral = and(not(central),lessThanOrEqual(abs(hAdjusted),minDist));
    hAdjusted.put(adjustedCentral,minDist.get(adjustedCentral));
    oneSided.put(adjustedCentral,Nd4j.scalar(0.0));
    return new INDArray[] {hAdjusted,oneSided};
}
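
The method above only adjusts step sizes; as a simpler illustration of the finite-difference idea it serves, here is a hedged, stand-alone sketch of a central-difference gradient built on sub(). It is not part of TwoPointApproximation; the class and helper names are assumptions made for this sketch.

import java.util.function.Function;

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class CentralDifferenceSketch {

    // f'(x) ≈ (f(x + h) - f(x - h)) / (2h), computed element-wise with sub()/div()
    public static INDArray numericGradient(Function<INDArray, INDArray> f, INDArray x, double h) {
        INDArray fPlus = f.apply(x.add(h));
        INDArray fMinus = f.apply(x.sub(h));
        return fPlus.sub(fMinus).divi(2 * h);
    }

    public static void main(String[] args) {
        INDArray x = Nd4j.create(new double[] {1, 2, 3});
        INDArray grad = numericGradient(v -> v.mul(v), x, 1e-5); // d/dx of x^2 is 2x
        System.out.println(grad); // approximately [2, 4, 6]
    }
}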
 
Example 2
Source File: TestPCA.java    From deeplearning4j with Apache License 2.0
@Test
public void testFactorDims() {
    int m = 13;
    int n = 4;

    double f[] = new double[] {7, 1, 11, 11, 7, 11, 3, 1, 2, 21, 1, 11, 10, 26, 29, 56, 31, 52, 55, 71, 31, 54, 47,
                    40, 66, 68, 6, 15, 8, 8, 6, 9, 17, 22, 18, 4, 23, 9, 8, 60, 52, 20, 47, 33, 22, 6, 44, 22, 26,
                    34, 12, 12};

    INDArray A = Nd4j.create(f, new int[] {m, n}, 'f');

    INDArray A1 = A.dup('f');
    INDArray Factor = org.nd4j.linalg.dimensionalityreduction.PCA.pca_factor(A1, 3, true);
    A1 = A.subiRowVector(A.mean(0));

    INDArray Reduced = A1.mmul(Factor);
    INDArray Reconstructed = Reduced.mmul(Factor.transpose());
    INDArray Diff = Reconstructed.sub(A1);
    for (int i = 0; i < m * n; i++) {
        assertEquals("Reconstructed matrix is very different from the original.", 0.0, Diff.getDouble(i), 1.0);
    }
}
 
Example 3
Source File: TestPCA.java    From nd4j with Apache License 2.0
@Test
public void testFactorVariance() {
    int m = 13;
    int n = 4;

    double f[] = new double[] {7, 1, 11, 11, 7, 11, 3, 1, 2, 21, 1, 11, 10, 26, 29, 56, 31, 52, 55, 71, 31, 54, 47,
                    40, 66, 68, 6, 15, 8, 8, 6, 9, 17, 22, 18, 4, 23, 9, 8, 60, 52, 20, 47, 33, 22, 6, 44, 22, 26,
                    34, 12, 12};

    INDArray A = Nd4j.create(f, new int[] {m, n}, 'f');

    INDArray A1 = A.dup('f');
    INDArray Factor1 = org.nd4j.linalg.dimensionalityreduction.PCA.pca_factor(A1, 0.95, true);
    A1 = A.subiRowVector(A.mean(0));
    INDArray Reduced1 = A1.mmul(Factor1);
    INDArray Reconstructed1 = Reduced1.mmul(Factor1.transpose());
    INDArray Diff1 = Reconstructed1.sub(A1);
    for (int i = 0; i < m * n; i++) {
        assertEquals("Reconstructed matrix is very different from the original.", 0.0, Diff1.getDouble(i), 0.1);
    }
    INDArray A2 = A.dup('f');
    INDArray Factor2 = org.nd4j.linalg.dimensionalityreduction.PCA.pca_factor(A2, 0.50, true);
    assertTrue("Variance differences should change factor sizes.", Factor1.columns() > Factor2.columns());
}
 
Example 4
Source File: KerasModelEndToEndTest.java    From deeplearning4j with Apache License 2.0
private static void compareINDArrays(String label, INDArray expected, INDArray actual, double eps) {
    if(!expected.equalShapes(actual)){
        throw new IllegalStateException("Shapes do not match for \"" + label + "\": got " + Arrays.toString(expected.shape()) + " vs " + Arrays.toString(actual.shape()));
    }
    INDArray diff = expected.sub(actual.castTo(expected.dataType()));
    double min = diff.minNumber().doubleValue();
    double max = diff.maxNumber().doubleValue();
    log.info(label + ": " + expected.equalsWithEps(actual, eps) + ", " + min + ", " + max);
    double threshold = 1e-7;
    double aAbsMax = Math.max(Math.abs(expected.minNumber().doubleValue()), Math.abs(expected.maxNumber().doubleValue()));
    double bAbsMax = Math.max(Math.abs(actual.minNumber().doubleValue()), Math.abs(actual.maxNumber().doubleValue()));

    // skip too small absolute inputs
    if (Math.abs(aAbsMax) > threshold && Math.abs(bAbsMax) > threshold) {
        boolean eq = expected.equalsWithEps(actual.castTo(expected.dataType()), eps);
        if(!eq){
            System.out.println("Expected: " + Arrays.toString(expected.shape()) + ", actual: " + Arrays.toString(actual.shape()));
            System.out.println("Expected:\n" + expected);
            System.out.println("Actual: \n" + actual);
        }
        assertTrue("Output differs: " + label, eq);
    }
}
 
Example 5
Source File: CheckUtil.java    From nd4j with Apache License 2.0
/** Same as checkMmul, but for matrix subtraction */
public static boolean checkSubtract(INDArray first, INDArray second, double maxRelativeDifference,
                double minAbsDifference) {
    RealMatrix rmFirst = convertToApacheMatrix(first);
    RealMatrix rmSecond = convertToApacheMatrix(second);

    INDArray result = first.sub(second);
    RealMatrix rmResult = rmFirst.subtract(rmSecond);

    if (!checkShape(rmResult, result))
        return false;
    boolean ok = checkEntries(rmResult, result, maxRelativeDifference, minAbsDifference);
    if (!ok) {
        INDArray onCopies = Shape.toOffsetZeroCopy(first).sub(Shape.toOffsetZeroCopy(second));
        printFailureDetails(first, second, rmResult, result, onCopies, "sub");
    }
    return ok;
}
 
Example 6
Source File: LossPoisson.java    From deeplearning4j with Apache License 2.0
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if(!labels.equalShapes(preOutput)){
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    /*
     mean of (yhat - y * log(yhat))
     */
    INDArray postOutput = activationFn.getActivation(preOutput.dup(), true);

    INDArray scoreArr = Transforms.log(postOutput);
    scoreArr.muli(labels);
    scoreArr = postOutput.sub(scoreArr);

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
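
Stripped of the activation and mask handling, the per-element score built above is yhat - y * log(yhat). A hedged, stand-alone sketch of just that arithmetic (the values are arbitrary):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.ops.transforms.Transforms;

public class PoissonScoreSketch {
    public static void main(String[] args) {
        INDArray labels = Nd4j.create(new double[] {1, 2, 0, 3});
        INDArray yHat = Nd4j.create(new double[] {0.9, 2.1, 0.2, 2.8});

        // per-element Poisson score: yhat - y * log(yhat)
        INDArray score = yHat.sub(Transforms.log(yHat, true).muli(labels));

        System.out.println(score);
        System.out.println(score.meanNumber()); // the loss is usually the mean of these scores
    }
}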
 
Example 7
Source File: TimeSeriesUtil.java    From AILibs with GNU Affero General Public License v3.0
/**
 * Normalizes an INDArray vector object.
 *
 * @param array INDArray row vector with single shape dimension
 * @param inplace Indication whether the normalization should be performed in
 *            place or on a new array copy
 * @return Returns the view on the transformed INDArray (if inplace) or a
 *         normalized copy of the input array (if not inplace)
 */
public static INDArray normalizeINDArray(final INDArray array, final boolean inplace) {
	if (array.shape().length > 2 && array.shape()[0] != 1) {
		throw new IllegalArgumentException(String.format("Input INDArray object must be a vector with shape size 1. Actual shape: (%s)", Arrays.toString(array.shape())));
	}

	final double mean = array.mean(1).getDouble(0);
	final double std = array.std(1).getDouble(0);

	INDArray result;
	if (inplace) {
		result = array.subi(mean);
	} else {
		result = array.sub(mean);
	}
	return result.addi(Nd4j.EPS_THRESHOLD).divi(std);
}
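
The only difference between the two branches above is sub() versus subi(): the first normalizes a copy and leaves the input untouched, the second overwrites the input in place. A hedged sketch of that distinction, without the epsilon handling of the helper:

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class NormalizeSketch {
    public static void main(String[] args) {
        INDArray series = Nd4j.create(new double[] {2, 4, 6, 8}, new long[] {1, 4});
        double mean = series.mean(1).getDouble(0);
        double std = series.std(1).getDouble(0);

        INDArray normalizedCopy = series.sub(mean).divi(std); // sub(): series stays [2, 4, 6, 8]
        System.out.println(series);
        System.out.println(normalizedCopy);

        series.subi(mean).divi(std); // subi(): series itself is now normalized
        System.out.println(series);
    }
}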
 
Example 8
Source File: AutoRecLossFunction.java    From jstarcraft-rns with Apache License 2.0
@Override
public float computeScore(MathMatrix tests, MathMatrix trains, MathMatrix masks) {
    float score = 0F;
    if (tests instanceof Nd4jMatrix && trains instanceof Nd4jMatrix && maskData instanceof Nd4jMatrix) {
        INDArray testArray = Nd4jMatrix.class.cast(tests).getArray();
        INDArray trainArray = Nd4jMatrix.class.cast(trains).getArray();
        INDArray scoreArray = trainArray.sub(testArray);
        INDArray maskArray = Nd4jMatrix.class.cast(maskData).getArray();
        scoreArray.muli(scoreArray);
        scoreArray.muli(maskArray);
        score = scoreArray.sumNumber().floatValue();
    }
    return score;
}
 
Example 9
Source File: GaussianReconstructionDistribution.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray gradient(INDArray x, INDArray preOutDistributionParams) {
    INDArray output = preOutDistributionParams.dup();
    activationFn.getActivation(output, true);

    val size = output.size(1) / 2;
    INDArray mean = output.get(NDArrayIndex.all(), NDArrayIndex.interval(0, size));
    INDArray logStdevSquared = output.get(NDArrayIndex.all(), NDArrayIndex.interval(size, 2 * size));

    INDArray sigmaSquared = Transforms.exp(logStdevSquared, true).castTo(x.dataType());

    INDArray xSubMean = x.sub(mean.castTo(x.dataType()));
    INDArray xSubMeanSq = xSubMean.mul(xSubMean);

    INDArray dLdmu = xSubMean.divi(sigmaSquared);

    INDArray sigma = Transforms.sqrt(sigmaSquared, true);
    INDArray sigma3 = Transforms.pow(sigmaSquared, 3.0 / 2);

    INDArray dLdsigma = sigma.rdiv(-1).addi(xSubMeanSq.divi(sigma3));
    INDArray dLdlogSigma2 = sigma.divi(2).muli(dLdsigma);

    INDArray dLdx = Nd4j.createUninitialized(preOutDistributionParams.dataType(), output.shape());
    dLdx.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(0, size)}, dLdmu);
    dLdx.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.interval(size, 2 * size)}, dLdlogSigma2);
    dLdx.negi();

    //dL/dz
    return activationFn.backprop(preOutDistributionParams.dup(), dLdx).getFirst();
}
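
For reference, the expressions above are the standard gradients of the per-dimension Gaussian log-likelihood; this derivation is not in the source and is included only to make the code easier to follow:

    log p(x) = -1/2 * log(2 * pi * sigma^2) - (x - mu)^2 / (2 * sigma^2)
    d(log p)/d(mu)          = (x - mu) / sigma^2
    d(log p)/d(sigma)       = -1/sigma + (x - mu)^2 / sigma^3
    d(log p)/d(log sigma^2) = (sigma / 2) * d(log p)/d(sigma)

These match dLdmu, dLdsigma and dLdlogSigma2 above; the final negi() flips the sign because the reconstruction loss is the negative log-likelihood.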
 
Example 10
Source File: GridExecutionerTest.java    From nd4j with Apache License 2.0
@Test
public void testReverseFlow2() {
    CudaGridExecutioner executioner = ((CudaGridExecutioner) Nd4j.getExecutioner());

    INDArray n1 = Nd4j.scalar(1);
    INDArray n2 = Nd4j.scalar(2);
    INDArray n3 = Nd4j.scalar(3);
    INDArray n4 = Nd4j.scalar(4);

    System.out.println("0: ------------------------");

    INDArray nClone = n1.add(n2);

    assertEquals(Nd4j.scalar(3), nClone);
    INDArray n1PlusN2 = n1.add(n2);
    assertFalse(n1PlusN2.equals(n1));

    System.out.println("2: ------------------------");

    System.out.println(n4);

    INDArray subbed = n4.sub(n3);
    INDArray mulled = n4.mul(n3);
    INDArray div = n4.div(n3);

    System.out.println("Subbed: " + subbed);
    System.out.println("Mulled: " + mulled);
    System.out.println("Div: " + div);
    System.out.println("4: ------------------------");

    assertFalse(subbed.equals(n4));
    assertFalse(mulled.equals(n4));

    assertEquals(0, executioner.getQueueLength());

    assertEquals(Nd4j.scalar(1), subbed);
    assertEquals(Nd4j.scalar(12), mulled);
    assertEquals(Nd4j.scalar(1.333333333333333333333), div);
}
 
Example 11
Source File: ElementWiseVertexTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testElementWiseVertexForwardProduct() {
    int batchsz = 24;
    int featuresz = 17;
    ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().graphBuilder()
                    .addInputs("input1", "input2", "input3")
                    .addLayer("denselayer",
                                    new DenseLayer.Builder().nIn(featuresz).nOut(1).activation(Activation.IDENTITY)
                                                    .build(),
                                    "input1")
                    /* denselayer is not actually used, but it seems that you _need_ to have trainable parameters, otherwise, you get
                     * Invalid shape: Requested INDArray shape [1, 0] contains dimension size values < 1 (all dimensions must be 1 or more)
                     * at org.nd4j.linalg.factory.Nd4j.checkShapeValues(Nd4j.java:4877)
                     * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:4867)
                     * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:4820)
                     * at org.nd4j.linalg.factory.Nd4j.create(Nd4j.java:3948)
                     * at org.deeplearning4j.nn.graph.ComputationGraph.init(ComputationGraph.java:409)
                     * at org.deeplearning4j.nn.graph.ComputationGraph.init(ComputationGraph.java:341)
                     */
                    .addVertex("elementwiseProduct", new ElementWiseVertex(ElementWiseVertex.Op.Product), "input1",
                                    "input2", "input3")
                    .addLayer("Product", new ActivationLayer.Builder().activation(Activation.IDENTITY).build(),
                                    "elementwiseProduct")
                    .setOutputs("Product", "denselayer").build();

    ComputationGraph cg = new ComputationGraph(cgc);
    cg.init();


    INDArray input1 = Nd4j.rand(batchsz, featuresz);
    INDArray input2 = Nd4j.rand(batchsz, featuresz);
    INDArray input3 = Nd4j.rand(batchsz, featuresz);

    INDArray target = input1.dup().muli(input2).muli(input3);

    INDArray output = cg.output(input1, input2, input3)[0];
    INDArray squared = output.sub(target.castTo(output.dataType()));
    double rms = squared.mul(squared).sumNumber().doubleValue();
    Assert.assertEquals(0.0, rms, this.epsilon);
}
 
Example 12
Source File: AutoRecLearner.java    From jstarcraft-rns with Apache License 2.0
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    INDArray yMinusyHat = labels.sub(output);
    INDArray dldyhat = yMinusyHat.mul(-2);

    INDArray gradients = activationFn.backprop(preOutput.dup(), dldyhat).getFirst();
    gradients = gradients.mul(maskData);
    // multiply with masks, always
    if (mask != null) {
        gradients.muliColumnVector(mask);
    }

    return gradients;
}
 
Example 13
Source File: LossL1.java    From nd4j with Apache License 2.0
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                        "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    INDArray output = activationFn.getActivation(preOutput.dup(), true);

    INDArray outSubLabels = output.sub(labels);
    INDArray dLda = Nd4j.getExecutioner().execAndReturn(new Sign(outSubLabels));

    if (weights != null) {
        dLda.muliRowVector(weights);
    }

    if (mask != null && LossUtil.isPerOutputMasking(dLda, mask)) {
        //For *most* activation functions: we don't actually need to mask dL/da in addition to masking dL/dz later
        //but: some, like softmax, require both (due to dL/dz_i being a function of dL/da_j, for i != j)
        //We could add a special case for softmax (activationFn instanceof ActivationSoftmax) but that would be
        // error prone - but buy us a tiny bit of performance
        LossUtil.applyMask(dLda, mask);
    }

    //dL/dz
    INDArray gradients = activationFn.backprop(preOutput, dLda).getFirst(); //TODO activation function param gradients

    if (mask != null) {
        LossUtil.applyMask(gradients, mask);
    }

    return gradients;
}
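
The core of the gradient above is just sign(output - labels). A hedged, stand-alone sketch of that step using the Transforms helper rather than the raw Sign op (weights and masking omitted):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.ops.transforms.Transforms;

public class L1GradientSketch {
    public static void main(String[] args) {
        INDArray labels = Nd4j.create(new double[] {1.0, 0.0, 2.0});
        INDArray output = Nd4j.create(new double[] {0.5, 0.5, 3.0});

        // dL/da for the L1 loss is the element-wise sign of (output - labels)
        INDArray dLda = Transforms.sign(output.sub(labels));
        System.out.println(dLda); // [-1, 1, 1]
    }
}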
 
Example 14
Source File: NormalizerMinMaxScalerTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testBruteForce() {
    //X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    //X_scaled = X_std * (max - min) + min
    // Dataset features are scaled consecutive natural numbers
    int nSamples = 500;
    int x = 4, y = 2, z = 3;

    INDArray featureX = Nd4j.linspace(1, nSamples, nSamples).reshape(nSamples, 1);
    INDArray featureY = featureX.mul(y);
    INDArray featureZ = featureX.mul(z);
    featureX.muli(x);
    INDArray featureSet = Nd4j.concat(1, featureX, featureY, featureZ);
    INDArray labelSet = Nd4j.zeros(nSamples, 1);
    DataSet sampleDataSet = new DataSet(featureSet, labelSet);

    //expected min and max
    INDArray theoreticalMin = Nd4j.create(new double[] {x, y, z}, new long[]{1,3});
    INDArray theoreticalMax = Nd4j.create(new double[] {nSamples * x, nSamples * y, nSamples * z}, new long[]{1,3});
    INDArray theoreticalRange = theoreticalMax.sub(theoreticalMin);

    NormalizerMinMaxScaler myNormalizer = new NormalizerMinMaxScaler();
    myNormalizer.fit(sampleDataSet);

    INDArray minDataSet = myNormalizer.getMin();
    INDArray maxDataSet = myNormalizer.getMax();
    INDArray minDiff = minDataSet.sub(theoreticalMin).max();
    INDArray maxDiff = maxDataSet.sub(theoreticalMax).max();
    assertEquals(minDiff.getDouble(0), 0.0, 0.000000001);
    assertEquals(maxDiff.max().getDouble(0), 0.0, 0.000000001);

    // SAME TEST WITH THE ITERATOR
    int bSize = 1;
    DataSetIterator sampleIter = new TestDataSetIterator(sampleDataSet, bSize);
    myNormalizer.fit(sampleIter);
    minDataSet = myNormalizer.getMin();
    maxDataSet = myNormalizer.getMax();
    assertEquals(minDataSet.sub(theoreticalMin).max(1).getDouble(0), 0.0, 0.000000001);
    assertEquals(maxDataSet.sub(theoreticalMax).max(1).getDouble(0), 0.0, 0.000000001);

    sampleIter.setPreProcessor(myNormalizer);
    INDArray actual, expected, delta;
    int i = 1;
    while (sampleIter.hasNext()) {
        expected = theoreticalMin.mul(i - 1).div(theoreticalRange);
        actual = sampleIter.next().getFeatures();
        delta = Transforms.abs(actual.sub(expected));
        assertTrue(delta.max(1).getDouble(0) < 0.0001);
        i++;
    }

}
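
The formula quoted in the comments at the top of the test can also be applied directly with sub() and row-vector broadcasting; a hedged, stand-alone sketch with arbitrary 3-column data (here min = 0 and max = 1, so X_scaled equals X_std):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class MinMaxSketch {
    public static void main(String[] args) {
        INDArray X = Nd4j.create(new double[] {1, 10, 100, 2, 20, 200, 3, 30, 300}, new long[] {3, 3});

        INDArray colMin = X.min(0); // column-wise minima
        INDArray colMax = X.max(0); // column-wise maxima

        // X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        INDArray scaled = X.subRowVector(colMin).diviRowVector(colMax.sub(colMin));
        System.out.println(scaled); // every column now runs from 0 to 1
    }
}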
 
Example 15
Source File: ShiftVertexTest.java    From deeplearning4j with Apache License 2.0
private static double sum_errors(INDArray a, INDArray b) {
    INDArray o = a.sub(b.castTo(a.dataType()));
    return o.mul(o).sumNumber().doubleValue();
}
 
Example 16
Source File: CifarLoader.java    From deeplearning4j with Apache License 2.0
public double varManual(INDArray x, double mean) {
    INDArray xSubMean = x.sub(mean);
    INDArray squared = xSubMean.muli(xSubMean);
    double accum = Nd4j.getExecutioner().execAndReturn(new Sum(squared)).getFinalResult().doubleValue();
    return accum / x.ravel().length();
}
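
A hedged, stand-alone reproduction of the same computation without the executioner plumbing; note that it divides by n (population variance), matching the method above rather than the bias-corrected INDArray.varNumber():

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class VarManualSketch {
    public static void main(String[] args) {
        INDArray x = Nd4j.create(new double[] {1, 2, 3, 4, 5, 6});
        double mean = x.meanNumber().doubleValue();

        INDArray centered = x.sub(mean);          // sub() leaves x untouched
        double var = centered.muli(centered).sumNumber().doubleValue() / x.length();

        System.out.println(var); // 35 / 12 ≈ 2.9167
    }
}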
 
Example 17
Source File: NeuralStyleTransfer.java    From dl4j-tutorials with MIT License
/**
 * Element-wise differences are squared, and then summed.
 * This is modelled after the content_loss method defined in
 * https://harishnarayanan.org/writing/artistic-style-transfer/
 *
 * @param a One tensor
 * @param b Another tensor
 * @return Sum of squared errors: scalar
 */
private double sumOfSquaredErrors(INDArray a, INDArray b) {
    INDArray diff = a.sub(b); // difference
    INDArray squares = Transforms.pow(diff, 2); // element-wise squaring
    return squares.sumNumber().doubleValue();
}
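
A hedged usage sketch of the same pattern on random tensors; Transforms.pow and the plain mul-based form give the same sum of squared errors:

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.ops.transforms.Transforms;

public class SumOfSquaredErrorsSketch {
    public static void main(String[] args) {
        INDArray a = Nd4j.rand(3, 4);
        INDArray b = Nd4j.rand(3, 4);

        INDArray diff = a.sub(b);
        double sse = Transforms.pow(diff, 2).sumNumber().doubleValue();
        double sseAgain = diff.mul(diff).sumNumber().doubleValue(); // equivalent without Transforms

        System.out.println(sse);
        System.out.println(sseAgain);
    }
}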