Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#getDouble()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#getDouble(). Each example is taken from the open-source project named above it.
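
Before diving in, a minimal sketch of the API itself (illustrative, assuming org.nd4j.linalg.factory.Nd4j and the default 'c' element order): getDouble reads a single element as a Java double, addressed either by per-dimension indices or by one linear index.

INDArray m = Nd4j.create(new double[][] {{1, 2, 3}, {4, 5, 6}});   // 2x3 matrix

double byRowCol = m.getDouble(1, 2);   // row 1, column 2 -> 6.0
double byLinear = m.getDouble(3);      // linear index 3 in 'c' order -> 4.0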
Example 1
Source File: CookBookServiceImpl.java    From Java-Deep-Learning-Cookbook with MIT License
@Override
public List<String> generateStringOutput(MultipartFile multipartFile, String modelFilePath) throws IOException, InterruptedException {
    final List<String> results = new ArrayList<>();
    File convFile = File.createTempFile(multipartFile.getOriginalFilename(), null, new File(System.getProperty("user.dir") + "/"));
    multipartFile.transferTo(convFile);
    INDArray indArray = CustomerRetentionPredictionApi.generateOutput(convFile, modelFilePath);
    for (int i = 0; i < indArray.rows(); i++) {
        if (indArray.getDouble(i, 0) > indArray.getDouble(i, 1)) {
            results.add("Customer " + (i + 1) + "-> Happy Customer \n");
        } else {
            results.add("Customer " + (i + 1) + "-> Unhappy Customer \n");
        }
    }
    convFile.deleteOnExit();

    return results;
}
 
Example 2
Source File: GraphVectorSerializer.java    From deeplearning4j with Apache License 2.0
public static void writeGraphVectors(DeepWalk deepWalk, String path) throws IOException {

    int nVertices = deepWalk.numVertices();
    int vectorSize = deepWalk.getVectorSize();

    try (BufferedWriter write = new BufferedWriter(new FileWriter(new File(path), false))) {
        for (int i = 0; i < nVertices; i++) {
            StringBuilder sb = new StringBuilder();
            sb.append(i);
            INDArray vec = deepWalk.getVertexVector(i);
            for (int j = 0; j < vectorSize; j++) {
                double d = vec.getDouble(j);
                sb.append(DELIM).append(d);
            }
            sb.append("\n");
            write.write(sb.toString());
        }
    }

    log.info("Wrote {} vectors of length {} to: {}", nVertices, vectorSize, path);
}
 
Example 3
Source File: PCA.java    From deeplearning4j with Apache License 2.0
/**
 * This method performs a dimensionality reduction, keeping only the principal
 * components that cover the requested fraction of the total variance of the
 * system.  It does all calculations about the mean.
 * @param in A matrix of datapoints as rows, where columns are features with fixed number N
 * @param variance The desired fraction of the total variance required
 * @return The reduced basis set
 */
public static INDArray pca2(INDArray in, double variance) {
    // let's calculate the covariance and the mean
    INDArray[] covmean = covarianceMatrix(in);
    // use the covariance matrix (inverse) to find "force constants" and then break into orthonormal
    // unit vector components
    INDArray[] pce = principalComponents(covmean[0]);
    // calculate the variance of each component
    INDArray vars = Transforms.pow(pce[1], -0.5, true);
    double res = vars.sumNumber().doubleValue();
    double total = 0.0;
    int ndims = 0;
    for (int i = 0; i < vars.columns(); i++) {
        ndims++;
        total += vars.getDouble(i);
        if (total / res > variance)
            break;
    }
    INDArray result = Nd4j.create(in.columns(), ndims);
    for (int i = 0; i < ndims; i++)
        result.putColumn(i, pce[0].getColumn(i));
    return result;
}
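
A hedged usage sketch (the data matrix and the 0.95 variance target are illustrative): project the original samples onto the reduced basis that pca2 returns.

INDArray data = Nd4j.rand(100, 10);      // 100 samples, 10 features
INDArray basis = PCA.pca2(data, 0.95);   // components covering ~95% of the variance
INDArray reduced = data.mmul(basis);     // 100 x k projection of the samples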
 
Example 4
Source File: DerivativeTests.java    From nd4j with Apache License 2.0
@Test
public void testCubeDerivative() {

    //Derivative of cube: 3*x^2
    INDArray z = Nd4j.zeros(100);
    double[] expOut = new double[100];
    for (int i = 0; i < 100; i++) {
        double x = 0.1 * (i - 50);
        z.putScalar(i, x);
        expOut[i] = 3 * x * x;
    }

    INDArray zPrime = Nd4j.getExecutioner()
                    .execAndReturn(new CubeDerivative(z));

    for (int i = 0; i < 100; i++) {
        double d1 = expOut[i];
        double d2 = zPrime.getDouble(i);
        double relError = Math.abs(d1 - d2) / (Math.abs(d1) + Math.abs(d2));
        if (d1 == 0.0 && d2 == 0.0)
            relError = 0.0;
        String str = "exp=" + expOut[i] + ", act=" + zPrime.getDouble(i) + "; relError = " + relError;
        assertTrue(str, relError < REL_ERROR_TOLERANCE);
    }
}
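
The assertion above uses the symmetric relative error |exp - act| / (|exp| + |act|), which stays bounded in [0, 1] even when one of the two values is near zero; the explicit guard handles the 0/0 case where both values are exactly zero.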
 
Example 5
Source File: CheckUtil.java    From nd4j with Apache License 2.0
public static boolean checkMulManually(INDArray first, INDArray second, double maxRelativeDifference,
                double minAbsDifference) {
    //No apache commons element-wise multiply, but can do this manually

    INDArray result = first.mul(second);
    long[] shape = first.shape();

    INDArray expected = Nd4j.zeros(first.shape());

    for (int i = 0; i < shape[0]; i++) {
        for (int j = 0; j < shape[1]; j++) {
            double v = first.getDouble(i, j) * second.getDouble(i, j);
            expected.putScalar(new int[] {i, j}, v);
        }
    }
    if (!checkShape(expected, result))
        return false;
    boolean ok = checkEntries(expected, result, maxRelativeDifference, minAbsDifference);
    if (!ok) {
        INDArray onCopies = Shape.toOffsetZeroCopy(first).mul(Shape.toOffsetZeroCopy(second));
        printFailureDetails(first, second, expected, result, onCopies, "mul");
    }
    return ok;
}
 
Example 6
Source File: BooleanIndexing.java    From nd4j with Apache License 2.0
/**
 * "And" over the whole ndarray given some condition, applied along the given dimensions
 *
 * @param n    the ndarray to test
 * @param condition the condition to test against
 * @param dimension the dimension(s) along which the condition is evaluated
 * @return one boolean per tensor along the given dimension(s): true if all of its
 * elements meet the specified condition, false otherwise
 */
public static boolean[] and(final INDArray n, final Condition condition, int... dimension) {
    if (!(condition instanceof BaseCondition))
        throw new UnsupportedOperationException("Only static Conditions are supported");

    MatchCondition op = new MatchCondition(n, condition);
    INDArray arr = Nd4j.getExecutioner().exec(op, dimension);
    boolean[] result = new boolean[(int) arr.length()];

    long tadLength = Shape.getTADLength(n.shape(), dimension);

    for (int i = 0; i < arr.length(); i++) {
        // all elements along this TAD matched iff the match count equals the TAD length
        result[i] = arr.getDouble(i) == tadLength;
    }

    return result;
}
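
A hedged usage sketch (the array and condition are illustrative; Conditions is org.nd4j.linalg.indexing.conditions.Conditions): with dimension 1, each row is one tensor along the dimension, so the result holds one boolean per row.

INDArray m = Nd4j.create(new double[][] {{1, 2, 3}, {-1, 2, 3}});
boolean[] perRow = BooleanIndexing.and(m, Conditions.greaterThan(0), 1);
// perRow -> [true, false]: only the first row is entirely positive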
 
Example 7
Source File: PlotUtil.java    From dl4j-tutorials with MIT License
/**Create data for the background data set
 */
private static XYZDataset createBackgroundData(INDArray backgroundIn, INDArray backgroundOut) {
    int nRows = backgroundIn.rows();
    double[] xValues = new double[nRows];
    double[] yValues = new double[nRows];
    double[] zValues = new double[nRows];
    for (int i = 0; i < nRows; i++) {
        xValues[i] = backgroundIn.getDouble(i, 0);
        yValues[i] = backgroundIn.getDouble(i, 1);
        zValues[i] = backgroundOut.getDouble(i, 0);
    }

    DefaultXYZDataset dataset = new DefaultXYZDataset();
    dataset.addSeries("Series 1",
            new double[][]{xValues, yValues, zValues});
    return dataset;
}
 
Example 8
Source File: CustomerRetentionPredictionApi.java    From Java-Deep-Learning-Cookbook with MIT License
public static void main(String[] args) throws IOException, InterruptedException {
    
    INDArray indArray = CustomerRetentionPredictionApi.generateOutput(new ClassPathResource("test.csv").getFile(), "model.zip");
    String message = "";
    for (int i = 0; i < indArray.rows(); i++) {
        if (indArray.getDouble(i, 0) > indArray.getDouble(i, 1)) {
            message += "Customer " + (i + 1) + "-> Happy Customer\n";
        } else {
            message += "Customer " + (i + 1) + "-> Unhappy Customer\n";
        }
    }
    System.out.println(message);

}
 
Example 9
Source File: BooleanIndexingTest.java    From nd4j with Apache License 2.0
@Test
public void testAbsValueGreaterThan() {
    final double threshold = 2;

    Condition absValueCondition = new AbsValueGreaterThan(threshold);
    Function<Number, Number> clipFn = new Function<Number, Number>() {
        @Override
        public Number apply(Number number) {
            System.out.println("Number: " + number.doubleValue());
            return (number.doubleValue() > threshold ? threshold : -threshold);
        }
    };

    Nd4j.getRandom().setSeed(12345);
    INDArray orig = Nd4j.rand(1, 20).muli(6).subi(3); //Random numbers: -3 to 3
    INDArray exp = orig.dup();
    INDArray after = orig.dup();

    for (int i = 0; i < exp.length(); i++) {
        double d = exp.getDouble(i);
        if (d > threshold) {
            exp.putScalar(i, threshold);
        } else if (d < -threshold) {
            exp.putScalar(i, -threshold);
        }
    }

    BooleanIndexing.applyWhere(after, absValueCondition, clipFn);

    System.out.println(orig);
    System.out.println(exp);
    System.out.println(after);

    assertEquals(exp, after);
}
 
Example 10
Source File: QLearning.java    From dl4j-tutorials with MIT License
private static boolean allZeros(INDArray array) {
    NdIndexIterator iter = new NdIndexIterator(array.shape());
    while (iter.hasNext()) {
        double nextVal = array.getDouble(iter.next());
        if (nextVal != 0) {
            return false;
        }
    }
    return true;
}
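
A quick usage note: NdIndexIterator yields a long[] coordinate for every position in the shape, and getDouble accepts it directly, so the helper works for arrays of any rank. Illustrative calls (values are hypothetical):

allZeros(Nd4j.zeros(2, 3));                       // true
allZeros(Nd4j.create(new double[] {0, 0, 1e-9})); // false: comparison with 0 is exact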
 
Example 11
Source File: NDArrayStrings.java    From nd4j with Apache License 2.0
private String vectorToString(INDArray arr, boolean summarize) {
    StringBuilder sb = new StringBuilder();
    sb.append("[");
    for (int i = 0; i < arr.length(); i++) {
        if (arr instanceof IComplexNDArray) {
            sb.append(((IComplexNDArray) arr).getComplex(i).toString());
        } else {
            if (summarize && i > 2 && i < arr.length() - 3) {
                if (i == 3) sb.append("  ...");
            } else {
                double arrElement = arr.getDouble(i);
                if (!dontOverrideFormat && ((Math.abs(arrElement) < this.minToPrintWithoutSwitching && arrElement != 0) || (Math.abs(arrElement) >= this.maxToPrintWithoutSwitching))) {
                    //switch to scientific notation
                    String asString = new DecimalFormat(scientificFormat).format(arrElement);
                    //from E to small e
                    asString = asString.replace('E','e');
                    sb.append(String.format("%1$" + padding + "s", asString));
                }
                else {
                    if (arrElement == 0) {
                        sb.append(String.format("%1$" + padding + "s", 0));
                    }
                    else {
                        sb.append(String.format("%1$" + padding + "s", decimalFormat.format(arrElement)));
                    }
                }
            }
        }
        if (i < arr.length() - 1) {
            if (!summarize || i < 2 || i > arr.length() - 3 || (summarize && arr.length() == 6)) {
                sb.append(colSep);
            }
        }
    }
    sb.append("]");
    return sb.toString();
}
 
Example 12
Source File: ImageUtils.java    From AILibs with GNU Affero General Public License v3.0
/**
 * Converts RGB matrices to grayscale matrices using the luminosity method (cf.
 * ITU-R recommendation BT.709).
 *
 * @param matrices Input matrices (RGB with 3 channels, assuming shape
 *                 [width, height, 3, ...])
 * @return Returns grayscale matrices with the same shape but only one channel
 */
public static List<INDArray> rgbMatricesToGrayscale(final List<INDArray> matrices) {
    if (matrices == null || matrices.isEmpty() || matrices.get(0).shape().length < 3
            || matrices.get(0).shape()[2] < 3)
        throw new IllegalArgumentException("Parameter matrices must not be null and must have three channels.");

    List<INDArray> result = new ArrayList<>(matrices.size());
    for (int i = 0; i < matrices.size(); i++) {
        INDArray currMatrix = matrices.get(i);
        INDArray resultMatrix = Nd4j.create(currMatrix.shape()[0], currMatrix.shape()[1], 1);

        for (int j = 0; j < currMatrix.shape()[0]; j++) {
            for (int k = 0; k < currMatrix.shape()[1]; k++) {
                double r = currMatrix.getDouble(j, k, 0);
                double g = currMatrix.getDouble(j, k, 1);
                double b = currMatrix.getDouble(j, k, 2);

                // BT.709 luma weights; the int cast truncates to an integer intensity
                double gray = (int) (r * 0.2125 + g * 0.7154 + b * 0.0721);

                resultMatrix.putScalar(new int[]{j, k, 0}, gray);
            }
        }

        result.add(resultMatrix);
    }

    return result;
}
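
A hypothetical call site (the 4x4 random "image" is illustrative): feed RGB arrays of shape [width, height, 3] and get back single-channel arrays of the same spatial size.

List<INDArray> rgb = Collections.singletonList(Nd4j.rand(new int[] {4, 4, 3}).muli(255));
List<INDArray> gray = ImageUtils.rgbMatricesToGrayscale(rgb);
// gray.get(0).shape() -> [4, 4, 1]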
 
Example 13
Source File: RandomOpValidation.java    From deeplearning4j with Apache License 2.0
@Test
public void testUniformRankSimple() {

    INDArray arr = Nd4j.createFromArray(new double[]{100.0});
//    OpTestCase tc = new OpTestCase(DynamicCustomOp.builder("randomuniform")
//            .addInputs(arr)
//            .addOutputs(Nd4j.createUninitialized(new long[]{100}))
//            .addFloatingPointArguments(0.0, 1.0)
//            .build());

//    OpTestCase tc = new OpTestCase(new DistributionUniform(arr, Nd4j.createUninitialized(new long[]{100}), 0, 1));
    OpTestCase tc = new OpTestCase(new RandomBernoulli(arr, Nd4j.createUninitialized(new long[]{100}), 0.5));

    tc.expectedOutput(0, LongShapeDescriptor.fromShape(new long[]{100}, DataType.FLOAT), in -> {
        double min = in.minNumber().doubleValue();
        double max = in.maxNumber().doubleValue();
        double mean = in.meanNumber().doubleValue();
        if (min >= 0 && max <= 1 && (in.length() == 1 || Math.abs(mean - 0.5) < 0.2))
            return null;
        return "Failed: min = " + min + ", max = " + max + ", mean = " + mean;
    });

    String err = OpValidation.validate(tc);
    assertNull(err);

    double d = arr.getDouble(0);

    assertEquals(100.0, d, 0.0);
}
 
Example 14
Source File: Vasttext.java    From scava with Eclipse Public License 2.0
private List<Object> predictLabels(DataIteratorConstructor vasttextMemoryDataContrustor)
{
	INDArray predictions = predict(vasttextMemoryDataContrustor);
	List<Object> predictionsLabels = new ArrayList<Object>();
	if(multiLabel)
	{
		predictions=predictions.gt(multiLabelActivation);
		List<String> activatedLabels;
		for(int i=0; i<predictions.rows(); i++)
		{
			//This is the worst case scenario in which all the labels are present
			activatedLabels = new ArrayList<String>(labelsSize);
			for(int j=0; j<labelsSize; j++)
			{
				if(predictions.getDouble(i, j)==1.0)
					activatedLabels.add(labels.get(j));
			}
			predictionsLabels.add(activatedLabels);
		}
	}
	else
	{
		INDArray predictionIndexes = Nd4j.argMax(predictions, 1);
		for(int i=0; i<predictionIndexes.length(); i++)
		{
			predictionsLabels.add(labels.get(predictionIndexes.getInt(i)));
		}
	}
	return predictionsLabels;
}
 
Example 15
Source File: TestReconstructionDistributions.java    From deeplearning4j with Apache License 2.0
@Test
public void testGaussianLogProb() {
    Nd4j.getRandom().setSeed(12345);

    int inputSize = 4;
    int[] mbs = new int[] {1, 2, 5};

    for (boolean average : new boolean[] {true, false}) {
        for (int minibatch : mbs) {

            INDArray x = Nd4j.rand(minibatch, inputSize);
            INDArray mean = Nd4j.randn(minibatch, inputSize);
            INDArray logStdevSquared = Nd4j.rand(minibatch, inputSize).subi(0.5);

            INDArray distributionParams = Nd4j.createUninitialized(new int[] {minibatch, 2 * inputSize});
            distributionParams.get(NDArrayIndex.all(), NDArrayIndex.interval(0, inputSize)).assign(mean);
            distributionParams.get(NDArrayIndex.all(), NDArrayIndex.interval(inputSize, 2 * inputSize))
                            .assign(logStdevSquared);

            ReconstructionDistribution dist = new GaussianReconstructionDistribution(Activation.IDENTITY);

            double negLogProb = dist.negLogProbability(x, distributionParams, average);

            INDArray exampleNegLogProb = dist.exampleNegLogProbability(x, distributionParams);
            assertArrayEquals(new long[] {minibatch, 1}, exampleNegLogProb.shape());

            //Calculate the same thing, but using Apache Commons math

            double logProbSum = 0.0;
            for (int i = 0; i < minibatch; i++) {
                double exampleSum = 0.0;
                for (int j = 0; j < inputSize; j++) {
                    double mu = mean.getDouble(i, j);
                    double logSigma2 = logStdevSquared.getDouble(i, j);
                    double sigma = Math.sqrt(Math.exp(logSigma2));
                    NormalDistribution nd = new NormalDistribution(mu, sigma);

                    double xVal = x.getDouble(i, j);
                    double thisLogProb = nd.logDensity(xVal);
                    logProbSum += thisLogProb;
                    exampleSum += thisLogProb;
                }
                assertEquals(-exampleNegLogProb.getDouble(i), exampleSum, 1e-6);
            }

            double expNegLogProb;
            if (average) {
                expNegLogProb = -logProbSum / minibatch;
            } else {
                expNegLogProb = -logProbSum;
            }


            //                System.out.println(expLogProb + "\t" + logProb + "\t" + (logProb / expLogProb));
            assertEquals(expNegLogProb, negLogProb, 1e-6);


            //Also: check random sampling...
            int count = minibatch * inputSize;
            INDArray arr = Nd4j.linspace(-3, 3, count, Nd4j.dataType()).reshape(minibatch, inputSize);
            INDArray sampleMean = dist.generateAtMean(arr);
            INDArray sampleRandom = dist.generateRandom(arr);
        }
    }
}
 
Example 16
Source File: PCA.java    From nd4j with Apache License 2.0
/**
 * Calculates pca vectors of a matrix, for a given variance. A larger variance (99%)
 * will result in a higher order feature set.
 *
 * To use the returned factor: multiply feature(s) by the factor to get a reduced dimension
 *
 * INDArray Areduced = A.mmul( factor ) ;
 *
 * The array Areduced is a projection of A onto principal components
 *
 * @see #pca(INDArray, double, boolean)
 *
 * @param A the array of features, rows are results, columns are features - will be changed
 * @param variance the amount of variance to preserve as a float 0 - 1
 * @param normalize whether to normalize (set features to have zero mean)
 * @return the matrix to multiply a feature by to get a reduced feature set
 */
public static INDArray pca_factor(INDArray A, double variance, boolean normalize) {
    if (normalize) {
        // Normalize to mean 0 for each feature ( each column has 0 mean )
        INDArray mean = A.mean(0);
        A.subiRowVector(mean);
    }

    long m = A.rows();
    long n = A.columns();

    // Prepare the SVD results; we'll decompose A into U x S x V'
    INDArray s = Nd4j.create(m < n ? m : n);
    INDArray VT = Nd4j.create(n, n, 'f');

    // Note - we don't care about U 
    Nd4j.getBlasWrapper().lapack().gesvd(A, s, null, VT);

    // Now convert the eigs of X into the eigs of the covariance matrix
    for (int i = 0; i < s.length(); i++) {
        s.putScalar(i, Math.sqrt(s.getDouble(i)) / (m - 1));
    }

    // Now find how many features we need to preserve the required variance
    // Which is the same percentage as a cumulative sum of the eigenvalues' percentages
    double totalEigSum = s.sumNumber().doubleValue() * variance;
    int k = -1; // we will reduce to k dimensions
    double runningTotal = 0;
    for (int i = 0; i < s.length(); i++) {
        runningTotal += s.getDouble(i);
        if (runningTotal >= totalEigSum) { // OK I know it's a float, but what else can we do ?
            k = i + 1; // we will keep this many features to preserve the reqd. variance
            break;
        }
    }
    if (k == -1) { // if we need everything
        throw new RuntimeException("No reduction possible for reqd. variance - use smaller variance");
    }
    // So now let's rip out the appropriate number of left singular vectors from
    // the V output (note we pull rows, since VT is a transpose of V)
    INDArray V = VT.transpose();
    INDArray factor = Nd4j.create(n, k, 'f');
    for (int i = 0; i < k; i++) {
        factor.putColumn(i, V.getColumn(i));
    }

    return factor;
}
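
A hedged usage sketch (matrix shapes are illustrative; note pca_factor modifies A when normalize is true, so pass a copy if you need the original):

INDArray A = Nd4j.rand(200, 30);                       // 200 samples, 30 features
INDArray factor = PCA.pca_factor(A.dup(), 0.99, true); // keep ~99% of the variance
INDArray Areduced = A.mmul(factor);                    // 200 x k projection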
 
Example 17
Source File: LossMultiLabel.java    From deeplearning4j with Apache License 2.0
private void calculate(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask, INDArray scoreOutput, INDArray gradientOutput) {
    if (scoreOutput == null && gradientOutput == null) {
        throw new IllegalArgumentException("You have to provide at least one of scoreOutput or gradientOutput!");
    }
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    final INDArray postOutput = activationFn.getActivation(preOutput.dup(), true);

    final INDArray positive = labels;
    final INDArray negative = labels.eq(0.0).castTo(Nd4j.defaultFloatingPointType());
    final INDArray normFactor = negative.sum(true,1).castTo(Nd4j.defaultFloatingPointType()).muli(positive.sum(true,1));


    long examples = positive.size(0);
    for (int i = 0; i < examples; i++) {
        final INDArray locCfn = postOutput.getRow(i, true);
        final long[] shape = locCfn.shape();

        final INDArray locPositive = positive.getRow(i, true);
        final INDArray locNegative = negative.getRow(i, true);
        final Double locNormFactor = normFactor.getDouble(i);

        final int outSetSize = locNegative.sumNumber().intValue();
        if(outSetSize == 0 || outSetSize == locNegative.columns()){
            if (scoreOutput != null) {
                scoreOutput.getRow(i, true).assign(0);
            }

            if (gradientOutput != null) {
                gradientOutput.getRow(i, true).assign(0);
            }
        }else {
            final INDArray operandA = Nd4j.ones(shape[1], shape[0]).mmul(locCfn);
            final INDArray operandB = operandA.transpose();

            final INDArray pairwiseSub = Transforms.exp(operandA.sub(operandB));

            final INDArray selection = locPositive.transpose().mmul(locNegative);

            final INDArray classificationDifferences = pairwiseSub.muli(selection).divi(locNormFactor);

            if (scoreOutput != null) {
                if (mask != null) {
                    final INDArray perLabel = classificationDifferences.sum(0);
                    LossUtil.applyMask(perLabel, mask.getRow(i, true));
                    perLabel.sum(scoreOutput.getRow(i, true), 0);
                } else {
                    classificationDifferences.sum(scoreOutput.getRow(i, true), 0, 1);
                }
            }

            if (gradientOutput != null) {
                gradientOutput.getRow(i, true).assign(classificationDifferences.sum(true, 0).addi(classificationDifferences.sum(true,1).transposei().negi()));
            }
        }
    }

    if (gradientOutput != null) {
        gradientOutput.assign(activationFn.backprop(preOutput.dup(), gradientOutput).getFirst());
        //multiply with masks, always
        if (mask != null) {
            LossUtil.applyMask(gradientOutput, mask);
        }
    }
}
 
Example 18
Source File: TestVariableLengthTSCG.java    From deeplearning4j with Apache License 2.0
@Test
public void testOutputMaskingScoreMagnitudes() {
    //Idea: check magnitude of scores, with differing number of values masked out
    //i.e., MSE with zero weight init and 1.0 labels: know what to expect in terms of score

    int nIn = 3;
    int[] timeSeriesLengths = {3, 10};
    int[] outputSizes = {1, 2, 5};
    int[] miniBatchSizes = {1, 4};

    Random r = new Random(12345);

    for (int tsLength : timeSeriesLengths) {
        for (int nOut : outputSizes) {
            for (int miniBatch : miniBatchSizes) {
                for (int nToMask = 0; nToMask < tsLength - 1; nToMask++) {
                    String msg = "tsLen=" + tsLength + ", nOut=" + nOut + ", miniBatch=" + miniBatch;

                    INDArray labelMaskArray = Nd4j.ones(miniBatch, tsLength);
                    for (int i = 0; i < miniBatch; i++) {
                        //For each example: select which outputs to mask...
                        int nMasked = 0;
                        while (nMasked < nToMask) {
                            int tryIdx = r.nextInt(tsLength);
                            if (labelMaskArray.getDouble(i, tryIdx) == 0.0)
                                continue;
                            labelMaskArray.putScalar(new int[] {i, tryIdx}, 0.0);
                            nMasked++;
                        }
                    }

                    INDArray input = Nd4j.rand(new int[] {miniBatch, nIn, tsLength});
                    INDArray labels = Nd4j.ones(miniBatch, nOut, tsLength);

                    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L)
                            .graphBuilder()
                            .addInputs("in")
                            .addLayer("0", new GravesLSTM.Builder().nIn(nIn).nOut(5)
                                    .dist(new NormalDistribution(0, 1))
                                    .updater(new NoOp()).build(), "in")
                            .addLayer("1", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                                    .activation(Activation.IDENTITY)
                                    .nIn(5).nOut(nOut)
                                    .weightInit(WeightInit.ZERO)
                                    .updater(new NoOp()).build(), "0")
                            .setOutputs("1").build();
                    ComputationGraph net = new ComputationGraph(conf);
                    net.init();

                    //MSE loss function: 1/n * sum(squaredErrors)... but sum(squaredErrors) = n * (1-0) here -> sum(squaredErrors)
                    double expScore = tsLength - nToMask; //Sum over minibatches, then divide by minibatch size

                    net.setLayerMaskArrays(null, new INDArray[] {labelMaskArray});
                    net.setInput(0, input);
                    net.setLabel(0, labels);

                    net.computeGradientAndScore();
                    double score = net.score();

                    assertEquals(msg, expScore, score, 0.1);
                }
            }
        }
    }
}
 
Example 19
Source File: IndexingTests.java    From nd4j with Apache License 2.0
@Test
public void testGet() {
    System.out.println("Testing sub-array put and get with a 3D array ...");

    INDArray arr = Nd4j.linspace(0, 124, 125).reshape(5, 5, 5);

    /*
     * Extract elements with the following indices:
     *
     * (2,1,1) (2,1,2) (2,1,3)
     * (2,2,1) (2,2,2) (2,2,3)
     * (2,3,1) (2,3,2) (2,3,3)
     */

    int slice = 2;

    int iStart = 1;
    int jStart = 1;

    int iEnd = 4;
    int jEnd = 4;

    // Method A: Element-wise.

    INDArray subArr_A = Nd4j.create(new int[] {3, 3});

    for (int i = iStart; i < iEnd; i++) {
        for (int j = jStart; j < jEnd; j++) {

            double val = arr.getDouble(slice, i, j);
            int[] sub = new int[] {i - iStart, j - jStart};

            subArr_A.putScalar(sub, val);

        }
    }

    // Method B: Using NDArray get and put with index classes.

    INDArray subArr_B = Nd4j.create(new int[] {3, 3});

    INDArrayIndex ndi_Slice = NDArrayIndex.point(slice);
    INDArrayIndex ndi_J = NDArrayIndex.interval(jStart, jEnd);
    INDArrayIndex ndi_I = NDArrayIndex.interval(iStart, iEnd);

    INDArrayIndex[] whereToGet = new INDArrayIndex[] {ndi_Slice, ndi_I, ndi_J};

    INDArray whatToPut = arr.get(whereToGet);
    assertEquals(subArr_A, whatToPut);
    System.out.println(whatToPut);
    INDArrayIndex[] whereToPut = new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all()};

    subArr_B.put(whereToPut, whatToPut);

    assertEquals(subArr_A, subArr_B);
    System.out.println("... done");
}
 
Example 20
Source File: CNNProcessorTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testFeedForwardToCnnPreProcessor2() {
    int[] nRows = {1, 5, 20};
    int[] nCols = {1, 5, 20};
    int[] nDepth = {1, 3};
    int[] nMiniBatchSize = {1, 5};
    for (int rows : nRows) {
        for (int cols : nCols) {
            for (int d : nDepth) {
                FeedForwardToCnnPreProcessor convProcessor = new FeedForwardToCnnPreProcessor(rows, cols, d);

                for (int miniBatch : nMiniBatchSize) {
                    long[] ffShape = new long[] {miniBatch, rows * cols * d};
                    INDArray rand = Nd4j.rand(ffShape);
                    INDArray ffInput_c = Nd4j.create(DataType.FLOAT, ffShape, 'c');
                    INDArray ffInput_f = Nd4j.create(DataType.FLOAT, ffShape, 'f');
                    ffInput_c.assign(rand);
                    ffInput_f.assign(rand);
                    assertEquals(ffInput_c, ffInput_f);

                    //Test forward pass:
                    INDArray convAct_c = convProcessor.preProcess(ffInput_c, -1, LayerWorkspaceMgr.noWorkspaces());
                    INDArray convAct_f = convProcessor.preProcess(ffInput_f, -1, LayerWorkspaceMgr.noWorkspaces());
                    long[] convShape = {miniBatch, d, rows, cols};
                    assertArrayEquals(convShape, convAct_c.shape());
                    assertArrayEquals(convShape, convAct_f.shape());
                    assertEquals(convAct_c, convAct_f);

                    //Check values:
                    //CNN reshaping (for each example) takes a 1d vector and converts it to 3d
                    // (4d total, for minibatch data)
                    //1d vector is assumed to be rows from channels 0 concatenated, followed by channels 1, etc
                    for (int ex = 0; ex < miniBatch; ex++) {
                        for (int r = 0; r < rows; r++) {
                            for (int c = 0; c < cols; c++) {
                                for (int depth = 0; depth < d; depth++) {
                                    int origPosition = depth * (rows * cols) + r * cols + c; //pos in vector
                                    double vecValue = ffInput_c.getDouble(ex, origPosition);
                                    double convValue = convAct_c.getDouble(ex, depth, r, c);
                                    assertEquals(vecValue, convValue, 0.0);
                                }
                            }
                        }
                    }

                    //Test backward pass:
                    //Idea is that backward pass should do opposite to forward pass
                    INDArray epsilon4_c = Nd4j.create(DataType.FLOAT, convShape, 'c');
                    INDArray epsilon4_f = Nd4j.create(DataType.FLOAT, convShape, 'f');
                    epsilon4_c.assign(convAct_c);
                    epsilon4_f.assign(convAct_f);
                    INDArray epsilon2_c = convProcessor.backprop(epsilon4_c, -1, LayerWorkspaceMgr.noWorkspaces());
                    INDArray epsilon2_f = convProcessor.backprop(epsilon4_f, -1, LayerWorkspaceMgr.noWorkspaces());
                    assertEquals(ffInput_c, epsilon2_c);
                    assertEquals(ffInput_c, epsilon2_f);
                }
            }
        }
    }
}