Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#size()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#size(). They are drawn from open-source projects; the source file, project, and license are noted above each example.
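A note on the method itself before the examples: INDArray#size(int) returns the length of a single dimension (as a long in current ND4J versions), and negative dimension indices count back from the last dimension, a convention several examples below rely on. A minimal sketch:

INDArray arr = Nd4j.zeros(3, 4, 5);
long first = arr.size(0);    // 3 -- first dimension
long second = arr.size(1);   // 4 -- second dimension
long last = arr.size(-1);    // 5 -- negative indices count from the end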
Example 1
Source File: LossL1.java    From deeplearning4j with Apache License 2.0
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if(!labels.equalShapes(preOutput)){
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    INDArray scoreArr;
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    scoreArr = output.subi(labels);
    Transforms.abs(scoreArr, false);

    //Weighted loss function
    if (weights != null) {
        if (weights.length() != output.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                            + ") does not match output.size(1)=" + output.size(1));
        }
        scoreArr.muliRowVector(weights.castTo(scoreArr.dataType()));
    }

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
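The length check above guards the broadcast that follows: muliRowVector multiplies every row of the score array element-wise by the weights vector, so the vector length must equal scoreArr.size(1), the column count. The same pattern in isolation, as a minimal sketch:

INDArray scores = Nd4j.rand(8, 4);
INDArray weights = Nd4j.create(new double[]{1.0, 2.0, 0.5, 0.25});
if (weights.length() == scores.size(1)) {
    scores.muliRowVector(weights);   // scales each column by its weight
}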
 
Example 2
Source File: BaseUnderSamplingPreProcessor.java    From deeplearning4j with Apache License 2.0
private void validateData(INDArray label, INDArray labelMask) {
    if (label.rank() != 3) {
        throw new IllegalArgumentException(
                        "UnderSamplingByMaskingPreProcessor can only be applied to a time series dataset");
    }
    if (label.size(1) > 2) {
        throw new IllegalArgumentException(
                        "UnderSamplingByMaskingPreProcessor can only be applied to labels that represent binary classes. Label size was found to be "
                                        + label.size(1) + ". Expecting size=1 or size=2.");
    }
    if (label.size(1) == 2) {
        //check if label is of size one hot
        INDArray sum1 = label.sum(1).mul(labelMask);
        INDArray floatMask = labelMask.castTo(label.dataType());
        if (!sum1.equals(floatMask)) {
            throw new IllegalArgumentException("Labels of size minibatchx2xtimesteps are expected to be one hot."
                            + label.toString() + "\n is not one-hot");
        }
    }
}
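The size(1) == 2 branch checks the one-hot property: summing along dimension 1 must give 1 at every unmasked timestep. The core of that check in a minimal 2-D sketch:

INDArray labels = Nd4j.create(new double[][]{{1, 0}, {0, 1}, {0, 1}});
INDArray rowSums = labels.sum(1);   // every entry is 1.0 when each row is one-hot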
 
Example 3
Source File: BaseLevel2.java    From deeplearning4j with Apache License 2.0
/**
 * Performs a rank-1 update of a general m-by-n matrix A:
 * A := alpha*x*y' + A.
 *
 * @param order the matrix storage order
 * @param alpha the scalar applied to the outer product
 * @param X the m-element vector x
 * @param Y the n-element vector y
 * @param A the m-by-n matrix to update in place
 */
@Override
public void ger(char order, double alpha, INDArray X, INDArray Y, INDArray A) {
    if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
        OpProfiler.getInstance().processBlasCall(false, A, X, Y);

    if (X.data().dataType() == DataType.DOUBLE) {
        DefaultOpExecutioner.validateDataType(DataType.DOUBLE, A, X, Y);
        if (A.rows() > Integer.MAX_VALUE || A.columns() > Integer.MAX_VALUE || A.size(0) > Integer.MAX_VALUE)
            throw new ND4JArraySizeException();
        dger(order, (int) A.rows(), (int) A.columns(), alpha, X, X.stride(-1), Y, Y.stride(-1), A, (int) A.size(0));
    } else {
        DefaultOpExecutioner.validateDataType(DataType.FLOAT, A, X, Y);
        sger(order, (int) A.rows(), (int) A.columns(), (float) alpha, X, X.stride(-1), Y, Y.stride(-1), A, (int) A.size(0));
    }

    OpExecutionerUtil.checkForAny(A);
}
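ger performs the rank-1 update A := alpha*x*y' + A. For intuition, the same result can be produced without the BLAS wrapper by forming the outer product explicitly -- a sketch of the equivalent arithmetic, not the implementation above:

double alpha = 2.0;
INDArray x = Nd4j.create(new double[]{1, 2, 3}).reshape(3, 1);     // m x 1 column vector
INDArray y = Nd4j.create(new double[]{1, 1, 1, 1}).reshape(1, 4);  // 1 x n row vector
INDArray a = Nd4j.zeros(3, 4);
a.addi(x.mmul(y).muli(alpha));   // A := alpha * x * y^T + A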
 
Example 4
Source File: ShapeOpValidation.java    From deeplearning4j with Apache License 2.0
@Test
public void testPermute2(){
    for (int[] perm : new int[][]{{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}}) {
        INDArray in = Nd4j.linspace(1, 60, 60).reshape(3,4,5);
        INDArray exp = in.permute(perm).dup('c');

        int[] outShape = new int[3];
        for( int i=0; i<3; i++ ){
            outShape[i] = (int)in.size(perm[i]);
        }

        //System.out.println(Arrays.toString(outShape) + " - permute " + Arrays.toString(perm));
        INDArray out = Nd4j.create(outShape);
        OpTestCase op = new OpTestCase(new Permute(in, out, perm));
        op.expectedOutput(0, exp);

        assertNull(OpValidation.validate(op));
    }
}
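The expected-shape loop works because permuting dimensions permutes sizes: dimension i of the permuted array has length in.size(perm[i]). Concretely:

INDArray in = Nd4j.linspace(1, 24, 24).reshape(2, 3, 4);
INDArray p = in.permute(2, 0, 1);
// p.size(0) == in.size(2) == 4, p.size(1) == in.size(0) == 2, p.size(2) == in.size(1) == 3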
 
Example 5
Source File: LossL1.java    From nd4j with Apache License 2.0
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                        "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    INDArray scoreArr;
    //INDArray output = Nd4j.getExecutioner().execAndReturn(Nd4j.getOpFactory().createTransform(activationFn, preOutput.dup()));
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    scoreArr = output.subi(labels);
    Nd4j.getExecutioner().execAndReturn(Nd4j.getOpFactory().createTransform("abs", scoreArr));

    //Weighted loss function
    if (weights != null) {
        if (weights.length() != output.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                            + ") does not match output.size(1)=" + output.size(1));
        }
        scoreArr.muliRowVector(weights);
    }

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
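This is the older, standalone-nd4j version of Example 1 above: it predates the dtype handling (no castTo on the labels or weights) and uses the legacy Nd4j.getOpFactory().createTransform("abs", ...) op factory where the deeplearning4j version uses Transforms.abs(scoreArr, false).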
 
Example 6
Source File: LossFMeasure.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    double[] d = computeScoreNumDenom(labels, preOutput, activationFn, mask, false);
    double numerator = d[0];
    double denominator = d[1];

    if (numerator == 0.0 && denominator == 0.0) {
        //Zero score -> zero gradient
        return Nd4j.create(preOutput.shape());
    }

    double secondTerm = numerator / (denominator * denominator);

    INDArray dLdOut;
    if (labels.size(1) == 1) {
        //Single binary output case
        dLdOut = labels.mul(1 + beta * beta).divi(denominator).subi(secondTerm);
    } else {
        //Softmax case: the getColumn(1) here is to account for the fact that we're using prob(class1)
        // only in the score function; column(1) is equivalent to output for the single output case
        dLdOut = Nd4j.create(labels.shape());
        dLdOut.getColumn(1).assign(labels.getColumn(1).mul(1 + beta * beta).divi(denominator).subi(secondTerm));
    }

    //Negate relative to description in paper, as we want to *minimize* 1.0-fMeasure, which is equivalent to
    // maximizing fMeasure
    dLdOut.negi();

    INDArray dLdPreOut = activationFn.backprop(preOutput, dLdOut).getFirst();

    if (mask != null) {
        dLdPreOut.muliColumnVector(mask);
    }

    return dLdPreOut;
}
 
Example 7
Source File: LapackTest.java    From nd4j with Apache License 2.0
@Test
public void testGetrf() {
    int m = 150;
    int n = 100;
    float[] f = new float[m * n];
    for (int i = 0; i < f.length; i++)
        f[i] = rng.nextInt(5) + 1;
    // there is a very small (non-zero) chance that the random matrix is
    // singular, which would make this test fail
    long start = System.currentTimeMillis();

    INDArray IPIV = null;
    INDArray arr = null;
    final int N = 100;
    for (int i = 0; i < N; i++) {
        arr = Nd4j.create(f, new int[]{m, n}, 'f');
        IPIV = Nd4j.getBlasWrapper().lapack().getrf(arr);
    }

    INDArray L = Nd4j.getBlasWrapper().lapack().getLFactor(arr);
    INDArray U = Nd4j.getBlasWrapper().lapack().getUFactor(arr);
    INDArray P = Nd4j.getBlasWrapper().lapack().getPFactor(m, IPIV);

    INDArray orig = P.mmul(L).mmul(U);

    assertEquals("PxLxU is not the expected size - rows", orig.size(0), arr.size(0));
    assertEquals("PxLxU is not the expected size - cols", orig.size(1), arr.size(1));

    arr = Nd4j.create(f, new int[]{m, n}, 'f');
    for (int r = 0; r < orig.size(0); r++) {
        for (int c = 0; c < orig.size(1); c++) {
            assertEquals("Original & recombined matrices differ", orig.getFloat(r, c), arr.getFloat(r, c), 0.001f );
        }
    }
}
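Pulled out of the test, the factorization workflow the assertions rely on looks like this -- a sketch using only the calls shown above; getrf overwrites its argument and returns the pivot indices (IPIV):

INDArray a = Nd4j.rand(4, 4);
INDArray original = a.dup();
INDArray ipiv = Nd4j.getBlasWrapper().lapack().getrf(a);   // a now holds the combined L and U factors
INDArray l = Nd4j.getBlasWrapper().lapack().getLFactor(a);
INDArray u = Nd4j.getBlasWrapper().lapack().getUFactor(a);
INDArray p = Nd4j.getBlasWrapper().lapack().getPFactor(4, ipiv);
INDArray recombined = p.mmul(l).mmul(u);                   // matches original up to rounding error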
 
Example 8
Source File: LossMAPE.java    From nd4j with Apache License 2.0
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                        "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    INDArray scoreArr;
    //INDArray output = Nd4j.getExecutioner().execAndReturn(Nd4j.getOpFactory().createTransform(activationFn, preOutput.dup()));
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    scoreArr = output.rsubi(labels).divi(labels);
    Nd4j.getExecutioner().execAndReturn(Nd4j.getOpFactory().createTransform("abs", scoreArr));
    scoreArr.muli(100.0 / labels.size(1));

    //Weighted loss function
    if (weights != null) {
        if (weights.length() != output.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                            + ") does not match output.size(1)=" + output.size(1));
        }
        scoreArr.muliRowVector(weights);
    }

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
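The scoreArr.muli(100.0 / labels.size(1)) line makes this the classic MAPE: each element is |(label - prediction) / label|, scaled by 100 and averaged over the output columns. The same element-wise computation with the newer Transforms API, as a sketch:

INDArray labels = Nd4j.create(new double[]{2.0, 4.0});
INDArray output = Nd4j.create(new double[]{1.0, 5.0});
INDArray ape = Transforms.abs(output.rsub(labels).divi(labels), false);  // |(labels - output) / labels|
ape.muli(100.0 / labels.length());  // 100 / (number of outputs); size(1) in the 2-D case above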
 
Example 9
Source File: CoverageModelParameters.java    From gatk-protected with BSD 3-Clause "New" or "Revised" License
/**
 * Public constructor.
 *
 * Note:
 *
 * - If {@code meanBiasCovariates} is null, it is assumed that bias covariate correction is disabled.
 *
 * - If {@code biasCovariateARDCoefficients} is null, it is assumed that ARD for bias covariates is
 *   disabled.
 *
 * @param targetList list of targets
 * @param targetMeanLogBias target-specific mean log bias (m_t)
 * @param targetUnexplainedVariance target-specific unexplained bias variance (\Psi_t)
 * @param meanBiasCovariates mean bias covariates matrix (W_{t\mu})
 * @param biasCovariateARDCoefficients bias covariates ARD coefficients (\alpha_\mu)
 */
public CoverageModelParameters(@Nonnull final List<Target> targetList,
                               @Nonnull final INDArray targetMeanLogBias,
                               @Nonnull final INDArray targetUnexplainedVariance,
                               @Nullable final INDArray meanBiasCovariates,
                               @Nullable final INDArray biasCovariateARDCoefficients) {
    this.targetList = Utils.nonNull(targetList, "Target list must be non-null");
    this.targetMeanLogBias = Utils.nonNull(targetMeanLogBias, "Target-specific mean log bias must be non-null");
    this.targetUnexplainedVariance = Utils.nonNull(targetUnexplainedVariance, "Target-specific unexplained variance" +
            " must be non-null");
    Utils.validateArg(meanBiasCovariates != null || biasCovariateARDCoefficients == null, "If ARD coefficients" +
            " are non-null, bias covariates matrix must be non-null as well");
    biasCovariatesEnabled = meanBiasCovariates != null;
    ardEnabled = biasCovariateARDCoefficients != null;
    this.meanBiasCovariates = meanBiasCovariates;
    this.biasCovariateARDCoefficients = biasCovariateARDCoefficients;

    this.numTargets = targetList.size();
    validateNDArrayShape(targetUnexplainedVariance, new int[] {1, numTargets}, "target-specific unexplained variance");
    validateNDArrayShape(targetMeanLogBias, new int[] {1, numTargets}, "target-specific mean log bias");
    if (biasCovariatesEnabled) {
        Utils.validateArg(meanBiasCovariates.rank() == 2, "The mean bias covariate NDArray must be rank 2");
        numLatents = meanBiasCovariates.size(1);
        validateNDArrayShape(meanBiasCovariates, new int[] {numTargets, numLatents}, "mean bias covariates");
        if (ardEnabled) {
            validateNDArrayShape(biasCovariateARDCoefficients, new int[] {1, numLatents}, "ARD coefficients");
        }
    } else {
        numLatents = 0;
    }
}
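validateNDArrayShape is a private helper of this class; a minimal stand-in built on shape() and size() might look like the following sketch (the body is an assumption for illustration, not the GATK implementation):

// Hypothetical stand-in for the private validateNDArrayShape helper -- not the GATK implementation
private static void validateNDArrayShape(final INDArray arr, final int[] expected, final String description) {
    final long[] expectedShape = java.util.Arrays.stream(expected).asLongStream().toArray();
    if (!java.util.Arrays.equals(arr.shape(), expectedShape)) {
        throw new IllegalArgumentException("Bad shape for " + description + ": expected "
                + java.util.Arrays.toString(expectedShape) + ", got " + java.util.Arrays.toString(arr.shape()));
    }
}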
 
Example 10
Source File: LossMixtureDensity.java    From nd4j with Apache License 2.0
private INDArray labelsMinusMu(INDArray labels, INDArray mu) {
    // Now that we have the mixtures, let's compute the negative
    // log likelihood of the labels against the mixtures.
    long nSamples = labels.size(0);
    long labelsPerSample = labels.size(1);

    // This worked, but was actually much
    // slower than the for loop below.
    // labels = samples, mixtures, labels
    // mu = samples, mixtures
    // INDArray labelMinusMu = labels
    //        .reshape('f', nSamples, labelsPerSample, 1)
    //        .repeat(2, mMixtures)
    //        .permute(0, 2, 1)
    //        .subi(mu);

    // The above code does the same thing as the loop below,
    // but it does it with index magic instead of a for loop.
    // It turned out to be way less efficient than the simple 'for' here.
    INDArray labelMinusMu = Nd4j.zeros(nSamples, mMixtures, labelsPerSample);
    for (int k = 0; k < mMixtures; k++) {
        labelMinusMu.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.point(k), NDArrayIndex.all()},
                        labels);
    }
    labelMinusMu.subi(mu);

    return labelMinusMu;
}
 
Example 11
Source File: NDArrayMath.java    From deeplearning4j with Apache License 2.0
/**
 * The number of matrices in each slice of an ndarray.
 * @param arr the array to get the number of matrices per slice for
 * @return the number of matrices per slice
 */
public static long matricesPerSlice(INDArray arr) {
    if (arr.rank() == 3) {
        return 1;
    } else if (arr.rank() > 3) {
        int ret = 1;
        for (int i = 1; i < arr.rank() - 2; i++) {
            ret *= arr.size(i);
        }
        return ret;
    }
    return arr.size(-2);
}
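For ranks above 3 the method multiplies the sizes of the dimensions between the slice dimension and the two trailing matrix dimensions. For example:

INDArray arr = Nd4j.zeros(2, 3, 4, 5);
long perSlice = NDArrayMath.matricesPerSlice(arr);   // size(1) = 3 for this rank-4 array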
 
Example 12
Source File: LossPoisson.java    From nd4j with Apache License 2.0
@Override
public double computeScore(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask,
                boolean average) {
    INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);

    double score = scoreArr.sumNumber().doubleValue();

    if (average)
        score /= scoreArr.size(0);

    return score;
}
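Here size(0) is the minibatch dimension, so the average flag turns the summed score into a per-example mean.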
 
Example 13
Source File: Binning.java    From ml-models with Apache License 2.0
public void linearBins(double[][] embedding, int numBins) {
    INDArray indArray = Nd4j.create(embedding);
    for (int column = 0; column < embedding[0].length; column++) {
        INDArray slice = indArray.slice(column, 1);
        INDArray[] indArrays = Nd4j.sortWithIndices(slice, 0, true);
        INDArray indices = indArrays[0];
        int maxRank = embedding.length;
        for (int rank = 0; rank < indices.size(0); rank++) {
            embedding[(int) indices.getDouble(rank)][column] = (int) (((double) rank / maxRank) * numBins);
        }
    }
}
 
Example 14
Source File: ND4JConverters.java    From konduit-serving with Apache License 2.0
@Override
public float[][][] convert(INDArray from) {
    Preconditions.checkState(from.rank() == 3, "Can only convert rank 3 arrays to float[][][], got array with shape %s", from.shape());
    float[][][] out = new float[(int)from.size(0)][0][0];
    for( int i=0; i<out.length; i++){
        out[i] = from.get(NDArrayIndex.point(i), NDArrayIndex.all(), NDArrayIndex.all()).toFloatMatrix();
    }
    return out;
}
 
Example 15
Source File: LossMultiLabel.java    From nd4j with Apache License 2.0
private void calculate(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask, INDArray scoreOutput, INDArray gradientOutput) {
    if (scoreOutput == null && gradientOutput == null) {
        throw new IllegalArgumentException("You have to provide at least one of scoreOutput or gradientOutput!");
    }
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    final INDArray postOutput = activationFn.getActivation(preOutput.dup(), true);

    final INDArray positive = labels;
    final INDArray negative = labels.eq(0.0);
    final INDArray normFactor = negative.sum(1).muli(positive.sum(1));


    long examples = positive.size(0);
    for (int i = 0; i < examples; i++) {
        final INDArray locCfn = postOutput.getRow(i);
        final long[] shape = locCfn.shape();

        final INDArray locPositive = positive.getRow(i);
        final INDArray locNegative = negative.getRow(i);
        final Double locNormFactor = normFactor.getDouble(i);

        final INDArray operandA = Nd4j.ones(shape[1], shape[0]).mmul(locCfn);
        final INDArray operandB = operandA.transpose();

        final INDArray pairwiseSub = Transforms.exp(operandA.sub(operandB));

        final INDArray selection = locPositive.transpose().mmul(locNegative);

        final INDArray classificationDifferences = pairwiseSub.muli(selection).divi(locNormFactor);

        if (scoreOutput != null) {
            if (mask != null) {
                final INDArray perLabel = classificationDifferences.sum(0);
                LossUtil.applyMask(perLabel, mask.getRow(i));
                perLabel.sum(scoreOutput.getRow(i), 0);
            } else {
                classificationDifferences.sum(scoreOutput.getRow(i), 0, 1);
            }
        }

        if (gradientOutput != null) {
            gradientOutput.getRow(i).assign(classificationDifferences.sum(0).addi(classificationDifferences.sum(1).transposei().negi()));
        }
    }

    if (gradientOutput != null) {
        gradientOutput.assign(activationFn.backprop(preOutput.dup(), gradientOutput).getFirst());
        //multiply with masks, always
        if (mask != null) {
            LossUtil.applyMask(gradientOutput, mask);
        }
    }
}
 
Example 16
Source File: ComputationGraphTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testRnnTimeStepGravesLSTM() {
    Nd4j.getRandom().setSeed(12345);
    int timeSeriesLength = 12;

    //4 layer network: 2 GravesLSTM + DenseLayer + RnnOutputLayer. Hence also tests preprocessors.
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(5).nOut(7)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "0")
                    .addLayer("2", new DenseLayer.Builder().nIn(8).nOut(9).activation(Activation.TANH)

                                    .dist(new NormalDistribution(0,
                                                    0.5))
                                    .build(), "1")
                    .addLayer("3", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .nIn(9).nOut(4)
                                    .activation(Activation.SOFTMAX)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "2")
                    .setOutputs("3").inputPreProcessor("2", new RnnToFeedForwardPreProcessor())
                    .inputPreProcessor("3", new FeedForwardToRnnPreProcessor())
                    .build();
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray input = Nd4j.rand(new int[] {3, 5, timeSeriesLength});

    Map<String, INDArray> allOutputActivations = graph.feedForward(input, true);
    INDArray fullOutL0 = allOutputActivations.get("0");
    INDArray fullOutL1 = allOutputActivations.get("1");
    INDArray fullOutL3 = allOutputActivations.get("3");

    assertArrayEquals(new long[] {3, 7, timeSeriesLength}, fullOutL0.shape());
    assertArrayEquals(new long[] {3, 8, timeSeriesLength}, fullOutL1.shape());
    assertArrayEquals(new long[] {3, 4, timeSeriesLength}, fullOutL3.shape());

    int[] inputLengths = {1, 2, 3, 4, 6, 12};

    //Do steps of length 1, then of length 2, ..., 12
    //Should get the same result regardless of step size; should be identical to standard forward pass
    for (int i = 0; i < inputLengths.length; i++) {
        int inLength = inputLengths[i];
        int nSteps = timeSeriesLength / inLength; //each of length inLength

        graph.rnnClearPreviousState();

        for (int j = 0; j < nSteps; j++) {
            int startTimeRange = j * inLength;
            int endTimeRange = startTimeRange + inLength;

            INDArray inputSubset = input.get(NDArrayIndex.all(), NDArrayIndex.all(),
                            NDArrayIndex.interval(startTimeRange, endTimeRange));
            if (inLength > 1)
                assertTrue(inputSubset.size(2) == inLength);

            INDArray[] outArr = graph.rnnTimeStep(inputSubset);
            assertEquals(1, outArr.length);
            INDArray out = outArr[0];

            INDArray expOutSubset;
            if (inLength == 1) {
                val sizes = new long[] {fullOutL3.size(0), fullOutL3.size(1), 1};
                expOutSubset = Nd4j.create(DataType.FLOAT, sizes);
                expOutSubset.tensorAlongDimension(0, 1, 0).assign(fullOutL3.get(NDArrayIndex.all(),
                                NDArrayIndex.all(), NDArrayIndex.point(startTimeRange)));
            } else {
                expOutSubset = fullOutL3.get(NDArrayIndex.all(), NDArrayIndex.all(),
                                NDArrayIndex.interval(startTimeRange, endTimeRange));
            }

            assertEquals(expOutSubset, out);

            Map<String, INDArray> currL0State = graph.rnnGetPreviousState("0");
            Map<String, INDArray> currL1State = graph.rnnGetPreviousState("1");

            INDArray lastActL0 = currL0State.get(GravesLSTM.STATE_KEY_PREV_ACTIVATION);
            INDArray lastActL1 = currL1State.get(GravesLSTM.STATE_KEY_PREV_ACTIVATION);

            INDArray expLastActL0 = fullOutL0.tensorAlongDimension(endTimeRange - 1, 1, 0);
            INDArray expLastActL1 = fullOutL1.tensorAlongDimension(endTimeRange - 1, 1, 0);

            assertEquals(expLastActL0, lastActL0);
            assertEquals(expLastActL1, lastActL1);
        }
    }
}
 
Example 17
Source File: ShapeOpValidation.java    From deeplearning4j with Apache License 2.0
@Test
public void testDistancesExec(){
    //https://github.com/deeplearning4j/deeplearning4j/issues/7001
    for(String s : new String[]{"euclidean", "manhattan", "cosinesim", "cosinedist", "jaccard"}) {
        log.info("Starting: {}", s);
        INDArray defaultTestCase = Nd4j.create(4, 4);
        defaultTestCase.putRow(0, Nd4j.create(new float[]{0, 2, -2, 0}));
        defaultTestCase.putRow(1, Nd4j.create(new float[]{0, 1, -1, 0}));
        defaultTestCase.putRow(2, Nd4j.create(new float[]{0, -1, 1, 0}));
        defaultTestCase.putRow(3, Nd4j.create(new float[]{0, -2, 2, 0}));
        long singleEmbeddingSize = defaultTestCase.size(1) / 2L;

        // Split vectors
        INDArray x = defaultTestCase.get(NDArrayIndex.all(), NDArrayIndex.interval(0, singleEmbeddingSize));
        INDArray y = defaultTestCase.get(NDArrayIndex.all(), NDArrayIndex.interval(singleEmbeddingSize, defaultTestCase.size(1)));

        log.info(y.shapeInfoToString());

        SameDiff sd = SameDiff.create();
        sd.enableDebugMode();

        SDVariable xSd = sd.var("x", x);
        SDVariable ySd = sd.var("y", y);

        ySd = ySd.add(ySd);
        SDVariable dist;
        switch (s){
            case "euclidean":
                dist = sd.math().euclideanDistance(s, ySd, xSd, 0);
                break;
            case "manhattan":
                dist = sd.math().manhattanDistance(s, ySd, xSd, 0);
                break;
            case "cosinesim":
                dist = sd.math().cosineSimilarity(s, ySd, xSd, 0);
                break;
            case "cosinedist":
                dist = sd.math().cosineDistance(s, ySd, xSd, 0);
                break;
            case "jaccard":
                dist = sd.math().jaccardDistance(s, ySd, xSd, 0);
                break;
            default:
                throw new RuntimeException();
        }

        SDVariable loss = dist.sum();

//        log.info(sd.summary());
        sd.output(Collections.emptyMap(), Lists.newArrayList(s));
        sd.calculateGradients(Collections.emptyMap(), sd.getVariables().keySet());
    }
}
 
Example 18
Source File: PreProcessor3D4DTest.java    From deeplearning4j with Apache License 2.0
public Construct3dDataSet(INDArray featureScale, int timeSteps, int samples, int origin) {
    this.featureScale = featureScale;
    this.timeSteps = timeSteps;
    this.samples = samples;
    this.origin = origin;

    numFeatures = (int) featureScale.size(0);
    maxN = samples * timeSteps;
    INDArray template = Nd4j.linspace(origin, origin + timeSteps - 1, timeSteps).reshape(1, -1);
    template = Nd4j.concat(0, Nd4j.linspace(origin, origin + timeSteps - 1, timeSteps).reshape(1, -1), template);
    template = Nd4j.concat(0, Nd4j.linspace(origin, origin + timeSteps - 1, timeSteps).reshape(1, -1), template);
    template.muliColumnVector(featureScale);
    template = template.reshape(1, numFeatures, timeSteps);
    INDArray featureMatrix = template.dup();

    int newStart = origin + timeSteps;
    int newEnd;
    for (int i = 1; i < samples; i++) {
        newEnd = newStart + timeSteps - 1;
        template = Nd4j.linspace(newStart, newEnd, timeSteps).reshape(1, -1);
        template = Nd4j.concat(0, Nd4j.linspace(newStart, newEnd, timeSteps).reshape(1, -1), template);
        template = Nd4j.concat(0, Nd4j.linspace(newStart, newEnd, timeSteps).reshape(1, -1), template);
        template.muliColumnVector(featureScale);
        template = template.reshape(1, numFeatures, timeSteps);
        newStart = newEnd + 1;
        featureMatrix = Nd4j.concat(0, featureMatrix, template);
    }
    INDArray labelSet = featureMatrix.dup();
    this.newOrigin = newStart;
    sampleDataSet = new DataSet(featureMatrix, labelSet);

    //calculating stats
    // The theoretical mean should be the mean of 1,..samples*timesteps
    float theoreticalMean = origin - 1 + (samples * timeSteps + 1) / 2.0f;
    expectedMean = Nd4j.create(new double[] {theoreticalMean, theoreticalMean, theoreticalMean}, new long[]{1, 3}).castTo(featureScale.dataType());
    expectedMean.muli(featureScale.transpose());

    float stdNaturalNums = (float) Math.sqrt((samples * samples * timeSteps * timeSteps - 1) / 12);
    expectedStd = Nd4j.create(new double[] {stdNaturalNums, stdNaturalNums, stdNaturalNums}, new long[]{1, 3}).castTo(Nd4j.defaultFloatingPointType());
    expectedStd.muli(Transforms.abs(featureScale, true).transpose());
    //preprocessors use the population std so divides by n not (n-1)
    expectedStd = expectedStd.dup().muli(Math.sqrt(maxN)).divi(Math.sqrt(maxN));

    //min max assumes all scaling values are +ve
    expectedMin = Nd4j.ones(featureScale.dataType(), 3, 1).muliColumnVector(featureScale);
    expectedMax = Nd4j.ones(featureScale.dataType(),3, 1).muli(samples * timeSteps).muliColumnVector(featureScale);
}
 
Example 19
Source File: NDArrayMath.java    From nd4j with Apache License 2.0
/**
 * The number of vectors in each slice of an ndarray.
 * @param arr the array to get the number of vectors per slice for
 * @param rank the dimensions to get the number of vectors per slice for
 * @return the number of vectors per slice
 */
public static long vectorsPerSlice(INDArray arr, int... rank) {
    if (arr.rank() > 2) {
        return arr.size(-2) * arr.size(-1);
    }

    return arr.size(-1);

}
 
Example 20
Source File: NDArrayMath.java    From deeplearning4j with Apache License 2.0
/**
 * Maps the index of a vector onto the linear offset of that vector
 * within the array (the index times the length of the last dimension),
 * which can be used for indexing into a tensor.
 * @param index the index to map
 * @param arr the array to use for indexing
 * @return the mapped index
 */
public static long mapIndexOntoVector(int index, INDArray arr) {
    long ret = index * arr.size(-1);
    return ret;
}
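Since arr.size(-1) is the length of the last dimension, the mapped index is simply the vector index times the vector length. For instance:

INDArray arr = Nd4j.zeros(4, 5, 6);
long offset = NDArrayMath.mapIndexOntoVector(2, arr);   // 2 * arr.size(-1) = 12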