Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#rsub()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#rsub(). They are drawn from open-source projects; each example lists its original project and source file.
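As a quick orientation before the examples (a minimal sketch, not taken from any of the projects below): rsub is "reverse subtraction", so arr.rsub(x) computes x - arr elementwise, while arr.sub(x) computes arr - x. Both return a new array; rsubi is the in-place variant.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class RsubDemo {
    public static void main(String[] args) {
        INDArray arr = Nd4j.create(new double[]{1, 2, 3});
        INDArray a = arr.sub(10);    //arr - 10 -> [-9.0, -8.0, -7.0]
        INDArray b = arr.rsub(10);   //10 - arr -> [ 9.0,  8.0,  7.0]
        System.out.println(a);
        System.out.println(b);
        arr.rsubi(10);               //in-place: arr itself becomes [9.0, 8.0, 7.0]
        System.out.println(arr);
    }
}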
Example 1
Source File: MixedDataTypesTests.java (from deeplearning4j, Apache License 2.0)
@Test
public void testSimple() {
    Nd4j.create(1);
    for (DataType dt : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF, DataType.INT, DataType.LONG}) {
        INDArray arr = Nd4j.ones(dt, 1, 5);
        arr.assign(1.0);
        //Each op below returns a new array and the result is discarded;
        //the test only verifies that the ops execute for every data type.
        arr.div(8);
        arr.mul(8);
        arr.sub(8);
        arr.add(8);
        arr.rdiv(8);
        arr.rsub(8);
    }
}
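Note that the test discards every result: rsub, like the other ops shown, returns a new array and leaves arr untouched. A small sketch of what one iteration produces, assuming a FLOAT array:

INDArray arr = Nd4j.ones(DataType.FLOAT, 1, 5);   //[1, 1, 1, 1, 1]
INDArray r = arr.rsub(8);                         //8 - 1 -> [7, 7, 7, 7, 7]
System.out.println(arr);                          //still all 1s; rsub did not modify arr
System.out.println(r);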
 
Example 2
Source File: OCNNOutputLayer.java (from deeplearning4j, Apache License 2.0)
@Override
public double computeScore(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask, boolean average) {
    double wSum = Transforms.pow(getParam(W_KEY),2).sumNumber().doubleValue() * 0.5;
    double vSum = Transforms.pow(getParam(V_KEY),2).sumNumber().doubleValue() * 0.5;
    org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer ocnnOutputLayer = (org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) conf().getLayer();
    //rsub: computes r - preOutput elementwise, where r is the scalar radius parameter
    INDArray rSubPre = preOutput.rsub(getParam(R_KEY).getDouble(0));
    INDArray rMeanSub = relu.getActivation(rSubPre, true);
    double rMean = rMeanSub.meanNumber().doubleValue();
    double rSum = getParam(R_KEY).getDouble(0);
    double nuDiv = (1 / ocnnOutputLayer.getNu()) * rMean;
    double lastTerm = -rSum;
    return (wSum + vSum + nuDiv + lastTerm);
}
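In this score function, rsub supplies the r - y term of the one-class objective: preOutput.rsub(r) computes r - preOutput elementwise, and the ReLU keeps only the margin violations. A standalone sketch of just that penalty term (the values of r and nu are illustrative, not taken from the layer above):

double r = 0.1;    //illustrative radius value
double nu = 0.04;  //illustrative nu hyperparameter
INDArray preOutput = Nd4j.create(new double[]{0.5, -0.2, 0.05});
INDArray margin = Transforms.relu(preOutput.rsub(r), true);   //max(0, r - y) elementwise
double penalty = margin.meanNumber().doubleValue() / nu;      //(1/nu) * mean(max(0, r - y))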
 
Example 3
Source File: LossFMeasure.java (from nd4j, Apache License 2.0)
private double[] computeScoreNumDenom(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask,
                boolean average) {
    INDArray output = activationFn.getActivation(preOutput.dup(), true);

    long n = labels.size(1);
    if (n != 1 && n != 2) {
        throw new UnsupportedOperationException(
                        "For binary classification: expect output size of 1 or 2. Got: " + n);
    }

    //First: determine positives and negatives
    INDArray isPositiveLabel;
    INDArray isNegativeLabel;
    INDArray pClass0;
    INDArray pClass1;
    if (n == 1) {
        //single sigmoid output: p(class 1) = output, p(class 0) = 1 - output via rsub
        isPositiveLabel = labels;
        isNegativeLabel = Transforms.not(isPositiveLabel);
        pClass0 = output.rsub(1.0);
        pClass1 = output;
    } else {
        isPositiveLabel = labels.getColumn(1);
        isNegativeLabel = labels.getColumn(0);
        pClass0 = output.getColumn(0);
        pClass1 = output.getColumn(1);
    }

    if (mask != null) {
        isPositiveLabel = isPositiveLabel.mulColumnVector(mask);
        isNegativeLabel = isNegativeLabel.mulColumnVector(mask);
    }

    double tp = isPositiveLabel.mul(pClass1).sumNumber().doubleValue();
    double fp = isNegativeLabel.mul(pClass1).sumNumber().doubleValue();
    double fn = isPositiveLabel.mul(pClass0).sumNumber().doubleValue();

    double numerator = (1.0 + beta * beta) * tp;
    double denominator = (1.0 + beta * beta) * tp + beta * beta * fn + fp;

    return new double[] {numerator, denominator};
}
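The n == 1 branch shows the most common rsub idiom: output.rsub(1.0) is the probability complement 1 - p. On hard 0/1 labels the same call flips the classes, which is why Example 6 below uses isPositiveLabel.rsub(1.0) where this version uses Transforms.not. A minimal sketch:

INDArray p = Nd4j.create(new double[]{0.9, 0.2, 0.6});
INDArray pComplement = p.rsub(1.0);      //[0.1, 0.8, 0.4] = 1 - p

INDArray labels01 = Nd4j.create(new double[]{1, 0, 1});
INDArray flipped = labels01.rsub(1.0);   //[0, 1, 0], the same 0/1 pattern Transforms.not yields here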
 
Example 4
Source File: EvaluationBinary.java (from deeplearning4j, Apache License 2.0)
@Override
public void eval(INDArray labelsArr, INDArray predictionsArr, INDArray maskArr) {

    //Check for NaNs in predictions - without this, evaluation could silently be interpreted as class 0 prediction due to argmax
    long count = Nd4j.getExecutioner().execAndReturn(new MatchCondition(predictionsArr, Conditions.isNan())).getFinalResult().longValue();
    Preconditions.checkState(count == 0, "Cannot perform evaluation with NaNs present in predictions:" +
            " %s NaNs present in predictions INDArray", count);

    if (countTruePositive != null && countTruePositive.length != labelsArr.size(axis)) {
        throw new IllegalStateException("Labels array does not match stored state size. Expected labels array with "
                        + "size " + countTruePositive.length + ", got labels array with size " + labelsArr.size(axis) + " for axis " + axis);
    }

    Triple<INDArray,INDArray, INDArray> p = BaseEvaluation.reshapeAndExtractNotMasked(labelsArr, predictionsArr, maskArr, axis);
    INDArray labels = p.getFirst();
    INDArray predictions = p.getSecond();
    INDArray maskArray = p.getThird();

    if(labels.dataType() != predictions.dataType())
        labels = labels.castTo(predictions.dataType());

    if(decisionThreshold != null && decisionThreshold.dataType() != predictions.dataType())
        decisionThreshold = decisionThreshold.castTo(predictions.dataType());

    //First: binarize the network prediction probabilities, threshold 0.5 unless otherwise specified
    //This gives us 3 binary arrays: labels, predictions, masks
    INDArray classPredictions;
    if (decisionThreshold != null) {
        classPredictions = Nd4j.createUninitialized(DataType.BOOL, predictions.shape());
        Nd4j.getExecutioner()
                        .exec(new BroadcastGreaterThan(predictions, decisionThreshold, classPredictions, 1));
    } else {
        classPredictions = predictions.gt(0.5);
    }
    classPredictions = classPredictions.castTo(predictions.dataType());

    INDArray notLabels = labels.rsub(1.0);  //if labels are 0 or 1, rsub(1.0) swaps them
    INDArray notClassPredictions = classPredictions.rsub(1.0);

    INDArray truePositives = classPredictions.mul(labels); //1s where predictions are 1, and labels are 1. 0s elsewhere
    INDArray trueNegatives = notClassPredictions.mul(notLabels); //1s where predictions are 0, and labels are 0. 0s elsewhere
    INDArray falsePositives = classPredictions.mul(notLabels); //1s where predictions are 1, labels are 0
    INDArray falseNegatives = notClassPredictions.mul(labels); //1s where predictions are 0, labels are 1

    if (maskArray != null) {
        //By multiplying by mask, we keep only those 1s that are actually present
        maskArray = maskArray.castTo(truePositives.dataType());
        truePositives.muli(maskArray);
        trueNegatives.muli(maskArray);
        falsePositives.muli(maskArray);
        falseNegatives.muli(maskArray);
    }

    int[] tpCount = truePositives.sum(0).data().asInt();
    int[] tnCount = trueNegatives.sum(0).data().asInt();
    int[] fpCount = falsePositives.sum(0).data().asInt();
    int[] fnCount = falseNegatives.sum(0).data().asInt();

    if (countTruePositive == null) {
        int l = tpCount.length;
        countTruePositive = new int[l];
        countFalsePositive = new int[l];
        countTrueNegative = new int[l];
        countFalseNegative = new int[l];
    }

    addInPlace(countTruePositive, tpCount);
    addInPlace(countFalsePositive, fpCount);
    addInPlace(countTrueNegative, tnCount);
    addInPlace(countFalseNegative, fnCount);

    if (rocBinary != null) {
        rocBinary.eval(labels, predictions, maskArray);
    }
}
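All four confusion-matrix arrays above come from two rsub(1.0) flips plus elementwise multiplies. A compact sketch of the same bookkeeping on a toy batch (already binarized, no mask):

INDArray labels      = Nd4j.create(new double[]{1, 0, 1, 0});
INDArray predictions = Nd4j.create(new double[]{1, 1, 0, 0});
INDArray notLabels      = labels.rsub(1.0);
INDArray notPredictions = predictions.rsub(1.0);
double tp = predictions.mul(labels).sumNumber().doubleValue();        //1.0
double fp = predictions.mul(notLabels).sumNumber().doubleValue();     //1.0
double fn = notPredictions.mul(labels).sumNumber().doubleValue();     //1.0
double tn = notPredictions.mul(notLabels).sumNumber().doubleValue();  //1.0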
 
Example 5
Source File: BaseUnderSamplingPreProcessor.java (from deeplearning4j, Apache License 2.0)
public INDArray adjustMasks(INDArray label, INDArray labelMask, int minorityLabel, double targetDist) {

    if (labelMask == null) {
        labelMask = Nd4j.ones(label.size(0), label.size(2));
    }
    validateData(label, labelMask);

    INDArray bernoullis = Nd4j.zeros(labelMask.shape());
    long currentTimeSliceEnd = label.size(2);
    //iterate over each tbptt window
    while (currentTimeSliceEnd > 0) {

        long currentTimeSliceStart = Math.max(currentTimeSliceEnd - tbpttWindowSize, 0);

        //get views for current time slice
        INDArray currentWindowBernoulli = bernoullis.get(NDArrayIndex.all(),
                        NDArrayIndex.interval(currentTimeSliceStart, currentTimeSliceEnd));
        INDArray currentMask = labelMask.get(NDArrayIndex.all(),
                        NDArrayIndex.interval(currentTimeSliceStart, currentTimeSliceEnd));
        INDArray currentLabel;
        if (label.size(1) == 2) {
            //if one-hot, grab the minority-label column
            currentLabel = label.get(NDArrayIndex.all(), NDArrayIndex.point(minorityLabel),
                            NDArrayIndex.interval(currentTimeSliceStart, currentTimeSliceEnd));
        } else {
            currentLabel = label.get(NDArrayIndex.all(), NDArrayIndex.point(0),
                            NDArrayIndex.interval(currentTimeSliceStart, currentTimeSliceEnd));
            if (minorityLabel == 0) {
                currentLabel = currentLabel.rsub(1.0);  //rsub(1.0) swaps 0s and 1s
            }
        }

        //calculate required probabilities and write into the view
        currentWindowBernoulli.assign(calculateBernoulli(currentLabel, currentMask, targetDist));

        currentTimeSliceEnd = currentTimeSliceStart;
    }

    return Nd4j.getExecutioner().exec(
                    new BernoulliDistribution(Nd4j.createUninitialized(bernoullis.shape()), bernoullis),
                    Nd4j.getRandom());
}
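Aside from the rsub(1.0) label flip, this method leans on the fact that get(...) with NDArrayIndex returns a view: assigning into currentWindowBernoulli writes straight through to bernoullis. A minimal sketch of that pattern (shapes are illustrative):

INDArray parent = Nd4j.zeros(2, 6);
INDArray window = parent.get(NDArrayIndex.all(), NDArrayIndex.interval(2, 4));  //a view, not a copy
window.assign(1.0);            //writes through to parent
System.out.println(parent);    //columns 2 and 3 are now 1.0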
 
Example 6
Source File: LossFMeasure.java (from deeplearning4j, Apache License 2.0)
private double[] computeScoreNumDenom(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask,
                boolean average) {
    INDArray output = activationFn.getActivation(preOutput.dup(), true);

    long n = labels.size(1);
    if (n != 1 && n != 2) {
        throw new UnsupportedOperationException(
                        "For binary classification: expect output size of 1 or 2. Got: " + n);
    }

    //First: determine positives and negatives
    INDArray isPositiveLabel;
    INDArray isNegativeLabel;
    INDArray pClass0;
    INDArray pClass1;
    if (n == 1) {
        //single output column: both the label flip and the probability complement use rsub(1.0)
        isPositiveLabel = labels;
        isNegativeLabel = isPositiveLabel.rsub(1.0);
        pClass0 = output.rsub(1.0);
        pClass1 = output;
    } else {
        isPositiveLabel = labels.getColumn(1);
        isNegativeLabel = labels.getColumn(0);
        pClass0 = output.getColumn(0);
        pClass1 = output.getColumn(1);
    }

    if (mask != null) {
        isPositiveLabel = isPositiveLabel.mulColumnVector(mask);
        isNegativeLabel = isNegativeLabel.mulColumnVector(mask);
    }

    double tp = isPositiveLabel.mul(pClass1).sumNumber().doubleValue();
    double fp = isNegativeLabel.mul(pClass1).sumNumber().doubleValue();
    double fn = isPositiveLabel.mul(pClass0).sumNumber().doubleValue();

    double numerator = (1.0 + beta * beta) * tp;
    double denominator = (1.0 + beta * beta) * tp + beta * beta * fn + fp;

    return new double[] {numerator, denominator};
}
 
Example 7
Source File: OCNNOutputLayer.java (from deeplearning4j, Apache License 2.0)
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    INDArray preAct = preOutput.rsub(getParam(R_KEY).getDouble(0));  //r - preOutput
    INDArray target = relu.backprop(preAct, Nd4j.ones(preOutput.dataType(), preAct.shape())).getFirst();
    return target;
}
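Because the incoming gradient passed to relu.backprop is all ones, the result reduces to the ReLU derivative: 1 where preAct = r - preOutput is positive, 0 elsewhere. A hedged equivalent using a comparison op (assuming a standard, non-leaky ReLU):

INDArray preAct = preOutput.rsub(getParam(R_KEY).getDouble(0));  //r - preOutput
INDArray grad = preAct.gt(0).castTo(preOutput.dataType());       //1 where r - preOutput > 0, else 0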
 
Example 8
Source File: MaskedReductionUtil.java (from deeplearning4j, Apache License 2.0)
public static INDArray maskedPoolingEpsilonTimeSeries(PoolingType poolingType, INDArray input, INDArray mask,
                INDArray epsilon2d, int pnorm) {

    if (input.rank() != 3) {
        throw new IllegalArgumentException("Expect rank 3 input activation array: got " + input.rank());
    }
    if (mask.rank() != 2) {
        throw new IllegalArgumentException("Expect rank 2 array for mask: got " + mask.rank());
    }
    if (epsilon2d.rank() != 2) {
        throw new IllegalArgumentException("Expected rank 2 array for errors: got " + epsilon2d.rank());
    }

    //Mask: [minibatch, tsLength]
    //Epsilon: [minibatch, vectorSize]

    mask = mask.castTo(input.dataType());

    switch (poolingType) {
        case MAX:
            INDArray negInfMask = mask.rsub(1.0);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(input.dataType(), input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, 0, 2));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.exec(new IsMax(withInf, withInf.ulike(), 2))[0];

            return Nd4j.getExecutioner().exec(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in,dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(input.dataType(), input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, 0, 2));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            INDArray nEachTimeSeries = mask.sum(1); //[minibatchSize,tsLength] -> [minibatchSize,1]
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;

        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(input.dataType(), input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, 0, 2));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);   //in-place reverse divide: denom = epsilon2d / denom
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, 0, 2)); //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
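The MAX branch uses a trick worth isolating: mask.rsub(1.0) converts the keep/drop mask into a drop indicator, replaceWhere promotes those 1s to -infinity, and the broadcast add then pushes masked steps below any real activation. A standalone sketch on a small mask:

INDArray mask = Nd4j.create(new double[][]{{1, 1, 0}, {1, 0, 0}});
INDArray negInfMask = mask.rsub(1.0);   //0 where kept, 1 where masked
BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));
//negInfMask is now 0 where kept and -Infinity where masked; adding it to the
//activations guarantees a masked step can never win a max reduction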
 
Example 9
Source File: MaskedReductionUtil.java (from deeplearning4j, Apache License 2.0)
public static INDArray maskedPoolingConvolution(PoolingType poolingType, INDArray toReduce, INDArray mask, int pnorm, DataType dataType) {
    if(mask.rank() != 4){
        //TODO BETTER ERROR MESSAGE EXPLAINING FORMAT
        //TODO ALSO HANDLE LEGACY FORMAT WITH WARNING WHERE POSSIBLE
        throw new IllegalStateException("Expected rank 4 mask array: Got array with shape " + Arrays.toString(mask.shape()));
    }

    mask = mask.castTo(dataType);   //no-op if already correct dtype

    // [minibatch, channels, h, w] data with a mask array of shape [minibatch, 1, X, Y]
    // where X=(1 or inH) and Y=(1 or inW)

    //General case: must be equal or 1 on each dimension
    int[] dimensions = new int[4];
    int count = 0;
    for(int i=0; i<4; i++ ){
        if(toReduce.size(i) == mask.size(i)){
            dimensions[count++] = i;
        }
    }
    if(count < 4){
        dimensions = Arrays.copyOfRange(dimensions, 0, count);
    }

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask;
            if(mask.dataType() == DataType.BOOL){
                negInfMask = Transforms.not(mask).castTo(dataType);
            } else {
                negInfMask = mask.rsub(1.0);
            }
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(toReduce, negInfMask, withInf, dimensions));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            return withInf.max(2, 3);
        case AVG:
        case SUM:
            INDArray masked = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked, dimensions));

            INDArray summed = masked.sum(2, 3);
            if (poolingType == PoolingType.SUM) {
                return summed;
            }
            INDArray maskCounts = mask.sum(1,2,3);
            summed.diviColumnVector(maskCounts);
            return summed;

        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked2, dimensions));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = abs.sum(2, 3);

            return Transforms.pow(pNorm, 1.0 / pnorm);
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
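The PNORM branch computes (sum of |x|^p over the spatial dims)^(1/p) after zeroing the masked values. The same arithmetic on a single vector, assuming pnorm = 2:

INDArray x = Nd4j.create(new double[]{3, -4});   //already masked: dropped entries are 0
int p = 2;
INDArray abs = Transforms.abs(x, true);          //[3, 4]
Transforms.pow(abs, p, false);                   //in-place -> [9, 16]
double pNorm = Math.pow(abs.sumNumber().doubleValue(), 1.0 / p);  //sqrt(25) = 5.0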
 
Example 10
Source File: MaskedReductionUtil.java (from deeplearning4j, Apache License 2.0)
public static INDArray maskedPoolingEpsilonCnn(PoolingType poolingType, INDArray input, INDArray mask,
                INDArray epsilon2d, int pnorm, DataType dataType) {

    // [minibatch, channels, h=1, w=X] or [minibatch, channels, h=X, w=1] data
    // with a mask array of shape [minibatch, X]

    //If masking along height: broadcast dimensions are [0,2]
    //If masking along width: broadcast dimensions are [0,3]

    mask = mask.castTo(dataType);   //No-op if correct type

    //General case: must be equal or 1 on each dimension
    int[] dimensions = new int[4];
    int count = 0;
    for(int i=0; i<4; i++ ){
        if(input.size(i) == mask.size(i)){
            dimensions[count++] = i;
        }
    }
    if(count < 4){
        dimensions = Arrays.copyOfRange(dimensions, 0, count);
    }

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask;
            if(mask.dataType() == DataType.BOOL){
                negInfMask = Transforms.not(mask).castTo(dataType);
            } else {
                negInfMask = mask.rsub(1.0);
            }
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(dataType, input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, dimensions));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.exec(new IsMax(withInf, withInf.ulike(), 2, 3))[0];

            return Nd4j.getExecutioner().exec(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in,dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(dataType, input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, dimensions));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            //Note that with CNNs, current design is restricted to [minibatch, channels, 1, W] or [minibatch, channels, H, 1]
            INDArray nEachTimeSeries = mask.sum(1,2,3); //[minibatchSize,tsLength] -> [minibatchSize,1]
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;

        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(dataType, input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, dimensions));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2, 3), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);   //in-place reverse divide: denom = epsilon2d / denom
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, dimensions)); //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);

    }
}
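For reference, the PNORM backprop lines (numerator, denom, and the rdivi call) implement the derivative of the p-norm. With y denoting the pooled output:

    y = (sum_i |x_i|^p)^(1/p)
    dy/dx_i = x_i * |x_i|^(p-2) / y^(p-1)
    dL/dx_i = (dL/dy) * x_i * |x_i|^(p-2) / y^(p-1)

numerator holds x_i * |x_i|^(p-2) (which is just x_i when p = 2), denom holds y^(p-1), and denom.rdivi(epsilon2d) forms epsilon2d / denom in place, contributing the dL/dy factor to the quotient.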