Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#rsubi()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#rsubi(). rsubi is the in-place "reverse subtract" operation: a.rsubi(b) replaces each element of a with the corresponding element of b minus it (b - a), and the overload taking a Number does the same with a scalar against every element. The examples below are taken from the nd4j, deeplearning4j, and DataVec projects.
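
Before the project examples, a minimal self-contained sketch of the two rsubi overloads may help. It assumes an nd4j backend such as nd4j-native is on the classpath; the class name and the values are illustrative only:

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class RsubiDemo {
    public static void main(String[] args) {
        // Scalar overload: every element of a becomes 10 - element, in place
        INDArray a = Nd4j.create(new float[]{1f, 2f, 3f});
        a.rsubi(10);                                          // a = [9, 8, 7]

        // Array overload: a becomes b - a, element-wise, in place
        INDArray b = Nd4j.create(new float[]{10f, 20f, 30f});
        a.rsubi(b);                                           // a = [1, 12, 23]

        // The non-in-place counterpart returns a new array and leaves a unchanged
        INDArray c = a.rsub(100);                             // c = [99, 88, 77]
        System.out.println(a + " / " + c);
    }
}
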
Example 1
Source File: LossL2.java    From nd4j with Apache License 2.0
protected INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    INDArray scoreArr = output.rsubi(labels);
    scoreArr = scoreArr.muli(scoreArr);

    //Weighted loss function
    if (weights != null) {
        if (weights.length() != output.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                    + ") does not match output.size(1)=" + output.size(1));
        }
        scoreArr.muliRowVector(weights);
    }

    //Loss function with masking
    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
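
Here output.rsubi(labels) overwrites the activation array with labels - output, and muli(scoreArr) then squares that difference in place, so the per-element squared error is produced without any allocation beyond the initial dup of preOutput.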
 
Example 2
Source File: LossL2.java    From deeplearning4j with Apache License 2.0
protected INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if(!labels.equalShapes(preOutput)){
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    INDArray scoreArr = output.rsubi(labels);
    scoreArr = scoreArr.muli(scoreArr);

    //Weighted loss function
    if (weights != null) {
        if (weights.length() != output.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                    + ") does not match output.size(1)=" + output.size(1));
        }
        scoreArr.muliRowVector(weights.castTo(scoreArr.dataType()));
    }

    //Loss function with masking
    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
 
Example 3
Source File: LossHinge.java    From deeplearning4j with Apache License 2.0
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if(!labels.equalShapes(preOutput)){
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    /* y_hat is -1 or 1
    hinge loss is max(0,1-y_hat*y)
     */
    INDArray output = activationFn.getActivation(preOutput.dup(), true);

    INDArray scoreArr = output.muli(labels); //y*yhat
    scoreArr.rsubi(1.0); //1 - y*yhat

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr; // 1 - y*yhat
}
 
Example 4
Source File: LossSquaredHinge.java    From deeplearning4j with Apache License 2.0
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if(!labels.equalShapes(preOutput)){
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    /* y_hat is -1 or 1
    hinge loss is max(0,1-y_hat*y)
     */
    INDArray output = activationFn.getActivation(preOutput.dup(), true);

    INDArray scoreArr = output.muli(labels); //y*yhat
    scoreArr.rsubi(1.0); //1 - y*yhat

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr; // 1 - y*yhat
}
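
Both hinge implementations use the same two-step pattern: muli(labels) forms y * y_hat in place, and rsubi(1.0) then replaces it with the margin 1 - y * y_hat. The max(0, ...) clamp mentioned in the comment (and the squaring for the squared hinge) is not applied in this helper, so it is presumably handled by its callers.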
 
Example 5
Source File: LossPoisson.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if(!labels.equalShapes(preOutput)){
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    INDArray yHat = activationFn.getActivation(preOutput.dup(), true);
    INDArray yDivyhat = labels.div(yHat);
    INDArray dLda = yDivyhat.rsubi(1);

    if (mask != null && LossUtil.isPerOutputMasking(dLda, mask)) {
        //For *most* activation functions: we don't actually need to mask dL/da in addition to masking dL/dz later
        //but: some, like softmax, require both (due to dL/dz_i being a function of dL/da_j, for i != j)
        //We could add a special case for softmax (activationFn instanceof ActivationSoftmax) but that would be
        // error prone - though buy us a tiny bit of performance
        LossUtil.applyMask(dLda, mask);
    }

    INDArray gradients = activationFn.backprop(preOutput, dLda).getFirst(); //TODO activation functions with params

    if (mask != null) {
        LossUtil.applyMask(gradients, mask);
    }

    return gradients;
}
 
Example 6
Source File: LossHinge.java    From nd4j with Apache License 2.0
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                        "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    /* y_hat is -1 or 1
    hinge loss is max(0,1-y_hat*y)
     */
    //INDArray output = Nd4j.getExecutioner().execAndReturn(Nd4j.getOpFactory().createTransform(activationFn, preOutput.dup()));
    INDArray output = activationFn.getActivation(preOutput.dup(), true);

    INDArray scoreArr = output.muli(labels); //y*yhat
    scoreArr.rsubi(1.0); //1 - y*yhat

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr; // 1 - y*yhat
}
 
Example 7
Source File: LossSquaredHinge.java    From nd4j with Apache License 2.0
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                        "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    /* y_hat is -1 or 1
    hinge loss is max(0,1-y_hat*y)
     */
    INDArray output = activationFn.getActivation(preOutput.dup(), true);

    INDArray scoreArr = output.muli(labels); //y*yhat
    scoreArr.rsubi(1.0); //1 - y*yhat

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr; // 1 - y*yhat
}
 
Example 8
Source File: NDArrayTestsFortran.java    From deeplearning4j with Apache License 2.0
@Test
public void testRSubi() {
    INDArray n2 = Nd4j.ones(2);
    INDArray n2Assertion = Nd4j.zeros(2);
    INDArray nRsubi = n2.rsubi(1);
    assertEquals(n2Assertion, nRsubi);
}
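
The assertion passes because rsubi(1) replaces each element of the ones array with 1 - 1 = 0. A variant with asymmetric values (hypothetical, but using the same API and test setup as above) shows the direction of the subtraction more clearly:

@Test
public void testRSubiDirection() {
    INDArray n = Nd4j.create(new float[]{3f, 4f});
    INDArray expected = Nd4j.create(new float[]{2f, 1f});
    // in place: each element becomes 5 - element
    INDArray result = n.rsubi(5);
    assertEquals(expected, result);
}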
 
Example 9
Source File: LossPoisson.java    From nd4j with Apache License 2.0
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                        "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    INDArray yHat = activationFn.getActivation(preOutput.dup(), true);
    INDArray yDivyhat = labels.div(yHat);
    INDArray dLda = yDivyhat.rsubi(1);

    if (mask != null && LossUtil.isPerOutputMasking(dLda, mask)) {
        //For *most* activation functions: we don't actually need to mask dL/da in addition to masking dL/dz later
        //but: some, like softmax, require both (due to dL/dz_i being a function of dL/da_j, for i != j)
        //We could add a special case for softmax (activationFn instanceof ActivationSoftmax) but that would be
        // error prone - though buy us a tiny bit of performance
        LossUtil.applyMask(dLda, mask);
    }

    INDArray gradients = activationFn.backprop(preOutput, dLda).getFirst(); //TODO activation functions with params

    if (mask != null) {
        LossUtil.applyMask(gradients, mask);
    }

    return gradients;
}
 
Example 10
Source File: NDArrayTestsFortran.java    From nd4j with Apache License 2.0
@Test
public void testRSubi() {
    INDArray n2 = Nd4j.ones(2);
    INDArray n2Assertion = Nd4j.zeros(2);
    INDArray nRsubi = n2.rsubi(1);
    assertEquals(n2Assertion, nRsubi);
}
 
Example 11
Source File: CudaScalarsTests.java    From nd4j with Apache License 2.0
@Test
public void testPinnedScalarRSub() throws Exception {
    // simple way to stop test if we're not on CUDA backend here
    assertEquals("JcublasLevel1", Nd4j.getBlasWrapper().level1().getClass().getSimpleName());

    INDArray array1 = Nd4j.create(new float[]{1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f});
    INDArray array2 = Nd4j.create(new float[]{2.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f});

    array2.rsubi(0.5f);

    System.out.println("RSub result: " + array2.getFloat(0));
    assertEquals(-1.5f, array2.getFloat(0), 0.01f);
}
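
Only array2 is actually modified here: its first element is 2.0f, so rsubi(0.5f) replaces it with 0.5 - 2.0 = -1.5, which is exactly what the assertion checks. array1 is created but plays no part in the rsubi call.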
 
Example 12
Source File: LossBinaryXENT.java    From nd4j with Apache License 2.0
private INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {

        if (labels.size(1) != preOutput.size(1)) {
            throw new IllegalArgumentException(
                            "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                                            + " number of outputs (nOut = " + preOutput.size(1) + ") ");
        }

        INDArray scoreArr;
        if (activationFn instanceof ActivationSoftmax) {
            //Use LogSoftMax op to avoid numerical issues when calculating score
            INDArray logsoftmax = Nd4j.getExecutioner().execAndReturn(new LogSoftMax(preOutput.dup()));
            scoreArr = logsoftmax.muli(labels);

        } else {
            //INDArray output = Nd4j.getExecutioner().execAndReturn(Nd4j.getOpFactory().createTransform(activationFn, preOutput.dup()));
            INDArray output = activationFn.getActivation(preOutput.dup(), true);
            if (clipEps > 0.0) {
                CustomOp op = DynamicCustomOp.builder("clipbyvalue")
                        .addInputs(output)
                        .callInplace(true)
                        .addFloatingPointArguments(clipEps, 1.0-clipEps)
                        .build();
                Nd4j.getExecutioner().exec(op);
            }
            scoreArr = Transforms.log(output, true).muli(labels);
            INDArray secondTerm = output.rsubi(1);
            Transforms.log(secondTerm, false);
            secondTerm.muli(labels.rsub(1));
            scoreArr.addi(secondTerm);
        }

        //Weighted loss function
        if (weights != null) {
            if (weights.length() != preOutput.size(1)) {
                throw new IllegalStateException("Weights vector (length " + weights.length()
                                + ") does not match output.size(1)=" + preOutput.size(1));
            }

            scoreArr.muliRowVector(weights);
        }

        if (mask != null) {
            LossUtil.applyMask(scoreArr, mask);
        }
        return scoreArr;
    }
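
In the non-softmax branch, rsubi builds the (1 - y) * log(1 - y_hat) term: scoreArr first receives y * log(y_hat) via a copying log, then output.rsubi(1) overwrites the activations with 1 - y_hat, the in-place log and muli(labels.rsub(1)) turn that into (1 - y) * log(1 - y_hat), and addi combines the two terms. Destroying output here is safe because it is a dup of preOutput.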
 
Example 13
Source File: NDArrayColumnsMathOpTransform.java    From DataVec with Apache License 2.0
@Override
protected Writable doOp(Writable... input) {
    INDArray out = ((NDArrayWritable) input[0]).get().dup();

    switch (mathOp) {
        case Add:
            for (int i = 1; i < input.length; i++) {
                out.addi(((NDArrayWritable) input[i]).get());
            }
            break;
        case Subtract:
            out.subi(((NDArrayWritable) input[1]).get());
            break;
        case Multiply:
            for (int i = 1; i < input.length; i++) {
                out.muli(((NDArrayWritable) input[i]).get());
            }
            break;
        case Divide:
            out.divi(((NDArrayWritable) input[1]).get());
            break;
        case ReverseSubtract:
            out.rsubi(((NDArrayWritable) input[1]).get());
            break;
        case ReverseDivide:
            out.rdivi(((NDArrayWritable) input[1]).get());
            break;
        case Modulus:
        case ScalarMin:
        case ScalarMax:
            throw new IllegalArgumentException(
                            "Invalid MathOp: cannot use " + mathOp + " with NDArrayColumnsMathOpTransform");
        default:
            throw new RuntimeException("Unknown MathOp: " + mathOp);
    }

    //To avoid threading issues...
    Nd4j.getExecutioner().commit();

    return new NDArrayWritable(out);
}
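
For ReverseSubtract and ReverseDivide the operand order is flipped relative to Subtract and Divide: out.rsubi(x) leaves x - out in out, and out.rdivi(x) leaves x / out, so the result is input[1] - input[0] (respectively input[1] / input[0]) rather than the other way around.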
 
Example 14
Source File: NDArrayColumnsMathOpTransform.java    From deeplearning4j with Apache License 2.0
@Override
protected Writable doOp(Writable... input) {
    INDArray out = ((NDArrayWritable) input[0]).get().dup();

    switch (mathOp) {
        case Add:
            for (int i = 1; i < input.length; i++) {
                out.addi(((NDArrayWritable) input[i]).get());
            }
            break;
        case Subtract:
            out.subi(((NDArrayWritable) input[1]).get());
            break;
        case Multiply:
            for (int i = 1; i < input.length; i++) {
                out.muli(((NDArrayWritable) input[i]).get());
            }
            break;
        case Divide:
            out.divi(((NDArrayWritable) input[1]).get());
            break;
        case ReverseSubtract:
            out.rsubi(((NDArrayWritable) input[1]).get());
            break;
        case ReverseDivide:
            out.rdivi(((NDArrayWritable) input[1]).get());
            break;
        case Modulus:
        case ScalarMin:
        case ScalarMax:
            throw new IllegalArgumentException(
                            "Invalid MathOp: cannot use " + mathOp + " with NDArrayColumnsMathOpTransform");
        default:
            throw new RuntimeException("Unknown MathOp: " + mathOp);
    }

    //To avoid threading issues...
    Nd4j.getExecutioner().commit();

    return new NDArrayWritable(out);
}
 
Example 15
Source File: NDArrayScalarOpTransform.java    From deeplearning4j with Apache License 2.0
@Override
public NDArrayWritable map(Writable w) {
    if (!(w instanceof NDArrayWritable)) {
        throw new IllegalArgumentException("Input writable is not an NDArrayWritable: is " + w.getClass());
    }

    //Make a copy - can't always assume that the original INDArray won't be used again in the future
    NDArrayWritable n = ((NDArrayWritable) w);
    INDArray a = n.get().dup();
    switch (mathOp) {
        case Add:
            a.addi(scalar);
            break;
        case Subtract:
            a.subi(scalar);
            break;
        case Multiply:
            a.muli(scalar);
            break;
        case Divide:
            a.divi(scalar);
            break;
        case Modulus:
            a.fmodi(scalar);
            break;
        case ReverseSubtract:
            a.rsubi(scalar);
            break;
        case ReverseDivide:
            a.rdivi(scalar);
            break;
        case ScalarMin:
            Transforms.min(a, scalar, false);
            break;
        case ScalarMax:
            Transforms.max(a, scalar, false);
            break;
        default:
            throw new UnsupportedOperationException("Unknown or not supported op: " + mathOp);
    }

    //To avoid threading issues...
    Nd4j.getExecutioner().commit();

    return new NDArrayWritable(a);
}
 
Example 16
Source File: LossBinaryXENT.java    From deeplearning4j with Apache License 2.0
private INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if(!labels.equalShapes(preOutput)){
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype

    INDArray scoreArr;
    if (activationFn instanceof ActivationSoftmax) {
        //TODO Post GPU support for custom ops: Use LogSoftMax op to avoid numerical issues when calculating score
        INDArray logsoftmax = Nd4j.exec((CustomOp) new SoftMax(preOutput, preOutput.ulike(), -1))[0];
        Transforms.log(logsoftmax, false);
        scoreArr = logsoftmax.muli(labels);

    } else {
        INDArray output = activationFn.getActivation(preOutput.dup(), true);
        if (clipEps > 0.0) {
            CustomOp op = DynamicCustomOp.builder("clipbyvalue")
                    .addInputs(output)
                    .callInplace(true)
                    .addFloatingPointArguments(clipEps, 1.0-clipEps)
                    .build();
            Nd4j.getExecutioner().execAndReturn(op);
        }
        scoreArr = Transforms.log(output, true).muli(labels);
        INDArray secondTerm = output.rsubi(1);
        Transforms.log(secondTerm, false);
        secondTerm.muli(labels.rsub(1));
        scoreArr.addi(secondTerm);
    }

    //Weighted loss function
    if (weights != null) {
        if (weights.length() != preOutput.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                            + ") does not match output.size(1)=" + preOutput.size(1));
        }

        scoreArr.muliRowVector(weights.castTo(scoreArr.dataType()));
    }

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
 
Example 17
Source File: NDArrayScalarOpTransform.java    From DataVec with Apache License 2.0
@Override
public NDArrayWritable map(Writable w) {
    if (!(w instanceof NDArrayWritable)) {
        throw new IllegalArgumentException("Input writable is not an NDArrayWritable: is " + w.getClass());
    }

    //Make a copy - can't always assume that the original INDArray won't be used again in the future
    NDArrayWritable n = ((NDArrayWritable) w);
    INDArray a = n.get().dup();
    switch (mathOp) {
        case Add:
            a.addi(scalar);
            break;
        case Subtract:
            a.subi(scalar);
            break;
        case Multiply:
            a.muli(scalar);
            break;
        case Divide:
            a.divi(scalar);
            break;
        case Modulus:
            throw new UnsupportedOperationException(mathOp + " is not supported for NDArrayWritable");
        case ReverseSubtract:
            a.rsubi(scalar);
            break;
        case ReverseDivide:
            a.rdivi(scalar);
            break;
        case ScalarMin:
            Transforms.min(a, scalar, false);
            break;
        case ScalarMax:
            Transforms.max(a, scalar, false);
            break;
        default:
            throw new UnsupportedOperationException("Unknown or not supported op: " + mathOp);
    }

    //To avoid threading issues...
    Nd4j.getExecutioner().commit();

    return new NDArrayWritable(a);
}
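
This older DataVec version of the scalar transform differs from the deeplearning4j one above only in the Modulus branch (unsupported here, fmodi there); the ReverseSubtract and ReverseDivide branches are identical, mapping to rsubi(scalar) and rdivi(scalar), which replace each element of a with scalar - element and scalar / element respectively.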