Java Code Examples for org.nd4j.linalg.lossfunctions.ILossFunction#computeScoreArray()

The following examples show how to use org.nd4j.linalg.lossfunctions.ILossFunction#computeScoreArray(). The originating project and source file are noted above each example.
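
For context, computeScoreArray(labels, preOutput, activationFn, mask) returns the loss of each example as a column vector; the output layers below differ mainly in how they reshape their data to 2d before the call and how they aggregate the result afterwards. A minimal standalone sketch (loss function, shapes, and data chosen purely for illustration):

import org.nd4j.linalg.activations.impl.ActivationIdentity;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.ILossFunction;
import org.nd4j.linalg.lossfunctions.impl.LossMSE;

public class ComputeScoreArrayExample {
    public static void main(String[] args) {
        //4 examples with 3 outputs each; random data purely for illustration
        INDArray labels = Nd4j.rand(4, 3);
        INDArray preOutput = Nd4j.rand(4, 3);

        ILossFunction loss = new LossMSE();
        //ActivationIdentity: treat preOutput as the final output; null = no mask
        INDArray scorePerExample = loss.computeScoreArray(labels, preOutput, new ActivationIdentity(), null);
        System.out.println(scorePerExample); //shape [4, 1]: one score per example
    }
}
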
Example 1
Source File: RnnOutputLayer.java    From deeplearning4j with Apache License 2.0
/**Compute the score for each example individually, after labels and input have been set.
 *
 * @param fullNetRegTerm Regularization score term for the entire network (or, 0.0 to not include regularization)
 * @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
 */
@Override
public INDArray computeScoreForExamples(double fullNetRegTerm, LayerWorkspaceMgr workspaceMgr) {
    //For RNN: need to sum up the score over each time step before returning.

    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    INDArray preOut = preOutput2d(false, workspaceMgr);

    ILossFunction lossFunction = layerConf().getLossFn();
    INDArray scoreArray =
                    lossFunction.computeScoreArray(getLabels2d(workspaceMgr, ArrayType.FF_WORKING_MEM), preOut,
                            layerConf().getActivationFn(), maskArray);
    //scoreArray: shape [minibatch*timeSeriesLength, 1]
    //Reshape it to [minibatch, timeSeriesLength] then sum over time step

    INDArray scoreArrayTs = TimeSeriesUtils.reshapeVectorToTimeSeriesMask(scoreArray, (int)input.size(0));
    INDArray summedScores = scoreArrayTs.sum(true, 1);

    if (fullNetRegTerm != 0.0) {
        summedScores.addi(fullNetRegTerm);
    }

    return summedScores;
}
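
The reshape-then-sum step above can be reproduced standalone. A small sketch (the 'f' ordering is an assumption about how the per-timestep scores were flattened, not a guarantee about TimeSeriesUtils internals):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class TimeStepScoreSumExample {
    public static void main(String[] args) {
        int minibatch = 2;
        int tsLength = 3;
        //Stand-in for the [minibatch*timeSeriesLength, 1] score array above
        INDArray scoreArray = Nd4j.rand(minibatch * tsLength, 1);
        //Reshape to [minibatch, timeSeriesLength], then sum over the time dimension
        INDArray scorePerTimeStep = scoreArray.reshape('f', minibatch, tsLength);
        INDArray summedScores = scorePerTimeStep.sum(true, 1); //shape [minibatch, 1]
        System.out.println(summedScores);
    }
}
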
 
Example 2
Source File: OCNNOutputLayer.java    From deeplearning4j with Apache License 2.0
/**Compute the score for each example individually, after labels and input have been set.
 *
 * @param fullNetRegTerm Regularization score term for the entire network (or, 0.0 to not include regularization)
 * @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
 */
@Override
public INDArray computeScoreForExamples(double fullNetRegTerm, LayerWorkspaceMgr workspaceMgr) {
    //Sum over the output dimension to give one score per example.

    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    INDArray preOut = preOutput2d(false, workspaceMgr);

    ILossFunction lossFunction = layerConf().getLossFn();
    INDArray scoreArray =
            lossFunction.computeScoreArray(getLabels2d(workspaceMgr, ArrayType.FF_WORKING_MEM), preOut,
                    layerConf().getActivationFn(), maskArray);
    INDArray summedScores = scoreArray.sum(1);

    if (fullNetRegTerm != 0.0) {
        summedScores.addi(fullNetRegTerm);
    }

    return summedScores;
}
 
Example 3
Source File: CenterLossOutputLayer.java    From deeplearning4j with Apache License 2.0
/**Compute the score for each example individually, after labels and input have been set.
 *
 * @param fullNetRegTerm Regularization term for the entire network (or, 0.0 to not include regularization)
 * @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
 */
@Override
public INDArray computeScoreForExamples(double fullNetRegTerm, LayerWorkspaceMgr workspaceMgr) {
    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    INDArray preOut = preOutput2d(false, workspaceMgr);

    // calculate the intra-class score component
    INDArray centers = params.get(CenterLossParamInitializer.CENTER_KEY);
    INDArray centersForExamples = labels.mmul(centers);
    INDArray intraClassScoreArray = input.sub(centersForExamples);

    // calculate the inter-class score component
    ILossFunction interClassLoss = layerConf().getLossFn();
    INDArray scoreArray = interClassLoss.computeScoreArray(getLabels2d(workspaceMgr, ArrayType.FF_WORKING_MEM), preOut, layerConf().getActivationFn(),
                    maskArray);
    scoreArray.addi(intraClassScoreArray.muli(layerConf().getLambda() / 2));

    if (fullNetRegTerm != 0.0) {
        scoreArray.addi(fullNetRegTerm);
    }
    return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, scoreArray);
}
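
The labels.mmul(centers) step above is worth unpacking: with one-hot labels of shape [minibatch, numClasses] and a centers matrix of shape [numClasses, nIn], the product selects each example's class center as a [minibatch, nIn] matrix. A toy illustration (data invented for the sketch):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class CenterSelectionExample {
    public static void main(String[] args) {
        //Two classes, three one-hot labelled examples, two features per center
        INDArray labels = Nd4j.create(new float[][] {{1, 0}, {0, 1}, {1, 0}});
        INDArray centers = Nd4j.create(new float[][] {{0.1f, 0.2f}, {0.9f, 0.8f}});
        //Row i is the center of example i's class
        INDArray centersForExamples = labels.mmul(centers);
        System.out.println(centersForExamples); //[[0.1,0.2],[0.9,0.8],[0.1,0.2]]
    }
}
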
 
Example 4
Source File: VariationalAutoencoder.java    From deeplearning4j with Apache License 2.0
/**
 * Return the reconstruction error for this variational autoencoder.<br>
 * <b>NOTE (important):</b> This method is used ONLY for VAEs that have a standard neural network loss function (i.e.,
 * an {@link org.nd4j.linalg.lossfunctions.ILossFunction} instance such as mean squared error) instead of using a
 * probabilistic reconstruction distribution P(x|z) for the reconstructions (as presented in the VAE architecture by
 * Kingma and Welling).<br>
 * You can check if the VAE has a loss function using {@link #hasLossFunction()}<br>
 * Consequently, the reconstruction error is a simple deterministic function (no Monte-Carlo sampling is required,
 * unlike {@link #reconstructionProbability(INDArray, int)} and {@link #reconstructionLogProbability(INDArray, int)})
 *
 * @param data       The data to calculate the reconstruction error on
 * @return Column vector of reconstruction errors for each example (shape: [numExamples,1])
 */
public INDArray reconstructionError(INDArray data) {
    if (!hasLossFunction()) {
        throw new IllegalStateException(
                        "Cannot use reconstructionError method unless the variational autoencoder is "
                                        + "configured with a standard loss function (via LossFunctionWrapper). For VAEs utilizing a reconstruction "
                                        + "distribution, use the reconstructionProbability or reconstructionLogProbability methods "
                                        + layerId());
    }

    INDArray pZXMean = activate(data, false, LayerWorkspaceMgr.noWorkspaces());
    INDArray reconstruction = generateAtMeanGivenZ(pZXMean); //Not probabilistic -> "mean" == output

    if (reconstructionDistribution instanceof CompositeReconstructionDistribution) {
        CompositeReconstructionDistribution c = (CompositeReconstructionDistribution) reconstructionDistribution;
        return c.computeLossFunctionScoreArray(data, reconstruction);
    } else {

        LossFunctionWrapper lfw = (LossFunctionWrapper) reconstructionDistribution;
        ILossFunction lossFunction = lfw.getLossFunction();

        //Re: the activation identity here - the reconstruction array already has the activation function applied,
        // so we don't want to apply it again. i.e., we are passing the output, not the pre-output.
        return lossFunction.computeScoreArray(data, reconstruction, new ActivationIdentity(), null);
    }
}
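
A hedged usage sketch of the method above. It assumes net is a trained MultiLayerNetwork whose layer 0 is a variational autoencoder configured with a LossFunctionWrapper; the names net and features are illustrative, not from the source:

import org.deeplearning4j.nn.layers.variational.VariationalAutoencoder;
import org.nd4j.linalg.api.ndarray.INDArray;

//Assumes: MultiLayerNetwork net (trained), INDArray features (input data)
VariationalAutoencoder vae = (VariationalAutoencoder) net.getLayer(0);
if (vae.hasLossFunction()) {
    //One reconstruction error per example, shape [numExamples, 1]
    INDArray errors = vae.reconstructionError(features);
    //e.g., rank examples by error for simple anomaly detection
    double worst = errors.maxNumber().doubleValue();
}
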
 
Example 5
Source File: BaseOutputLayer.java    From deeplearning4j with Apache License 2.0
/**Compute the score for each example individually, after labels and input have been set.
 *
 * @param fullNetRegTerm Regularization score term for the entire network (or, 0.0 to not include regularization)
 * @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
 */
@Override
public INDArray computeScoreForExamples(double fullNetRegTerm, LayerWorkspaceMgr workspaceMgr) {
    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    INDArray preOut = preOutput2d(false, workspaceMgr);

    ILossFunction lossFunction = layerConf().getLossFn();
    INDArray scoreArray =
            lossFunction.computeScoreArray(getLabels2d(workspaceMgr, ArrayType.FF_WORKING_MEM),
                    preOut, layerConf().getActivationFn(), maskArray);
    if (fullNetRegTerm != 0.0) {
        scoreArray.addi(fullNetRegTerm);
    }
    return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, scoreArray);
}
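
The maskArray argument passed through above is optional. A sketch of its effect (mask semantics assumed from the method contract: a per-example mask row of 0 should zero that example's contribution):

import org.nd4j.linalg.activations.impl.ActivationIdentity;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.impl.LossMSE;

public class MaskedScoreExample {
    public static void main(String[] args) {
        INDArray labels = Nd4j.rand(4, 3);
        INDArray preOut = Nd4j.rand(4, 3);
        //Per-example mask of shape [4, 1]: the third example is masked out
        INDArray mask = Nd4j.createFromArray(1f, 1f, 0f, 1f).reshape(4, 1);
        INDArray scores = new LossMSE().computeScoreArray(labels, preOut, new ActivationIdentity(), mask);
        System.out.println(scores); //third entry expected to be 0.0
    }
}
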
 
Example 6
Source File: LossLayer.java    From deeplearning4j with Apache License 2.0
/**Compute the score for each example individually, after labels and input have been set.
 *
 * @param fullNetRegTerm Regularization score term for the entire network (or, 0.0 to not include regularization)
 * @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
 */
@Override
public INDArray computeScoreForExamples(double fullNetRegTerm, LayerWorkspaceMgr workspaceMgr) {
    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    INDArray preOut = input;

    ILossFunction lossFunction = layerConf().getLossFn();
    INDArray scoreArray =
                    lossFunction.computeScoreArray(getLabels2d(), preOut, layerConf().getActivationFn(), maskArray);
    if (fullNetRegTerm != 0.0) {
        scoreArray.addi(fullNetRegTerm);
    }
    return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, scoreArray);
}
 
Example 7
Source File: CnnLossLayer.java    From deeplearning4j with Apache License 2.0
/**
 * Compute the score for each example individually, after labels and input have been set.
 *
 * @param fullNetRegTerm Regularization score term for the entire network (or, 0.0 to not include regularization)
 * @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
 */
@Override
public INDArray computeScoreForExamples(double fullNetRegTerm, LayerWorkspaceMgr workspaceMgr) {
    //For CNN: need to sum up the score over each x/y location before returning

    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());

    CNN2DFormat format = layerConf().getFormat();

    INDArray input2d = ConvolutionUtils.reshape4dTo2d(input, format, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray labels2d = ConvolutionUtils.reshape4dTo2d(labels, format, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray maskReshaped = ConvolutionUtils.reshapeMaskIfRequired(maskArray, input, format, workspaceMgr, ArrayType.FF_WORKING_MEM);

    ILossFunction lossFunction = layerConf().getLossFn();
    INDArray scoreArray =
            lossFunction.computeScoreArray(labels2d, input2d, layerConf().getActivationFn(), maskReshaped);
    //scoreArray: shape [minibatch*h*w, 1]
    //Reshape it to [minibatch, 1, h, w] then sum over x/y to give [minibatch, 1]

    val newShape = input.shape().clone();
    newShape[1] = 1;

    INDArray scoreArrayTs = ConvolutionUtils.reshape2dTo4d(scoreArray, newShape, format, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray summedScores = scoreArrayTs.sum(1,2,3).reshape(scoreArrayTs.size(0), 1);

    if (fullNetRegTerm != 0.0) {
        summedScores.addi(fullNetRegTerm);
    }

    return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, summedScores);
}
 
Example 8
Source File: CompositeReconstructionDistribution.java    From deeplearning4j with Apache License 2.0
private INDArray getScoreArray(ReconstructionDistribution reconstructionDistribution, INDArray dataSubset,
                INDArray reconstructionSubset) {
    if (reconstructionDistribution instanceof LossFunctionWrapper) {
        ILossFunction lossFunction = ((LossFunctionWrapper) reconstructionDistribution).getLossFunction();
        //Re: the activation identity here - the reconstruction array already has the activation function applied,
        // so we don't want to apply it again. i.e., we are passing the output, not the pre-output.
        return lossFunction.computeScoreArray(dataSubset, reconstructionSubset, new ActivationIdentity(), null);
    } else if (reconstructionDistribution instanceof CompositeReconstructionDistribution) {
        return ((CompositeReconstructionDistribution) reconstructionDistribution)
                        .computeLossFunctionScoreArray(dataSubset, reconstructionSubset);
    } else {
        throw new UnsupportedOperationException("Cannot calculate composite reconstruction distribution");
    }
}
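
For context, a configuration sketch showing where a LossFunctionWrapper inside a CompositeReconstructionDistribution comes from (the distribution sizes, activations, and losses here are illustrative assumptions, not a recommended setup):

import org.deeplearning4j.nn.conf.layers.variational.BernoulliReconstructionDistribution;
import org.deeplearning4j.nn.conf.layers.variational.CompositeReconstructionDistribution;
import org.deeplearning4j.nn.conf.layers.variational.LossFunctionWrapper;
import org.deeplearning4j.nn.conf.layers.variational.ReconstructionDistribution;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.activations.impl.ActivationIdentity;
import org.nd4j.linalg.lossfunctions.impl.LossMSE;

//First 10 output values scored with plain MSE (via LossFunctionWrapper),
//the remaining 5 with a Bernoulli reconstruction distribution
ReconstructionDistribution dist = new CompositeReconstructionDistribution.Builder()
        .addDistribution(10, new LossFunctionWrapper(new ActivationIdentity(), new LossMSE()))
        .addDistribution(5, new BernoulliReconstructionDistribution(Activation.SIGMOID))
        .build();
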
 
Example 9
Source File: RnnLossLayer.java    From deeplearning4j with Apache License 2.0
/**Compute the score for each example individually, after labels and input have been set.
 *
 * @param fullNetRegTerm Regularization score term for the entire network (or, 0.0 to not include regularization)
 * @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
 */
@Override
public INDArray computeScoreForExamples(double fullNetRegTerm, LayerWorkspaceMgr workspaceMgr) {
    //For RNN: need to sum up the score over each time step before returning.
    INDArray input = this.input;
    INDArray labels = this.labels;
    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    if (layerConf().getRnnDataFormat() == RNNFormat.NWC){
        //Permute from NWC [minibatch, seqLength, size] to NCW [minibatch, size, seqLength]
        input = input.permute(0, 2, 1);
        labels = labels.permute(0, 2, 1);
    }
    INDArray input2d = TimeSeriesUtils.reshape3dTo2d(input, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray labels2d = TimeSeriesUtils.reshape3dTo2d(labels, workspaceMgr, ArrayType.FF_WORKING_MEM);

    INDArray maskReshaped;
    if(this.maskArray != null){
        if(this.maskArray.rank() == 3){
            maskReshaped = TimeSeriesUtils.reshapePerOutputTimeSeriesMaskTo2d(this.maskArray, workspaceMgr, ArrayType.FF_WORKING_MEM);
        } else {
            maskReshaped = TimeSeriesUtils.reshapeTimeSeriesMaskToVector(this.maskArray, workspaceMgr, ArrayType.FF_WORKING_MEM);
        }
    } else {
        maskReshaped = null;
    }

    ILossFunction lossFunction = layerConf().getLossFn();
    INDArray scoreArray =
            lossFunction.computeScoreArray(labels2d, input2d, layerConf().getActivationFn(), maskReshaped);
    //scoreArray: shape [minibatch*timeSeriesLength, 1]
    //Reshape it to [minibatch, timeSeriesLength] then sum over time step

    INDArray scoreArrayTs = TimeSeriesUtils.reshapeVectorToTimeSeriesMask(scoreArray, (int)input.size(0));
    INDArray summedScores = scoreArrayTs.sum(1);

    if (fullNetRegTerm != 0.0) {
        summedScores.addi(fullNetRegTerm);
    }

    return summedScores;
}
 
Example 10
Source File: Cnn3DLossLayer.java    From deeplearning4j with Apache License 2.0
/**
 * Compute the score for each example individually, after labels and input have been set.
 *
 * @param fullNetRegTerm Regularization score term for the entire network (or, 0.0 to not include regularization)
 * @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
 */
@Override
public INDArray computeScoreForExamples(double fullNetRegTerm, LayerWorkspaceMgr workspaceMgr) {
    //For 3D CNN: need to sum up the score over each x/y/z location before returning

    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());

    INDArray input2d = ConvolutionUtils.reshape5dTo2d(layerConf().getDataFormat(), input, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray labels2d = ConvolutionUtils.reshape5dTo2d(layerConf().getDataFormat(), labels, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray maskReshaped = ConvolutionUtils.reshapeCnn3dMask(layerConf().getDataFormat(), maskArray, input, workspaceMgr, ArrayType.FF_WORKING_MEM);

    ILossFunction lossFunction = layerConf().getLossFn();
    INDArray scoreArray =
            lossFunction.computeScoreArray(labels2d, input2d, layerConf().getActivationFn(), maskReshaped);
    //scoreArray: shape [minibatch*d*h*w, 1]
    //Reshape it to [minibatch, 1, d, h, w] then sum over x/y/z to give [minibatch, 1]

    long n = input.size(0);
    long d, h, w, c;
    if(layerConf().getDataFormat() == Convolution3D.DataFormat.NDHWC){
        d = input.size(1);
        h = input.size(2);
        w = input.size(3);
        c = input.size(4);
    } else {
        d = input.size(2);
        h = input.size(3);
        w = input.size(4);
        c = input.size(1);
    }
    INDArray scoreArrayTs = ConvolutionUtils.reshape2dTo5d(layerConf().getDataFormat(), scoreArray, n, d, h, w, c, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray summedScores = scoreArrayTs.sum(1,2,3,4);

    if (fullNetRegTerm != 0.0) {
        summedScores.addi(fullNetRegTerm);
    }

    return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, summedScores);
}