Java Code Examples for org.nd4j.linalg.lossfunctions.ILossFunction#computeScore()

The following examples show how to use org.nd4j.linalg.lossfunctions.ILossFunction#computeScore(). Each example is taken from the deeplearning4j project; the source file is noted above each one.
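
Before the project examples, here is a minimal self-contained sketch of calling computeScore() directly. The class name and the toy arrays are illustrative; the call uses the five-argument signature (labels, pre-output, activation function, mask, average flag) that all of the examples below rely on.

import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.ILossFunction;
import org.nd4j.linalg.lossfunctions.impl.LossMCXENT;

public class ComputeScoreSketch {
    public static void main(String[] args) {
        // One-hot labels and pre-activation output for a batch of 2 examples, 3 classes
        INDArray labels = Nd4j.create(new double[][] {{1, 0, 0}, {0, 1, 0}});
        INDArray preOutput = Nd4j.create(new double[][] {{2.0, 0.5, 0.1}, {0.3, 1.7, 0.2}});

        ILossFunction lossFunction = new LossMCXENT();

        // The activation is applied to preOutput inside computeScore;
        // average = true divides the summed loss by the minibatch size
        double score = lossFunction.computeScore(labels, preOutput,
                Activation.SOFTMAX.getActivationFunction(), null, true);
        System.out.println("score = " + score);
    }
}
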
Example 1
Source File: OCNNOutputLayer.java    From deeplearning4j with Apache License 2.0
/** Compute score after labels and input have been set.
 * @param fullNetRegTerm Regularization score term for the entire network
 * @param training whether score should be calculated at train or test time (this affects things like application of
 *                 dropout, etc)
 * @return score (loss function)
 */
@Override
public double computeScore(double fullNetRegTerm, boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (input == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    INDArray preOut = preOutput2d(training, workspaceMgr);

    ILossFunction lossFunction = layerConf().getLossFn();

    double score = lossFunction.computeScore(getLabels2d(workspaceMgr, ArrayType.FF_WORKING_MEM), preOut,
            layerConf().getActivationFn(), maskArray, false);
    if(conf().isMiniBatch())
        score /= getInputMiniBatchSize();

    score += fullNetRegTerm;
    this.score = score;
    return score;
}
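
As the javadoc above notes, this method is called after labels and input have been set; in practice you rarely invoke it yourself. A minimal caller-side sketch (the network configuration and shapes are illustrative) showing how it is reached via MultiLayerNetwork.score(...):

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class NetworkScoreSketch {
    public static void main(String[] args) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .list()
                .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(4).nOut(3)
                        .activation(Activation.SOFTMAX)
                        .build())
                .build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray features = Nd4j.rand(2, 4);
        INDArray labels = Nd4j.create(new double[][] {{1, 0, 0}, {0, 1, 0}});

        // score(...) runs a forward pass, then calls computeScore(...) on the
        // output layer, passing the regularization term for the whole network
        double score = net.score(new DataSet(features, labels));
        System.out.println("score = " + score);
    }
}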
 
Example 2
Source File: LossLayer.java    From deeplearning4j with Apache License 2.0
/** Compute score after labels and input have been set.
 * @param fullNetRegTerm Regularization score term for the entire network
 * @param training whether score should be calculated at train or test time (this affects things like application of
 *                 dropout, etc)
 * @return score (loss function)
 */
@Override
public double computeScore(double fullNetRegTerm, boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    this.fullNetworkRegularizationScore = fullNetRegTerm;
    INDArray preOut = input;

    ILossFunction lossFunction = layerConf().getLossFn();

    double score = lossFunction.computeScore(getLabels2d(), preOut, layerConf().getActivationFn(), maskArray,
                    false);
    score /= getInputMiniBatchSize();
    score += fullNetworkRegularizationScore;

    this.score = score;
    return score;
}
 
Example 3
Source File: BaseOutputLayer.java    From deeplearning4j with Apache License 2.0
/** Compute score after labels and input have been set.
 * @param fullNetRegTerm Regularization score term for the entire network
 * @param training whether score should be calculated at train or test time (this affects things like application of
 *                 dropout, etc)
 * @return score (loss function)
 */
@Override
public double computeScore(double fullNetRegTerm, boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    this.fullNetRegTerm = fullNetRegTerm;
    INDArray preOut = preOutput2d(training, workspaceMgr);

    ILossFunction lossFunction = layerConf().getLossFn();

    INDArray labels2d = getLabels2d(workspaceMgr, ArrayType.FF_WORKING_MEM);
    double score = lossFunction.computeScore(labels2d, preOut,
            layerConf().getActivationFn(), maskArray, false);

    if(conf().isMiniBatch())
        score /= getInputMiniBatchSize();

    score += fullNetRegTerm;

    this.score = score;
    return score;
}
 
Example 4
Source File: RnnLossLayer.java    From deeplearning4j with Apache License 2.0
@Override
public double computeScore(double fullNetRegTerm, boolean training, LayerWorkspaceMgr workspaceMgr) {
    INDArray input = this.input;
    INDArray labels = this.labels;
    if (layerConf().getRnnDataFormat() == RNNFormat.NWC) {
        //Permute both arrays from NWC to NCW before reshaping to 2d
        input = input.permute(0, 2, 1);
        labels = labels.permute(0, 2, 1);
    }
    INDArray input2d = TimeSeriesUtils.reshape3dTo2d(input, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray labels2d = TimeSeriesUtils.reshape3dTo2d(labels, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray maskReshaped;
    if(this.maskArray != null){
        if(this.maskArray.rank() == 3){
            maskReshaped = TimeSeriesUtils.reshapePerOutputTimeSeriesMaskTo2d(this.maskArray, workspaceMgr, ArrayType.FF_WORKING_MEM);
        } else {
            maskReshaped = TimeSeriesUtils.reshapeTimeSeriesMaskToVector(this.maskArray, workspaceMgr, ArrayType.FF_WORKING_MEM);
        }
    } else {
        maskReshaped = null;
    }

    ILossFunction lossFunction = layerConf().getLossFn();

    double score = lossFunction.computeScore(labels2d, input2d.dup(), layerConf().getActivationFn(), maskReshaped, false);
    score /= getInputMiniBatchSize();
    score += fullNetRegTerm;

    this.score = score;

    return score;
}
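
The reshape3dTo2d calls above flatten the time dimension into the batch dimension so the loss is computed per time step. A rough standalone illustration of the shape change (the exact copy order inside TimeSeriesUtils may differ):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

import java.util.Arrays;

public class Reshape3dTo2dSketch {
    public static void main(String[] args) {
        int miniBatch = 2, channels = 4, timeSteps = 5;
        // NCW layout: [miniBatch, channels, timeSteps]
        INDArray in3d = Nd4j.rand(new int[] {miniBatch, channels, timeSteps});

        // Conceptually what TimeSeriesUtils.reshape3dTo2d does: move time next to
        // the batch dimension, then flatten to [miniBatch * timeSteps, channels]
        INDArray in2d = in3d.permute(0, 2, 1).dup('c').reshape(miniBatch * timeSteps, channels);
        System.out.println(Arrays.toString(in2d.shape())); // [10, 4]
    }
}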
 
Example 5
Source File: BasePretrainNetwork.java    From deeplearning4j with Apache License 2.0
@Override
protected void setScoreWithZ(INDArray z) {
    if (input == null || z == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    ILossFunction lossFunction = layerConf().getLossFunction().getILossFunction();

    double score = lossFunction.computeScore(input, z, layerConf().getActivationFn(), maskArray, false);
    score /= getInputMiniBatchSize();
    score += calcRegularizationScore(false);

    this.score = score;
}
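
Here the input itself serves as the label: a pretrain (autoencoder-style) layer scores how well the pre-activation z reconstructs its input. A minimal sketch of that idea, assuming an MSE loss and sigmoid activation (both illustrative choices):

import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.ILossFunction;
import org.nd4j.linalg.lossfunctions.impl.LossMSE;

public class ReconstructionScoreSketch {
    public static void main(String[] args) {
        INDArray input = Nd4j.rand(3, 8);   // original activations
        INDArray z = Nd4j.rand(3, 8);       // pre-activation reconstruction
        ILossFunction lossFunction = new LossMSE();

        // As in Example 5: the input doubles as the label, so the score measures
        // how well the activated z reconstructs the input
        double score = lossFunction.computeScore(input, z,
                Activation.SIGMOID.getActivationFunction(), null, false);
        score /= input.size(0);             // per-example mean, as in setScoreWithZ
        System.out.println("reconstruction score = " + score);
    }
}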
 
Example 6
Source File: CenterLossOutputLayer.java    From deeplearning4j with Apache License 2.0
/** Compute score after labels and input have been set.
 * @param fullNetRegTerm Regularization score term for the entire network
 * @param training whether score should be calculated at train or test time (this affects things like application of
 *                 dropout, etc)
 * @return score (loss function)
 */
@Override
public double computeScore(double fullNetRegTerm, boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    this.fullNetRegTerm = fullNetRegTerm;
    INDArray preOut = preOutput2d(training, workspaceMgr);

    // center loss has two components:
    // the first (the configured loss function) enforces inter-class separation; the second
    // penalizes intra-class spread, i.e. the squared L2 distance of each example from its
    // class center (see the formula after this example)
    ILossFunction interClassLoss = layerConf().getLossFn();

    // calculate the intra-class score component
    INDArray centers = params.get(CenterLossParamInitializer.CENTER_KEY);
    INDArray l = labels.castTo(centers.dataType()); //Ensure correct dtype (same as params); no-op if already correct dtype
    INDArray centersForExamples = l.mmul(centers);

    INDArray norm2DifferenceSquared = input.sub(centersForExamples).norm2(1);
    norm2DifferenceSquared.muli(norm2DifferenceSquared);

    double sum = norm2DifferenceSquared.sumNumber().doubleValue();
    double lambda = layerConf().getLambda();
    double intraClassScore = lambda / 2.0 * sum;

    // now calculate the inter-class score component
    double interClassScore = interClassLoss.computeScore(getLabels2d(workspaceMgr, ArrayType.FF_WORKING_MEM), preOut, layerConf().getActivationFn(),
                    maskArray, false);

    double score = interClassScore + intraClassScore;

    score /= getInputMiniBatchSize();
    score += fullNetRegTerm;

    this.score = score;
    return score;
}
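
The two components computed above correspond to the center loss objective (Wen et al., 2016), with the configured loss function, typically softmax cross-entropy, as the inter-class term:

L = L_{\text{inter}} + \frac{\lambda}{2} \sum_{i=1}^{m} \lVert x_i - c_{y_i} \rVert_2^2

where x_i is the i-th example's feature vector, c_{y_i} the center of its class, and m the minibatch size.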
 
Example 7
Source File: CnnLossLayer.java    From deeplearning4j with Apache License 2.0
@Override
public double computeScore(double fullNetRegTerm, boolean training, LayerWorkspaceMgr workspaceMgr) {
    INDArray input2d = ConvolutionUtils.reshape4dTo2d(input, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray labels2d = ConvolutionUtils.reshape4dTo2d(labels, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray maskReshaped = ConvolutionUtils.reshapeMaskIfRequired(maskArray, input, layerConf().getFormat(), workspaceMgr, ArrayType.FF_WORKING_MEM);

    ILossFunction lossFunction = layerConf().getLossFn();

    double score = lossFunction.computeScore(labels2d, input2d.dup(), layerConf().getActivationFn(), maskReshaped, false);
    score /= getInputMiniBatchSize();
    score += fullNetRegTerm;
    this.score = score;
    return score;
}
 
Example 8
Source File: Cnn3DLossLayer.java    From deeplearning4j with Apache License 2.0
@Override
public double computeScore(double fullNetRegTerm, boolean training, LayerWorkspaceMgr workspaceMgr) {
    INDArray input2d = ConvolutionUtils.reshape5dTo2d(layerConf().getDataFormat(), input, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray labels2d = ConvolutionUtils.reshape5dTo2d(layerConf().getDataFormat(), labels, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray maskReshaped = ConvolutionUtils.reshapeCnn3dMask(layerConf().getDataFormat(), maskArray, input, workspaceMgr, ArrayType.FF_WORKING_MEM);

    ILossFunction lossFunction = layerConf().getLossFn();

    double score = lossFunction.computeScore(labels2d, input2d.dup(), layerConf().getActivationFn(), maskReshaped, false);
    score /= getInputMiniBatchSize();
    score += fullNetRegTerm;
    this.score = score;
    return score;
}
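
Across all eight examples the surrounding logic is the same: compute the summed loss, divide by the minibatch size, then add the network-wide regularization term. A paraphrased helper capturing that shared pattern (the class and method names are illustrative, not library code):

import org.nd4j.linalg.activations.IActivation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.lossfunctions.ILossFunction;

public final class ScorePattern {
    private ScorePattern() {}

    /** The scoring pattern shared by the layer implementations above. */
    public static double score(ILossFunction lossFunction, INDArray labels2d, INDArray preOutput,
                               IActivation activationFn, INDArray mask,
                               long miniBatchSize, double fullNetRegTerm) {
        // average = false: computeScore returns the sum of per-example losses
        double score = lossFunction.computeScore(labels2d, preOutput, activationFn, mask, false);
        score /= miniBatchSize;   // convert the sum to a per-example mean
        score += fullNetRegTerm;  // add the regularization term for the whole network
        return score;
    }
}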