Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#sum()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#sum(). Each example is taken from an open source project; the source file, the project it comes from, and its license are noted above the example.
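
Before the project examples, here is a minimal self-contained sketch of the sum() overloads that appear throughout this page: the whole-array sum, the sum along a dimension, and the keep-dimensions variant. It is not taken from any of the projects below; the class name, array values, and variable names are illustrative assumptions.

import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class SumOverloadsSketch {
    public static void main(String[] args) {
        // 2x3 matrix: [[1, 2, 3], [4, 5, 6]]
        INDArray m = Nd4j.linspace(1, 6, 6, DataType.DOUBLE).reshape(2, 3);

        INDArray total    = m.sum();         // sum of all elements: 21.0 (returned as a scalar INDArray)
        INDArray colSums  = m.sum(0);        // per-column sums: [5.0, 7.0, 9.0]
        INDArray rowSums  = m.sum(1);        // per-row sums: [6.0, 15.0]
        INDArray rowSumsK = m.sum(true, 1);  // keepDims=true: per-row sums with the reduced axis kept, shape [2, 1]

        System.out.println(total);
        System.out.println(colSums);
        System.out.println(rowSums);
        System.out.println(rowSumsK);
    }
}

Whether the dimension-wise overloads keep the reduced axis in the result shape has varied across ND4J versions; the keepDims overload, sum(true, dimension), which several of the examples below use, makes that explicit.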
Example 1
Source File: SpecialWorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testAlignment_1() {
    WorkspaceConfiguration initialConfig = WorkspaceConfiguration.builder().initialSize(10 * 1024L * 1024L)
            .policyAllocation(AllocationPolicy.STRICT).policyLearning(LearningPolicy.NONE).build();
    MemoryWorkspace workspace = Nd4j.getWorkspaceManager().getAndActivateWorkspace(initialConfig, "WS132143452343");

    for (int j = 0; j < 100; j++) {

        try (MemoryWorkspace ws = workspace.notifyScopeEntered()) {

            for (int x = 0; x < 10; x++) {
                //System.out.println("Start iteration (" + j + "," + x + ")");
                INDArray arr = Nd4j.linspace(1,10,10, DataType.DOUBLE).reshape(1,10);
                INDArray sum = arr.sum(true, 1);
                Nd4j.create(DataType.BOOL, x+1);        //NOTE: no crash if set to FLOAT/HALF, No crash if removed entirely; same crash for BOOL/UBYTE
                //System.out.println("End iteration (" + j + "," + x + ")");
            }
        }
    }
}
 
Example 2
Source File: CudaAccumTests.java    From nd4j with Apache License 2.0
@Test
public void testSum5() {
    INDArray n = Nd4j.linspace(1, 1000, 128000).reshape(128, 1000);


    INDArray sum = n.sum(new int[]{1});
    INDArray sum2 = n.sum(new int[]{-1});
    INDArray sum3 = n.sum(new int[]{0});

    System.out.println("elementWiseStride: " + n.elementWiseStride());
    System.out.println("elementStride: " + n.elementStride());

    assertEquals(4898.4707f, sum.getFloat(0), 0.01f);
    assertEquals(12703.209f, sum.getFloat(1), 0.01f);
    assertEquals(sum, sum2);
    assertNotEquals(sum, sum3);
    assertEquals(63565.023f, sum3.getFloat(0), 0.01f);
    assertEquals(63570.008f, sum3.getFloat(5), 0.01f);
}
 
Example 3
Source File: AutoEncoder.java    From deeplearning4j with Apache License 2.0
@Override
public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) {
    INDArray W = getParamWithNoise(PretrainParamInitializer.WEIGHT_KEY, true, workspaceMgr);
    INDArray input = this.input.castTo(dataType);

    double corruptionLevel = layerConf().getCorruptionLevel();

    INDArray corruptedX = corruptionLevel > 0 ? getCorruptedInput(input, corruptionLevel) : input;
    setInput(corruptedX, workspaceMgr);

    INDArray y = encode(corruptedX, true, workspaceMgr);
    INDArray z = decode(y, workspaceMgr);

    INDArray visibleLoss = input.sub(z);
    INDArray hiddenLoss = layerConf().getSparsity() == 0 ? visibleLoss.mmul(W).muli(y).muli(y.rsub(1))
                    : visibleLoss.mmul(W).muli(y).muli(y.add(-layerConf().getSparsity()));

    INDArray wGradient = corruptedX.transpose().mmul(hiddenLoss).addi(visibleLoss.transpose().mmul(y));
    INDArray hBiasGradient = hiddenLoss.sum(0);
    INDArray vBiasGradient = visibleLoss.sum(0);

    gradient = createGradient(wGradient, vBiasGradient, hBiasGradient);
    setScoreWithZ(z);

}
 
Example 4
Source File: TransformOpValidation.java    From deeplearning4j with Apache License 2.0
@Test
public void testLogSumExp() {
    Nd4j.getRandom().setSeed(12345);
    INDArray inputArr = Nd4j.rand(DataType.FLOAT, 1, 4);
    SameDiff sd = SameDiff.create();
    SDVariable in = sd.var(inputArr);
    SDVariable lse = sd.math().logSumExp(in);
    INDArray out = lse.eval();

    INDArray exp = Transforms.exp(inputArr, true);
    INDArray sum = exp.sum();
    INDArray log = Transforms.log(sum);
    assertEquals(log, out);
}
 
Example 5
Source File: SpecialTests.java    From nd4j with Apache License 2.0
protected static INDArray transform(INDArray a, INDArray b) {
    int[] nShape = new int[] {1, 2};
    INDArray a_reduced = a.sum(nShape);
    INDArray b_reduced = b.sum(nShape);

    //log.info("reduced shape: {}", Arrays.toString(a_reduced.shapeInfoDataBuffer().asInt()));

    return Transforms.abs(a_reduced.sub(b_reduced)).div(a_reduced);
}
 
Example 6
Source File: TestGraphNodes.java    From deeplearning4j with Apache License 2.0
@Test
public void testDuplicateToTimeSeriesVertex() {

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
                    .addInputs("in2d", "in3d")
                    .addVertex("duplicateTS", new DuplicateToTimeSeriesVertex("in3d"), "in2d")
                    .addLayer("out", new OutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build(), "duplicateTS")
                    .addLayer("out3d", new RnnOutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build(), "in3d")
                    .setOutputs("out", "out3d").build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray in2d = Nd4j.rand(3, 5);
    INDArray in3d = Nd4j.rand(new int[] {3, 2, 7});

    graph.setInputs(in2d, in3d);

    INDArray expOut = Nd4j.zeros(3, 5, 7);
    for (int i = 0; i < 7; i++) {
        expOut.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(i)}, in2d);
    }

    GraphVertex gv = graph.getVertex("duplicateTS");
    gv.setInputs(in2d);
    INDArray outFwd = gv.doForward(true, LayerWorkspaceMgr.noWorkspaces());
    assertEquals(expOut, outFwd);

    INDArray expOutBackward = expOut.sum(2);
    gv.setEpsilon(expOut);
    INDArray outBwd = gv.doBackward(false, LayerWorkspaceMgr.noWorkspaces()).getSecond()[0];
    assertEquals(expOutBackward, outBwd);

    String json = conf.toJson();
    ComputationGraphConfiguration conf2 = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, conf2);
}
 
Example 7
Source File: AtomicAllocatorTest.java    From nd4j with Apache License 2.0
@Override
public void run() {
    log.info(this.getName() + "/"+ this.getId() + " started on device ["+AtomicAllocator.getInstance().getDeviceId()+"]");
    AtomicLong cnt = new AtomicLong(0);
    AtomicLong cntX = new AtomicLong(0);
    while(true) {
        INDArray sum = null;
        INDArray array1 = Nd4j.create(Nd4j.linspace(1, 8, 8).data(), new int[]{2, 2, 2});
        INDArray array2 = Nd4j.create(new float[]{3, 7, 11, 15}, new int[]{2, 2});

        long time1 = 0;
        long time2 = 0;
        for (int x = 0; x < 30; x++) {
            time1 = System.nanoTime();
            sum = array1.sum(-1);
            time2 = System.nanoTime();
            cntX.incrementAndGet();
        }

        if (cnt.incrementAndGet() % 1000 == 0) {
            log.info("SUM(-1) execution time: [" + (time2 - time1) + "] ns on device ["+ allocator.getDeviceId(array1)+"]");

            assertEquals(array2, sum);

            if (threadId == 0) {
                log.info("Total calls: " + cntX.get() * 4);
                log.info("Total memory allocated on device [0]: " + allocator.getTotalAllocatedDeviceMemory(0));
            }

            try {
                Thread.sleep(5000);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }
}
 
Example 8
Source File: LogSoftMaxDerivative.java    From nd4j with Apache License 2.0
@Override
public void exec() {
    //TODO add dimension arg. For now: hardcoded along dimension 1...
    INDArray softmax = Transforms.softmax(x, true);
    INDArray mul = softmax.mul(y);
    INDArray summed = mul.sum(1);
    Nd4j.getExecutioner().exec(new BroadcastSubOp(y,summed,z,0));
}
 
Example 9
Source File: ShapeTest.java    From nd4j with Apache License 2.0
private void testSumHelper(int... shape) {
    INDArray array = Nd4j.ones(shape);
    for (int i = 0; i < shape.length; i++) {
        for (int j = 0; j < array.vectorsAlongDimension(i); j++) {
            INDArray vec = array.vectorAlongDimension(j, i);
        }
        array.sum(i);
    }
}
 
Example 10
Source File: ShapeTestsC.java    From nd4j with Apache License 2.0
@Test
public void testColumnSum() {
    INDArray matrix = Nd4j.linspace(1, 600, 600).reshape(150, 4);
    INDArray columnSum = matrix.sum(0);
    INDArray assertion = Nd4j.create(new float[] {44850.0f, 45000.0f, 45150.0f, 45300.0f});
    assertEquals(getFailureMessage(), assertion, columnSum);

}
 
Example 11
Source File: LossL2.java    From nd4j with Apache License 2.0
@Override
public INDArray computeScoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);
    return scoreArr.sum(1);
}
 
Example 12
Source File: LossMCXENT.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if(!labels.equalShapes(preOutput)){
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    INDArray grad;
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype

    if (activationFn instanceof ActivationSoftmax) {

        if (mask != null && LossUtil.isPerOutputMasking(output, mask)) {
            throw new UnsupportedOperationException("Per output masking for MCXENT + softmax: not supported");
        }

        //Weighted loss function
        if (weights != null) {
            if (weights.length() != output.size(1)) {
                throw new IllegalStateException("Weights vector (length " + weights.length()
                                + ") does not match output.size(1)=" + output.size(1));
            }
            INDArray temp = labels.mulRowVector(weights.castTo(labels.dataType()));
            INDArray col = temp.sum(true,1);
            grad = output.mulColumnVector(col).sub(temp);
        } else {
            grad = output.subi(labels);
        }
    } else {
        INDArray dLda = output.rdivi(labels).negi();

        grad = activationFn.backprop(preOutput, dLda).getFirst(); //TODO activation function with weights

        //Weighted loss function
        if (weights != null) {
            if (weights.length() != output.size(1)) {
                throw new IllegalStateException("Weights vector (length " + weights.length()
                                + ") does not match output.size(1)=" + output.size(1));
            }
            grad.muliRowVector(weights.castTo(grad.dataType()));
        }
    }

    //Loss function with masking
    if (mask != null) {
        LossUtil.applyMask(grad, mask);
    }

    return grad;
}
 
Example 13
Source File: RnnLossLayer.java    From deeplearning4j with Apache License 2.0
/**Compute the score for each example individually, after labels and input have been set.
 *
 * @param fullNetRegTerm Regularization score term for the entire network (or, 0.0 to not include regularization)
 * @return A column INDArray of shape [numExamples,1], where entry i is the score of the ith example
 */
@Override
public INDArray computeScoreForExamples(double fullNetRegTerm, LayerWorkspaceMgr workspaceMgr) {
    //For RNN: need to sum up the score over each time step before returning.
    INDArray input = this.input;
    INDArray labels = this.labels;
    if (input == null || labels == null)
        throw new IllegalStateException("Cannot calculate score without input and labels " + layerId());
    if (layerConf().getRnnDataFormat() == RNNFormat.NWC){
        input = input.permute(0, 2, 1);
        labels = labels.permute(0, 2, 1);
    }
    INDArray input2d = TimeSeriesUtils.reshape3dTo2d(input, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray labels2d = TimeSeriesUtils.reshape3dTo2d(labels, workspaceMgr, ArrayType.FF_WORKING_MEM);

    INDArray maskReshaped;
    if(this.maskArray != null){
        if(this.maskArray.rank() == 3){
            maskReshaped = TimeSeriesUtils.reshapePerOutputTimeSeriesMaskTo2d(this.maskArray, workspaceMgr, ArrayType.FF_WORKING_MEM);
        } else {
            maskReshaped = TimeSeriesUtils.reshapeTimeSeriesMaskToVector(this.maskArray, workspaceMgr, ArrayType.FF_WORKING_MEM);
        }
    } else {
        maskReshaped = null;
    }

    ILossFunction lossFunction = layerConf().getLossFn();
    INDArray scoreArray =
            lossFunction.computeScoreArray(labels2d, input2d, layerConf().getActivationFn(), maskReshaped);
    //scoreArray: shape [minibatch*timeSeriesLength, 1]
    //Reshape it to [minibatch, timeSeriesLength] then sum over time step

    INDArray scoreArrayTs = TimeSeriesUtils.reshapeVectorToTimeSeriesMask(scoreArray, (int)input.size(0));
    INDArray summedScores = scoreArrayTs.sum(1);

    if (fullNetRegTerm != 0.0) {
        summedScores.addi(fullNetRegTerm);
    }

    return summedScores;
}
 
Example 14
Source File: BaseLayer.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    //If this layer is layer L, then epsilon is (w^(L+1)*(d^(L+1))^T) (or equivalent)
    Pair<INDArray, INDArray> zAndPreNorm = preOutputWithPreNorm(true, true, workspaceMgr);
    INDArray z = zAndPreNorm.getFirst(); //Note: using preOutput(INDArray) can't be used as this does a setInput(input) and resets the 'appliedDropout' flag
    INDArray preNorm = zAndPreNorm.getSecond();
    INDArray delta = layerConf().getActivationFn().backprop(z, epsilon).getFirst(); //TODO handle activation function params

    if (maskArray != null) {
        applyMask(delta);
    }

    Gradient ret = new DefaultGradient();

    if(hasBias()){
        INDArray biasGrad = gradientViews.get(DefaultParamInitializer.BIAS_KEY);
        delta.sum(biasGrad, 0); //biasGrad is initialized/zeroed first
        ret.gradientForVariable().put(DefaultParamInitializer.BIAS_KEY, biasGrad);
    }

    INDArray W = getParamWithNoise(DefaultParamInitializer.WEIGHT_KEY, true, workspaceMgr);

    INDArray epsilonNext = workspaceMgr.createUninitialized(ArrayType.ACTIVATION_GRAD, delta.dataType(), new long[]{W.size(0), delta.size(0)}, 'f');
    if(hasLayerNorm()) {
        INDArray g = getParam(DefaultParamInitializer.GAIN_KEY);

        INDArray dldg = gradientViews.get(DefaultParamInitializer.GAIN_KEY);
        Nd4j.getExecutioner().exec(new LayerNormBp(preNorm, g, delta, delta, dldg, true, 1));
        ret.gradientForVariable().put(DefaultParamInitializer.GAIN_KEY, dldg);

    }

    epsilonNext = W.mmuli(delta.transpose(),epsilonNext).transpose();   //W.mmul(delta.transpose()).transpose();

    INDArray weightGrad = gradientViews.get(DefaultParamInitializer.WEIGHT_KEY); //f order
    Nd4j.gemm(input.castTo(weightGrad.dataType()), delta, weightGrad, true, false, 1.0, 0.0);           //TODO avoid castTo?
    ret.gradientForVariable().put(DefaultParamInitializer.WEIGHT_KEY, weightGrad);

    weightNoiseParams.clear();

    epsilonNext = backpropDropOutIfPresent(epsilonNext);
    return new Pair<>(ret, epsilonNext);
}
 
Example 15
Source File: LossL1.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray computeScoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);
    return scoreArr.sum(true,1);
}
 
Example 16
Source File: OpExecutionerTestsC.java    From nd4j with Apache License 2.0
@Test
public void testMean() {
    int[] shape = new int[] {1, 2, 2, 2, 2, 2};
    int len = ArrayUtil.prod(shape);
    INDArray val = Nd4j.linspace(1, len, len).reshape('c', shape);
    /**
     * Failure comes from the lack of a jump
     * when doing tad offset in c++
     *
     * We need to jump from the last element rather than the
     * first for the next element.
     *
     * This happens when the index for a tad is >= the
     * stride[0]
     *
     * When the index is >= a stride[0] then you take
     * the offset at the end of the tad and use that +
     * (possibly the last stride?)
     * to get to the next offset.
     *
     * In order to get to the last element for a jump, just iterate
     * over the tad (coordinate wise) to get the coordinate pair +
     * offset at which to do compute.
     *
     * Another possible solution is to create an initialize pointer
     * method that will just set up the tad pointer directly.
     * Right now it is a simplistic base pointer + offset that
     * we could turn into an init method instead.
     * This would allow us to use coordinate-based techniques
     * on the pointer directly. The proposal here
     * would then be turning the tad offset, given an index,
     * into a pointer initialization method which
     * will auto-insert the pointer at the right index.
     */
    INDArray sum = val.sum(2, 3);
    double[] assertionData = new double[] {28.0, 32.0, 36.0, 40.0, 92.0, 96.0, 100.0, 104.0};

    INDArray avgExpected = Nd4j.create(assertionData).reshape(1, 2, 2, 2);

    assertEquals(avgExpected, sum);
}
 
Example 17
Source File: LossMSLE.java    From nd4j with Apache License 2.0
@Override
public INDArray computeScoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);
    return scoreArr.sum(1);
}
 
Example 18
Source File: LossCosineProximity.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray computeScoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);
    return scoreArr.sum(true,1);
}
 
Example 19
Source File: LossMultiLabel.java    From deeplearning4j with Apache License 2.0
private void calculate(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask, INDArray scoreOutput, INDArray gradientOutput) {
    if (scoreOutput == null && gradientOutput == null) {
        throw new IllegalArgumentException("You have to provide at least one of scoreOutput or gradientOutput!");
    }
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    final INDArray postOutput = activationFn.getActivation(preOutput.dup(), true);

    final INDArray positive = labels;
    final INDArray negative = labels.eq(0.0).castTo(Nd4j.defaultFloatingPointType());
    final INDArray normFactor = negative.sum(true,1).castTo(Nd4j.defaultFloatingPointType()).muli(positive.sum(true,1));


    long examples = positive.size(0);
    for (int i = 0; i < examples; i++) {
        final INDArray locCfn = postOutput.getRow(i, true);
        final long[] shape = locCfn.shape();

        final INDArray locPositive = positive.getRow(i, true);
        final INDArray locNegative = negative.getRow(i, true);
        final Double locNormFactor = normFactor.getDouble(i);

        final int outSetSize = locNegative.sumNumber().intValue();
        if(outSetSize == 0 || outSetSize == locNegative.columns()){
            if (scoreOutput != null) {
                scoreOutput.getRow(i, true).assign(0);
            }

            if (gradientOutput != null) {
                gradientOutput.getRow(i, true).assign(0);
            }
        }else {
            final INDArray operandA = Nd4j.ones(shape[1], shape[0]).mmul(locCfn);
            final INDArray operandB = operandA.transpose();

            final INDArray pairwiseSub = Transforms.exp(operandA.sub(operandB));

            final INDArray selection = locPositive.transpose().mmul(locNegative);

            final INDArray classificationDifferences = pairwiseSub.muli(selection).divi(locNormFactor);

            if (scoreOutput != null) {
                if (mask != null) {
                    final INDArray perLabel = classificationDifferences.sum(0);
                    LossUtil.applyMask(perLabel, mask.getRow(i, true));
                    perLabel.sum(scoreOutput.getRow(i, true), 0);
                } else {
                    classificationDifferences.sum(scoreOutput.getRow(i, true), 0, 1);
                }
            }

            if (gradientOutput != null) {
                gradientOutput.getRow(i, true).assign(classificationDifferences.sum(true, 0).addi(classificationDifferences.sum(true,1).transposei().negi()));
            }
        }
    }

    if (gradientOutput != null) {
        gradientOutput.assign(activationFn.backprop(preOutput.dup(), gradientOutput).getFirst());
        //multiply with masks, always
        if (mask != null) {
            LossUtil.applyMask(gradientOutput, mask);
        }
    }
}
 
Example 20
Source File: ROCTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testCompare2Vs3Classes() {

    //ROC multi-class: 2 vs. 3 classes should be the same, if we add two of the classes together...
    //Both methods implement one vs. all ROC/AUC in different ways

    int nExamples = 200;
    INDArray predictions3 = Nd4j.rand(nExamples, 3);
    INDArray tempSum = predictions3.sum(1);
    predictions3.diviColumnVector(tempSum);

    INDArray labels3 = Nd4j.create(nExamples, 3);
    Random r = new Random(12345);
    for (int i = 0; i < nExamples; i++) {
        labels3.putScalar(i, r.nextInt(3), 1.0);
    }

    INDArray predictions2 = Nd4j.zeros(nExamples, 2);
    predictions2.getColumn(0).assign(predictions3.getColumn(0));
    predictions2.getColumn(0).addi(predictions3.getColumn(1));
    predictions2.getColumn(1).addi(predictions3.getColumn(2));

    INDArray labels2 = Nd4j.zeros(nExamples, 2);
    labels2.getColumn(0).assign(labels3.getColumn(0));
    labels2.getColumn(0).addi(labels3.getColumn(1));
    labels2.getColumn(1).addi(labels3.getColumn(2));

    for (int numSteps : new int[] {30, 0}) { //Steps = 0: exact

        ROCMultiClass rocMultiClass3 = new ROCMultiClass(numSteps);
        ROCMultiClass rocMultiClass2 = new ROCMultiClass(numSteps);

        rocMultiClass3.eval(labels3, predictions3);
        rocMultiClass2.eval(labels2, predictions2);

        double auc3 = rocMultiClass3.calculateAUC(2);
        double auc2 = rocMultiClass2.calculateAUC(1);

        assertEquals(auc2, auc3, 1e-6);

        RocCurve c3 = rocMultiClass3.getRocCurve(2);
        RocCurve c2 = rocMultiClass2.getRocCurve(1);

        assertArrayEquals(c2.getThreshold(), c3.getThreshold(), 1e-6);
        assertArrayEquals(c2.getFpr(), c3.getFpr(), 1e-6);
        assertArrayEquals(c2.getTpr(), c3.getTpr(), 1e-6);
    }
}