Java Code Examples for org.deeplearning4j.nn.multilayer.MultiLayerNetwork#fit()

The following examples show how to use org.deeplearning4j.nn.multilayer.MultiLayerNetwork#fit(). They are drawn from open-source projects; the source file, originating project, and license for each example are listed above its code. A short sketch of the fit() overloads the examples exercise appears directly below.
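
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the fit() overloads they exercise: fit(INDArray, INDArray), fit(DataSet), fit(DataSetIterator), and fit(DataSetIterator, int numEpochs). The tiny network and the random data are purely illustrative, and helper classes such as ExistingDataSetIterator are assumed to live in the packages used by recent DL4J releases; adjust the imports for your version.

import java.util.Arrays;
import java.util.List;

import org.deeplearning4j.datasets.iterator.ExistingDataSetIterator;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.learning.config.Sgd;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class MultiLayerNetworkFitSketch {

    public static void main(String[] args) {
        // Small dense network: 4 inputs -> 3 tanh hidden units -> 3 softmax outputs
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .weightInit(WeightInit.XAVIER)
                .updater(new Sgd(0.1))
                .list()
                .layer(new DenseLayer.Builder().nIn(4).nOut(3).activation(Activation.TANH).build())
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(3).nOut(3).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        // Random placeholder data: 10 examples with 4 features and 3 label columns
        INDArray features = Nd4j.rand(10, 4);
        INDArray labels = Nd4j.rand(10, 3);

        // 1) fit(INDArray, INDArray): one parameter update on a single minibatch
        net.fit(features, labels);

        // 2) fit(DataSet): the same minibatch, with features and labels wrapped together
        net.fit(new DataSet(features, labels));

        // 3) fit(DataSetIterator): one full epoch over the iterator
        List<DataSet> minibatches = Arrays.asList(new DataSet(features, labels), new DataSet(features, labels));
        DataSetIterator iter = new ExistingDataSetIterator(minibatches);
        net.fit(iter);

        // 4) fit(DataSetIterator, int): a fixed number of epochs over the iterator
        net.fit(iter, 2);
    }
}

The array and DataSet overloads perform a single parameter update per call, which is why the examples below typically wrap them in their own training loops, while the iterator overloads loop over minibatches (and, for the two-argument form, over epochs) internally.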
Example 1
Source File: TestNetConversion.java    From deeplearning4j with Apache License 2.0
private MultiLayerNetwork getNet1(boolean train) {

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .convolutionMode(ConvolutionMode.Same)
                .activation(Activation.TANH)
                .weightInit(WeightInit.XAVIER)
                .updater(new Sgd(0.1))
                .list()
                .layer(new ConvolutionLayer.Builder().nIn(3).nOut(5).kernelSize(2, 2).stride(1, 1).build())
                .layer(new SubsamplingLayer.Builder().kernelSize(2, 2).stride(1, 1).build())
                .layer(new DenseLayer.Builder().nOut(32).build())
                .layer(new OutputLayer.Builder().nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build())
                .setInputType(InputType.convolutional(10, 10, 3))
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        if(train) {
            for (int i = 0; i < 3; i++) {
                INDArray f = Nd4j.rand(new int[]{8, 3, 10, 10});
                INDArray l = Nd4j.rand(8, 10);

                net.fit(f, l);
            }
        }

        return net;
    }
 
Example 2
Source File: ConvolutionLayerTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testCNNMLNBackprop() throws Exception {
    int numSamples = 10;
    int batchSize = 10;
    DataSetIterator mnistIter = new MnistDataSetIterator(batchSize, numSamples, true);

    MultiLayerNetwork model = getCNNMLNConfig(true, false);
    model.fit(mnistIter);
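    // Note: fit(DataSetIterator) resets an exhausted, resettable iterator before training,
    // so model2 below trains on the same MNIST subset as model.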

    MultiLayerNetwork model2 = getCNNMLNConfig(true, false);
    model2.fit(mnistIter);

    mnistIter.reset();
    DataSet test = mnistIter.next();

    Evaluation eval = new Evaluation();
    INDArray output = model.output(test.getFeatures());
    eval.eval(test.getLabels(), output);
    double f1Score = eval.f1();

    Evaluation eval2 = new Evaluation();
    INDArray output2 = model2.output(test.getFeatures());
    eval2.eval(test.getLabels(), output2);
    double f1Score2 = eval2.f1();

    assertEquals(f1Score, f1Score2, 1e-4);

}
 
Example 3
Source File: EvalTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testEvaluativeListenerSimple(){
    //Sanity check: https://github.com/deeplearning4j/deeplearning4j/issues/5351

    // Network config
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()

            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(42)
            .updater(new Sgd(1e-6)).list()
            .layer(0, new DenseLayer.Builder().nIn(4).nOut(2).activation(Activation.TANH)
                    .weightInit(WeightInit.XAVIER).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                    LossFunctions.LossFunction.MCXENT).nIn(2).nOut(3).weightInit(WeightInit.XAVIER)
                    .activation(Activation.SOFTMAX).build())
            .build();

    // Instantiate model
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    // Train-test split
    DataSetIterator iter = new IrisDataSetIterator(30, 150);
    DataSetIterator iterTest = new IrisDataSetIterator(30, 150);

    net.setListeners(new EvaluativeListener(iterTest, 3));

    for( int i=0; i<3; i++ ){
        net.fit(iter);
    }
}
 
Example 4
Source File: BatchNormalizationTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testBatchNorm() throws Exception {

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(12345)
            .updater(new Adam(1e-3))
            .activation(Activation.TANH)
            .list()
            .layer(new ConvolutionLayer.Builder().nOut(5).kernelSize(2, 2).build())
            .layer(new BatchNormalization())
            .layer(new ConvolutionLayer.Builder().nOut(5).kernelSize(2, 2).build())
            .layer(new OutputLayer.Builder().activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).nOut(10).build())
            .setInputType(InputType.convolutionalFlat(28, 28, 1))
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    DataSetIterator iter = new EarlyTerminationDataSetIterator(new MnistDataSetIterator(32, true, 12345), 10);

    net.fit(iter);

    MultiLayerNetwork net2 = new TransferLearning.Builder(net)
            .fineTuneConfiguration(FineTuneConfiguration.builder()
                    .updater(new AdaDelta())
                    .build())
            .removeOutputLayer()
            .addLayer(new BatchNormalization.Builder().nOut(3380).build())
            .addLayer(new OutputLayer.Builder().activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).nIn(3380).nOut(10).build())
            .build();

    net2.fit(iter);
}
 
Example 5
Source File: ConvolutionLayerSetupTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testMnistLenet() throws Exception {
    MultiLayerConfiguration.Builder incomplete = incompleteMnistLenet();
    incomplete.setInputType(InputType.convolutionalFlat(28, 28, 1));

    MultiLayerConfiguration testConf = incomplete.build();
    assertEquals(800, ((FeedForwardLayer) testConf.getConf(4).getLayer()).getNIn());
    assertEquals(500, ((FeedForwardLayer) testConf.getConf(5).getLayer()).getNIn());

    //test instantiation
    DataSetIterator iter = new MnistDataSetIterator(10, 10);
    MultiLayerNetwork network = new MultiLayerNetwork(testConf);
    network.init();
    network.fit(iter.next());
}
 
Example 6
Source File: TestConvolution.java    From deeplearning4j with Apache License 2.0
@Test
    public void testGradientNorm() throws Exception {

        int height = 100;
        int width = 100;
        int channels = 1;
        int numLabels = 10;

        for( int batchSize : new int[]{1, 32}) {

            long seed = 12345;
            double nonZeroBias = 1;

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .seed(seed)
                    .dataType(DataType.DOUBLE)
                    .dist(new NormalDistribution(0.0, 0.01))
                    .activation(Activation.RELU)
                    .updater(new Adam(5e-3))
                    //.biasUpdater(new Nesterovs(new StepSchedule(ScheduleType.ITERATION, 2e-2, 0.1, 20000), 0.9))
                    .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
                    .l2(5 * 1e-4)
                    .list()
                    .layer(convInit("cnn1", channels, 96, new int[]{11, 11}, new int[]{4, 4},
                            new int[]{3, 3}, 0))
                    .layer(maxPool("maxpool1", new int[]{3, 3}))
                    .layer(conv5x5("cnn2", 256, new int[]{1, 1}, new int[]{2, 2}, nonZeroBias))
                    .layer(maxPool("maxpool2", new int[]{3, 3}))
                    .layer(conv3x3("cnn3", 384, 0))
                    .layer(conv3x3("cnn4", 384, nonZeroBias))
                    .layer(conv3x3("cnn5", 256, nonZeroBias))
                    .layer(maxPool("maxpool3", new int[]{3, 3}))
                    .layer(fullyConnected("ffn1", 4096, nonZeroBias, new GaussianDistribution(0, 0.005)))
                    .layer(fullyConnected("ffn2", 4096, nonZeroBias, new GaussianDistribution(0, 0.005)))
                    .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                            .name("output")
                            .nOut(numLabels)
                            .activation(Activation.SOFTMAX)
                            .build())
                    .setInputType(InputType.convolutional(height, width, channels))
                    .build();


            MultiLayerNetwork netNoCudnn = new MultiLayerNetwork(conf.clone());
            netNoCudnn.init();
            MultiLayerNetwork netWithCudnn = new MultiLayerNetwork(conf.clone());
            netWithCudnn.init();

            CuDNNTestUtils.removeHelpers(netNoCudnn.getLayers());



            Nd4j.getRandom().setSeed(12345);
            for( int j=0; j<3; j++ ) {
//                System.out.println("j=" + j);
                INDArray f = Nd4j.rand(new int[]{batchSize, channels, height, width});
                INDArray l = TestUtils.randomOneHot(batchSize, numLabels);

                netNoCudnn.fit(f, l);
                netWithCudnn.fit(f, l);

                assertEquals(netNoCudnn.score(), netWithCudnn.score(), 1e-5);

                for (Map.Entry<String, INDArray> e : netNoCudnn.paramTable().entrySet()) {
                    boolean pEq = e.getValue().equalsWithEps(netWithCudnn.paramTable().get(e.getKey()), 1e-4);
//                    int idx = e.getKey().indexOf("_");
//                    int layerNum = Integer.parseInt(e.getKey().substring(0, idx));
                    //System.out.println(e.getKey() + " - " + pEq + " - " + netNoCudnn.getLayer(layerNum).getClass().getSimpleName());
                    assertTrue(pEq);
                }

                boolean eq = netNoCudnn.params().equalsWithEps(netWithCudnn.params(), 1e-4);
                assertTrue(eq);
            }
        }
    }
 
Example 7
Source File: TestCheckpointListener.java    From deeplearning4j with Apache License 2.0
@Test
public void testCheckpointListenerKeepLast3AndEvery3() throws Exception {
    File f = tempDir.newFolder();
    Pair<MultiLayerNetwork, DataSetIterator> p = getNetAndData();
    MultiLayerNetwork net = p.getFirst();
    DataSetIterator iter = p.getSecond();


    CheckpointListener l = new CheckpointListener.Builder(f)
            .keepLastAndEvery(3, 3)
            .saveEveryNEpochs(2)
            .build();
    net.setListeners(l);

    for(int i=0; i<20; i++ ){   //40 iterations total
        net.fit(iter);
    }

    //Expect models saved at end of epochs: 1, 3, 5, 7, 9, 11, 13, 15, 17, 19
    //But: keep only 5, 11, 15, 17, 19
    File[] files = f.listFiles();
    int count = 0;
    Set<Integer> ns = new HashSet<>();
    for(File f2 : files){
        if(!f2.getPath().endsWith(".zip")){
            continue;
        }
        count++;
        int prefixLength = "checkpoint_".length();
        int end = f2.getName().lastIndexOf("_");
        int num = Integer.parseInt(f2.getName().substring(prefixLength, end));

        MultiLayerNetwork n = ModelSerializer.restoreMultiLayerNetwork(f2, true);
        int expEpoch = 2 * (num+1) - 1;
        assertEquals(expEpoch, n.getEpochCount());

        ns.add(n.getEpochCount());
        count++;
    }

    assertEquals(ns.toString(), 5, ns.size());
    assertTrue(ns.toString(), ns.containsAll(Arrays.asList(5, 11, 15, 17, 19)));

    assertEquals(5, l.availableCheckpoints().size());
}
 
Example 8
Source File: GradientCheckTestsComputationGraph.java    From deeplearning4j with Apache License 2.0
@Test
    public void testCnnPoolCenterLoss() {
        Nd4j.getRandom().setSeed(12345);
        int numLabels = 2;

        boolean[] trainFirst = new boolean[] {false, true};

        int inputH = 5;
        int inputW = 4;
        int inputDepth = 3;

        for (boolean train : trainFirst) {
            for (double lambda : new double[] {0.0, 0.5, 2.0}) {

                MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                                .dataType(DataType.DOUBLE)
                                .updater(new NoOp())
                                .dist(new NormalDistribution(0, 1.0)).seed(12345L).list()
                                .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nOut(3).build())
                                .layer(1, new GlobalPoolingLayer.Builder().poolingType(PoolingType.AVG).build())
                                .layer(2, new CenterLossOutputLayer.Builder()
                                                .lossFunction(LossFunctions.LossFunction.MCXENT).nOut(numLabels)
                                                .alpha(1.0).lambda(lambda).gradientCheck(true)
                                                .activation(Activation.SOFTMAX).build())

                                .setInputType(InputType.convolutional(inputH, inputW, inputDepth)).build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                INDArray example = Nd4j.rand(new int[] {150, inputDepth, inputH, inputW});

                INDArray labels = Nd4j.zeros(150, numLabels);
                Random r = new Random(12345);
                for (int i = 0; i < 150; i++) {
                    labels.putScalar(i, r.nextInt(numLabels), 1.0);
                }

                if (train) {
                    for (int i = 0; i < 10; i++) {
                        INDArray f = Nd4j.rand(new int[] {10, inputDepth, inputH, inputW});
                        INDArray l = Nd4j.zeros(10, numLabels);
                        for (int j = 0; j < 10; j++) {
                            l.putScalar(j, r.nextInt(numLabels), 1.0);
                        }
                        net.fit(f, l);
                    }
                }

                String msg = "testBasicCenterLoss() - trainFirst = " + train;
                if (PRINT_RESULTS) {
                    System.out.println(msg);
//                    for (int j = 0; j < net.getnLayers(); j++)
//                        System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
                }

                boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, example, labels);

                assertTrue(msg, gradOK);
                TestUtils.testModelSerialization(net);
            }
        }
    }
 
Example 9
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testMultiLayerNetworkTypeConversion() {

    for (DataType dt : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(dt, dt);

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .weightInit(WeightInit.XAVIER)
                .updater(new Adam(0.01))
                .dataType(DataType.DOUBLE)
                .list()
                .layer(new DenseLayer.Builder().activation(Activation.TANH).nIn(10).nOut(10).build())
                .layer(new DenseLayer.Builder().activation(Activation.TANH).nIn(10).nOut(10).build())
                .layer(new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inD = Nd4j.rand(DataType.DOUBLE, 1, 10);
        INDArray lD = Nd4j.create(DataType.DOUBLE, 1, 10);
        net.fit(inD, lD);

        INDArray outDouble = net.output(inD);
        net.setInput(inD);
        net.setLabels(lD);
        net.computeGradientAndScore();
        double scoreDouble = net.score();
        INDArray grads = net.getFlattenedGradients();
        INDArray u = net.getUpdater().getStateViewArray();
        assertEquals(DataType.DOUBLE, net.params().dataType());
        assertEquals(DataType.DOUBLE, grads.dataType());
        assertEquals(DataType.DOUBLE, u.dataType());


        MultiLayerNetwork netFloat = net.convertDataType(DataType.FLOAT);
        netFloat.initGradientsView();
        assertEquals(DataType.FLOAT, netFloat.params().dataType());
        assertEquals(DataType.FLOAT, netFloat.getFlattenedGradients().dataType());
        assertEquals(DataType.FLOAT, netFloat.getUpdater(true).getStateViewArray().dataType());
        INDArray inF = inD.castTo(DataType.FLOAT);
        INDArray lF = lD.castTo(DataType.FLOAT);
        INDArray outFloat = netFloat.output(inF);
        netFloat.setInput(inF);
        netFloat.setLabels(lF);
        netFloat.computeGradientAndScore();
        double scoreFloat = netFloat.score();
        INDArray gradsFloat = netFloat.getFlattenedGradients();
        INDArray uFloat = netFloat.getUpdater().getStateViewArray();

        assertEquals(scoreDouble, scoreFloat, 1e-6);
        assertEquals(outDouble.castTo(DataType.FLOAT), outFloat);
        assertEquals(grads.castTo(DataType.FLOAT), gradsFloat);
        INDArray uCast = u.castTo(DataType.FLOAT);
        assertTrue(uCast.equalsWithEps(uFloat, 1e-4));

        MultiLayerNetwork netFP16 = net.convertDataType(DataType.HALF);
        netFP16.initGradientsView();
        assertEquals(DataType.HALF, netFP16.params().dataType());
        assertEquals(DataType.HALF, netFP16.getFlattenedGradients().dataType());
        assertEquals(DataType.HALF, netFP16.getUpdater(true).getStateViewArray().dataType());

        INDArray inH = inD.castTo(DataType.HALF);
        INDArray lH = lD.castTo(DataType.HALF);
        INDArray outHalf = netFP16.output(inH);
        netFP16.setInput(inH);
        netFP16.setLabels(lH);
        netFP16.computeGradientAndScore();
        double scoreHalf = netFP16.score();
        INDArray gradsHalf = netFP16.getFlattenedGradients();
        INDArray uHalf = netFP16.getUpdater().getStateViewArray();

        assertEquals(scoreDouble, scoreHalf, 1e-4);
        boolean outHalfEq = outDouble.castTo(DataType.HALF).equalsWithEps(outHalf, 1e-3);
        assertTrue(outHalfEq);
        boolean gradsHalfEq = grads.castTo(DataType.HALF).equalsWithEps(gradsHalf, 1e-3);
        assertTrue(gradsHalfEq);
        INDArray uHalfCast = u.castTo(DataType.HALF);
        assertTrue(uHalfCast.equalsWithEps(uHalf, 1e-4));
    }
}
 
Example 10
Source File: ValidateCudnnLSTM.java    From deeplearning4j with Apache License 2.0
@Test
public void validateImplMultiLayerTBPTT() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int inputSize = 3;
    int lstmLayerSize = 4;
    int timeSeriesLength = 23;
    int tbpttLength = 5;
    int nOut = 2;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .inferenceWorkspaceMode(WorkspaceMode.NONE).trainingWorkspaceMode(WorkspaceMode.NONE)
                    .seed(12345L)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new LSTM.Builder().nIn(inputSize).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                    .backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTLength(tbpttLength).build();

    MultiLayerNetwork mln1 = new MultiLayerNetwork(conf.clone());
    mln1.init();

    MultiLayerNetwork mln2 = new MultiLayerNetwork(conf.clone());
    mln2.init();


    assertEquals(mln1.params(), mln2.params());

    Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
    f.setAccessible(true);

    Layer l0 = mln1.getLayer(0);
    Layer l1 = mln1.getLayer(1);
    f.set(l0, null);
    f.set(l1, null);
    assertNull(f.get(l0));
    assertNull(f.get(l1));

    l0 = mln2.getLayer(0);
    l1 = mln2.getLayer(1);
    assertTrue(f.get(l0) instanceof CudnnLSTMHelper);
    assertTrue(f.get(l1) instanceof CudnnLSTMHelper);

    Random r = new Random(12345);
    for (int x = 0; x < 1; x++) {
        INDArray input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
        INDArray labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
        for (int i = 0; i < minibatch; i++) {
            for (int j = 0; j < timeSeriesLength; j++) {
                labels.putScalar(i, r.nextInt(nOut), j, 1.0);
            }
        }

        DataSet ds = new DataSet(input, labels);
        mln1.fit(ds);
        mln2.fit(ds);
    }


    assertEquals(mln1.params(), mln2.params());
}
 
Example 11
Source File: TestRnnLayers.java    From deeplearning4j with Apache License 2.0
@Test
public void testMismatchedInputLabelLength(){

    for( int i=0; i<2; i++ ){

        NeuralNetConfiguration.ListBuilder lb = new NeuralNetConfiguration.Builder()

                .list()
                .layer(new SimpleRnn.Builder().nIn(5).nOut(5).dataFormat(rnnDataFormat).build());

        switch (i){
            case 0:
                lb.layer(new RnnOutputLayer.Builder().activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).nIn(5).nOut(5).dataFormat(rnnDataFormat).build());
                break;
            case 1:
                lb.layer(new RnnLossLayer.Builder().activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).dataFormat(rnnDataFormat).build());
                break;
            default:
                throw new RuntimeException();
        }

        MultiLayerConfiguration conf = lb.build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray in = Nd4j.rand(DataType.FLOAT, 3, 5, 5);
        INDArray l = TestUtils.randomOneHotTimeSeries(rnnDataFormat, 3, 5, 10, new Random(12345));
        try{
            net.fit(in,l);
        } catch (Throwable t){
            String msg = t.getMessage();
            if(msg == null)
                t.printStackTrace();
            System.out.println(i);
            assertTrue(msg, msg != null && msg.contains("sequence length") && msg.contains("input") && msg.contains("label"));
        }

    }


}
 
Example 12
Source File: TestConstraints.java    From deeplearning4j with Apache License 2.0
@Test
public void testLayerWeightsAndBiasConstraints() throws Exception {

    LayerConstraint[] constraints = new LayerConstraint[]{
            new MaxNormConstraint(0.5, 1),
            new MinMaxNormConstraint(0.3, 0.4, 1.0, 1),
            new NonNegativeConstraint(),
            new UnitNormConstraint(1)
    };

    for (LayerConstraint lc : constraints) {

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new Sgd(0.0))
                .dist(new NormalDistribution(0, 5))
                .biasInit(0.2)
                .list()
                .layer(new DenseLayer.Builder().nIn(12).nOut(10)
                        .constrainAllParameters(lc).build())
                .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(8).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        LayerConstraint exp = lc.clone();
        assertEquals(exp.toString(), net.getLayer(0).conf().getLayer().getConstraints().get(0).toString());

        INDArray input = Nd4j.rand(3, 12);
        INDArray labels = Nd4j.rand(3, 8);

        net.fit(input, labels);

        INDArray w0 = net.getParam("0_W");
        INDArray b0 = net.getParam("0_b");


        if (lc instanceof MaxNormConstraint) {
            assertTrue(w0.norm2(1).maxNumber().doubleValue() <= 0.5);
            assertTrue(b0.norm2(1).maxNumber().doubleValue() <= 0.5);

        } else if (lc instanceof MinMaxNormConstraint) {
            assertTrue(w0.norm2(1).minNumber().doubleValue() >= 0.3);
            assertTrue(w0.norm2(1).maxNumber().doubleValue() <= 0.4);
            assertTrue(b0.norm2(1).minNumber().doubleValue() >= 0.3);
            assertTrue(b0.norm2(1).maxNumber().doubleValue() <= 0.4);
        } else if (lc instanceof NonNegativeConstraint) {
            assertTrue(w0.minNumber().doubleValue() >= 0.0);
            assertTrue(b0.minNumber().doubleValue() >= 0.0);
        } else if (lc instanceof UnitNormConstraint) {
            assertEquals(1.0, w0.norm2(1).minNumber().doubleValue(), 1e-6);
            assertEquals(1.0, w0.norm2(1).maxNumber().doubleValue(), 1e-6);
            assertEquals(1.0, b0.norm2(1).minNumber().doubleValue(), 1e-6);
            assertEquals(1.0, b0.norm2(1).maxNumber().doubleValue(), 1e-6);
        }

        TestUtils.testModelSerialization(net);
    }
}
 
Example 13
Source File: TestDropout.java    From deeplearning4j with Apache License 2.0
@Test
public void testCalls(){

    CustomDropout d1 = new CustomDropout();
    CustomDropout d2 = new CustomDropout();

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .list()
            .layer(new DenseLayer.Builder().nIn(4).nOut(3).dropOut(d1).build())
            .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MSE).dropOut(d2).nIn(3).nOut(3).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    List<DataSet> l = new ArrayList<>();
    l.add(new DataSet(Nd4j.rand(5,4), Nd4j.rand(5,3)));
    l.add(new DataSet(Nd4j.rand(5,4), Nd4j.rand(5,3)));
    l.add(new DataSet(Nd4j.rand(5,4), Nd4j.rand(5,3)));

    DataSetIterator iter = new ExistingDataSetIterator(l);

    net.fit(iter);
    net.fit(iter);
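    // d1/d2 record the (iteration, epoch) at each dropout call: 3 minibatches per epoch
    // over 2 epochs gives the 6 pairs listed below.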

    List<Pair<Integer,Integer>> expList = Arrays.asList(
            new Pair<>(0, 0),
            new Pair<>(1, 0),
            new Pair<>(2, 0),
            new Pair<>(3, 1),
            new Pair<>(4, 1),
            new Pair<>(5, 1));

    assertEquals(expList, d1.getAllCalls());
    assertEquals(expList, d2.getAllCalls());

    assertEquals(expList, d1.getAllReverseCalls());
    assertEquals(expList, d2.getAllReverseCalls());


    d1 = new CustomDropout();
    d2 = new CustomDropout();
    ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder()
            .graphBuilder()
            .addInputs("in")
            .addLayer("0", new DenseLayer.Builder().nIn(4).nOut(3).dropOut(d1).build(), "in")
            .addLayer("1", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).dropOut(d2).nIn(3).nOut(3).build(), "0")
            .setOutputs("1")
            .build();

    ComputationGraph net2 = new ComputationGraph(conf2);
    net2.init();

    net2.fit(iter);
    net2.fit(iter);

    assertEquals(expList, d1.getAllCalls());
    assertEquals(expList, d2.getAllCalls());
}
 
Example 14
Source File: FrozenLayerTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testFrozen() {
    DataSet randomData = new DataSet(Nd4j.rand(10, 4), Nd4j.rand(10, 3));

    NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1))
                    .activation(Activation.IDENTITY);

    FineTuneConfiguration finetune = new FineTuneConfiguration.Builder().updater(new Sgd(0.1)).build();

    MultiLayerNetwork modelToFineTune = new MultiLayerNetwork(overallConf.clone().list()
                    .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build())
                    .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).build())
                    .layer(2, new DenseLayer.Builder().nIn(2).nOut(3).build())
                    .layer(3, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3)
                                                    .build())
                    .build());

    modelToFineTune.init();
    List<INDArray> ff = modelToFineTune.feedForwardToLayer(2, randomData.getFeatures(), false);
    INDArray asFrozenFeatures = ff.get(2);

    MultiLayerNetwork modelNow = new TransferLearning.Builder(modelToFineTune).fineTuneConfiguration(finetune)
                    .setFeatureExtractor(1).build();

    INDArray paramsLastTwoLayers =
                    Nd4j.hstack(modelToFineTune.getLayer(2).params(), modelToFineTune.getLayer(3).params());
    MultiLayerNetwork notFrozen = new MultiLayerNetwork(overallConf.clone().list()
                    .layer(0, new DenseLayer.Builder().nIn(2).nOut(3).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3)
                                                    .build())
                    .build(), paramsLastTwoLayers);

    //        assertEquals(modelNow.getLayer(2).conf(), notFrozen.getLayer(0).conf());  //Equal, other than names
    //        assertEquals(modelNow.getLayer(3).conf(), notFrozen.getLayer(1).conf());  //Equal, other than names

    //Check: forward pass
    INDArray outNow = modelNow.output(randomData.getFeatures());
    INDArray outNotFrozen = notFrozen.output(asFrozenFeatures);
    assertEquals(outNow, outNotFrozen);

    for (int i = 0; i < 5; i++) {
        notFrozen.fit(new DataSet(asFrozenFeatures, randomData.getLabels()));
        modelNow.fit(randomData);
    }

    INDArray expected = Nd4j.hstack(modelToFineTune.getLayer(0).params(), modelToFineTune.getLayer(1).params(),
                    notFrozen.params());
    INDArray act = modelNow.params();
    assertEquals(expected, act);
}
 
Example 15
Source File: DeepLearning4J_CSV_Iris_Model.java    From kafka-streams-machine-learning-examples with Apache License 2.0
public static void main(String[] args) throws Exception {

        // First: get the dataset using the record reader. CSVRecordReader handles
        // loading/parsing
        int numLinesToSkip = 0;
        char delimiter = ',';
        RecordReader recordReader = new CSVRecordReader(numLinesToSkip, delimiter);
        recordReader.initialize(new FileSplit(new ClassPathResource("DL4J_Resources/iris.txt").getFile()));

        // Second: the RecordReaderDataSetIterator handles conversion to DataSet
        // objects, ready for use in neural network
        int labelIndex = 4; // 5 values in each row of the iris.txt CSV: 4 input features followed by an
                            // integer label (class) index. Labels are the 5th value (index 4) in each row
        int numClasses = 3; // 3 classes (types of iris flowers) in the iris data set. Classes have integer
                            // values 0, 1 or 2
        int batchSize = 150; // Iris data set: 150 examples total. We are loading all of them into one
                             // DataSet (not recommended for large data sets)

        DataSetIterator iterator = new RecordReaderDataSetIterator(recordReader, batchSize, labelIndex, numClasses);
        DataSet allData = iterator.next();
        allData.shuffle();
        SplitTestAndTrain testAndTrain = allData.splitTestAndTrain(0.65); // Use 65% of data for training

        DataSet trainingData = testAndTrain.getTrain();
        DataSet testData = testAndTrain.getTest();

        // We need to normalize our data. We'll use NormalizerStandardize (which gives us
        // mean 0, unit variance):
        DataNormalization normalizer = new NormalizerStandardize();
        normalizer.fit(trainingData); // Collect the statistics (mean/stdev) from the training data. This does not
                                      // modify the input data
        normalizer.transform(trainingData); // Apply normalization to the training data
        normalizer.transform(testData); // Apply normalization to the test data. This is using statistics calculated
                                        // from the *training* set

        final int numInputs = 4;
        int outputNum = 3;
        long seed = 6;

        log.info("Build model....");
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(seed).activation(Activation.TANH)
                .weightInit(WeightInit.XAVIER).updater(new Sgd(0.1)).l2(1e-4).list()
                .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(3).build())
                .layer(1, new DenseLayer.Builder().nIn(3).nOut(3).build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .activation(Activation.SOFTMAX).nIn(3).nOut(outputNum).build())
                .build();

        // run the model
        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(new ScoreIterationListener(100));

        for (int i = 0; i < 1000; i++) {
            model.fit(trainingData);
        }

        // evaluate the model on the test set
        Evaluation eval = new Evaluation(3);
        INDArray input = testData.getFeatures();
        INDArray output = model.output(input);
        System.out.println("INPUT:" + input.toString());
        eval.eval(testData.getLabels(), output);
        log.info(eval.stats());

        // Save the model
        // Where to save the network. Note: the file is in .zip format - can be opened externally
        File locationToSave = new File("src/main/resources/generatedModels/DL4J/DL4J_Iris_Model.zip");
        // Updater: i.e., the state for Momentum, RMSProp, Adagrad etc. Save this if you want to train your network more in the future
        boolean saveUpdater = true;
        // ModelSerializer.writeModel(model, locationToSave, saveUpdater);

        // Load the model
        MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(locationToSave);

        System.out.println("Saved and loaded parameters are equal:      " + model.params().equals(restored.params()));
        System.out.println("Saved and loaded configurations are equal:  "
                + model.getLayerWiseConfigurations().equals(restored.getLayerWiseConfigurations()));

    }
 
Example 16
Source File: NeuralNetworks.java    From Machine-Learning-in-Java with MIT License
public static void main(String[] args) throws Exception {
		final int numRows = 28;
		final int numColumns = 28;
		int outputNum = 10;
		int numSamples = 60000;
		int batchSize = 100;
		int iterations = 10;
		int seed = 123;
		int listenerFreq = batchSize / 5;

		log.info("Load data....");
		DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples,
				true);

		log.info("Build model....");
		 MultiLayerNetwork model = softMaxRegression(seed, iterations, numRows, numColumns, outputNum);
//		// MultiLayerNetwork model = deepBeliefNetwork(seed, iterations,
//		// numRows, numColumns, outputNum);
//		MultiLayerNetwork model = deepConvNetwork(seed, iterations, numRows,
//				numColumns, outputNum);

		model.init();
		model.setListeners(Collections
				.singletonList((IterationListener) new ScoreIterationListener(
						listenerFreq)));

		log.info("Train model....");
		model.fit(iter); // achieves end to end pre-training

		log.info("Evaluate model....");
		Evaluation eval = new Evaluation(outputNum);

		DataSetIterator testIter = new MnistDataSetIterator(100, 10000);
		while (testIter.hasNext()) {
			DataSet testMnist = testIter.next();
			INDArray predict2 = model.output(testMnist.getFeatureMatrix());
			eval.eval(testMnist.getLabels(), predict2);
		}

		log.info(eval.stats());
		log.info("****************Example finished********************");

	}
 
Example 17
Source File: FrozenLayerWithBackpropTest.java    From deeplearning4j with Apache License 2.0
/**
 * A frozen layer should give the same results as a layer with an Sgd updater whose learning rate is set to 0
 */
@Test
public void testFrozenLayerVsSgd() {
    Nd4j.getRandom().setSeed(12345);
    DataSet randomData = new DataSet(Nd4j.rand(100, 4), Nd4j.rand(100, 1));

    MultiLayerConfiguration confSgd = new NeuralNetConfiguration.Builder()
            .seed(12345)
            .weightInit(WeightInit.XAVIER)
            .updater(new Sgd(2))
            .list()
            .layer(0,new DenseLayer.Builder().nIn(4).nOut(3).build())
            .layer(1,new DenseLayer.Builder().updater(new Sgd(0.0)).biasUpdater(new Sgd(0.0)).nIn(3).nOut(4).build())
            .layer(2,new DenseLayer.Builder().updater(new Sgd(0.0)).biasUpdater(new Sgd(0.0)).nIn(4).nOut(2).build())
            .layer(3,new OutputLayer.Builder(LossFunctions.LossFunction.MSE).updater(new Sgd(0.0)).biasUpdater(new Sgd(0.0)).activation(Activation.TANH).nIn(2).nOut(1).build())
            .build();

    MultiLayerConfiguration confFrozen = new NeuralNetConfiguration.Builder()
            .seed(12345)
            .weightInit(WeightInit.XAVIER)
            .updater(new Sgd(2))
            .list()
            .layer(0,new DenseLayer.Builder().nIn(4).nOut(3).build())
            .layer(1,new org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop(new DenseLayer.Builder().nIn(3).nOut(4).build()))
            .layer(2,new org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop(new DenseLayer.Builder().nIn(4).nOut(2).build()))
            .layer(3,new org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop(new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.TANH).nIn(2).nOut(1).build()))
            .build();
    MultiLayerNetwork frozenNetwork = new MultiLayerNetwork(confFrozen);
    frozenNetwork.init();
    INDArray unfrozenLayerParams = frozenNetwork.getLayer(0).params().dup();
    INDArray frozenLayerParams1 = frozenNetwork.getLayer(1).params().dup();
    INDArray frozenLayerParams2 = frozenNetwork.getLayer(2).params().dup();
    INDArray frozenOutputLayerParams = frozenNetwork.getLayer(3).params().dup();

    MultiLayerNetwork sgdNetwork = new MultiLayerNetwork(confSgd);
    sgdNetwork.init();
    INDArray unfrozenSgdLayerParams = sgdNetwork.getLayer(0).params().dup();
    INDArray frozenSgdLayerParams1 = sgdNetwork.getLayer(1).params().dup();
    INDArray frozenSgdLayerParams2 = sgdNetwork.getLayer(2).params().dup();
    INDArray frozenSgdOutputLayerParams = sgdNetwork.getLayer(3).params().dup();

    for (int i = 0; i < 100; i++) {
        frozenNetwork.fit(randomData);
    }
    for (int i = 0; i < 100; i++) {
        sgdNetwork.fit(randomData);
    }

    assertEquals(frozenNetwork.getLayer(0).params(), sgdNetwork.getLayer(0).params());
    assertEquals(frozenNetwork.getLayer(1).params(), sgdNetwork.getLayer(1).params());
    assertEquals(frozenNetwork.getLayer(2).params(), sgdNetwork.getLayer(2).params());
    assertEquals(frozenNetwork.getLayer(3).params(), sgdNetwork.getLayer(3).params());

}
 
Example 18
Source File: GradientCheckTests.java    From deeplearning4j with Apache License 2.0
@Test
    public void testAutoEncoder() {
        //As above (testGradientMLP2LayerIrisSimple()) but with L2, L1, and both L2/L1 applied
        //Need to run gradient through updater, so that L2 can be applied

        Activation[] activFns = {Activation.SIGMOID, Activation.TANH};
        boolean[] characteristic = {false, true}; //If true: run some backprop steps first

        LossFunction[] lossFunctions = {LossFunction.MCXENT, LossFunction.MSE};
        Activation[] outputActivations = {Activation.SOFTMAX, Activation.TANH};

        DataNormalization scaler = new NormalizerMinMaxScaler();
        DataSetIterator iter = new IrisDataSetIterator(150, 150);
        scaler.fit(iter);
        iter.setPreProcessor(scaler);
        DataSet ds = iter.next();
        INDArray input = ds.getFeatures();
        INDArray labels = ds.getLabels();

        NormalizerStandardize norm = new NormalizerStandardize();
        norm.fit(ds);
        norm.transform(ds);

        double[] l2vals = {0.2, 0.0, 0.2};
        double[] l1vals = {0.0, 0.3, 0.3}; //i.e., use l2vals[i] with l1vals[i]

        for (Activation afn : activFns) {
            for (boolean doLearningFirst : characteristic) {
                for (int i = 0; i < lossFunctions.length; i++) {
                    for (int k = 0; k < l2vals.length; k++) {
                        LossFunction lf = lossFunctions[i];
                        Activation outputActivation = outputActivations[i];
                        double l2 = l2vals[k];
                        double l1 = l1vals[k];

                        Nd4j.getRandom().setSeed(12345);
                        MultiLayerConfiguration conf =
                                        new NeuralNetConfiguration.Builder()
                                                        .dataType(DataType.DOUBLE)
                                                        .updater(new NoOp())
                                                        .l2(l2).l1(l1)
                                                        .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
                                                        .seed(12345L)
                                                        .dist(new NormalDistribution(0, 1))
                                                        .list().layer(0,
                                                                        new AutoEncoder.Builder().nIn(4).nOut(3)
                                                                                        .activation(afn).build())
                                                        .layer(1, new OutputLayer.Builder(lf).nIn(3).nOut(3)
                                                                        .activation(outputActivation).build())
                                                        .build();

                        MultiLayerNetwork mln = new MultiLayerNetwork(conf);
                        mln.init();

                        String msg;
                        if (doLearningFirst) {
                            //Run a number of iterations of learning
                            mln.setInput(ds.getFeatures());
                            mln.setLabels(ds.getLabels());
                            mln.computeGradientAndScore();
                            double scoreBefore = mln.score();
                            for (int j = 0; j < 10; j++)
                                mln.fit(ds);
                            mln.computeGradientAndScore();
                            double scoreAfter = mln.score();
                            //Can't test in 'characteristic mode of operation' if not learning
                            msg = "testGradMLP2LayerIrisSimple() - score did not (sufficiently) decrease during learning - activationFn="
                                            + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation
                                            + ", doLearningFirst=" + doLearningFirst + ", l2=" + l2 + ", l1=" + l1
                                            + " (before=" + scoreBefore + ", scoreAfter=" + scoreAfter + ")";
                            assertTrue(msg, scoreAfter < scoreBefore);
                        }

                        msg = "testGradMLP2LayerIrisSimple() - activationFn=" + afn + ", lossFn=" + lf
                                        + ", outputActivation=" + outputActivation + ", doLearningFirst="
                                        + doLearningFirst + ", l2=" + l2 + ", l1=" + l1;
                        if (PRINT_RESULTS) {
                            System.out.println(msg);
//                            for (int j = 0; j < mln.getnLayers(); j++)
//                                System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
                        }

                        boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                                        DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);
                        assertTrue(msg, gradOK);
                        TestUtils.testModelSerialization(mln);
                    }
                }
            }
        }
    }
 
Example 19
Source File: WorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testTbpttFit() {
    for (WorkspaceMode ws : WorkspaceMode.values()) {
        for (int i = 0; i < 3; i++) {

            System.out.println("Starting test: " + ws + " - " + i);

            NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder()
                    .weightInit(WeightInit.XAVIER)
                    .activation(Activation.TANH)
                    .inferenceWorkspaceMode(ws)
                    .trainingWorkspaceMode(ws)
                    .list();

            ComputationGraphConfiguration.GraphBuilder gb = new NeuralNetConfiguration.Builder()
                    .weightInit(WeightInit.XAVIER)
                    .activation(Activation.TANH)
                    .inferenceWorkspaceMode(ws)
                    .trainingWorkspaceMode(ws)
                    .graphBuilder()
                    .addInputs("in");

            switch (i) {
                case 0:
                    b.layer(new SimpleRnn.Builder().nIn(10).nOut(10).build());
                    b.layer(new SimpleRnn.Builder().nIn(10).nOut(10).build());

                    gb.addLayer("0", new SimpleRnn.Builder().nIn(10).nOut(10).build(), "in");
                    gb.addLayer("1", new SimpleRnn.Builder().nIn(10).nOut(10).build(), "0");
                    break;
                case 1:
                    b.layer(new LSTM.Builder().nIn(10).nOut(10).build());
                    b.layer(new LSTM.Builder().nIn(10).nOut(10).build());

                    gb.addLayer("0", new LSTM.Builder().nIn(10).nOut(10).build(), "in");
                    gb.addLayer("1", new LSTM.Builder().nIn(10).nOut(10).build(), "0");
                    break;
                case 2:
                    b.layer(new GravesLSTM.Builder().nIn(10).nOut(10).build());
                    b.layer(new GravesLSTM.Builder().nIn(10).nOut(10).build());

                    gb.addLayer("0", new GravesLSTM.Builder().nIn(10).nOut(10).build(), "in");
                    gb.addLayer("1", new GravesLSTM.Builder().nIn(10).nOut(10).build(), "0");
                    break;
                default:
                    throw new RuntimeException();
            }

            b.layer(new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(10).build());
            gb.addLayer("out", new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE)
                    .nIn(10).nOut(10).build(), "1");
            gb.setOutputs("out");

            MultiLayerConfiguration conf = b
                    .backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTLength(5)
                    .build();

            ComputationGraphConfiguration conf2 = gb
                    .backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTForwardLength(5).tBPTTBackwardLength(5)
                    .build();


            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();

            ComputationGraph net2 = new ComputationGraph(conf2);
            net2.init();

            for (int j = 0; j < 3; j++) {
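                // Each fit() call sees a [minibatch=3, size=10, timeSteps=20] series and, with tBPTTLength(5),
                // processes it as 4 truncated-BPTT segments of length 5.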
                net.fit(Nd4j.rand(new int[]{3, 10, 20}), Nd4j.rand(new int[]{3, 10, 20}));
            }

            for (int j = 0; j < 3; j++) {
                net2.fit(new DataSet(Nd4j.rand(new int[]{3, 10, 20}), Nd4j.rand(new int[]{3, 10, 20})));
            }
        }
    }
}
 
Example 20
Source File: TestListeners.java    From deeplearning4j with Apache License 2.0
@Test
public void testListenerCalls(){

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .list()
            .layer(new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    TestListener tl = new TestListener();
    net.setListeners(tl);

    DataSetIterator irisIter = new IrisDataSetIterator(50, 150);

    net.fit(irisIter, 2);
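    // fit(DataSetIterator, numEpochs): 2 epochs; 150 examples at minibatch size 50
    // gives 3 iterations per epoch, matching the expected listener calls below.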

    List<Triple<Call,Integer,Integer>> exp = new ArrayList<>();
    exp.add(new Triple<>(Call.EPOCH_START, 0, 0));
    exp.add(new Triple<>(Call.ON_FWD, 0, 0));
    exp.add(new Triple<>(Call.ON_BWD, 0, 0));
    exp.add(new Triple<>(Call.ON_GRAD, 0, 0));
    exp.add(new Triple<>(Call.ITER_DONE, 0, 0));
    exp.add(new Triple<>(Call.ON_FWD, 1, 0));
    exp.add(new Triple<>(Call.ON_BWD, 1, 0));
    exp.add(new Triple<>(Call.ON_GRAD, 1, 0));
    exp.add(new Triple<>(Call.ITER_DONE, 1, 0));
    exp.add(new Triple<>(Call.ON_FWD, 2, 0));
    exp.add(new Triple<>(Call.ON_BWD, 2, 0));
    exp.add(new Triple<>(Call.ON_GRAD, 2, 0));
    exp.add(new Triple<>(Call.ITER_DONE, 2, 0));
    exp.add(new Triple<>(Call.EPOCH_END, 3, 0));    //Post updating iter count, pre update epoch count

    exp.add(new Triple<>(Call.EPOCH_START, 3, 1));
    exp.add(new Triple<>(Call.ON_FWD, 3, 1));
    exp.add(new Triple<>(Call.ON_BWD, 3, 1));
    exp.add(new Triple<>(Call.ON_GRAD, 3, 1));
    exp.add(new Triple<>(Call.ITER_DONE, 3, 1));
    exp.add(new Triple<>(Call.ON_FWD, 4, 1));
    exp.add(new Triple<>(Call.ON_BWD, 4, 1));
    exp.add(new Triple<>(Call.ON_GRAD, 4, 1));
    exp.add(new Triple<>(Call.ITER_DONE, 4, 1));
    exp.add(new Triple<>(Call.ON_FWD, 5, 1));
    exp.add(new Triple<>(Call.ON_BWD, 5, 1));
    exp.add(new Triple<>(Call.ON_GRAD, 5, 1));
    exp.add(new Triple<>(Call.ITER_DONE, 5, 1));
    exp.add(new Triple<>(Call.EPOCH_END, 6, 1));


    assertEquals(exp, tl.getCalls());


    tl = new TestListener();

    ComputationGraph cg = net.toComputationGraph();
    cg.setListeners(tl);

    cg.fit(irisIter, 2);

    assertEquals(exp, tl.getCalls());
}