Java Code Examples for org.deeplearning4j.nn.conf.MultiLayerConfiguration#clone()

The following examples show how to use org.deeplearning4j.nn.conf.MultiLayerConfiguration#clone(). Each example is taken from the deeplearning4j open source project; the source file and license are noted above the code.
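Before the full test examples, here is a minimal, self-contained sketch (not taken from the project; the class name and layer sizes are illustrative) of the basic pattern: build a MultiLayerConfiguration once, then call clone() to obtain an independent copy with identical settings that can back a second network.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class CloneSketch {
    public static void main(String[] args) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .list()
                .layer(new DenseLayer.Builder().nIn(4).nOut(8).activation(Activation.TANH).build())
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(8).nOut(3).activation(Activation.SOFTMAX).build())
                .build();

        //clone() returns a separate configuration object with the same settings
        MultiLayerConfiguration copy = conf.clone();
        System.out.println(copy != conf);                         //true - distinct instances
        System.out.println(copy.toJson().equals(conf.toJson()));  //true - identical settings

        //The copy can back a second, identically configured network
        MultiLayerNetwork net1 = new MultiLayerNetwork(conf);
        MultiLayerNetwork net2 = new MultiLayerNetwork(copy);
        net1.init();
        net2.init();
    }
}

The tests below use clone() in the same way: either to build two networks from one configuration and compare their behaviour, or to hand an unmodified copy of the configuration to a Spark training wrapper.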
Example 1
Source File: TestSparkMultiLayerParameterAveraging.java    From deeplearning4j with Apache License 2.0
@Test
@Ignore   //Ignored 2019/04/09 - low priority: https://github.com/deeplearning4j/deeplearning4j/issues/6656
public void testVaePretrainSimple() {
    //Simple sanity check on pretraining
    int nIn = 8;

    Nd4j.getRandom().setSeed(12345);
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new RmsProp())
                    .weightInit(WeightInit.XAVIER).list()
                    .layer(0, new VariationalAutoencoder.Builder().nIn(8).nOut(10).encoderLayerSizes(12)
                                    .decoderLayerSizes(13).reconstructionDistribution(
                                                    new GaussianReconstructionDistribution(Activation.IDENTITY))
                                    .build())
                    .build();

    //Do training on Spark with one executor, for 3 separate minibatches
    int rddDataSetNumExamples = 10;
    int totalAveragings = 5;
    int averagingFrequency = 3;
    ParameterAveragingTrainingMaster tm = new ParameterAveragingTrainingMaster.Builder(rddDataSetNumExamples)
                    .averagingFrequency(averagingFrequency).batchSizePerWorker(rddDataSetNumExamples)
                    .saveUpdater(true).workerPrefetchNumBatches(0).build();
    Nd4j.getRandom().setSeed(12345);
    SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, conf.clone(), tm);

    List<DataSet> trainData = new ArrayList<>();
    int nDataSets = numExecutors() * totalAveragings * averagingFrequency;
    for (int i = 0; i < nDataSets; i++) {
        trainData.add(new DataSet(Nd4j.rand(rddDataSetNumExamples, nIn), null));
    }

    JavaRDD<DataSet> data = sc.parallelize(trainData);

    sparkNet.fit(data);
}
 
Example 2
Source File: TestMultiModelGradientApplication.java    From deeplearning4j with Apache License 2.0
@Test
public void testGradientApplyMultiLayerNetwork() {
    int minibatch = 7;
    int nIn = 10;
    int nOut = 10;

    for (boolean regularization : new boolean[] {false, true}) {
        for (IUpdater u : new IUpdater[] {new Sgd(0.1), new Nesterovs(0.1), new Adam(0.1)}) {

            MultiLayerConfiguration conf =
                            new NeuralNetConfiguration.Builder().seed(12345).activation(Activation.TANH)
                                            .weightInit(WeightInit.XAVIER).updater(u)
                                            .l1(regularization ? 0.2 : 0.0)
                                            .l2(regularization ? 0.3 : 0.0).list()
                                            .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(10).build())
                                            .layer(1, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(2,
                                                            new OutputLayer.Builder(
                                                                            LossFunctions.LossFunction.MCXENT)
                                                                                            .activation(Activation.SOFTMAX)
                                                                                            .nIn(10).nOut(nOut)
                                                                                            .build())
                                            .build();


            Nd4j.getRandom().setSeed(12345);
            MultiLayerNetwork net1GradCalc = new MultiLayerNetwork(conf);
            net1GradCalc.init();

            Nd4j.getRandom().setSeed(12345);
            MultiLayerNetwork net2GradUpd = new MultiLayerNetwork(conf.clone());
            net2GradUpd.init();

            assertEquals(net1GradCalc.params(), net2GradUpd.params());

            INDArray f = Nd4j.rand(minibatch, nIn);
            INDArray l = Nd4j.create(minibatch, nOut);
            for (int i = 0; i < minibatch; i++) {
                l.putScalar(i, i % nOut, 1.0);
            }
            net1GradCalc.setInput(f);
            net1GradCalc.setLabels(l);

            net2GradUpd.setInput(f);
            net2GradUpd.setLabels(l);

            //Calculate gradient in first net, update and apply it in the second
            //Also: calculate gradient in the second net, just to be sure it isn't modified while doing updating on
            // the other net's gradient
            net1GradCalc.computeGradientAndScore();
            net2GradUpd.computeGradientAndScore();

            Gradient g = net1GradCalc.gradient();
            INDArray gBefore = g.gradient().dup(); //Net 1 gradient should be modified
            INDArray net2GradBefore = net2GradUpd.gradient().gradient().dup(); //But net 2 gradient should not be
            net2GradUpd.getUpdater().update(net2GradUpd, g, 0, 0, minibatch, LayerWorkspaceMgr.noWorkspaces());
            INDArray gAfter = g.gradient().dup();
            INDArray net2GradAfter = net2GradUpd.gradient().gradient().dup();

            assertNotEquals(gBefore, gAfter); //Net 1 gradient should be modified
            assertEquals(net2GradBefore, net2GradAfter); //But net 2 gradient should not be


            //Also: if we apply the gradient using a subi op, we should get the same final params as if we did a fit op
            // on the original network
            net2GradUpd.params().subi(g.gradient());

            net1GradCalc.fit(f, l);
            assertEquals(net1GradCalc.params(), net2GradUpd.params());


            //=============================
            if (!(u instanceof Sgd)) {
                net2GradUpd.getUpdater().getStateViewArray().assign(net1GradCalc.getUpdater().getStateViewArray());
            }
            assertEquals(net1GradCalc.params(), net2GradUpd.params());
            assertEquals(net1GradCalc.getUpdater().getStateViewArray(),
                            net2GradUpd.getUpdater().getStateViewArray());

            //Remove the next 2 lines: fails - as net 1 is 1 iteration ahead
            net1GradCalc.getLayerWiseConfigurations().setIterationCount(0);
            net2GradUpd.getLayerWiseConfigurations().setIterationCount(0);

            for (int i = 0; i < 100; i++) {
                net1GradCalc.fit(f, l);
                net2GradUpd.fit(f, l);
                assertEquals(net1GradCalc.params(), net2GradUpd.params());
            }
        }
    }
}
 
Example 3
Source File: ValidateMKLDNN.java    From deeplearning4j with Apache License 2.0
@Test
public void validateConvSubsampling() throws Exception {
    //Only run test if using nd4j-native backend
    assumeTrue(Nd4j.getBackend().getClass().getName().toLowerCase().contains("native"));
    Nd4j.setDefaultDataTypes(DataType.FLOAT, DataType.FLOAT);
    Nd4j.getRandom().setSeed(12345);

    int[] inputSize = {-1, 3, 16, 16};

    for(int minibatch : new int[]{1,3}) {
        for (ConvolutionMode cm : new ConvolutionMode[]{ConvolutionMode.Same, ConvolutionMode.Truncate}) {
            for (int[] kernel : new int[][]{{2, 2}, {2, 3}}) {
                for (int[] stride : new int[][]{{1, 1}, {2, 2}}) {
                    for (PoolingType pt : new PoolingType[]{PoolingType.MAX, PoolingType.AVG}) {

                        inputSize[0] = minibatch;
                        INDArray f = Nd4j.rand(DataType.FLOAT, inputSize);
                        INDArray l = TestUtils.randomOneHot(minibatch, 10).castTo(DataType.FLOAT);

                        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                                .updater(new Adam(0.01))
                                .convolutionMode(cm)
                                .seed(12345)
                                .list()
                                .layer(new ConvolutionLayer.Builder().activation(Activation.TANH)
                                        .kernelSize(kernel)
                                        .stride(stride)
                                        .padding(0, 0)
                                        .nOut(3)
                                        .build())
                                .layer(new SubsamplingLayer.Builder()
                                        .poolingType(pt)
                                        .kernelSize(kernel)
                                        .stride(stride)
                                        .padding(0, 0)
                                        .build())
                                .layer(new ConvolutionLayer.Builder().activation(Activation.TANH)
                                        .kernelSize(kernel)
                                        .stride(stride)
                                        .padding(0, 0)
                                        .nOut(3)
                                        .build())
                                .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
                                .setInputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1]))
                                .build();

                        MultiLayerNetwork netWith = new MultiLayerNetwork(conf.clone());
                        netWith.init();

                        MultiLayerNetwork netWithout = new MultiLayerNetwork(conf.clone());
                        netWithout.init();

                        String name = pt + ", mb=" + minibatch + ", cm=" + cm + ", kernel=" + Arrays.toString(kernel) + ", stride=" + Arrays.toString(stride);
                        LayerHelperValidationUtil.TestCase tc = LayerHelperValidationUtil.TestCase.builder()
                                .testName(name)
                                .allowHelpersForClasses(Arrays.<Class<?>>asList(org.deeplearning4j.nn.layers.convolution.subsampling.SubsamplingLayer.class,
                                        org.deeplearning4j.nn.layers.convolution.ConvolutionLayer.class))
                                .testForward(true)
                                .testScore(true)
                                .testBackward(true)
                                .testTraining(true)
                                .features(f)
                                .labels(l)
                                .data(new SingletonDataSetIterator(new DataSet(f, l)))
                                .build();

                        System.out.println("Starting test: " + name);
                        LayerHelperValidationUtil.validateMLN(netWith, tc);
                    }
                }
            }
        }
    }
}
 
Example 4
Source File: ValidateMKLDNN.java    From deeplearning4j with Apache License 2.0
@Test
public void validateBatchNorm() {
    //Only run test if using nd4j-native backend
    assumeTrue(Nd4j.getBackend().getClass().getName().toLowerCase().contains("native"));
    Nd4j.setDefaultDataTypes(DataType.FLOAT, DataType.FLOAT);
    Nd4j.getRandom().setSeed(12345);

    int[] inputSize = {-1, 3, 16, 16};
    int[] stride = {1, 1};
    int[] kernel = {2, 2};
    ConvolutionMode cm = ConvolutionMode.Truncate;

    for (int minibatch : new int[]{1, 3}) {
        for (boolean b : new boolean[]{true, false}) {

            inputSize[0] = minibatch;
            INDArray f = Nd4j.rand(Nd4j.defaultFloatingPointType(), inputSize);
            INDArray l = TestUtils.randomOneHot(minibatch, 10);

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .dataType(DataType.FLOAT)
                    .updater(new Adam(0.01))
                    .convolutionMode(cm)
                    .seed(12345)
                    .list()
                    .layer(new ConvolutionLayer.Builder().activation(Activation.TANH)
                            .kernelSize(kernel)
                            .stride(stride)
                            .padding(0, 0)
                            .nOut(3)
                            .build())
                    .layer(new BatchNormalization.Builder().useLogStd(b).helperAllowFallback(false)/*.eps(0)*/.build())
                    .layer(new ConvolutionLayer.Builder().activation(Activation.TANH)
                            .kernelSize(kernel)
                            .stride(stride)
                            .padding(0, 0)
                            .nOut(3)
                            .build())
                    .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
                    .setInputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1]))
                    .build();

            MultiLayerNetwork netWith = new MultiLayerNetwork(conf.clone());
            netWith.init();

            MultiLayerNetwork netWithout = new MultiLayerNetwork(conf.clone());
            netWithout.init();

            LayerHelperValidationUtil.TestCase tc = LayerHelperValidationUtil.TestCase.builder()
                    .allowHelpersForClasses(Collections.<Class<?>>singletonList(org.deeplearning4j.nn.layers.normalization.BatchNormalization.class))
                    .testForward(true)
                    .testScore(true)
                    .testBackward(true)
                    .testTraining(true)
                    .features(f)
                    .labels(l)
                    .data(new SingletonDataSetIterator(new DataSet(f, l)))
                    .maxRelError(1e-4)
                    .build();

            LayerHelperValidationUtil.validateMLN(netWith, tc);
        }
    }
}
 
Example 5
Source File: ValidateMKLDNN.java    From deeplearning4j with Apache License 2.0
@Test @Ignore   //https://github.com/deeplearning4j/deeplearning4j/issues/7272
public void validateLRN() {

    //Only run test if using nd4j-native backend
    assumeTrue(Nd4j.getBackend().getClass().getName().toLowerCase().contains("native"));
    Nd4j.setDefaultDataTypes(DataType.FLOAT, DataType.FLOAT);
    Nd4j.getRandom().setSeed(12345);

    int[] inputSize = {-1, 3, 16, 16};
    int[] stride = {1, 1};
    int[] kernel = {2, 2};
    ConvolutionMode cm = ConvolutionMode.Truncate;

    double[] a = new double[]{1e-4, 1e-4, 1e-3, 1e-3};
    double[] b = new double[]{0.75, 0.9, 0.75, 0.75};
    double[] n = new double[]{5, 3, 3, 4};
    double[] k = new double[]{2, 2.5, 2.75, 2};

    for (int minibatch : new int[]{1, 3}) {
        for( int i=0; i<a.length; i++ ) {
            System.out.println("+++++ MINIBATCH = " + minibatch + ", TEST=" + i + " +++++");


            inputSize[0] = minibatch;
            INDArray f = Nd4j.rand(Nd4j.defaultFloatingPointType(), inputSize);
            INDArray l = TestUtils.randomOneHot(minibatch, 10).castTo(DataType.FLOAT);

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .updater(new Adam(0.01))
                    .convolutionMode(cm)
                    .weightInit(new NormalDistribution(0,1))
                    .seed(12345)
                    .list()
                    .layer(new ConvolutionLayer.Builder().activation(Activation.TANH)
                            .kernelSize(kernel)
                            .stride(stride)
                            .padding(0, 0)
                            .nOut(3)
                            .build())
                    .layer(new LocalResponseNormalization.Builder()
                            .alpha(a[i])
                            .beta(b[i])
                            .n(n[i])
                            .k(k[i])
                            .cudnnAllowFallback(false).build())
                    .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
                    .setInputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1]))
                    .build();

            MultiLayerNetwork netWith = new MultiLayerNetwork(conf.clone());
            netWith.init();

            MultiLayerNetwork netWithout = new MultiLayerNetwork(conf.clone());
            netWithout.init();

            LayerHelperValidationUtil.TestCase tc = LayerHelperValidationUtil.TestCase.builder()
                    .allowHelpersForClasses(Collections.<Class<?>>singletonList(org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization.class))
                    .testForward(true)
                    .testScore(true)
                    .testBackward(true)
                    .testTraining(true)
                    .features(f)
                    .labels(l)
                    .data(new SingletonDataSetIterator(new DataSet(f, l)))
                    //Very infrequent minor differences - as far as I can tell, just numerical precision issues...
                    .minAbsError(1e-3)
                    .maxRelError(1e-2)
                    .build();

            LayerHelperValidationUtil.validateMLN(netWith, tc);

            System.out.println("/////////////////////////////////////////////////////////////////////////////");
        }
    }
}
 
Example 6
Source File: TestSameDiffOutput.java    From deeplearning4j with Apache License 2.0
@Test
public void testOutputMSELossLayer(){
    Nd4j.getRandom().setSeed(12345);

    MultiLayerConfiguration confSD = new NeuralNetConfiguration.Builder()
            .seed(12345)
            .updater(new Adam(0.01))
            .list()
            .layer(new DenseLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH).build())
            .layer(new SameDiffMSELossLayer())
            .build();

    MultiLayerConfiguration confStd = new NeuralNetConfiguration.Builder()
            .seed(12345)
            .updater(new Adam(0.01))
            .list()
            .layer(new DenseLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH).build())
            .layer(new LossLayer.Builder().activation(Activation.IDENTITY).lossFunction(LossFunctions.LossFunction.MSE).build())
            .build();

    MultiLayerNetwork netSD = new MultiLayerNetwork(confSD);
    netSD.init();

    MultiLayerNetwork netStd = new MultiLayerNetwork(confStd);
    netStd.init();

    INDArray in = Nd4j.rand(3, 5);
    INDArray label = Nd4j.rand(3,5);

    INDArray outSD = netSD.output(in);
    INDArray outStd = netStd.output(in);
    assertEquals(outStd, outSD);

    DataSet ds = new DataSet(in, label);
    double scoreSD = netSD.score(ds);
    double scoreStd = netStd.score(ds);
    assertEquals(scoreStd, scoreSD, 1e-6);

    for( int i=0; i<3; i++ ){
        netSD.fit(ds);
        netStd.fit(ds);

        assertEquals(netStd.params(), netSD.params());
        assertEquals(netStd.getFlattenedGradients(), netSD.getFlattenedGradients());
    }

    //Test fit before output:
    MultiLayerNetwork net = new MultiLayerNetwork(confSD.clone());
    net.init();
    net.fit(ds);

    //Sanity check on different minibatch sizes:
    INDArray newIn = Nd4j.vstack(in, in);
    INDArray outMbsd = netSD.output(newIn);
    INDArray outMb = netStd.output(newIn);
    assertEquals(outMb, outMbsd);
}
 
Example 7
Source File: TestSameDiffOutput.java    From deeplearning4j with Apache License 2.0
@Test
public void testMSEOutputLayer(){       //Failing 2019/04/17 - https://github.com/deeplearning4j/deeplearning4j/issues/7560
    Nd4j.getRandom().setSeed(12345);

    for(Activation a : new Activation[]{Activation.IDENTITY, Activation.TANH, Activation.SOFTMAX}) {
        log.info("Starting test: " + a);

        MultiLayerConfiguration confSD = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .updater(new Adam(0.01))
                .list()
                .layer(new DenseLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH).build())
                .layer(new SameDiffMSEOutputLayer(5, 5, a, WeightInit.XAVIER))
                .build();

        MultiLayerConfiguration confStd = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .updater(new Adam(0.01))
                .list()
                .layer(new DenseLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH).build())
                .layer(new OutputLayer.Builder().nIn(5).nOut(5).activation(a).lossFunction(LossFunctions.LossFunction.MSE).build())
                .build();

        MultiLayerNetwork netSD = new MultiLayerNetwork(confSD);
        netSD.init();

        MultiLayerNetwork netStd = new MultiLayerNetwork(confStd);
        netStd.init();

        netSD.params().assign(netStd.params());

        assertEquals(netStd.paramTable(), netSD.paramTable());

        int minibatch = 2;
        INDArray in = Nd4j.rand(minibatch, 5);
        INDArray label = Nd4j.rand(minibatch, 5);

        INDArray outSD = netSD.output(in);
        INDArray outStd = netStd.output(in);
        assertEquals(outStd, outSD);

        DataSet ds = new DataSet(in, label);
        double scoreSD = netSD.score(ds);
        double scoreStd = netStd.score(ds);
        assertEquals(scoreStd, scoreSD, 1e-6);

        netSD.setInput(in);
        netSD.setLabels(label);

        netStd.setInput(in);
        netStd.setLabels(label);

        //System.out.println(((SameDiffOutputLayer) netSD.getLayer(1)).sameDiff.summary());

        netSD.computeGradientAndScore();
        netStd.computeGradientAndScore();

        assertEquals(netStd.getFlattenedGradients(), netSD.getFlattenedGradients());

        for (int i = 0; i < 3; i++) {
            netSD.fit(ds);
            netStd.fit(ds);
            String s = String.valueOf(i);
            assertEquals(s, netStd.params(), netSD.params());
            assertEquals(s, netStd.getFlattenedGradients(), netSD.getFlattenedGradients());
        }

        //Test fit before output:
        MultiLayerNetwork net = new MultiLayerNetwork(confSD.clone());
        net.init();
        net.fit(ds);

        //Sanity check on different minibatch sizes:
        INDArray newIn = Nd4j.vstack(in, in);
        INDArray outMbsd = netSD.output(newIn);
        INDArray outMb = netStd.output(newIn);
        assertEquals(outMb, outMbsd);
    }
}
 
Example 8
Source File: TestConvolution.java    From deeplearning4j with Apache License 2.0
@Test
public void testGradientNorm() throws Exception {

    int height = 100;
    int width = 100;
    int channels = 1;
    int numLabels = 10;

    for( int batchSize : new int[]{1, 32}) {

        long seed = 12345;
        double nonZeroBias = 1;

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .dataType(DataType.DOUBLE)
                .dist(new NormalDistribution(0.0, 0.01))
                .activation(Activation.RELU)
                .updater(new Adam(5e-3))
                //.biasUpdater(new Nesterovs(new StepSchedule(ScheduleType.ITERATION, 2e-2, 0.1, 20000), 0.9))
                .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
                .l2(5 * 1e-4)
                .list()
                .layer(convInit("cnn1", channels, 96, new int[]{11, 11}, new int[]{4, 4},
                        new int[]{3, 3}, 0))
                .layer(maxPool("maxpool1", new int[]{3, 3}))
                .layer(conv5x5("cnn2", 256, new int[]{1, 1}, new int[]{2, 2}, nonZeroBias))
                .layer(maxPool("maxpool2", new int[]{3, 3}))
                .layer(conv3x3("cnn3", 384, 0))
                .layer(conv3x3("cnn4", 384, nonZeroBias))
                .layer(conv3x3("cnn5", 256, nonZeroBias))
                .layer(maxPool("maxpool3", new int[]{3, 3}))
                .layer(fullyConnected("ffn1", 4096, nonZeroBias, new GaussianDistribution(0, 0.005)))
                .layer(fullyConnected("ffn2", 4096, nonZeroBias, new GaussianDistribution(0, 0.005)))
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .name("output")
                        .nOut(numLabels)
                        .activation(Activation.SOFTMAX)
                        .build())
                .setInputType(InputType.convolutional(height, width, channels))
                .build();


        MultiLayerNetwork netNoCudnn = new MultiLayerNetwork(conf.clone());
        netNoCudnn.init();
        MultiLayerNetwork netWithCudnn = new MultiLayerNetwork(conf.clone());
        netWithCudnn.init();

        CuDNNTestUtils.removeHelpers(netNoCudnn.getLayers());



        Nd4j.getRandom().setSeed(12345);
        for( int j=0; j<3; j++ ) {
            //System.out.println("j=" + j);
            INDArray f = Nd4j.rand(new int[]{batchSize, channels, height, width});
            INDArray l = TestUtils.randomOneHot(batchSize, numLabels);

            netNoCudnn.fit(f, l);
            netWithCudnn.fit(f, l);

            assertEquals(netNoCudnn.score(), netWithCudnn.score(), 1e-5);

            for (Map.Entry<String, INDArray> e : netNoCudnn.paramTable().entrySet()) {
                boolean pEq = e.getValue().equalsWithEps(netWithCudnn.paramTable().get(e.getKey()), 1e-4);
                //int idx = e.getKey().indexOf("_");
                //int layerNum = Integer.parseInt(e.getKey().substring(0, idx));
                //System.out.println(e.getKey() + " - " + pEq + " - " + netNoCudnn.getLayer(layerNum).getClass().getSimpleName());
                assertTrue(pEq);
            }

            boolean eq = netNoCudnn.params().equalsWithEps(netWithCudnn.params(), 1e-4);
            assertTrue(eq);
        }
    }
}
 
Example 9
Source File: TestSparkMultiLayerParameterAveraging.java    From deeplearning4j with Apache License 2.0
@Test
public void testIterationCounts() throws Exception {
    int dataSetObjSize = 5;
    int batchSizePerExecutor = 25;
    List<DataSet> list = new ArrayList<>();
    int minibatchesPerWorkerPerEpoch = 10;
    DataSetIterator iter = new MnistDataSetIterator(dataSetObjSize,
                    batchSizePerExecutor * numExecutors() * minibatchesPerWorkerPerEpoch, false);
    while (iter.hasNext()) {
        list.add(iter.next());
    }

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp())
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50)
                                    .activation(Activation.TANH).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                                    LossFunctions.LossFunction.MCXENT).nIn(50).nOut(10)
                                                    .activation(Activation.SOFTMAX).build())
                    .build();

    for (int avgFreq : new int[] {1, 5, 10}) {
        //System.out.println("--- Avg freq " + avgFreq + " ---");
        SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, conf.clone(),
                        new ParameterAveragingTrainingMaster.Builder(numExecutors(), dataSetObjSize)
                                        .batchSizePerWorker(batchSizePerExecutor).averagingFrequency(avgFreq)
                                        .repartionData(Repartition.Always).build());

        sparkNet.setListeners(new ScoreIterationListener(5));



        JavaRDD<DataSet> rdd = sc.parallelize(list);

        assertEquals(0, sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount());
        sparkNet.fit(rdd);
        assertEquals(minibatchesPerWorkerPerEpoch,
                        sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount());
        sparkNet.fit(rdd);
        assertEquals(2 * minibatchesPerWorkerPerEpoch,
                        sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount());

        sparkNet.getTrainingMaster().deleteTempFiles(sc);
    }
}
 
Example 10
Source File: TestSparkMultiLayerParameterAveraging.java    From deeplearning4j with Apache License 2.0
@Test(timeout = 120000L)
public void testEpochCounter() throws Exception {

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .list()
            .layer(new OutputLayer.Builder().nIn(4).nOut(3).build())
            .build();

    ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder()
            .graphBuilder()
            .addInputs("in")
            .addLayer("out", new OutputLayer.Builder().nIn(4).nOut(3).build(), "in")
            .setOutputs("out")
            .build();

    DataSetIterator iter = new IrisDataSetIterator(1, 50);

    List<DataSet> l = new ArrayList<>();
    while(iter.hasNext()){
        l.add(iter.next());
    }

    JavaRDD<DataSet> rdd = sc.parallelize(l);


    int rddDataSetNumExamples = 1;
    int averagingFrequency = 2;
    int batch = 2;
    ParameterAveragingTrainingMaster tm = new ParameterAveragingTrainingMaster.Builder(rddDataSetNumExamples)
            .averagingFrequency(averagingFrequency).batchSizePerWorker(batch)
            .saveUpdater(true).workerPrefetchNumBatches(0).build();
    Nd4j.getRandom().setSeed(12345);


    SparkDl4jMultiLayer sn1 = new SparkDl4jMultiLayer(sc, conf.clone(), tm);
    SparkComputationGraph sn2 = new SparkComputationGraph(sc, conf2.clone(), tm);


    for(int i=0; i<3; i++ ){
        assertEquals(i, sn1.getNetwork().getLayerWiseConfigurations().getEpochCount());
        assertEquals(i, sn2.getNetwork().getConfiguration().getEpochCount());
        sn1.fit(rdd);
        sn2.fit(rdd);
        assertEquals(i+1, sn1.getNetwork().getLayerWiseConfigurations().getEpochCount());
        assertEquals(i+1, sn2.getNetwork().getConfiguration().getEpochCount());
    }
}