Java Code Examples for org.deeplearning4j.nn.graph.ComputationGraph#setLabels()

The following examples show how to use org.deeplearning4j.nn.graph.ComputationGraph#setLabels(). The examples are drawn from the deeplearning4j project; you can open the original project or source file by following the link above each example.
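setLabels() attaches one or more label arrays to the graph so that a subsequent computeGradientAndScore() call can use them. The sketch below shows the common pattern the tests in this section follow; the graph layout, layer sizes, and array shapes are illustrative assumptions rather than code from any one example, and the usual deeplearning4j/ND4J imports are assumed.

// Minimal sketch (assumed layer sizes and shapes): build a tiny graph,
// attach inputs and labels, then compute the gradient and score.
ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
        .updater(new Sgd(0.1))
        .graphBuilder()
        .addInputs("in")
        .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .activation(Activation.SOFTMAX).nIn(4).nOut(3).build(), "in")
        .setOutputs("out")
        .build();

ComputationGraph graph = new ComputationGraph(conf);
graph.init();

INDArray features = Nd4j.rand(5, 4);   // 5 examples, 4 features each
INDArray labels = Nd4j.zeros(5, 3);    // one-hot labels for 3 classes
for (int i = 0; i < 5; i++) {
    labels.putScalar(i, i % 3, 1.0);
}

graph.setInputs(features);             // features for the single input "in"
graph.setLabels(labels);               // labels for the single output "out"
graph.computeGradientAndScore();       // uses the labels attached above
double score = graph.score();

Note that setInputs(...)/setLabels(...) followed by computeGradientAndScore() only computes the gradient and loss for the currently attached arrays; it does not update parameters. The examples below call fit(...) when they actually want to train.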
Example 1
Source File: WorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void checkScopesTestCGAS() throws Exception {
    ComputationGraph c = createNet();
    for (WorkspaceMode wm : new WorkspaceMode[]{WorkspaceMode.NONE, WorkspaceMode.ENABLED}) {
        log.info("Starting test: {}", wm);
        c.getConfiguration().setTrainingWorkspaceMode(wm);
        c.getConfiguration().setInferenceWorkspaceMode(wm);

        INDArray f = Nd4j.rand(new int[]{8, 1, 28, 28});
        INDArray l = Nd4j.rand(8, 10);
        c.setInputs(f);
        c.setLabels(l);

        c.computeGradientAndScore();
    }
}
 
Example 2
Source File: WorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
    public void testWithPreprocessorsCG() {
        //https://github.com/deeplearning4j/deeplearning4j/issues/4347
        //Cause for the above issue was layerVertex.setInput() applying the preprocessor, with the result
        // not being detached properly from the workspace...

        for (WorkspaceMode wm : WorkspaceMode.values()) {
            System.out.println(wm);
            ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                    .trainingWorkspaceMode(wm)
                    .inferenceWorkspaceMode(wm)
                    .graphBuilder()
                    .addInputs("in")
                    .addLayer("e", new GravesLSTM.Builder().nIn(10).nOut(5).build(), new DupPreProcessor(), "in")
//                .addLayer("e", new GravesLSTM.Builder().nIn(10).nOut(5).build(), "in")    //Note that no preprocessor is OK
                    .addLayer("rnn", new GravesLSTM.Builder().nIn(5).nOut(8).build(), "e")
                    .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                            .activation(Activation.SIGMOID).nOut(3).build(), "rnn")
                    .setInputTypes(InputType.recurrent(10))
                    .setOutputs("out")
                    .build();

            ComputationGraph cg = new ComputationGraph(conf);
            cg.init();


            INDArray[] input = new INDArray[]{Nd4j.zeros(1, 10, 5)};

            for (boolean train : new boolean[]{false, true}) {
                cg.clear();
                cg.feedForward(input, train);
            }

            cg.setInputs(input);
            cg.setLabels(Nd4j.rand(new int[]{1, 3, 5}));
            cg.computeGradientAndScore();
        }
    }
 
Example 3
Source File: GradientCheckTests.java    From deeplearning4j with Apache License 2.0
@Test
    public void elementWiseMultiplicationLayerTest(){

        for(Activation a : new Activation[]{Activation.IDENTITY, Activation.TANH}) {

            ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                    .dataType(DataType.DOUBLE)
                    .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp())
                    .seed(12345L)
                    .weightInit(new UniformDistribution(0, 1))
                    .graphBuilder()
                    .addInputs("features")
                    .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(4)
                            .activation(Activation.TANH)
                            .build(), "features")
                    .addLayer("elementWiseMul", new ElementWiseMultiplicationLayer.Builder().nIn(4).nOut(4)
                            .activation(a)
                            .build(), "dense")
                    .addLayer("loss", new LossLayer.Builder(LossFunctions.LossFunction.COSINE_PROXIMITY)
                            .activation(Activation.IDENTITY).build(), "elementWiseMul")
                    .setOutputs("loss")
                    .build();

            ComputationGraph netGraph = new ComputationGraph(conf);
            netGraph.init();

            log.info("params before learning: " + netGraph.getLayer(1).paramTable());

            //Run a number of iterations of learning; manually make some pseudo data
            //The idea is simple: since the element-wise multiplication layer is just a scaling, we want the cosine similarity
            // to be determined mainly by the fourth value; if everything runs well, we will get a large weight for the fourth value

            INDArray features = Nd4j.create(new double[][]{{1, 2, 3, 4}, {1, 2, 3, 1}, {1, 2, 3, 0}});
            INDArray labels = Nd4j.create(new double[][]{{1, 1, 1, 8}, {1, 1, 1, 2}, {1, 1, 1, 1}});

            netGraph.setInputs(features);
            netGraph.setLabels(labels);
            netGraph.computeGradientAndScore();
            double scoreBefore = netGraph.score();

            String msg;
            for (int epoch = 0; epoch < 5; epoch++)
                netGraph.fit(new INDArray[]{features}, new INDArray[]{labels});
            netGraph.computeGradientAndScore();
            double scoreAfter = netGraph.score();
            //Can't test in 'characteristic mode of operation' if not learning
            msg = "elementWiseMultiplicationLayerTest() - score did not (sufficiently) decrease during learning - activationFn="
                    + "Id" + ", lossFn=" + "Cos-sim" + ", outputActivation=" + "Id"
                    + ", doLearningFirst=" + "true" + " (before=" + scoreBefore
                    + ", scoreAfter=" + scoreAfter + ")";
            assertTrue(msg, scoreAfter < 0.8 * scoreBefore);

//        Expectation in the linear-regression case (with only the element-wise multiplication layer): a large value for the fourth weight
            log.info("params after learning: " + netGraph.getLayer(1).paramTable());

            boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(netGraph).inputs(new INDArray[]{features})
                    .labels(new INDArray[]{labels}));

            msg = "elementWiseMultiplicationLayerTest() - activationFn=" + "ID" + ", lossFn=" + "Cos-sim"
                    + ", outputActivation=" + "Id" + ", doLearningFirst=" + "true";
            assertTrue(msg, gradOK);

            TestUtils.testModelSerialization(netGraph);
        }
    }
 
Example 4
Source File: BNGradientCheckTest.java    From deeplearning4j with Apache License 2.0
@Test
    public void testGradientBNWithCNNandSubsamplingCompGraph() {
        //Parameterized test, testing combinations of:
        // (a) activation function
        // (b) Whether to test at random initialization, or after some learning (i.e., 'characteristic mode of operation')
        // (c) Loss function (with specified output activations)
        // (d) l1 and l2 values
        Activation[] activFns = {Activation.TANH, Activation.IDENTITY};
        boolean doLearningFirst = true;

        LossFunctions.LossFunction[] lossFunctions = {LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD};
        Activation[] outputActivations = {Activation.SOFTMAX}; //i.e., lossFunctions[i] used with outputActivations[i] here

        double[] l2vals = {0.0, 0.1};
        double[] l1vals = {0.0, 0.2}; //i.e., use l2vals[j] with l1vals[j]

        Nd4j.getRandom().setSeed(12345);
        int minibatch = 10;
        int depth = 2;
        int hw = 5;
        int nOut = 3;
        INDArray input = Nd4j.rand(new int[]{minibatch, depth, hw, hw});
        INDArray labels = Nd4j.zeros(minibatch, nOut);
        Random r = new Random(12345);
        for (int i = 0; i < minibatch; i++) {
            labels.putScalar(i, r.nextInt(nOut), 1.0);
        }

        DataSet ds = new DataSet(input, labels);

        for (boolean useLogStd : new boolean[]{true, false}) {
            for (Activation afn : activFns) {
                for (int i = 0; i < lossFunctions.length; i++) {
                    for (int j = 0; j < l2vals.length; j++) {
                        LossFunctions.LossFunction lf = lossFunctions[i];
                        Activation outputActivation = outputActivations[i];

                        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                                .dataType(DataType.DOUBLE)
                                .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
                                .updater(new NoOp())
                                .dist(new UniformDistribution(-2, 2)).seed(12345L).graphBuilder()
                                .addInputs("in")
                                .addLayer("0", new ConvolutionLayer.Builder(2, 2).stride(1, 1).nOut(3)
                                        .activation(afn).build(), "in")
                                .addLayer("1", new BatchNormalization.Builder().useLogStd(useLogStd).build(), "0")
                                .addLayer("2", new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
                                        .kernelSize(2, 2).stride(1, 1).build(), "1")
                                .addLayer("3", new BatchNormalization.Builder().useLogStd(useLogStd).build(), "2")
                                .addLayer("4", new ActivationLayer.Builder().activation(afn).build(), "3")
                                .addLayer("5", new OutputLayer.Builder(lf).activation(outputActivation)
                                        .nOut(nOut).build(), "4")
                                .setOutputs("5").setInputTypes(InputType.convolutional(hw, hw, depth))
                                .build();

                        ComputationGraph net = new ComputationGraph(conf);
                        net.init();
                        String name = new Object() {
                        }.getClass().getEnclosingMethod().getName();

                        if (doLearningFirst) {
                            //Run a number of iterations of learning
                            net.setInput(0, ds.getFeatures());
                            net.setLabels(ds.getLabels());
                            net.computeGradientAndScore();
                            double scoreBefore = net.score();
                            for (int k = 0; k < 20; k++)
                                net.fit(ds);
                            net.computeGradientAndScore();
                            double scoreAfter = net.score();
                            //Can't test in 'characteristic mode of operation' if not learning
                            String msg = name
                                    + " - score did not (sufficiently) decrease during learning - activationFn="
                                    + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation
                                    + ", doLearningFirst= " + doLearningFirst + " (before=" + scoreBefore
                                    + ", scoreAfter=" + scoreAfter + ")";
                            assertTrue(msg, scoreAfter < 0.9 * scoreBefore);
                        }

                        System.out.println(name + " - activationFn=" + afn + ", lossFn=" + lf
                                + ", outputActivation=" + outputActivation + ", doLearningFirst="
                                + doLearningFirst + ", l1=" + l1vals[j] + ", l2=" + l2vals[j]);
//                        for (int k = 0; k < net.getNumLayers(); k++)
//                            System.out.println("Layer " + k + " # params: " + net.getLayer(k).numParams());

                        //Mean and variance vars are not gradient checkable; mean/variance "gradient" is used to implement running mean/variance calc
                        //i.e., runningMean = decay * runningMean + (1-decay) * batchMean
                        //However, numerical gradient will be 0 as forward pass doesn't depend on this "parameter"
                        Set<String> excludeParams = new HashSet<>(Arrays.asList("1_mean", "1_var", "3_mean", "3_var", "1_log10stdev", "3_log10stdev"));
                        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(net).inputs(new INDArray[]{input})
                                .labels(new INDArray[]{labels}).excludeParams(excludeParams));

                        assertTrue(gradOK);
                        TestUtils.testModelSerialization(net);
                    }
                }
            }
        }
    }
 
Example 5
Source File: TestMultiModelGradientApplication.java    From deeplearning4j with Apache License 2.0
@Test
public void testGradientApplyComputationGraph() {
    int minibatch = 7;
    int nIn = 10;
    int nOut = 10;

    for (boolean regularization : new boolean[] {false, true}) {
        for (IUpdater u : new IUpdater[] {new Sgd(0.1), new Adam(0.1)}) {

            ComputationGraphConfiguration conf =
                            new NeuralNetConfiguration.Builder().seed(12345).activation(Activation.TANH)
                                            .weightInit(WeightInit.XAVIER).updater(u)
                                            .l1(regularization ? 0.2 : 0.0)
                                            .l2(regularization ? 0.3 : 0.0).graphBuilder().addInputs("in")
                                            .addLayer("0", new DenseLayer.Builder().nIn(nIn).nOut(10).build(), "in")
                                            .addLayer("1", new DenseLayer.Builder().nIn(10).nOut(10).build(), "0")
                                            .addLayer("2", new OutputLayer.Builder(
                                                            LossFunctions.LossFunction.MCXENT)
                                                                            .activation(Activation.SOFTMAX).nIn(10)
                                                                            .nOut(nOut).build(),
                                                            "1")
                                            .setOutputs("2").build();


            Nd4j.getRandom().setSeed(12345);
            ComputationGraph net1GradCalc = new ComputationGraph(conf);
            net1GradCalc.init();

            Nd4j.getRandom().setSeed(12345);
            ComputationGraph net2GradUpd = new ComputationGraph(conf.clone());
            net2GradUpd.init();

            assertEquals(net1GradCalc.params(), net2GradUpd.params());

            INDArray f = Nd4j.rand(minibatch, nIn);
            INDArray l = Nd4j.create(minibatch, nOut);
            for (int i = 0; i < minibatch; i++) {
                l.putScalar(i, i % nOut, 1.0);
            }
            net1GradCalc.setInputs(f);
            net1GradCalc.setLabels(l);

            net2GradUpd.setInputs(f);
            net2GradUpd.setLabels(l);

            //Calculate gradient in first net, update and apply it in the second
            //Also: calculate gradient in the second net, just to be sure it isn't modified while doing updating on
            // the other net's gradient
            net1GradCalc.computeGradientAndScore();
            net2GradUpd.computeGradientAndScore();

            Gradient g = net1GradCalc.gradient();
            INDArray gBefore = g.gradient().dup(); //Net 1 gradient should be modified
            INDArray net2GradBefore = net2GradUpd.gradient().gradient().dup(); //But net 2 gradient should not be
            net2GradUpd.getUpdater().update(g, 0, 0, minibatch, LayerWorkspaceMgr.noWorkspaces());
            INDArray gAfter = g.gradient().dup();
            INDArray net2GradAfter = net2GradUpd.gradient().gradient().dup();

            assertNotEquals(gBefore, gAfter); //Net 1 gradient should be modified
            assertEquals(net2GradBefore, net2GradAfter); //But net 2 gradient should not be


            //Also: if we apply the gradient using a subi op, we should get the same final params as if we did a fit op
            // on the original network
            net2GradUpd.params().subi(g.gradient());

            net1GradCalc.fit(new INDArray[] {f}, new INDArray[] {l});
            assertEquals(net1GradCalc.params(), net2GradUpd.params());

            //=============================
            if (!(u instanceof Sgd)) {
                net2GradUpd.getUpdater().getStateViewArray().assign(net1GradCalc.getUpdater().getStateViewArray());
            }
            assertEquals(net1GradCalc.params(), net2GradUpd.params());
            assertEquals(net1GradCalc.getUpdater().getStateViewArray(),
                            net2GradUpd.getUpdater().getStateViewArray());

            //If the next 2 lines are removed, the comparison fails, as net 1 is 1 iteration ahead
            net1GradCalc.getConfiguration().setIterationCount(0);
            net2GradUpd.getConfiguration().setIterationCount(0);


            for (int i = 0; i < 100; i++) {
                net1GradCalc.fit(new INDArray[] {f}, new INDArray[] {l});
                net2GradUpd.fit(new INDArray[] {f}, new INDArray[] {l});
                assertEquals(net1GradCalc.params(), net2GradUpd.params());
            }
        }
    }
}
 
Example 6
Source File: TestNetConversion.java    From deeplearning4j with Apache License 2.0
@Test
public void testMlnToCompGraph() {
    Nd4j.getRandom().setSeed(12345);

    for( int i=0; i<3; i++ ){
        MultiLayerNetwork n;
        switch (i){
            case 0:
                n = getNet1(false);
                break;
            case 1:
                n = getNet1(true);
                break;
            case 2:
                n = getNet2();
                break;
            default:
                throw new RuntimeException();
        }
        INDArray in = (i <= 1 ? Nd4j.rand(new int[]{8, 3, 10, 10}) : Nd4j.rand(new int[]{8, 5, 10}));
        INDArray labels = (i <= 1 ? Nd4j.rand(new int[]{8, 10}) : Nd4j.rand(new int[]{8, 10, 10}));

        ComputationGraph cg = n.toComputationGraph();

        INDArray out1 = n.output(in);
        INDArray out2 = cg.outputSingle(in);
        assertEquals(out1, out2);


        n.setInput(in);
        n.setLabels(labels);

        cg.setInputs(in);
        cg.setLabels(labels);

        n.computeGradientAndScore();
        cg.computeGradientAndScore();

        assertEquals(n.score(), cg.score(), 1e-6);

        assertEquals(n.gradient().gradient(), cg.gradient().gradient());

        n.fit(in, labels);
        cg.fit(new INDArray[]{in}, new INDArray[]{labels});

        assertEquals(n.params(), cg.params());
    }
}
 
Example 7
Source File: BidirectionalTest.java    From deeplearning4j with Apache License 2.0
@Test
    public void compareImplementationsCompGraph(){
//        for(WorkspaceMode wsm : WorkspaceMode.values()) {
        for(WorkspaceMode wsm : new WorkspaceMode[]{WorkspaceMode.NONE, WorkspaceMode.ENABLED}) {
            log.info("*** Starting workspace mode: " + wsm);

            //Bidirectional(GravesLSTM) and GravesBidirectionalLSTM should be equivalent, given equivalent params
            //Note that GravesBidirectionalLSTM implements ADD mode only

            ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder()
                    .activation(Activation.TANH)
                    .weightInit(WeightInit.XAVIER)
                    .updater(new Adam())
                    .trainingWorkspaceMode(wsm)
                    .inferenceWorkspaceMode(wsm)
                    .graphBuilder()
                    .addInputs("in")
                    .layer("0", new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).build()), "in")
                    .layer("1", new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).build()), "0")
                    .layer("2", new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE)
                            .nIn(10).nOut(10).build(), "1")
                    .setOutputs("2")
                    .build();

            ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder()
                    .activation(Activation.TANH)
                    .weightInit(WeightInit.XAVIER)
                    .updater(new Adam())
                    .trainingWorkspaceMode(wsm)
                    .inferenceWorkspaceMode(wsm)
                    .graphBuilder()
                    .addInputs("in")
                    .layer("0", new GravesBidirectionalLSTM.Builder().nIn(10).nOut(10).build(), "in")
                    .layer("1", new GravesBidirectionalLSTM.Builder().nIn(10).nOut(10).build(), "0")
                    .layer("2", new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE)
                            .nIn(10).nOut(10).build(), "1")
                    .setOutputs("2")
                    .build();

            ComputationGraph net1 = new ComputationGraph(conf1);
            net1.init();

            ComputationGraph net2 = new ComputationGraph(conf2);
            net2.init();

            assertEquals(net1.numParams(), net2.numParams());
            for (int i = 0; i < 3; i++) {
                int n1 = (int)net1.getLayer(i).numParams();
                int n2 = (int)net2.getLayer(i).numParams();
                assertEquals(n1, n2);
            }

            net2.setParams(net1.params());  //Assuming exact same layout here...

            INDArray in = Nd4j.rand(new int[]{3, 10, 5});

            INDArray out1 = net1.outputSingle(in);
            INDArray out2 = net2.outputSingle(in);

            assertEquals(out1, out2);

            INDArray labels = Nd4j.rand(new int[]{3, 10, 5});

            net1.setInput(0,in);
            net1.setLabels(labels);

            net2.setInput(0,in);
            net2.setLabels(labels);

            net1.computeGradientAndScore();
            net2.computeGradientAndScore();

            //Ensure scores are equal:
            assertEquals(net1.score(), net2.score(), 1e-6);

            //Ensure gradients are equal:
            Gradient g1 = net1.gradient();
            Gradient g2 = net2.gradient();
            assertEquals(g1.gradient(), g2.gradient());

            //Ensure updates are equal:
            ComputationGraphUpdater u1 = (ComputationGraphUpdater) net1.getUpdater();
            ComputationGraphUpdater u2 = (ComputationGraphUpdater) net2.getUpdater();
            assertEquals(u1.getUpdaterStateViewArray(), u2.getUpdaterStateViewArray());
            u1.update(g1, 0, 0, 3, LayerWorkspaceMgr.noWorkspaces());
            u2.update(g2, 0, 0, 3, LayerWorkspaceMgr.noWorkspaces());
            assertEquals(g1.gradient(), g2.gradient());
            assertEquals(u1.getUpdaterStateViewArray(), u2.getUpdaterStateViewArray());

            //Ensure params are equal, after fitting
            net1.fit(new DataSet(in, labels));
            net2.fit(new DataSet(in, labels));

            INDArray p1 = net1.params();
            INDArray p2 = net2.params();
            assertEquals(p1, p2);
        }
    }
 
Example 8
Source File: BidirectionalTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testSerializationCompGraph() throws Exception {

    for(WorkspaceMode wsm : WorkspaceMode.values()) {
        log.info("*** Starting workspace mode: " + wsm);

        Nd4j.getRandom().setSeed(12345);

        ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder()
                .activation(Activation.TANH)
                .weightInit(WeightInit.XAVIER)
                .trainingWorkspaceMode(wsm)
                .inferenceWorkspaceMode(wsm)
                .updater(new Adam())
                .graphBuilder()
                .addInputs("in")
                .layer("0", new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).dataFormat(rnnDataFormat).build()), "in")
                .layer("1", new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).dataFormat(rnnDataFormat).build()), "0")
                .layer("2", new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).dataFormat(rnnDataFormat)
                        .nIn(10).nOut(10).build(), "1")
                .setOutputs("2")
                .build();

        ComputationGraph net1 = new ComputationGraph(conf1);
        net1.init();
        long[] inshape = (rnnDataFormat == NCW)? new long[]{3, 10, 5}: new long[]{3, 5, 10};
        INDArray in = Nd4j.rand(inshape);
        INDArray labels = Nd4j.rand(inshape);

        net1.fit(new DataSet(in, labels));

        byte[] bytes;
        try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
            ModelSerializer.writeModel(net1, baos, true);
            bytes = baos.toByteArray();
        }


        ComputationGraph net2 = ModelSerializer.restoreComputationGraph(new ByteArrayInputStream(bytes), true);


        in = Nd4j.rand(inshape);
        labels = Nd4j.rand(inshape);

        INDArray out1 = net1.outputSingle(in);
        INDArray out2 = net2.outputSingle(in);

        assertEquals(out1, out2);

        net1.setInput(0, in);
        net2.setInput(0, in);
        net1.setLabels(labels);
        net2.setLabels(labels);

        net1.computeGradientAndScore();
        net2.computeGradientAndScore();

        assertEquals(net1.score(), net2.score(), 1e-6);
        assertEquals(net1.gradient().gradient(), net2.gradient().gradient());
    }
}
 
Example 9
Source File: OutputLayerTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testCnnLossLayerCompGraph(){

    for(WorkspaceMode ws : WorkspaceMode.values()) {
        log.info("*** Testing workspace: " + ws);

        for (Activation a : new Activation[]{Activation.TANH, Activation.SELU}) {
            //Check that (A+identity) is equal to (identity+A), for activation A
            //i.e., should get same output and weight gradients for both

            ComputationGraphConfiguration conf1 =
                    new NeuralNetConfiguration.Builder().seed(12345L)
                            .updater(new NoOp())
                            .convolutionMode(ConvolutionMode.Same)
                            .inferenceWorkspaceMode(ws)
                            .trainingWorkspaceMode(ws)
                            .graphBuilder()
                            .addInputs("in")
                            .addLayer("0", new ConvolutionLayer.Builder().nIn(3).nOut(4).activation(Activation.IDENTITY)
                                    .kernelSize(2, 2).stride(1, 1)
                                    .dist(new NormalDistribution(0, 1.0))
                                    .updater(new NoOp()).build(), "in")
                            .addLayer("1", new CnnLossLayer.Builder(LossFunction.MSE)
                                    .activation(a)
                                    .build(), "0")
                            .setOutputs("1")
                            .build();

            ComputationGraphConfiguration conf2 =
                    new NeuralNetConfiguration.Builder().seed(12345L)
                            .updater(new NoOp())
                            .convolutionMode(ConvolutionMode.Same)
                            .inferenceWorkspaceMode(ws)
                            .trainingWorkspaceMode(ws)
                            .graphBuilder()
                            .addInputs("in")
                            .addLayer("0", new ConvolutionLayer.Builder().nIn(3).nOut(4).activation(a)
                                    .kernelSize(2, 2).stride(1, 1)
                                    .dist(new NormalDistribution(0, 1.0))
                                    .updater(new NoOp()).build(), "in")
                            .addLayer("1", new CnnLossLayer.Builder(LossFunction.MSE)
                                    .activation(Activation.IDENTITY)
                                    .build(), "0")
                            .setOutputs("1")
                            .build();

            ComputationGraph graph = new ComputationGraph(conf1);
            graph.init();

            ComputationGraph graph2 = new ComputationGraph(conf2);
            graph2.init();


            graph2.setParams(graph.params());


            INDArray in = Nd4j.rand(new int[]{3, 3, 5, 5});

            INDArray out1 = graph.outputSingle(in);
            INDArray out2 = graph2.outputSingle(in);

            assertEquals(out1, out2);

            INDArray labels = Nd4j.rand(out1.shape());

            graph.setInput(0,in);
            graph.setLabels(labels);

            graph2.setInput(0,in);
            graph2.setLabels(labels);

            graph.computeGradientAndScore();
            graph2.computeGradientAndScore();

            assertEquals(graph.score(), graph2.score(), 1e-6);
            assertEquals(graph.gradient().gradient(), graph2.gradient().gradient());

            //Also check computeScoreForExamples
            INDArray in2a = Nd4j.rand(new int[]{1, 3, 5, 5});
            INDArray labels2a = Nd4j.rand(new int[]{1, 4, 5, 5});

            INDArray in2 = Nd4j.concat(0, in2a, in2a);
            INDArray labels2 = Nd4j.concat(0, labels2a, labels2a);

            INDArray s = graph.scoreExamples(new DataSet(in2, labels2), false);
            assertArrayEquals(new long[]{2, 1}, s.shape());
            assertEquals(s.getDouble(0), s.getDouble(1), 1e-6);

            TestUtils.testModelSerialization(graph);
        }
    }
}
 
Example 10
Source File: LocallyConnectedLayerTest.java    From deeplearning4j with Apache License 2.0
@Test
    public void testLocallyConnected(){
        for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
            for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
                assertEquals(globalDtype, Nd4j.dataType());
                assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

                for (int test = 0; test < 2; test++) {
                    String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test;

                    ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder()
                            .dataType(networkDtype)
                            .seed(123)
                            .updater(new NoOp())
                            .weightInit(WeightInit.XAVIER)
                            .convolutionMode(ConvolutionMode.Same)
                            .graphBuilder();

                    INDArray[] in;
                    INDArray label;
                    switch (test){
                        case 0:
                            b.addInputs("in")
                                    .addLayer("1", new LSTM.Builder().nOut(5).build(), "in")
                                    .addLayer("2", new LocallyConnected1D.Builder().kernelSize(2).nOut(4).build(), "1")
                                    .addLayer("out", new RnnOutputLayer.Builder().nOut(10).build(), "2")
                                    .setOutputs("out")
                                    .setInputTypes(InputType.recurrent(5, 4));
                            in = new INDArray[]{Nd4j.rand(networkDtype, 2, 5, 4)};
                            label = TestUtils.randomOneHotTimeSeries(2, 10, 4).castTo(networkDtype);
                            break;
                        case 1:
                            b.addInputs("in")
                                    .addLayer("1", new ConvolutionLayer.Builder().kernelSize(2,2).nOut(5).convolutionMode(ConvolutionMode.Same).build(), "in")
                                    .addLayer("2", new LocallyConnected2D.Builder().kernelSize(2,2).nOut(5).build(), "1")
                                    .addLayer("out", new OutputLayer.Builder().nOut(10).build(), "2")
                                    .setOutputs("out")
//                                    .setInputTypes(InputType.convolutional(28, 28, 1));
//                            in = new INDArray[]{Nd4j.rand(networkDtype, 2, 1, 28, 28)};
                                    .setInputTypes(InputType.convolutional(8, 8, 1));
                            in = new INDArray[]{Nd4j.rand(networkDtype, 2, 1, 8, 8)};
                            label = TestUtils.randomOneHot(2, 10).castTo(networkDtype);
                            break;
                        default:
                            throw new RuntimeException();
                    }

                    ComputationGraph net = new ComputationGraph(b.build());
                    net.init();

                    INDArray out = net.outputSingle(in);
                    assertEquals(msg, networkDtype, out.dataType());
                    Map<String, INDArray> ff = net.feedForward(in, false);
                    for (Map.Entry<String, INDArray> e : ff.entrySet()) {
                        if (e.getKey().equals("in"))
                            continue;
                        String s = msg + " - layer: " + e.getKey();
                        assertEquals(s, networkDtype, e.getValue().dataType());
                    }

                    net.setInputs(in);
                    net.setLabels(label);
                    net.computeGradientAndScore();

                    net.fit(new MultiDataSet(in, new INDArray[]{label}));
                }
            }
        }
    }
 
Example 11
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testComputationGraphTypeConversion() {

    for (DataType dt : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(dt, dt);

        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .weightInit(WeightInit.XAVIER)
                .updater(new Adam(0.01))
                .dataType(DataType.DOUBLE)
                .graphBuilder()
                .addInputs("in")
                .layer("l0", new DenseLayer.Builder().activation(Activation.TANH).nIn(10).nOut(10).build(), "in")
                .layer("l1", new DenseLayer.Builder().activation(Activation.TANH).nIn(10).nOut(10).build(), "l0")
                .layer("out", new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build(), "l1")
                .setOutputs("out")
                .build();

        ComputationGraph net = new ComputationGraph(conf);
        net.init();

        INDArray inD = Nd4j.rand(DataType.DOUBLE, 1, 10);
        INDArray lD = Nd4j.create(DataType.DOUBLE, 1, 10);
        net.fit(new DataSet(inD, lD));

        INDArray outDouble = net.outputSingle(inD);
        net.setInput(0, inD);
        net.setLabels(lD);
        net.computeGradientAndScore();
        double scoreDouble = net.score();
        INDArray grads = net.getFlattenedGradients();
        INDArray u = net.getUpdater().getStateViewArray();
        assertEquals(DataType.DOUBLE, net.params().dataType());
        assertEquals(DataType.DOUBLE, grads.dataType());
        assertEquals(DataType.DOUBLE, u.dataType());


        ComputationGraph netFloat = net.convertDataType(DataType.FLOAT);
        netFloat.initGradientsView();
        assertEquals(DataType.FLOAT, netFloat.params().dataType());
        assertEquals(DataType.FLOAT, netFloat.getFlattenedGradients().dataType());
        assertEquals(DataType.FLOAT, netFloat.getUpdater(true).getStateViewArray().dataType());
        INDArray inF = inD.castTo(DataType.FLOAT);
        INDArray lF = lD.castTo(DataType.FLOAT);
        INDArray outFloat = netFloat.outputSingle(inF);
        netFloat.setInput(0, inF);
        netFloat.setLabels(lF);
        netFloat.computeGradientAndScore();
        double scoreFloat = netFloat.score();
        INDArray gradsFloat = netFloat.getFlattenedGradients();
        INDArray uFloat = netFloat.getUpdater().getStateViewArray();

        assertEquals(scoreDouble, scoreFloat, 1e-6);
        assertEquals(outDouble.castTo(DataType.FLOAT), outFloat);
        assertEquals(grads.castTo(DataType.FLOAT), gradsFloat);
        INDArray uCast = u.castTo(DataType.FLOAT);
        assertTrue(uCast.equalsWithEps(uFloat, 1e-4));

        ComputationGraph netFP16 = net.convertDataType(DataType.HALF);
        netFP16.initGradientsView();
        assertEquals(DataType.HALF, netFP16.params().dataType());
        assertEquals(DataType.HALF, netFP16.getFlattenedGradients().dataType());
        assertEquals(DataType.HALF, netFP16.getUpdater(true).getStateViewArray().dataType());

        INDArray inH = inD.castTo(DataType.HALF);
        INDArray lH = lD.castTo(DataType.HALF);
        INDArray outHalf = netFP16.outputSingle(inH);
        netFP16.setInput(0, inH);
        netFP16.setLabels(lH);
        netFP16.computeGradientAndScore();
        double scoreHalf = netFP16.score();
        INDArray gradsHalf = netFP16.getFlattenedGradients();
        INDArray uHalf = netFP16.getUpdater().getStateViewArray();

        assertEquals(scoreDouble, scoreHalf, 1e-4);
        boolean outHalfEq = outDouble.castTo(DataType.HALF).equalsWithEps(outHalf, 1e-3);
        assertTrue(outHalfEq);
        boolean gradsHalfEq = grads.castTo(DataType.HALF).equalsWithEps(gradsHalf, 1e-3);
        assertTrue(gradsHalfEq);
        INDArray uHalfCast = u.castTo(DataType.HALF);
        assertTrue(uHalfCast.equalsWithEps(uHalf, 1e-4));
    }
}
 
Example 12
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testEmbeddingDtypes() {
    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            for (boolean frozen : new boolean[]{false, true}) {
                for (int test = 0; test < 3; test++) {
                    assertEquals(globalDtype, Nd4j.dataType());
                    assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

                    String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test;

                    ComputationGraphConfiguration.GraphBuilder conf = new NeuralNetConfiguration.Builder()
                            .dataType(networkDtype)
                            .seed(123)
                            .updater(new NoOp())
                            .weightInit(new WeightInitDistribution(new UniformDistribution(-6, 6)))
                            .graphBuilder()
                            .addInputs("in")
                            .setOutputs("out");

                    INDArray input;
                    if (test == 0) {
                        if (frozen) {
                            conf.layer("0", new FrozenLayer(new EmbeddingLayer.Builder().nIn(5).nOut(5).build()), "in");
                        } else {
                            conf.layer("0", new EmbeddingLayer.Builder().nIn(5).nOut(5).build(), "in");
                        }
                        input = Nd4j.rand(networkDtype, 10, 1).muli(5).castTo(DataType.INT);
                        conf.setInputTypes(InputType.feedForward(1));
                    } else if (test == 1) {
                        if (frozen) {
                            conf.layer("0", new FrozenLayer(new EmbeddingSequenceLayer.Builder().nIn(5).nOut(5).build()), "in");
                        } else {
                            conf.layer("0", new EmbeddingSequenceLayer.Builder().nIn(5).nOut(5).build(), "in");
                        }
                        conf.layer("gp", new GlobalPoolingLayer.Builder(PoolingType.PNORM).pnorm(2).poolingDimensions(2).build(), "0");
                        input = Nd4j.rand(networkDtype, 10, 1, 5).muli(5).castTo(DataType.INT);
                        conf.setInputTypes(InputType.recurrent(1));
                    } else {
                        conf.layer("0", new RepeatVector.Builder().repetitionFactor(5).nOut(5).build(), "in");
                        conf.layer("gp", new GlobalPoolingLayer.Builder(PoolingType.SUM).build(), "0");
                        input = Nd4j.rand(networkDtype, 10, 5);
                        conf.setInputTypes(InputType.feedForward(5));
                    }

                    conf.appendLayer("el", new ElementWiseMultiplicationLayer.Builder().nOut(5).build())
                            .appendLayer("ae", new AutoEncoder.Builder().nOut(5).build())
                            .appendLayer("prelu", new PReLULayer.Builder().nOut(5).inputShape(5).build())
                            .appendLayer("out", new OutputLayer.Builder().nOut(10).build());

                    ComputationGraph net = new ComputationGraph(conf.build());
                    net.init();

                    INDArray label = Nd4j.zeros(networkDtype, 10, 10);

                    INDArray out = net.outputSingle(input);
                    assertEquals(msg, networkDtype, out.dataType());
                    Map<String, INDArray> ff = net.feedForward(input, false);
                    for (Map.Entry<String, INDArray> e : ff.entrySet()) {
                        if (e.getKey().equals("in"))
                            continue;
                        String s = msg + " - layer: " + e.getKey();
                        assertEquals(s, networkDtype, e.getValue().dataType());
                    }

                    net.setInput(0, input);
                    net.setLabels(label);
                    net.computeGradientAndScore();

                    net.fit(new DataSet(input, label));

                    logUsedClasses(net);

                    //Now, test mismatched dtypes for input/labels:
                    for (DataType inputLabelDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
                        INDArray in2 = input.castTo(inputLabelDtype);
                        INDArray label2 = label.castTo(inputLabelDtype);
                        net.output(in2);
                        net.setInput(0, in2);
                        net.setLabels(label2);
                        net.computeGradientAndScore();

                        net.fit(new DataSet(in2, label2));
                    }
                }
            }
        }
    }
}
 
Example 13
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testLocallyConnected() {
    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            assertEquals(globalDtype, Nd4j.dataType());
            assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

            INDArray[] in = null;
            for (int test = 0; test < 2; test++) {
                String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test;

                ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder()
                        .dataType(networkDtype)
                        .seed(123)
                        .updater(new NoOp())
                        .weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same)
                        .graphBuilder();

                INDArray label;
                switch (test) {
                    case 0:
                        b.addInputs("in")
                                .addLayer("1", new LSTM.Builder().nOut(5).build(), "in")
                                .addLayer("2", new LocallyConnected1D.Builder().kernelSize(2).nOut(4).build(), "1")
                                .addLayer("out", new RnnOutputLayer.Builder().nOut(10).build(), "2")
                                .setOutputs("out")
                                .setInputTypes(InputType.recurrent(5, 2));
                        in = new INDArray[]{Nd4j.rand(networkDtype, 2, 5, 2)};
                        label = TestUtils.randomOneHotTimeSeries(2, 10, 2);
                        break;
                    case 1:
                        b.addInputs("in")
                                .addLayer("1", new ConvolutionLayer.Builder().kernelSize(2, 2).nOut(5).convolutionMode(ConvolutionMode.Same).build(), "in")
                                .addLayer("2", new LocallyConnected2D.Builder().kernelSize(2, 2).nOut(5).build(), "1")
                                .addLayer("out", new OutputLayer.Builder().nOut(10).build(), "2")
                                .setOutputs("out")
                                .setInputTypes(InputType.convolutional(8, 8, 1));
                        in = new INDArray[]{Nd4j.rand(networkDtype, 2, 1, 8, 8)};
                        label = TestUtils.randomOneHot(2, 10).castTo(networkDtype);
                        break;
                    default:
                        throw new RuntimeException();
                }

                ComputationGraph net = new ComputationGraph(b.build());
                net.init();

                INDArray out = net.outputSingle(in);
                assertEquals(msg, networkDtype, out.dataType());
                Map<String, INDArray> ff = net.feedForward(in, false);
                for (Map.Entry<String, INDArray> e : ff.entrySet()) {
                    if (e.getKey().equals("in"))
                        continue;
                    String s = msg + " - layer: " + e.getKey();
                    assertEquals(s, networkDtype, e.getValue().dataType());
                }

                net.setInputs(in);
                net.setLabels(label);
                net.computeGradientAndScore();

                net.fit(new MultiDataSet(in, new INDArray[]{label}));

                logUsedClasses(net);

                //Now, test mismatched dtypes for input/labels:
                for (DataType inputLabelDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
                    INDArray[] in2 = new INDArray[in.length];
                    for (int i = 0; i < in.length; i++) {
                        in2[i] = in[i].castTo(inputLabelDtype);
                    }
                    INDArray label2 = label.castTo(inputLabelDtype);
                    net.output(in2);
                    net.setInputs(in2);
                    net.setLabels(label2);
                    net.computeGradientAndScore();

                    net.fit(new MultiDataSet(in2, new INDArray[]{label2}));
                }
            }
        }
    }
}