Java Code Examples for org.deeplearning4j.nn.graph.ComputationGraph#getLayers()

The following examples show how to use org.deeplearning4j.nn.graph.ComputationGraph#getLayers(). The original project and license are noted above each example.
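Before the full examples, a minimal sketch of the call itself may help: getLayers() returns the graph's layers as a Layer[] array, which can be iterated to inspect each layer by name. The variable names and printed fields below are illustrative, not taken from any of the projects that follow.

ComputationGraph net = ...; // an already-initialized graph
for (Layer layer : net.getLayers()) {
    String layerName = layer.conf().getLayer().getLayerName();
    System.out.println(layerName + ": " + layer.numParams() + " parameters");
}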
Example 1
Source File: FaceNetSmallV2Helper.java    From Java-Machine-Learning-for-Computer-Vision with MIT License
static void loadWeights(ComputationGraph computationGraph) throws IOException {
    Layer[] layers = computationGraph.getLayers();
    for (Layer layer : layers) {
        List<double[]> all = new ArrayList<>();
        String layerName = layer.conf().getLayer().getLayerName();
        if (layerName.contains("bn")) {
            // Batch norm CSVs: _w, _b, _m, _v (gamma, beta, running mean, running variance)
            all.add(readWightsValues(BASE + layerName + "_w.csv"));
            all.add(readWightsValues(BASE + layerName + "_b.csv"));
            all.add(readWightsValues(BASE + layerName + "_m.csv"));
            all.add(readWightsValues(BASE + layerName + "_v.csv"));
            layer.setParams(mergeAll(all));
        } else if (layerName.contains("conv")) {
            // Convolution: bias first, then weights
            all.add(readWightsValues(BASE + layerName + "_b.csv"));
            all.add(readWightsValues(BASE + layerName + "_w.csv"));
            layer.setParams(mergeAll(all));
        } else if (layerName.contains("dense")) {
            // Dense: weights, then bias
            double[] w = readWightsValues(BASE + layerName + "_w.csv");
            all.add(w);
            double[] b = readWightsValues(BASE + layerName + "_b.csv");
            all.add(b);
            layer.setParams(mergeAll(all));
        }
    }
}
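The project's mergeAll and readWightsValues helpers are not shown on this page. A plausible sketch of mergeAll, hypothetical rather than the project's actual code, would flatten the per-file arrays into the single row vector that setParams expects:

static INDArray mergeAll(List<double[]> arrays) {
    // total length of the flattened parameter vector
    int length = arrays.stream().mapToInt(a -> a.length).sum();
    double[] merged = new double[length];
    int offset = 0;
    for (double[] a : arrays) {
        System.arraycopy(a, 0, merged, offset, a.length);
        offset += a.length;
    }
    // one flat vector, in the same order the arrays were added
    return Nd4j.create(merged);
}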
 
Example 2
Source File: NetworkUtils.java    From deeplearning4j with Apache License 2.0
private static void setLearningRate(ComputationGraph net, double newLr, ISchedule lrSchedule) {
    org.deeplearning4j.nn.api.Layer[] layers = net.getLayers();
    for (int i = 0; i < layers.length; i++) {
        // Set the rate per layer, deferring the updater refresh (last argument: false)
        setLearningRate(net, layers[i].conf().getLayer().getLayerName(), newLr, lrSchedule, false);
    }
    // Refresh the updater once, after all layers have been updated
    refreshUpdater(net);
}
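This private helper backs the public NetworkUtils.setLearningRate overloads. A typical call, assuming an initialized ComputationGraph named net (the rate value is illustrative):

// set a fixed learning rate of 1e-3 for every layer in the graph
NetworkUtils.setLearningRate(net, 1e-3);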
 
Example 3
Source File: RegressionTest100a.java    From deeplearning4j with Apache License 2.0
@Test
@Ignore("AB 2019/05/23 - Failing on linux-x86_64-cuda-9.2 - see issue #7657")
public void testYoloHouseNumber() throws Exception {

    File f = Resources.asFile("regression_testing/100a/HouseNumberDetection_100a.bin");
    ComputationGraph net = ComputationGraph.load(f, true);

    int nBoxes = 5;
    int nClasses = 10;

    ConvolutionLayer cl = (ConvolutionLayer)((LayerVertex)net.getConfiguration().getVertices().get("convolution2d_9")).getLayerConf().getLayer();
    assertEquals(nBoxes * (5 + nClasses), cl.getNOut());
    assertEquals(new ActivationIdentity(), cl.getActivationFn());
    assertEquals(ConvolutionMode.Same, cl.getConvolutionMode());
    assertEquals(new WeightInitXavier(), cl.getWeightInitFn());
    assertArrayEquals(new int[]{1,1}, cl.getKernelSize());

    INDArray outExp;
    File f2 = Resources.asFile("regression_testing/100a/HouseNumberDetection_Output_100a.bin");
    try(DataInputStream dis = new DataInputStream(new FileInputStream(f2))){
        outExp = Nd4j.read(dis);
    }

    INDArray in;
    File f3 = Resources.asFile("regression_testing/100a/HouseNumberDetection_Input_100a.bin");
    try(DataInputStream dis = new DataInputStream(new FileInputStream(f3))){
        in = Nd4j.read(dis);
    }

    //Minor bug in 1.0.0-beta and earlier: the epsilon value was not added in the batch norm forward pass,
    //so the recorded expected output does not include it. To account for this, we manually set eps to 0.0 here.
    //https://github.com/deeplearning4j/deeplearning4j/issues/5836#issuecomment-405526228
    for(Layer l : net.getLayers()){
        if(l.conf().getLayer() instanceof BatchNormalization){
            BatchNormalization bn = (BatchNormalization) l.conf().getLayer();
            bn.setEps(0.0);
        }
    }

    INDArray outAct = net.outputSingle(in).castTo(outExp.dataType());

    boolean eq = outExp.equalsWithEps(outAct, 1e-4);
    if(!eq){
        log.info("Expected: {}", outExp);
        log.info("Actual: {}", outAct);
    }
    assertTrue("Output not equal", eq);
}
 
Example 4
Source File: TransferLearningComplex.java    From deeplearning4j with Apache License 2.0
@Test
public void testMergeAndFreeze() {
    // in1 -> A -> B -> merge, in2 -> C -> merge -> D -> out
    //Goal here: test a number of things...
    // (a) Ensure that freezing C doesn't impact A and B. Only C should be frozen in this config
    // (b) Test global override (should be selective)


    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Adam(1e-4))
                    .activation(Activation.LEAKYRELU).graphBuilder().addInputs("in1", "in2")
                    .addLayer("A", new DenseLayer.Builder().nIn(10).nOut(9).build(), "in1")
                    .addLayer("B", new DenseLayer.Builder().nIn(9).nOut(8).build(), "A")
                    .addLayer("C", new DenseLayer.Builder().nIn(7).nOut(6).build(), "in2")
                    .addLayer("D", new DenseLayer.Builder().nIn(8 + 7).nOut(5).build(), "B", "C")
                    .addLayer("out", new OutputLayer.Builder().nIn(5).nOut(4).activation(Activation.LEAKYRELU).build(), "D")
                    .setOutputs("out")
                    .validateOutputLayerConfig(false)
                    .build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();


    int[] topologicalOrder = graph.topologicalSortOrder();
    org.deeplearning4j.nn.graph.vertex.GraphVertex[] vertices = graph.getVertices();

    for (int i = 0; i < topologicalOrder.length; i++) {
        org.deeplearning4j.nn.graph.vertex.GraphVertex v = vertices[topologicalOrder[i]];
        log.info(i + "\t" + v.getVertexName());
    }

    ComputationGraph graph2 =
                    new TransferLearning.GraphBuilder(graph)
                                    .fineTuneConfiguration(new FineTuneConfiguration.Builder().updater(new Adam(2e-2)).build())
                                    .setFeatureExtractor("C").validateOutputLayerConfig(false).build();

    boolean cFound = false;
    Layer[] layers = graph2.getLayers();

    for (Layer l : layers) {
        String name = l.conf().getLayer().getLayerName();
        log.info(name + "\t frozen: " + (l instanceof FrozenLayer));
        if ("C".equals(l.conf().getLayer().getLayerName())) {
            //Only C should be frozen in this config
            cFound = true;
            assertTrue(name, l instanceof FrozenLayer);
        } else {
            assertFalse(name, l instanceof FrozenLayer);
        }

        //Also check config:
        BaseLayer bl = ((BaseLayer) l.conf().getLayer());
        assertEquals(new Adam(2e-2), bl.getIUpdater());
        assertEquals(Activation.LEAKYRELU.getActivationFunction(), bl.getActivationFn());
    }
    assertTrue(cFound);

}
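A note on the design being tested: setFeatureExtractor("C") freezes the named vertex together with the layers on the path from an input to it. Since C is fed directly by in2, only C is frozen, while A and B, which sit on the separate in1 branch, remain trainable; that is exactly what the loop above asserts.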
 
Example 5
Source File: ConvolutionalIterationListener.java    From deeplearning4j with Apache License 2.0
@Override
public void onForwardPass(Model model, Map<String, INDArray> activations) {

    int iteration = (model instanceof MultiLayerNetwork ? ((MultiLayerNetwork)model).getIterationCount() : ((ComputationGraph)model).getIterationCount());
    if (iteration % freq == 0) {

        List<INDArray> tensors = new ArrayList<>();
        int cnt = 0;
        Random rnd = new Random();
        BufferedImage sourceImage = null;
        int sampleIdx = -1; //output.shape()[0] == 1 ? 0 : rnd.nextInt((int) output.shape()[0] - 1) + 1;
        if (model instanceof ComputationGraph) {
            ComputationGraph l = (ComputationGraph) model;
            Layer[] layers = l.getLayers();
            if(layers.length != activations.size())
                throw new RuntimeException("layers.length != activations.size(). Got layers.length="+layers.length+", activations.size()="+activations.size());
            for( int i=0; i<layers.length; i++ ){
                if(layers[i].type() == Layer.Type.CONVOLUTIONAL){
                    String layerName = layers[i].conf().getLayer().getLayerName();
                    INDArray output = activations.get(layerName); //Look up this layer's activations by name

                    if(sampleIdx < 0){
                        sampleIdx = output.shape()[0] == 1 ? 0 : rnd.nextInt((int) output.shape()[0] - 1) + 1;
                    }

                    INDArray tad = output.tensorAlongDimension(sampleIdx, 3, 2, 1);
                    tensors.add(tad);
                    cnt++;
                }
            }
        } else {
            //MultiLayerNetwork: no op (other forward pass method should be called instead)
            return;
        }

        //Try to work out source image:
        ComputationGraph cg = (ComputationGraph)model;
        INDArray[] arr = cg.getInputs();
        if(arr.length > 1){
            throw new IllegalStateException("ConvolutionIterationListener does not support ComputationGraph models with more than 1 input; model has " +
                    arr.length + " inputs");
        }

        if(arr[0].rank() == 4){
            sourceImage = null;
            if (cnt == 0) {
                try {
                    sourceImage = restoreRGBImage(arr[0].tensorAlongDimension(sampleIdx, 3, 2, 1));
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        }

        BufferedImage render = rasterizeConvoLayers(tensors, sourceImage);
        Persistable p = new ConvolutionListenerPersistable(sessionID, workerID, System.currentTimeMillis(), render);
        ssr.putStaticInfo(p);

        minibatchNum++;
    }
}