Java Code Examples for org.deeplearning4j.nn.conf.MultiLayerConfiguration#getInputPreProcess()

The following examples show how to use org.deeplearning4j.nn.conf.MultiLayerConfiguration#getInputPreProcess(). The source file and project are noted above each example.
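Before the examples, a minimal sketch of what the method does: getInputPreProcess(layerIndex) returns the InputPreProcessor attached in front of the given layer, or null if that layer has none. Preprocessors are typically inserted automatically by setInputType(...); the layer sizes in this sketch are illustrative only.

MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
        .layer(new ConvolutionLayer.Builder(3, 3).nIn(1).nOut(8).build())
        .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).build())
        // setInputType(...) computes layer sizes and adds the required preprocessors automatically
        .setInputType(InputType.convolutional(28, 28, 1))
        .build();

// Layer 0 receives the raw CNN-format input, so no preprocessor is attached.
assertNull(conf.getInputPreProcess(0));
// Layer 1 is a feed-forward layer fed by CNN activations, so a
// CnnToFeedForwardPreProcessor was inserted to flatten them.
assertTrue(conf.getInputPreProcess(1) instanceof CnnToFeedForwardPreProcessor);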
Example 1
Source File: ConvolutionLayerSetupTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testDeconvolution() {

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            //out = stride * (in-1) + filter - 2*pad -> 2 * (28-1) + 2 - 0 = 56 -> 56x56x3
            .layer(0, new Deconvolution2D.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build())
            //(56-2+2*1)/2+1 = 29 -> 29x29x3
            .layer(1, new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build())
            .layer(2, new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(29, proc.getInputHeight());
    assertEquals(29, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(29 * 29 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
 
Example 2
Source File: ConvolutionLayerSetupTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testSubSamplingWithPadding() {

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
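                    // Output size per conv/subsampling layer: out = (in - kernel + 2*pad) / stride + 1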
                    .layer(0, new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14
                    .layer(1, new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) //(14-2+2)/2+1 = 8 -> 8x8x3
                    .layer(2, new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build())
                    .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(8, proc.getInputHeight());
    assertEquals(8, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(8 * 8 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
 
Example 3
Source File: ConvolutionLayerSetupTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testUpsampling() {

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            .layer(new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14
            .layer(new Upsampling2D.Builder().size(3).build()) // 14 * 3 = 42!
            .layer(new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(42, proc.getInputHeight());
    assertEquals(42, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(42 * 42 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
 
Example 4
Source File: ConvolutionLayerSetupTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testSpaceToBatch() {

    int[] blocks = new int[] {2, 2};

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            .layer(new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14
            .layer(new SpaceToBatchLayer.Builder(blocks).build()) // Divide space dimensions by blocks, i.e. 14/2 = 7
            .layer(new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(7, proc.getInputHeight());
    assertEquals(7, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());
}
 
Example 5
Source File: ConvolutionLayerSetupTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testSpaceToDepth() {

    int blocks = 2;

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            //(28-2+0)/2+1 = 14 -> 14x14x3 out
            .layer(new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build())
            // Divide space dimensions by blocks, i.e. 14/2 = 7 -> 7x7x12 out (3x2x2 depth)
            .layer(new SpaceToDepthLayer.Builder(blocks, SpaceToDepthLayer.DataFormat.NCHW).build())
            .layer(new OutputLayer.Builder().nIn(3 * 2 * 2).nOut(3).activation(Activation.SOFTMAX).build()) // nIn of the next layer gets multiplied by 2*2.
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(7, proc.getInputHeight());
    assertEquals(7, proc.getInputWidth());
    assertEquals(12, proc.getNumChannels());
}
 
Example 6
Source File: ConvolutionLayerSetupTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testSeparableConv2D() {

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            .layer(new SeparableConvolution2D.Builder(2, 2)
                    .depthMultiplier(2)
                    .padding(0, 0)
                    .stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14
            .layer(new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) //(14-2+2)/2+1 = 8 -> 8x8x3
            .layer(2, new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(8, proc.getInputHeight());
    assertEquals(8, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(8 * 8 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
 
Example 7
Source File: ConvolutionLayerSetupTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testDeconv2D() {

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list()
            //out = stride * (in-1) + filter - 2*pad -> 2 * (28-1) + 2 - 0 = 56 -> 56x56x3
            .layer(new Deconvolution2D.Builder(2, 2)
                    .padding(0, 0)
                    .stride(2, 2).nIn(1).nOut(3).build())
            //(56-2+2*1)/2+1 = 29 -> 29x29x3
            .layer(new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build())
            .layer(2, new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build())
            .setInputType(InputType.convolutional(28, 28, 1));

    MultiLayerConfiguration conf = builder.build();

    assertNotNull(conf.getInputPreProcess(2));
    assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor);
    CnnToFeedForwardPreProcessor proc = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(2);
    assertEquals(29, proc.getInputHeight());
    assertEquals(29, proc.getInputWidth());
    assertEquals(3, proc.getNumChannels());

    assertEquals(29 * 29 * 3, ((FeedForwardLayer) conf.getConf(2).getLayer()).getNIn());
}
 
Example 8
Source File: TestPreProcessors.java    From deeplearning4j with Apache License 2.0
@Test
public void testCnnToDense() {
    MultiLayerConfiguration conf =
            new NeuralNetConfiguration.Builder()
                    .list().layer(0,
                    new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(
                            4, 4) // 28*28*1 => 15*15*10
                            .nIn(1).nOut(10).padding(2, 2)
                            .stride(2, 2)
                            .weightInit(WeightInit.RELU)
                            .activation(Activation.RELU)
                            .build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder()
                            .activation(Activation.RELU).nOut(200).build())
                    .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(200)
                            .nOut(5).weightInit(WeightInit.RELU)
                            .activation(Activation.SOFTMAX).build())
                    .setInputType(InputType.convolutionalFlat(28, 28, 1))
                    .build();

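    // convolutionalFlat input arrives as flattened rows [minibatch, 784], so setInputType adds a
    // FeedForwardToCnnPreProcessor before layer 0 to reshape it to [minibatch, 1, 28, 28], and a
    // CnnToFeedForwardPreProcessor before layer 1 to flatten the conv activations back out.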
    assertNotNull(conf.getInputPreProcess(0));
    assertNotNull(conf.getInputPreProcess(1));

    assertTrue(conf.getInputPreProcess(0) instanceof FeedForwardToCnnPreProcessor);
    assertTrue(conf.getInputPreProcess(1) instanceof CnnToFeedForwardPreProcessor);

    FeedForwardToCnnPreProcessor ffcnn = (FeedForwardToCnnPreProcessor) conf.getInputPreProcess(0);
    CnnToFeedForwardPreProcessor cnnff = (CnnToFeedForwardPreProcessor) conf.getInputPreProcess(1);

    assertEquals(28, ffcnn.getInputHeight());
    assertEquals(28, ffcnn.getInputWidth());
    assertEquals(1, ffcnn.getNumChannels());

    assertEquals(15, cnnff.getInputHeight());
    assertEquals(15, cnnff.getInputWidth());
    assertEquals(10, cnnff.getNumChannels());

    assertEquals(15 * 15 * 10, ((FeedForwardLayer) conf.getConf(1).getLayer()).getNIn());
}
 
Example 9
Source File: NetworkUtils.java    From deeplearning4j with Apache License 2.0
/**
 * Convert a MultiLayerNetwork to a ComputationGraph
 *
 * @param net the network to convert
 * @return ComputationGraph equivalent to the given network (including parameters and updater state)
 */
public static ComputationGraph toComputationGraph(MultiLayerNetwork net) {

    //We rely heavily here on the fact that the topological sort order - and hence the layout of parameters -
    // is, by definition, identical for an MLN and a "single stack" computation graph. The same has to hold
    // for the updater state...

    ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder()
            .dataType(net.getLayerWiseConfigurations().getDataType())
            .graphBuilder();

    MultiLayerConfiguration origConf = net.getLayerWiseConfigurations().clone();

    int layerIdx = 0;
    String lastLayer = "in";
    b.addInputs("in");
    for (NeuralNetConfiguration c : origConf.getConfs()) {
        String currLayer = String.valueOf(layerIdx);

        InputPreProcessor preproc = origConf.getInputPreProcess(layerIdx);
        b.addLayer(currLayer, c.getLayer(), preproc, lastLayer);

        lastLayer = currLayer;
        layerIdx++;
    }
    b.setOutputs(lastLayer);

    ComputationGraphConfiguration conf = b.build();

    ComputationGraph cg = new ComputationGraph(conf);
    cg.init();

    cg.setParams(net.params());

    //Also copy across updater state:
    INDArray updaterState = net.getUpdater().getStateViewArray();
    if (updaterState != null) {
        cg.getUpdater().getUpdaterStateViewArray()
                .assign(updaterState);
    }

    return cg;
}
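For reference, a minimal usage sketch of the helper above; the architecture, updater, and sizes here are illustrative assumptions, not part of the original source:

// Hypothetical two-layer network to convert (sizes are arbitrary).
MultiLayerConfiguration mlnConf = new NeuralNetConfiguration.Builder()
        .updater(new Adam(1e-3))
        .list()
        .layer(new DenseLayer.Builder().nIn(10).nOut(20).activation(Activation.RELU).build())
        .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .nIn(20).nOut(3).activation(Activation.SOFTMAX).build())
        .build();
MultiLayerNetwork net = new MultiLayerNetwork(mlnConf);
net.init();

ComputationGraph cg = NetworkUtils.toComputationGraph(net);

// Parameters were copied verbatim, so both models should produce
// identical output for the same input.
INDArray input = Nd4j.rand(1, 10);
assertEquals(net.output(input), cg.outputSingle(input));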