org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer Java Examples

The following examples show how to use org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer. They are drawn from open source projects; the source file and originating project are noted above each example.
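
Before the project code, here is a minimal, self-contained sketch of the usage pattern most of the examples share: an upstream layer that produces variable-length (time-series) activations, a GlobalPoolingLayer that collapses that dimension into a fixed-size vector, and an output layer on top. This is a sketch only; the class name GlobalPoolingSketch, the layer sizes, and the hyperparameters are placeholder assumptions rather than code from any of the projects below, but the builder calls mirror those used in the examples that follow.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer;
import org.deeplearning4j.nn.conf.layers.LSTM;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.conf.layers.PoolingType;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class GlobalPoolingSketch {

    public static void main(String[] args) {
        int nIn = 8;        // input features per time step (placeholder)
        int nHidden = 16;   // LSTM layer size (placeholder)
        int nOut = 3;       // number of classes (placeholder)

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .list()
                // Recurrent layer: emits activations of shape [minibatch, nHidden, timeSteps]
                .layer(new LSTM.Builder().nIn(nIn).nOut(nHidden).activation(Activation.TANH).build())
                // Global pooling collapses the time dimension to a fixed-size [minibatch, nHidden] vector
                .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.AVG).build())
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(nHidden).nOut(nOut)
                        .activation(Activation.SOFTMAX).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        // The network can now be trained with standard backpropagation; as Examples #1 and #2
        // below show, truncated BPTT is rejected when a GlobalPoolingLayer is present.
    }
}
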
Example #1
Source File: ComputationGraphTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testInvalidTPBTT() {
    int nIn = 8;
    int nOut = 25;
    int nHiddenUnits = 17;

    try {
        new NeuralNetConfiguration.Builder()
                .graphBuilder()
                .addInputs("in")
                .layer("0", new org.deeplearning4j.nn.conf.layers.LSTM.Builder().nIn(nIn).nOut(nHiddenUnits).build(), "in")
                .layer("1", new GlobalPoolingLayer(), "0")
                .layer("2", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(nHiddenUnits)
                        .nOut(nOut)
                        .activation(Activation.TANH).build(), "1")
                .setOutputs("2")
                .backpropType(BackpropType.TruncatedBPTT)
                .build();
        fail("Exception expected");
    } catch (IllegalStateException e){
        log.error("",e);
        assertTrue(e.getMessage().contains("TBPTT") && e.getMessage().contains("validateTbpttConfig"));
    }
}
 
Example #2
Source File: MultiLayerTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testInvalidTPBTT() {
    int nIn = 8;
    int nOut = 25;
    int nHiddenUnits = 17;

    try {
        new NeuralNetConfiguration.Builder()
                .list()
                .layer(new org.deeplearning4j.nn.conf.layers.LSTM.Builder().nIn(nIn).nOut(nHiddenUnits).build())
                .layer(new GlobalPoolingLayer())
                .layer(new OutputLayer.Builder(LossFunction.MSE).nIn(nHiddenUnits)
                        .nOut(nOut)
                        .activation(Activation.TANH).build())
                .backpropType(BackpropType.TruncatedBPTT)
                .build();
        fail("Exception expected");
    } catch (IllegalStateException e){
        log.info(e.toString());
        assertTrue(e.getMessage().contains("TBPTT") && e.getMessage().contains("validateTbpttConfig"));
    }
}
 
Example #3
Source File: GlobalPoolingLayerSpace.java    From deeplearning4j with Apache License 2.0
@Override
public GlobalPoolingLayer getValue(double[] parameterValues) {
    GlobalPoolingLayer.Builder builder = new GlobalPoolingLayer.Builder();
    super.setLayerOptionsBuilder(builder, parameterValues);
    if (poolingDimensions != null)
        builder.poolingDimensions(poolingDimensions.getValue(parameterValues));
    if (collapseDimensions != null)
        builder.collapseDimensions(collapseDimensions.getValue(parameterValues));
    if (poolingType != null)
        builder.poolingType(poolingType.getValue(parameterValues));
    if (pNorm != null)
        builder.pnorm(pNorm.getValue(parameterValues));
    return builder.build();
}
 
Example #4
Source File: AbstractZooModel.java    From wekaDeeplearning4j with GNU General Public License v3.0
/**
 * Checks if we need to add a final output layer - also applies pooling beforehand if necessary
 * @param computationGraph Input ComputationGraph
 * @return Finalized ComputationGraph
 */
protected ComputationGraph addFinalOutputLayer(ComputationGraph computationGraph) {
    org.deeplearning4j.nn.conf.layers.Layer lastLayer = computationGraph.getLayers()[computationGraph.getNumLayers() - 1].conf().getLayer();
    if (!Dl4jMlpClassifier.noOutputLayer(filterMode, lastLayer)) {
        log.debug("No need to add output layer, ignoring");
        return computationGraph;
    }
    try {
        TransferLearning.GraphBuilder graphBuilder;

        if (requiresPooling)
            graphBuilder = new TransferLearning.GraphBuilder(computationGraph)
                .fineTuneConfiguration(getFineTuneConfig())
                .addLayer("intermediate_pooling", new GlobalPoolingLayer.Builder().build(), m_featureExtractionLayer)
                .addLayer(m_predictionLayerName, createOutputLayer(), "intermediate_pooling")
                .setOutputs(m_predictionLayerName);
        else
            graphBuilder = new TransferLearning.GraphBuilder(computationGraph)
                    .fineTuneConfiguration(getFineTuneConfig())
                    .addLayer(m_predictionLayerName, createOutputLayer(), m_featureExtractionLayer)
                    .setOutputs(m_predictionLayerName);

        // Remove the old output layer, but keep the connections
        graphBuilder.removeVertexKeepConnections(m_outputLayer);
        // Remove any other layers we don't want
        for (String layer : m_extraLayersToRemove) {
            graphBuilder.removeVertexAndConnections(layer);
        }

        log.debug("Finished adding output layer");
        return graphBuilder.build();
    } catch (Exception ex) {
        ex.printStackTrace();
        log.error(computationGraph.summary());
        return computationGraph;
    }

}
 
Example #5
Source File: RnnSequenceClassifier.java    From wekaDeeplearning4j with GNU General Public License v3.0
/**
 * Check if the given layer is compatible with sequence input (only embedding, RNN, and global pooling layers are allowed for now)
 *
 * @param layer Layer to check
 * @return True if compatible
 */
protected boolean isSequenceCompatibleLayer(Layer layer) {
  return layer.getBackend() instanceof EmbeddingLayer
      || layer.getBackend() instanceof AbstractLSTM
      || layer.getBackend() instanceof RnnOutputLayer
      || layer.getBackend() instanceof GlobalPoolingLayer;
}
 
Example #6
Source File: KerasGlobalPooling.java    From deeplearning4j with Apache License 2.0
/**
 * Constructor from parsed Keras layer configuration dictionary.
 *
 * @param layerConfig           dictionary containing Keras layer configuration
 * @param enforceTrainingConfig whether to enforce training-related configuration options
 * @throws InvalidKerasConfigurationException     Invalid Keras config
 * @throws UnsupportedKerasConfigurationException Unsupported Keras config
 */
public KerasGlobalPooling(Map<String, Object> layerConfig, boolean enforceTrainingConfig)
        throws InvalidKerasConfigurationException, UnsupportedKerasConfigurationException {
    super(layerConfig, enforceTrainingConfig);
    this.dimensions = mapGlobalPoolingDimensions(this.className, conf);
    GlobalPoolingLayer.Builder builder =
            new GlobalPoolingLayer.Builder(mapPoolingType(this.className, conf))
                    .poolingDimensions(dimensions)
                    .collapseDimensions(true) // keras 2 collapses dimensions
                    .name(this.layerName)
                    .dropOut(this.dropout);
    this.layer = builder.build();
    this.vertex = null;
}
 
Example #7
Source File: GlobalPoolingMaskingTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testMaskingRnn() {


    int timeSeriesLength = 5;
    int nIn = 5;
    int layerSize = 4;
    int nOut = 2;
    int[] minibatchSizes = new int[] {1, 3};

    for (int miniBatchSize : minibatchSizes) {

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .updater(new NoOp())
                        .dist(new NormalDistribution(0, 1.0)).seed(12345L).list()
                        .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH)
                                        .build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder()
                                        .poolingType(PoolingType.AVG).build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut).build())
                        .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        Random r = new Random(12345L);
        INDArray input = Nd4j.rand(new int[] {miniBatchSize, nIn, timeSeriesLength}).subi(0.5);

        INDArray mask;
        if (miniBatchSize == 1) {
            mask = Nd4j.create(new double[] {1, 1, 1, 1, 0}).reshape(1,5);
        } else {
            mask = Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 0}, {1, 1, 1, 0, 0}});
        }

        INDArray labels = Nd4j.zeros(miniBatchSize, nOut);
        for (int i = 0; i < miniBatchSize; i++) {
            int idx = r.nextInt(nOut);
            labels.putScalar(i, idx, 1.0);
        }

        net.setLayerMaskArrays(mask, null);
        INDArray outputMasked = net.output(input);

        net.clearLayerMaskArrays();

        for (int i = 0; i < miniBatchSize; i++) {
            INDArray maskRow = mask.getRow(i);
            int tsLength = maskRow.sumNumber().intValue();
            INDArray inputSubset = input.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                            NDArrayIndex.interval(0, tsLength));

            INDArray outSubset = net.output(inputSubset);
            INDArray outputMaskedSubset = outputMasked.getRow(i,true);

            assertEquals(outSubset, outputMaskedSubset);
        }
    }
}
 
Example #8
Source File: GlobalPoolingMaskingTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testMaskLayerDataTypes(){

    for(DataType dt : new DataType[]{DataType.FLOAT16, DataType.BFLOAT16, DataType.FLOAT, DataType.DOUBLE,
            DataType.INT8, DataType.INT16, DataType.INT32, DataType.INT64,
            DataType.UINT8, DataType.UINT16, DataType.UINT32, DataType.UINT64}){
        INDArray mask = Nd4j.rand(DataType.FLOAT, 2, 10).addi(0.3).castTo(dt);

        for(DataType networkDtype : new DataType[]{DataType.FLOAT16, DataType.BFLOAT16, DataType.FLOAT, DataType.DOUBLE}){

            INDArray in = Nd4j.rand(networkDtype, 2, 5, 10);
            INDArray label1 = Nd4j.rand(networkDtype, 2, 5);
            INDArray label2 = Nd4j.rand(networkDtype, 2, 5, 10);

            for(PoolingType pt : PoolingType.values()) {
                //System.out.println("Net: " + networkDtype + ", mask: " + dt + ", pt=" + pt);

                MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .list()
                        .layer(new GlobalPoolingLayer(pt))
                        .layer(new OutputLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build())
                        .build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                net.output(in, false, mask, null);
                net.output(in, false, mask, null);


                MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder()

                        .list()
                        .layer(new RnnOutputLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build())
                        .build();

                MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
                net2.init();

                net2.output(in, false, mask, mask);
                net2.output(in, false, mask, mask);

                net.fit(in, label1, mask, null);
                net2.fit(in, label2, mask, mask);
            }
        }
    }
}
 
Example #9
Source File: GlobalPoolingMaskingTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testMaskingCnnDim23() {
    //Test masking, where mask is along dimension 2 AND 3
    //For example, input images of 2 different sizes

    int minibatch = 2;
    int depthIn = 2;
    int depthOut = 4;
    int nOut = 5;
    int height = 5;
    int width = 4;

    PoolingType[] poolingTypes =
            new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, 2)
                        .stride(1, 1).activation(Activation.TANH).build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                        .build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Second example in minibatch: size [3,2]
        inToBeMasked.get(point(1), NDArrayIndex.all(), NDArrayIndex.interval(3,height), NDArrayIndex.all()).assign(0);
        inToBeMasked.get(point(1), NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(2,width)).assign(0);

        //Shape for mask: [minibatch, 1, height, width] -> broadcast along channels
        INDArray maskArray = Nd4j.create(minibatch, 1, height, width);
        maskArray.get(point(0), all(), all(), all()).assign(1);
        maskArray.get(point(1), all(), interval(0,3), interval(0,2)).assign(1);

        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        net.setLayerMaskArrays(maskArray, null);

        for (int i = 0; i < minibatch; i++) {
            INDArray subset;
            if(i == 0){
                subset = inToBeMasked.get(interval(i, i, true), all(), all(), all());
            } else {
                subset = inToBeMasked.get(interval(i, i, true), all(), interval(0,3), interval(0,2));
            }

            net.clear();
            net.clearLayerMaskArrays();
            INDArray outSubset = net.output(subset);
            INDArray outMaskedSubset = outMasked.getRow(i,true);

            assertEquals("minibatch: " + i + ", " + pt, outSubset, outMaskedSubset);
        }
    }
}
 
Example #10
Source File: GlobalPoolingMaskingTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testMaskingCnnDim2() {
    //Test masking, where mask is along dimension 2

    int minibatch = 3;
    int depthIn = 3;
    int depthOut = 4;
    int nOut = 5;
    int height = 5;
    int width = 4;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width)
                                        .stride(1, width).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, 1, height, 1] -> broadcast
        INDArray maskArray = Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 0}, {1, 1, 1, 0, 0}})
                .reshape('c', minibatch, 1, height, 1);

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));


        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        for (int i = 0; i < minibatch; i++) {
            int numSteps = height - i;
            INDArray subset = inToBeMasked.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                            NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
            assertArrayEquals(new long[] {1, depthIn, height - i, width}, subset.shape());

            INDArray outSubset = net.output(subset);
            INDArray outMaskedSubset = outMasked.getRow(i, true);

            assertEquals("minibatch: " + i, outSubset, outMaskedSubset);
        }
    }
}
 
Example #11
Source File: GlobalPoolingMaskingTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testMaskingCnnDim3() {
    //Test masking, where mask is along dimension 3

    int minibatch = 3;
    int depthIn = 3;
    int depthOut = 4;
    int nOut = 5;
    int height = 3;
    int width = 6;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2)
                                        .stride(height, 1).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, 1, 1, width] -> broadcast
        INDArray maskArray = Nd4j.create(new double[][] {{1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 0}, {1, 1, 1, 1, 0, 0}})
                .reshape('c', minibatch, 1, 1, width);

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 3));


        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        for (int i = 0; i < minibatch; i++) {
            int numSteps = width - i;
            INDArray subset = inToBeMasked.get(NDArrayIndex.interval(i, i, true), NDArrayIndex.all(),
                            NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps));
            assertArrayEquals(new long[] {1, depthIn, height, width - i}, subset.shape());

            INDArray outSubset = net.output(subset);
            INDArray outMaskedSubset = outMasked.getRow(i, true);

            assertEquals("minibatch: " + i, outSubset, outMaskedSubset);
        }
    }
}
 
Example #12
Source File: GlobalPoolingMaskingTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testMaskingCnnDim2_SingleExample() {
    //Test masking, where mask is along dimension 2

    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 6;
    int width = 3;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width)
                                        .stride(1, width).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, 1, height, 1]
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0}, new int[]{1,1,height,1});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));


        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = height - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                        NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
        assertArrayEquals(new long[] {1, depthIn, 5, width}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1}, new long[]{1,2});
        net.setLabels(labels);

        net.computeGradientAndScore();
    }
}
 
Example #13
Source File: GlobalPoolingMaskingTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testMaskingCnnDim3_SingleExample() {
    //Test masking, where mask is along dimension 3

    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 3;
    int width = 6;

    PoolingType[] poolingTypes =
                    new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                        .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2)
                                        .stride(height, 1).activation(Activation.TANH).build())
                        .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                        .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [minibatch, 1, 1, width]
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0}, new int[]{1,1,1,width});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 3));


        net.setLayerMaskArrays(maskArray, null);

        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = width - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                        NDArrayIndex.all(), NDArrayIndex.interval(0, numSteps));
        assertArrayEquals(new long[] {1, depthIn, height, 5}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1}, new long[]{1,2});
        net.setLabels(labels);

        net.computeGradientAndScore();
    }
}
 
Example #14
Source File: Vasttext.java    From scava with Eclipse Public License 2.0
private ComputationGraph VasttextTextualAndNumeric()
{
	Activation activation = null;
	LossFunction loss = null;
	//For multi-label classification, following the book "Deep Learning with Python", use sigmoid activation with binary cross-entropy loss
	if(multiLabel)
	{
		activation = Activation.SIGMOID;
		loss = LossFunction.XENT; //Binary Crossentropy
	}
	else
	{
		//Single-label (including binary) classification: softmax with categorical cross-entropy, since the output layer has two or more neurons.
		//With a single output neuron we would instead use sigmoid activation and binary cross-entropy.
		activation = Activation.SOFTMAX;
		loss = LossFunction.MCXENT;	//CATEGORICAL_CROSSENTROPY
	}

	System.err.println("LR:"+lr);
	
	System.err.println("Dense:"+denseDimension);

	ComputationGraphConfiguration  nnConf = new NeuralNetConfiguration.Builder()
			.updater(new Adam(lr))
			.weightInit(WeightInit.XAVIER)
			.trainingWorkspaceMode(WorkspaceMode.ENABLED)
               .inferenceWorkspaceMode(WorkspaceMode.ENABLED)
			.graphBuilder()
			.addInputs("Text", "Extra")
			//Embeddings Parts
			.addLayer("Embeddings", new EmbeddingSequenceLayer.Builder()
                       .nIn(textFeaturesSize)
                       .nOut(denseDimension)
                       .activation(Activation.IDENTITY)
                       //.activation(Activation.TANH)
                       //.dropOut(0.0)
                       .build(), "Text")
			.addLayer("GlobalPooling", new GlobalPoolingLayer.Builder()
                       .poolingType(PoolingType.AVG)
                       .poolingDimensions(2)
                       .collapseDimensions(true)
                       //.dropOut(0.0)
                       .build(), "Embeddings")
			//We're merging directly the values from the extra
			.addVertex("Merge", new MergeVertex(), "GlobalPooling","Extra")
			.addLayer("DenseAll", new DenseLayer.Builder()
					.nIn(denseDimension+numericFeaturesSize)
					.nOut(denseDimension/2)
					//.dropOut(0.5)
					//.l2(0.001)
					.build(), "Merge")
			.addLayer("Output", new OutputLayer.Builder()
					//.dropOut(0.5)
					.nIn(denseDimension/2)
                       .nOut(labelsSize)
                       .activation(activation)
                       .lossFunction(loss)
                       .build(), "DenseAll")
			.setOutputs("Output")
			.pretrain(false)
			.backprop(true)
			.build();

	return new ComputationGraph(nnConf);
}
 
Example #15
Source File: RegressionTest100b4.java    From deeplearning4j with Apache License 2.0
@Test
public void testSyntheticBidirectionalRNNGraph() throws Exception {

    File f = Resources.asFile("regression_testing/100b4/SyntheticBidirectionalRNNGraph_100b4.bin");
    ComputationGraph net = ComputationGraph.load(f, true);

    Bidirectional l0 = (Bidirectional) net.getLayer("rnn1").conf().getLayer();

    LSTM l1 = (LSTM) l0.getFwd();
    assertEquals(16, l1.getNOut());
    assertEquals(new ActivationReLU(), l1.getActivationFn());
    assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l1));

    LSTM l2 = (LSTM) l0.getBwd();
    assertEquals(16, l2.getNOut());
    assertEquals(new ActivationReLU(), l2.getActivationFn());
    assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l2));

    Bidirectional l3 = (Bidirectional) net.getLayer("rnn2").conf().getLayer();

    SimpleRnn l4 = (SimpleRnn) l3.getFwd();
    assertEquals(16, l4.getNOut());
    assertEquals(new ActivationReLU(), l4.getActivationFn());
    assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l4));

    SimpleRnn l5 = (SimpleRnn) l3.getBwd();
    assertEquals(16, l5.getNOut());
    assertEquals(new ActivationReLU(), l5.getActivationFn());
    assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l5));

    MergeVertex mv = (MergeVertex) net.getVertex("concat");

    GlobalPoolingLayer gpl = (GlobalPoolingLayer) net.getLayer("pooling").conf().getLayer();
    assertEquals(PoolingType.MAX, gpl.getPoolingType());
    assertArrayEquals(new int[]{2}, gpl.getPoolingDimensions());
    assertTrue(gpl.isCollapseDimensions());

    OutputLayer outl = (OutputLayer) net.getLayer("out").conf().getLayer();
    assertEquals(3, outl.getNOut());
    assertEquals(new LossMCXENT(), outl.getLossFn());

    INDArray outExp;
    File f2 = Resources.asFile("regression_testing/100b4/SyntheticBidirectionalRNNGraph_Output_100b4.bin");
    try (DataInputStream dis = new DataInputStream(new FileInputStream(f2))) {
        outExp = Nd4j.read(dis);
    }

    INDArray in;
    File f3 = Resources.asFile("regression_testing/100b4/SyntheticBidirectionalRNNGraph_Input_100b4.bin");
    try (DataInputStream dis = new DataInputStream(new FileInputStream(f3))) {
        in = Nd4j.read(dis);
    }

    INDArray outAct = net.output(in)[0];

    assertEquals(outExp, outAct);
}
 
Example #16
Source File: Vasttext.java    From scava with Eclipse Public License 2.0
private MultiLayerNetwork VasttextTextual()
{
	Activation activation = null;
	LossFunction loss = null;
	//For multi-label classification, following the book "Deep Learning with Python", use sigmoid activation with binary cross-entropy loss
	if(multiLabel)
	{
		activation = Activation.SIGMOID;
		loss = LossFunction.XENT; //Binary Crossentropy
	}
	else
	{
	//Single-label (including binary) classification: softmax with categorical cross-entropy, since the output layer has two or more neurons.
	//With a single output neuron we would instead use sigmoid activation and binary cross-entropy.
		activation = Activation.SOFTMAX;
		loss = LossFunction.MCXENT;	//CATEGORICAL_CROSSENTROPY
	}

	MultiLayerConfiguration nnConf = new NeuralNetConfiguration.Builder()
               .updater(new Adam(lr))
               .weightInit(WeightInit.XAVIER)
               .trainingWorkspaceMode(WorkspaceMode.ENABLED)
               .inferenceWorkspaceMode(WorkspaceMode.ENABLED)
               .list()
               .layer(0, new EmbeddingSequenceLayer.Builder()
                       .nIn(textFeaturesSize)
                       .nOut(denseDimension)
                       .activation(Activation.IDENTITY)
                       .build())
               .layer(1, new GlobalPoolingLayer.Builder()
                       .poolingType(PoolingType.AVG)
                       .poolingDimensions(2)
                       .collapseDimensions(true)
                       .build())
               .layer(2, new OutputLayer.Builder()
                       .nIn(denseDimension)
                       .nOut(labelsSize)
                       .activation(activation)
                       .lossFunction(loss)
                       .build())
               .pretrain(false).backprop(true).build();

       return new MultiLayerNetwork(nnConf);
}
 
Example #17
Source File: KerasGlobalPooling.java    From deeplearning4j with Apache License 2.0
/**
 * Get the DL4J GlobalPoolingLayer.
 *
 * @return GlobalPoolingLayer
 */
public GlobalPoolingLayer getGlobalPoolingLayer() {
    return (GlobalPoolingLayer) this.layer;
}