org.deeplearning4j.nn.conf.layers.RnnOutputLayer Java Examples

The following examples show how to use org.deeplearning4j.nn.conf.layers.RnnOutputLayer. Each example is taken from an open-source project; the source file, project, and license are listed above the code.
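Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: an RnnOutputLayer placed after a recurrent layer so that a prediction is produced at every time step. The class name (RnnOutputLayerSketch), layer sizes, seed, and random training data are illustrative assumptions, not code from any of the projects below.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.LSTM;
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class RnnOutputLayerSketch {
    public static void main(String[] args) {
        // One LSTM layer feeding a time-distributed RnnOutputLayer (softmax + MCXENT for classification).
        // Sizes are arbitrary: 8 input features, 16 hidden units, 3 output classes.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(42)
                .list()
                .layer(0, new LSTM.Builder().nIn(8).nOut(16).activation(Activation.TANH).build())
                .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(16).nOut(3).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        // Recurrent layers expect 3D data shaped [miniBatch, features, timeSteps]; random data just to show the call.
        INDArray features = Nd4j.rand(new int[] {2, 8, 5});
        INDArray labels = Nd4j.rand(new int[] {2, 3, 5});
        net.fit(features, labels);

        // The output keeps the [miniBatch, nOut, timeSteps] layout: one prediction per time step.
        INDArray out = net.output(features);
        System.out.println(java.util.Arrays.toString(out.shape()));
    }
}

The project examples that follow exercise the same layer with GravesLSTM stacks, truncated BPTT, output masking, ComputationGraph setups, and input preprocessors.
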
Example #1
Source File: MultiLayerTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testRnnTimeStepWithPreprocessor() {

    MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder()
                                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                                    .list()
                                    .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10)
                                                    .nOut(10).activation(Activation.TANH).build())
                                    .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10)
                                                    .nOut(10).activation(Activation.TANH).build())
                                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build())
                                    .inputPreProcessor(0, new FeedForwardToRnnPreProcessor())
                                    .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray in = Nd4j.rand(1, 10);
    net.rnnTimeStep(in);
}
 
Example #2
Source File: MultiLayerTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testRnnTimeStepWithPreprocessorGraph() {

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .graphBuilder().addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10).nOut(10)
                                    .activation(Activation.TANH).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10).nOut(10)
                                    .activation(Activation.TANH).build(), "0")
                    .addLayer("2", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build(), "1")
                    .setOutputs("2").inputPreProcessor("0", new FeedForwardToRnnPreProcessor())
                    .build();

    ComputationGraph net = new ComputationGraph(conf);
    net.init();

    INDArray in = Nd4j.rand(1, 10);
    net.rnnTimeStep(in);
}
 
Example #3
Source File: ComputationGraphTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testTbpttMasking() {
    //Simple "does it throw an exception" type test...
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .graphBuilder().addInputs("in")
                    .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                                    .activation(Activation.IDENTITY).nIn(1).nOut(1).build(), "in")
                    .setOutputs("out").backpropType(BackpropType.TruncatedBPTT).tBPTTForwardLength(8)
                    .tBPTTBackwardLength(8).build();

    ComputationGraph net = new ComputationGraph(conf);
    net.init();

    MultiDataSet data = new MultiDataSet(new INDArray[] {Nd4j.linspace(1, 10, 10, Nd4j.dataType()).reshape(1, 1, 10)},
                    new INDArray[] {Nd4j.linspace(2, 20, 10, Nd4j.dataType()).reshape(1, 1, 10)}, null,
                    new INDArray[] {Nd4j.ones(1, 10)});

    net.fit(data);
}
 
Example #4
Source File: ComputationGraphTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testTruncatedBPTTSimple() {
    //Extremely simple test of the 'does it throw an exception' variety
    int timeSeriesLength = 12;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;

    int nTimeSlices = 20;

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0,
                                                    0.5))
                                    .build(), "0")
                    .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .nIn(8).nOut(nOut)
                                    .activation(Activation.SOFTMAX)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "1")
                    .setOutputs("out").backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTBackwardLength(timeSeriesLength).tBPTTForwardLength(timeSeriesLength).build();

    Nd4j.getRandom().setSeed(12345);
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray inputLong = Nd4j.rand(new int[] {miniBatchSize, nIn, nTimeSlices * timeSeriesLength});
    INDArray labelsLong = Nd4j.rand(new int[] {miniBatchSize, nOut, nTimeSlices * timeSeriesLength});

    graph.fit(new INDArray[] {inputLong}, new INDArray[] {labelsLong});
}
 
Example #5
Source File: MultiLayerTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testTruncatedBPTTSimple() {
    //Extremely simple test of the 'does it throw an exception' variety
    int timeSeriesLength = 12;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;

    int nTimeSlices = 20;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0, 0.5)).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH)
                                    .dist(
                                                    new NormalDistribution(0,
                                                                    0.5))
                                    .build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunction.MCXENT)
                                    .nIn(8).nOut(nOut).activation(Activation.SOFTMAX)
                                    .dist(new NormalDistribution(0, 0.5))
                                    .build())
                    .backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTBackwardLength(timeSeriesLength).tBPTTForwardLength(timeSeriesLength).build();

    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    INDArray inputLong = Nd4j.rand(new int[] {miniBatchSize, nIn, nTimeSlices * timeSeriesLength});
    INDArray labelsLong = Nd4j.rand(new int[] {miniBatchSize, nOut, nTimeSlices * timeSeriesLength});

    mln.fit(inputLong, labelsLong);
}
 
Example #6
Source File: SinCosLstm.java    From dl4j-tutorials with MIT License
public static void main(String[] args) {
    List<Data> data = readFile("");

    RegIterator trainIter = new RegIterator(data, 1, 5, 5);

    // Build the model
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(1234)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .weightInit(WeightInit.XAVIER)
            .updater(new Nesterovs(0.01, 0.9))
            .list().layer(0, new GravesLSTM.Builder().activation(Activation.TANH).nIn(1).nOut(32)
                    .build())
            .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY).nIn(32).nOut(1).build())
            .build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.setListeners(new ScoreIterationListener(1));
    network.init();

    int epoch = 10;
    for (int i = 0; i < epoch; i++) {
        while (trainIter.hasNext()) {
            DataSet dataSets = trainIter.next();
            network.fit(dataSets);
        }
        trainIter.reset();
    }

}
 
Example #7
Source File: ComputationGraphTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testTBPTTLongerThanTS() {
    int tbpttLength = 100;
    int timeSeriesLength = 20;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0,
                                                    0.5))
                                    .build(), "0")
                    .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .nIn(8).nOut(nOut)
                                    .activation(Activation.SOFTMAX)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "1")
                    .setOutputs("out").backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTBackwardLength(tbpttLength).tBPTTForwardLength(tbpttLength).build();

    Nd4j.getRandom().setSeed(12345);
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray inputLong = Nd4j.rand(new int[] {miniBatchSize, nIn, timeSeriesLength});
    INDArray labelsLong = Nd4j.rand(new int[] {miniBatchSize, nOut, timeSeriesLength});

    INDArray initialParams = graph.params().dup();
    graph.fit(new INDArray[] {inputLong}, new INDArray[] {labelsLong});
    INDArray afterParams = graph.params();

    assertNotEquals(initialParams, afterParams);
}
 
Example #8
Source File: RnnSequenceClassifier.java    From wekaDeeplearning4j with GNU General Public License v3.0
@Override
protected void createModel() throws Exception {
  final INDArray features = getFirstBatchFeatures(trainData);
  log.info("Feature shape: {}", features.shape());
  ComputationGraphConfiguration.GraphBuilder gb =
      netConfig
          .builder()
          .seed(getSeed())
          .graphBuilder()
          .backpropType(BackpropType.TruncatedBPTT)
          .tBPTTBackwardLength(tBPTTbackwardLength)
          .tBPTTForwardLength(tBPTTforwardLength);

  // Set output size
  final Layer lastLayer = layers[layers.length - 1];
  final int nOut = trainData.numClasses();
  if (lastLayer.getBackend() instanceof RnnOutputLayer) {
    ((weka.dl4j.layers.RnnOutputLayer) lastLayer).setNOut(nOut);
  }

  String currentInput = "input";
  gb.addInputs(currentInput);
  // Collect layers
  for (Layer layer : layers) {
    String lName = layer.getLayerName();
    gb.addLayer(lName, layer.getBackend().clone(), currentInput);
    currentInput = lName;
  }
  gb.setOutputs(currentInput);
  gb.setInputTypes(InputType.inferInputType(features));

  ComputationGraphConfiguration conf = gb.build();
  ComputationGraph model = new ComputationGraph(conf);
  model.init();
  this.model = model;
}
 
Example #9
Source File: RnnSequenceClassifier.java    From wekaDeeplearning4j with GNU General Public License v3.0
/**
 * Check if the given layers are compatible for sequences (Only allow embedding and RNN for now)
 *
 * @param layer Layers to check
 * @return True if compatible
 */
protected boolean isSequenceCompatibleLayer(Layer layer) {
  return layer.getBackend() instanceof EmbeddingLayer
      || layer.getBackend() instanceof AbstractLSTM
      || layer.getBackend() instanceof RnnOutputLayer
      || layer.getBackend() instanceof GlobalPoolingLayer;
}
 
Example #10
Source File: MultiLayerTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testTBPTTLongerThanTS() {
    //Extremely simple test of the 'does it throw an exception' variety
    int timeSeriesLength = 20;
    int tbpttLength = 1000;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .weightInit(WeightInit.XAVIER).list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                                    .activation(Activation.TANH).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunction.MSE).nIn(8).nOut(nOut)
                                    .activation(Activation.IDENTITY).build())
                    .backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTBackwardLength(tbpttLength).tBPTTForwardLength(tbpttLength).build();

    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    INDArray features = Nd4j.rand(new int[] {miniBatchSize, nIn, timeSeriesLength});
    INDArray labels = Nd4j.rand(new int[] {miniBatchSize, nOut, timeSeriesLength});

    INDArray maskArrayInput = Nd4j.ones(miniBatchSize, timeSeriesLength);
    INDArray maskArrayOutput = Nd4j.ones(miniBatchSize, timeSeriesLength);

    DataSet ds = new DataSet(features, labels, maskArrayInput, maskArrayOutput);

    INDArray initialParams = mln.params().dup();
    mln.fit(ds);
    INDArray afterParams = mln.params();
    assertNotEquals(initialParams, afterParams);
}
 
Example #11
Source File: RnnDataFormatTests.java    From deeplearning4j with Apache License 2.0
private MultiLayerNetwork getNetWithLayer(Layer layer, RNNFormat format, boolean lastTimeStep, boolean maskZeros) {
    if (maskZeros){
        layer = new MaskZeroLayer.Builder().setMaskValue(0.).setUnderlying(layer).build();
    }
    if(lastTimeStep){
        layer = new LastTimeStep(layer);
    }
    NeuralNetConfiguration.ListBuilder builder = new NeuralNetConfiguration.Builder()
            .seed(12345)
            .list()
            .layer(new LSTM.Builder()
                    .nIn(3)
                    .activation(Activation.TANH)
                    .dataFormat(format)
                    .nOut(3)
                    .helperAllowFallback(false)
                    .build())
            .layer(layer)
            .layer(lastTimeStep
                    ? new OutputLayer.Builder().activation(Activation.SOFTMAX).nOut(10).build()
                    : new RnnOutputLayer.Builder().activation(Activation.SOFTMAX).nOut(10).dataFormat(format).build())
            .setInputType(InputType.recurrent(3, 12, format));

    MultiLayerNetwork net = new MultiLayerNetwork(builder.build());
    net.init();
    return net;
}
 
Example #12
Source File: TestGraphNodes.java    From deeplearning4j with Apache License 2.0
@Test
public void testDuplicateToTimeSeriesVertex() {

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
                    .addInputs("in2d", "in3d")
                    .addVertex("duplicateTS", new DuplicateToTimeSeriesVertex("in3d"), "in2d")
                    .addLayer("out", new OutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build(), "duplicateTS")
                    .addLayer("out3d", new RnnOutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build(), "in3d")
                    .setOutputs("out", "out3d").build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray in2d = Nd4j.rand(3, 5);
    INDArray in3d = Nd4j.rand(new int[] {3, 2, 7});

    graph.setInputs(in2d, in3d);

    INDArray expOut = Nd4j.zeros(3, 5, 7);
    for (int i = 0; i < 7; i++) {
        expOut.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(i)}, in2d);
    }

    GraphVertex gv = graph.getVertex("duplicateTS");
    gv.setInputs(in2d);
    INDArray outFwd = gv.doForward(true, LayerWorkspaceMgr.noWorkspaces());
    assertEquals(expOut, outFwd);

    INDArray expOutBackward = expOut.sum(2);
    gv.setEpsilon(expOut);
    INDArray outBwd = gv.doBackward(false, LayerWorkspaceMgr.noWorkspaces()).getSecond()[0];
    assertEquals(expOutBackward, outBwd);

    String json = conf.toJson();
    ComputationGraphConfiguration conf2 = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, conf2);
}
 
Example #13
Source File: ComputationGraphTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testRnnTimeStepGravesLSTM() {
    Nd4j.getRandom().setSeed(12345);
    int timeSeriesLength = 12;

    //4 layer network: 2 GravesLSTM + DenseLayer + RnnOutputLayer. Hence also tests preprocessors.
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder()
                    .addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(5).nOut(7)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "0")
                    .addLayer("2", new DenseLayer.Builder().nIn(8).nOut(9).activation(Activation.TANH)

                                    .dist(new NormalDistribution(0,
                                                    0.5))
                                    .build(), "1")
                    .addLayer("3", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .nIn(9).nOut(4)
                                    .activation(Activation.SOFTMAX)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "2")
                    .setOutputs("3").inputPreProcessor("2", new RnnToFeedForwardPreProcessor())
                    .inputPreProcessor("3", new FeedForwardToRnnPreProcessor())
                    .build();
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray input = Nd4j.rand(new int[] {3, 5, timeSeriesLength});

    Map<String, INDArray> allOutputActivations = graph.feedForward(input, true);
    INDArray fullOutL0 = allOutputActivations.get("0");
    INDArray fullOutL1 = allOutputActivations.get("1");
    INDArray fullOutL3 = allOutputActivations.get("3");

    assertArrayEquals(new long[] {3, 7, timeSeriesLength}, fullOutL0.shape());
    assertArrayEquals(new long[] {3, 8, timeSeriesLength}, fullOutL1.shape());
    assertArrayEquals(new long[] {3, 4, timeSeriesLength}, fullOutL3.shape());

    int[] inputLengths = {1, 2, 3, 4, 6, 12};

    //Do steps of length 1, then of length 2, ..., 12
    //Should get the same result regardless of step size; should be identical to standard forward pass
    for (int i = 0; i < inputLengths.length; i++) {
        int inLength = inputLengths[i];
        int nSteps = timeSeriesLength / inLength; //each of length inLength

        graph.rnnClearPreviousState();

        for (int j = 0; j < nSteps; j++) {
            int startTimeRange = j * inLength;
            int endTimeRange = startTimeRange + inLength;

            INDArray inputSubset = input.get(NDArrayIndex.all(), NDArrayIndex.all(),
                            NDArrayIndex.interval(startTimeRange, endTimeRange));
            if (inLength > 1)
                assertTrue(inputSubset.size(2) == inLength);

            INDArray[] outArr = graph.rnnTimeStep(inputSubset);
            assertEquals(1, outArr.length);
            INDArray out = outArr[0];

            INDArray expOutSubset;
            if (inLength == 1) {
                val sizes = new long[] {fullOutL3.size(0), fullOutL3.size(1), 1};
                expOutSubset = Nd4j.create(DataType.FLOAT, sizes);
                expOutSubset.tensorAlongDimension(0, 1, 0).assign(fullOutL3.get(NDArrayIndex.all(),
                                NDArrayIndex.all(), NDArrayIndex.point(startTimeRange)));
            } else {
                expOutSubset = fullOutL3.get(NDArrayIndex.all(), NDArrayIndex.all(),
                                NDArrayIndex.interval(startTimeRange, endTimeRange));
            }

            assertEquals(expOutSubset, out);

            Map<String, INDArray> currL0State = graph.rnnGetPreviousState("0");
            Map<String, INDArray> currL1State = graph.rnnGetPreviousState("1");

            INDArray lastActL0 = currL0State.get(GravesLSTM.STATE_KEY_PREV_ACTIVATION);
            INDArray lastActL1 = currL1State.get(GravesLSTM.STATE_KEY_PREV_ACTIVATION);

            INDArray expLastActL0 = fullOutL0.tensorAlongDimension(endTimeRange - 1, 1, 0);
            INDArray expLastActL1 = fullOutL1.tensorAlongDimension(endTimeRange - 1, 1, 0);

            assertEquals(expLastActL0, lastActL0);
            assertEquals(expLastActL1, lastActL1);
        }
    }
}
 
Example #14
Source File: MultiLayerTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testRnnTimeStep2dInput() {
    Nd4j.getRandom().setSeed(12345);
    int timeSeriesLength = 6;

    MultiLayerConfiguration conf =
                    new NeuralNetConfiguration.Builder()
                                    .list().layer(0,
                                                    new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder()
                                                                    .nIn(5).nOut(7).activation(Activation.TANH)

                                                                    .dist(new NormalDistribution(0, 0.5)).build())
                                    .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7)
                                                    .nOut(8).activation(Activation.TANH)

                                                    .dist(new NormalDistribution(0,
                                                                    0.5))
                                                    .build())
                                    .layer(2, new RnnOutputLayer.Builder(LossFunction.MCXENT)
                                                    .nIn(8).nOut(4)
                                                    .activation(Activation.SOFTMAX)

                                                    .dist(new NormalDistribution(0, 0.5)).build())
                                    .build();
    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    INDArray input3d = Nd4j.rand(new long[] {3, 5, timeSeriesLength});
    INDArray out3d = mln.rnnTimeStep(input3d);
    assertArrayEquals(out3d.shape(), new long[] {3, 4, timeSeriesLength});

    mln.rnnClearPreviousState();
    for (int i = 0; i < timeSeriesLength; i++) {
        INDArray input2d = input3d.tensorAlongDimension(i, 1, 0);
        INDArray out2d = mln.rnnTimeStep(input2d);

        assertArrayEquals(out2d.shape(), new long[] {3, 4});

        INDArray expOut2d = out3d.tensorAlongDimension(i, 1, 0);
        assertEquals(out2d, expOut2d);
    }

    //Check same but for input of size [3,5,1]. Expect [3,4,1] out
    mln.rnnClearPreviousState();
    for (int i = 0; i < timeSeriesLength; i++) {
        INDArray temp = Nd4j.create(new int[] {3, 5, 1});
        temp.tensorAlongDimension(0, 1, 0).assign(input3d.tensorAlongDimension(i, 1, 0));
        INDArray out3dSlice = mln.rnnTimeStep(temp);
        assertArrayEquals(out3dSlice.shape(), new long[] {3, 4, 1});

        assertTrue(out3dSlice.tensorAlongDimension(0, 1, 0).equals(out3d.tensorAlongDimension(i, 1, 0)));
    }
}
 
Example #15
Source File: TestPreProcessors.java    From deeplearning4j with Apache License 2.0
@Test
public void testAutoAdditionOfPreprocessors() {
    //FF->RNN and RNN->FF
    MultiLayerConfiguration conf1 =
                    new NeuralNetConfiguration.Builder().list()
                                    .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(5)
                                                    .nOut(6).build())
                                    .layer(1, new GravesLSTM.Builder().nIn(6).nOut(7).build())
                                    .layer(2, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(7)
                                                    .nOut(8).build())
                                    .layer(3, new RnnOutputLayer.Builder().nIn(8).nOut(9).activation(Activation.SOFTMAX).build()).build();
    //Expect preprocessors: layer1: FF->RNN; 2: RNN->FF; 3: FF->RNN
    assertEquals(3, conf1.getInputPreProcessors().size());
    assertTrue(conf1.getInputPreProcess(1) instanceof FeedForwardToRnnPreProcessor);
    assertTrue(conf1.getInputPreProcess(2) instanceof RnnToFeedForwardPreProcessor);
    assertTrue(conf1.getInputPreProcess(3) instanceof FeedForwardToRnnPreProcessor);


    //FF-> CNN, CNN-> FF, FF->RNN
    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder().nOut(10)
                                    .kernelSize(5, 5).stride(1, 1).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nOut(6).build())
                    .layer(2, new RnnOutputLayer.Builder().nIn(6).nOut(5).activation(Activation.SOFTMAX).build())
                    .setInputType(InputType.convolutionalFlat(28, 28, 1)).build();
    //Expect preprocessors: 0: FF->CNN; 1: CNN->FF; 2: FF->RNN
    assertEquals(3, conf2.getInputPreProcessors().size());
    assertTrue(conf2.getInputPreProcess(0) instanceof FeedForwardToCnnPreProcessor);
    assertTrue(conf2.getInputPreProcess(1) instanceof CnnToFeedForwardPreProcessor);
    assertTrue(conf2.getInputPreProcess(2) instanceof FeedForwardToRnnPreProcessor);

    //CNN-> FF, FF->RNN - InputType.convolutional instead of convolutionalFlat
    MultiLayerConfiguration conf2a = new NeuralNetConfiguration.Builder().list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder().nOut(10)
                                    .kernelSize(5, 5).stride(1, 1).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nOut(6).build())
                    .layer(2, new RnnOutputLayer.Builder().nIn(6).nOut(5).activation(Activation.SOFTMAX).build())
                    .setInputType(InputType.convolutional(28, 28, 1)).build();
    //Expect preprocessors: 1: CNN->FF; 2: FF->RNN
    assertEquals(2, conf2a.getInputPreProcessors().size());
    assertTrue(conf2a.getInputPreProcess(1) instanceof CnnToFeedForwardPreProcessor);
    assertTrue(conf2a.getInputPreProcess(2) instanceof FeedForwardToRnnPreProcessor);


    //FF->CNN and CNN->RNN:
    MultiLayerConfiguration conf3 = new NeuralNetConfiguration.Builder().list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder().nOut(10)
                                    .kernelSize(5, 5).stride(1, 1).build())
                    .layer(1, new GravesLSTM.Builder().nOut(6).build())
                    .layer(2, new RnnOutputLayer.Builder().nIn(6).nOut(5).activation(Activation.SOFTMAX).build())
                    .setInputType(InputType.convolutionalFlat(28, 28, 1)).build();
    //Expect preprocessors: 0: FF->CNN, 1: CNN->RNN;
    assertEquals(2, conf3.getInputPreProcessors().size());
    assertTrue(conf3.getInputPreProcess(0) instanceof FeedForwardToCnnPreProcessor);
    assertTrue(conf3.getInputPreProcess(1) instanceof CnnToRnnPreProcessor);
}
 
Example #16
Source File: MultiLayerTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testTruncatedBPTTWithMasking() {
    //Extremely simple test of the 'does it throw an exception' variety
    int timeSeriesLength = 100;
    int tbpttLength = 10;
    int miniBatchSize = 7;
    int nIn = 5;
    int nOut = 4;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
                    .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0, 0.5)).build())
                    .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH)
                                    .dist(
                                                    new NormalDistribution(0,
                                                                    0.5))
                                    .build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunction.MCXENT)
                                    .nIn(8).nOut(nOut).activation(Activation.SOFTMAX)
                                    .dist(new NormalDistribution(0, 0.5))
                                    .build())
                    .backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTBackwardLength(tbpttLength).tBPTTForwardLength(tbpttLength).build();

    Nd4j.getRandom().setSeed(12345);
    MultiLayerNetwork mln = new MultiLayerNetwork(conf);
    mln.init();

    INDArray features = Nd4j.rand(new int[] {miniBatchSize, nIn, timeSeriesLength});
    INDArray labels = Nd4j.rand(new int[] {miniBatchSize, nOut, timeSeriesLength});

    INDArray maskArrayInput = Nd4j.ones(miniBatchSize, timeSeriesLength);
    INDArray maskArrayOutput = Nd4j.ones(miniBatchSize, timeSeriesLength);

    DataSet ds = new DataSet(features, labels, maskArrayInput, maskArrayOutput);

    mln.fit(ds);
}
 
Example #17
Source File: MultiLayerTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
    public void testRnnActivateUsingStoredState() {
        int timeSeriesLength = 12;
        int miniBatchSize = 7;
        int nIn = 5;
        int nOut = 4;

        int nTimeSlices = 5;

        MultiLayerConfiguration conf =
                        new NeuralNetConfiguration.Builder().seed(12345).list().layer(0,
                                        new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7)
                                                        .activation(Activation.TANH)
                                                        .dist(new NormalDistribution(0, 0.5)).build())
                                        .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7)
                                                        .nOut(8).activation(Activation.TANH)

                                                        .dist(new NormalDistribution(0,
                                                                        0.5))
                                                        .build())
                                        .layer(2, new RnnOutputLayer.Builder(LossFunction.MCXENT)
                                                        .nIn(8).nOut(nOut)
                                                        .activation(Activation.SOFTMAX)

                                                        .dist(new NormalDistribution(0, 0.5)).build())
                                        .build();

        Nd4j.getRandom().setSeed(12345);
        MultiLayerNetwork mln = new MultiLayerNetwork(conf);
        mln.init();

        INDArray inputLong = Nd4j.rand(new int[] {miniBatchSize, nIn, nTimeSlices * timeSeriesLength});
        INDArray input = inputLong.get(NDArrayIndex.all(), NDArrayIndex.all(),
                        NDArrayIndex.interval(0, timeSeriesLength));

        List<INDArray> outStandard = mln.feedForward(input, true);
        List<INDArray> outRnnAct = mln.rnnActivateUsingStoredState(input, true, true);

        //As initially state is zeros: expect these to be the same
        assertEquals(outStandard, outRnnAct);

        //Furthermore, expect multiple calls to this function to be the same:
        for (int i = 0; i < 3; i++) {
            assertEquals(outStandard, mln.rnnActivateUsingStoredState(input, true, true));
        }

        List<INDArray> outStandardLong = mln.feedForward(inputLong, true);
        BaseRecurrentLayer<?> l0 = ((BaseRecurrentLayer<?>) mln.getLayer(0));
        BaseRecurrentLayer<?> l1 = ((BaseRecurrentLayer<?>) mln.getLayer(1));

        for (int i = 0; i < nTimeSlices; i++) {
            INDArray inSlice = inputLong.get(NDArrayIndex.all(), NDArrayIndex.all(),
                            NDArrayIndex.interval(i * timeSeriesLength, (i + 1) * timeSeriesLength));
            List<INDArray> outSlice = mln.rnnActivateUsingStoredState(inSlice, true, true);
            List<INDArray> expOut = new ArrayList<>();
            for (INDArray temp : outStandardLong) {
                expOut.add(temp.get(NDArrayIndex.all(), NDArrayIndex.all(),
                                NDArrayIndex.interval(i * timeSeriesLength, (i + 1) * timeSeriesLength)));
            }

            for (int j = 0; j < expOut.size(); j++) {
                INDArray exp = expOut.get(j);
                INDArray act = outSlice.get(j);
//                System.out.println(j);
//                System.out.println(exp.sub(act));
                assertEquals(exp, act);
            }

            assertEquals(expOut, outSlice);

            //Again, expect multiple calls to give the same output
            for (int j = 0; j < 3; j++) {
                outSlice = mln.rnnActivateUsingStoredState(inSlice, true, true);
                assertEquals(expOut, outSlice);
            }

            l0.rnnSetPreviousState(l0.rnnGetTBPTTState());
            l1.rnnSetPreviousState(l1.rnnGetTBPTTState());
        }
    }
 
Example #18
Source File: BidirectionalTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testSerializationCompGraph() throws Exception {

    for(WorkspaceMode wsm : WorkspaceMode.values()) {
        log.info("*** Starting workspace mode: " + wsm);

        Nd4j.getRandom().setSeed(12345);

        ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder()
                .activation(Activation.TANH)
                .weightInit(WeightInit.XAVIER)
                .trainingWorkspaceMode(wsm)
                .inferenceWorkspaceMode(wsm)
                .updater(new Adam())
                .graphBuilder()
                .addInputs("in")
                .layer("0", new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).dataFormat(rnnDataFormat).build()), "in")
                .layer("1", new Bidirectional(Bidirectional.Mode.ADD, new GravesLSTM.Builder().nIn(10).nOut(10).dataFormat(rnnDataFormat).build()), "0")
                .layer("2", new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).dataFormat(rnnDataFormat)
                        .nIn(10).nOut(10).build(), "1")
                .setOutputs("2")
                .build();

        ComputationGraph net1 = new ComputationGraph(conf1);
        net1.init();
        long[] inshape = (rnnDataFormat == NCW)? new long[]{3, 10, 5}: new long[]{3, 5, 10};
        INDArray in = Nd4j.rand(inshape);
        INDArray labels = Nd4j.rand(inshape);

        net1.fit(new DataSet(in, labels));

        byte[] bytes;
        try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
            ModelSerializer.writeModel(net1, baos, true);
            bytes = baos.toByteArray();
        }


        ComputationGraph net2 = ModelSerializer.restoreComputationGraph(new ByteArrayInputStream(bytes), true);


        in = Nd4j.rand(inshape);
        labels = Nd4j.rand(inshape);

        INDArray out1 = net1.outputSingle(in);
        INDArray out2 = net2.outputSingle(in);

        assertEquals(out1, out2);

        net1.setInput(0, in);
        net2.setInput(0, in);
        net1.setLabels(labels);
        net2.setLabels(labels);

        net1.computeGradientAndScore();
        net2.computeGradientAndScore();

        assertEquals(net1.score(), net2.score(), 1e-6);
        assertEquals(net1.gradient().gradient(), net2.gradient().gradient());
    }
}
 
Example #19
Source File: ComputationGraphTestRNN.java    From deeplearning4j with Apache License 2.0
@Test
public void testRnnTimeStep2dInput() {
    Nd4j.getRandom().setSeed(12345);
    int timeSeriesLength = 6;

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in")
                    .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(5).nOut(7)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "in")
                    .addLayer("1", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(7).nOut(8)
                                    .activation(Activation.TANH)
                                    .dist(new NormalDistribution(0,
                                                    0.5))
                                    .build(), "0")
                    .addLayer("2", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .nIn(8).nOut(4)
                                    .activation(Activation.SOFTMAX)
                                    .dist(new NormalDistribution(0, 0.5)).build(), "1")
                    .setOutputs("2").build();
    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray input3d = Nd4j.rand(new int[] {3, 5, timeSeriesLength});
    INDArray out3d = graph.rnnTimeStep(input3d)[0];
    assertArrayEquals(out3d.shape(), new long[] {3, 4, timeSeriesLength});

    graph.rnnClearPreviousState();
    for (int i = 0; i < timeSeriesLength; i++) {
        INDArray input2d = input3d.tensorAlongDimension(i, 1, 0);
        INDArray out2d = graph.rnnTimeStep(input2d)[0];

        assertArrayEquals(out2d.shape(), new long[] {3, 4});

        INDArray expOut2d = out3d.tensorAlongDimension(i, 1, 0);
        assertEquals(out2d, expOut2d);
    }

    //Check same but for input of size [3,5,1]. Expect [3,4,1] out
    graph.rnnClearPreviousState();
    for (int i = 0; i < timeSeriesLength; i++) {
        INDArray temp = Nd4j.create(new int[] {3, 5, 1});
        temp.tensorAlongDimension(0, 1, 0).assign(input3d.tensorAlongDimension(i, 1, 0));
        INDArray out3dSlice = graph.rnnTimeStep(temp)[0];
        assertArrayEquals(out3dSlice.shape(), new long[] {3, 4, 1});

        assertTrue(out3dSlice.tensorAlongDimension(0, 1, 0).equals(out3d.tensorAlongDimension(i, 1, 0)));
    }
}
 
Example #20
Source File: TestGraphNodes.java    From deeplearning4j with Apache License 2.0
@Test
public void testLastTimeStepWithTransfer(){
    int lstmLayerSize = 16;
    int numLabelClasses = 10;
    int numInputs = 5;

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
            .trainingWorkspaceMode(WorkspaceMode.NONE)
            .inferenceWorkspaceMode(WorkspaceMode.NONE)
            .seed(123)    //Random number generator seed for improved repeatability. Optional.
            .updater(new AdaDelta())
            .weightInit(WeightInit.XAVIER)
            .graphBuilder()
            .addInputs("rr")
            .setInputTypes(InputType.recurrent(30))
            .addLayer("1", new GravesLSTM.Builder().activation(Activation.TANH).nIn(numInputs).nOut(lstmLayerSize).dropOut(0.9).build(), "rr")
            .addLayer("2", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nOut(numLabelClasses).build(), "1")

            .setOutputs("2")
            .build();


    ComputationGraph net = new ComputationGraph(conf);
    net.init();

    ComputationGraph updatedModel = new TransferLearning.GraphBuilder(net)
            .addVertex("laststepoutput", new LastTimeStepVertex("rr"), "2")
            .setOutputs("laststepoutput")
            .build();


    INDArray input = Nd4j.rand(new int[]{10, numInputs, 16});

    INDArray[] out = updatedModel.output(input);

    assertNotNull(out);
    assertEquals(1, out.length);
    assertNotNull(out[0]);

    assertArrayEquals(new long[]{10, numLabelClasses}, out[0].shape());

    Map<String,INDArray> acts = updatedModel.feedForward(input, false);

    assertEquals(4, acts.size());   //2 layers + input + vertex output
    assertNotNull(acts.get("laststepoutput"));
    assertArrayEquals(new long[]{10, numLabelClasses}, acts.get("laststepoutput").shape());

    String toString = out[0].toString();
}
 
Example #21
Source File: TestVariableLengthTSCG.java    From deeplearning4j with Apache License 2.0
@Test
public void testOutputMaskingScoreMagnitudes() {
    //Idea: check magnitude of scores, with differing number of values masked out
    //i.e., MSE with zero weight init and 1.0 labels: know what to expect in terms of score

    int nIn = 3;
    int[] timeSeriesLengths = {3, 10};
    int[] outputSizes = {1, 2, 5};
    int[] miniBatchSizes = {1, 4};

    Random r = new Random(12345);

    for (int tsLength : timeSeriesLengths) {
        for (int nOut : outputSizes) {
            for (int miniBatch : miniBatchSizes) {
                for (int nToMask = 0; nToMask < tsLength - 1; nToMask++) {
                    String msg = "tsLen=" + tsLength + ", nOut=" + nOut + ", miniBatch=" + miniBatch;

                    INDArray labelMaskArray = Nd4j.ones(miniBatch, tsLength);
                    for (int i = 0; i < miniBatch; i++) {
                        //For each example: select which outputs to mask...
                        int nMasked = 0;
                        while (nMasked < nToMask) {
                            int tryIdx = r.nextInt(tsLength);
                            if (labelMaskArray.getDouble(i, tryIdx) == 0.0)
                                continue;
                            labelMaskArray.putScalar(new int[] {i, tryIdx}, 0.0);
                            nMasked++;
                        }
                    }

                    INDArray input = Nd4j.rand(new int[] {miniBatch, nIn, tsLength});
                    INDArray labels = Nd4j.ones(miniBatch, nOut, tsLength);

                    ComputationGraphConfiguration conf =
                                    new NeuralNetConfiguration.Builder().seed(12345L)
                                                    .graphBuilder()
                                                    .addInputs("in").addLayer("0",
                                                                    new GravesLSTM.Builder().nIn(nIn).nOut(5)

                                                                                    .dist(new NormalDistribution(0,
                                                                                                    1))
                                                                                    .updater(new NoOp()).build(),
                                                                    "in")
                                                    .addLayer("1", new RnnOutputLayer.Builder(
                                                                    LossFunctions.LossFunction.MSE)
                                                                                    .activation(Activation.IDENTITY)
                                                                                    .nIn(5).nOut(nOut)
                                                                                    .weightInit(WeightInit.ZERO)
                                                                                    .updater(new NoOp()).build(),
                                                                    "0")
                                                    .setOutputs("1").build();
                    ComputationGraph net = new ComputationGraph(conf);
                    net.init();

                    //MSE loss with zero weight init and 1.0 labels: every unmasked time step contributes a squared error of 1.0,
                    //so the expected score is simply the number of unmasked time steps
                    double expScore = tsLength - nToMask; //Sum over minibatches, then divide by minibatch size

                    net.setLayerMaskArrays(null, new INDArray[] {labelMaskArray});
                    net.setInput(0, input);
                    net.setLabel(0, labels);

                    net.computeGradientAndScore();
                    double score = net.score();

                    assertEquals(msg, expScore, score, 0.1);
                }
            }
        }
    }
}
 
Example #22
Source File: TestVariableLengthTSCG.java    From deeplearning4j with Apache License 2.0
@Test
public void testOutputMasking() {
    //If labels are masked: want zero outputs for that time step.

    int nIn = 3;
    int[] timeSeriesLengths = {3, 10};
    int[] outputSizes = {1, 2, 5};
    int[] miniBatchSizes = {1, 4};

    Random r = new Random(12345);

    for (int tsLength : timeSeriesLengths) {
        for (int nOut : outputSizes) {
            for (int miniBatch : miniBatchSizes) {
                for (int nToMask = 0; nToMask < tsLength - 1; nToMask++) {
                    INDArray labelMaskArray = Nd4j.ones(miniBatch, tsLength);
                    for (int i = 0; i < miniBatch; i++) {
                        //For each example: select which outputs to mask...
                        int nMasked = 0;
                        while (nMasked < nToMask) {
                            int tryIdx = r.nextInt(tsLength);
                            if (labelMaskArray.getDouble(i, tryIdx) == 0.0)
                                continue;
                            labelMaskArray.putScalar(new int[] {i, tryIdx}, 0.0);
                            nMasked++;
                        }
                    }

                    INDArray input = Nd4j.rand(new int[] {miniBatch, nIn, tsLength});

                    ComputationGraphConfiguration conf =
                                    new NeuralNetConfiguration.Builder().seed(12345L)
                                                    .graphBuilder()
                                                    .addInputs("in").addLayer("0",
                                                                    new GravesLSTM.Builder().nIn(nIn).nOut(5)

                                                                                    .dist(new NormalDistribution(0,
                                                                                                    1))
                                                                                    .updater(new NoOp()).build(),
                                                                    "in")
                                                    .addLayer("1", new RnnOutputLayer.Builder(
                                                                    LossFunctions.LossFunction.MSE)
                                                                                    .activation(Activation.IDENTITY)
                                                                                    .nIn(5).nOut(nOut)
                                                                                    .weightInit(WeightInit.XAVIER)
                                                                                    .updater(new NoOp()).build(),
                                                                    "0")
                                                    .setOutputs("1").build();
                    ComputationGraph net = new ComputationGraph(conf);
                    net.init();

                    ComputationGraphConfiguration conf2 =
                                    new NeuralNetConfiguration.Builder().seed(12345L)
                                                    .graphBuilder()
                                                    .addInputs("in").addLayer("0",
                                                                    new GravesLSTM.Builder().nIn(nIn).nOut(5)

                                                                                    .dist(new NormalDistribution(0,
                                                                                                    1))
                                                                                    .updater(new NoOp()).build(),
                                                                    "in")
                                                    .addLayer("1", new RnnOutputLayer.Builder(
                                                                    LossFunctions.LossFunction.XENT)
                                                                                    .activation(Activation.SIGMOID)
                                                                                    .nIn(5).nOut(nOut)
                                                                                    .weightInit(WeightInit.XAVIER)
                                                                                    .updater(new NoOp()).build(),
                                                                    "0")
                                                    .setOutputs("1").build();
                    ComputationGraph net2 = new ComputationGraph(conf2);
                    net2.init();

                    net.setLayerMaskArrays(null, new INDArray[] {labelMaskArray});
                    net2.setLayerMaskArrays(null, new INDArray[] {labelMaskArray});


                    INDArray out = net.output(input)[0];
                    INDArray out2 = net2.output(input)[0];
                    for (int i = 0; i < miniBatch; i++) {
                        for (int j = 0; j < tsLength; j++) {
                            double m = labelMaskArray.getDouble(i, j);
                            if (m == 0.0) {
                                //Expect outputs to be exactly 0.0
                                INDArray outRow = out.get(NDArrayIndex.point(i), NDArrayIndex.all(),
                                                NDArrayIndex.point(j));
                                INDArray outRow2 = out2.get(NDArrayIndex.point(i), NDArrayIndex.all(),
                                                NDArrayIndex.point(j));
                                for (int k = 0; k < nOut; k++) {
                                    assertEquals(0.0, outRow.getDouble(k), 0.0);
                                    assertEquals(0.0, outRow2.getDouble(k), 0.0);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
 
Example #23
Source File: ValidateCudnnLSTM.java    From deeplearning4j with Apache License 2.0
@Test
public void validateImplSimple() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int inputSize = 3;
    int lstmLayerSize = 4;
    int timeSeriesLength = 3;
    int nOut = 2;
    INDArray input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
    INDArray labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        for (int j = 0; j < timeSeriesLength; j++) {
            labels.putScalar(i, r.nextInt(nOut), j, 1.0);
        }
    }

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().inferenceWorkspaceMode(WorkspaceMode.NONE)
                    .trainingWorkspaceMode(WorkspaceMode.NONE).updater(new NoOp())
                    .seed(12345L)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new LSTM.Builder().nIn(input.size(1)).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                    .build();

    MultiLayerNetwork mln1 = new MultiLayerNetwork(conf.clone());
    mln1.init();

    MultiLayerNetwork mln2 = new MultiLayerNetwork(conf.clone());
    mln2.init();


    assertEquals(mln1.params(), mln2.params());

    //Use reflection to clear the "helper" field on mln1's LSTM layer, forcing it onto the built-in
    //implementation; mln2 keeps its CudnnLSTMHelper, so the two implementations can be compared
    Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
    f.setAccessible(true);

    Layer l0 = mln1.getLayer(0);
    f.set(l0, null);
    assertNull(f.get(l0));

    l0 = mln2.getLayer(0);
    assertTrue(f.get(l0) instanceof CudnnLSTMHelper);


    INDArray out1 = mln1.output(input);
    INDArray out2 = mln2.output(input);

    assertEquals(out1, out2);


    mln1.setInput(input);
    mln1.setLabels(labels);

    mln2.setInput(input);
    mln2.setLabels(labels);

    mln1.computeGradientAndScore();
    mln2.computeGradientAndScore();

    assertEquals(mln1.score(), mln2.score(), 1e-5);

    Gradient g1 = mln1.gradient();
    Gradient g2 = mln2.gradient();

    //Compare per-variable gradients between the built-in and cuDNN-backed implementations
    for (Map.Entry<String, INDArray> entry : g1.gradientForVariable().entrySet()) {
        INDArray exp = entry.getValue();
        INDArray act = g2.gradientForVariable().get(entry.getKey());
        assertEquals(entry.getKey(), exp, act);
    }

    assertEquals(mln1.getFlattenedGradients(), mln2.getFlattenedGradients());
}
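The test above removes the cuDNN helper from mln1 via reflection, so mln1 runs the built-in LSTM implementation while mln2 keeps the CudnnLSTMHelper, and then asserts that outputs, scores and flattened gradients match. Should exact equality ever be too strict (for example when comparing across floating-point data types), the per-variable gradient check can be relaxed to a tolerance-based comparison; a minimal sketch, assuming a 1e-6 tolerance is acceptable:

for (Map.Entry<String, INDArray> entry : g1.gradientForVariable().entrySet()) {
    INDArray exp = entry.getValue();
    INDArray act = g2.gradientForVariable().get(entry.getKey());
    assertTrue("Gradient mismatch for " + entry.getKey(), exp.equalsWithEps(act, 1e-6));
}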
 
Example #26
Source File: ValidateCudnnLSTM.java    From deeplearning4j with Apache License 2.0
@Test
public void validateImplMultiLayer() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int inputSize = 3;
    int lstmLayerSize = 4;
    int timeSeriesLength = 3;
    int nOut = 2;
    INDArray input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
    INDArray labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        for (int j = 0; j < timeSeriesLength; j++) {
            labels.putScalar(i, r.nextInt(nOut), j, 1.0);
        }
    }

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .dataType(DataType.DOUBLE)
                    .inferenceWorkspaceMode(WorkspaceMode.NONE).trainingWorkspaceMode(WorkspaceMode.NONE)
                    .seed(12345L)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new LSTM.Builder().nIn(input.size(1)).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                    .build();

    MultiLayerNetwork mln1 = new MultiLayerNetwork(conf.clone());
    mln1.init();

    MultiLayerNetwork mln2 = new MultiLayerNetwork(conf.clone());
    mln2.init();


    assertEquals(mln1.params(), mln2.params());

    Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
    f.setAccessible(true);

    Layer l0 = mln1.getLayer(0);
    Layer l1 = mln1.getLayer(1);
    f.set(l0, null);
    f.set(l1, null);
    assertNull(f.get(l0));
    assertNull(f.get(l1));

    l0 = mln2.getLayer(0);
    l1 = mln2.getLayer(1);
    assertTrue(f.get(l0) instanceof CudnnLSTMHelper);
    assertTrue(f.get(l1) instanceof CudnnLSTMHelper);


    INDArray out1 = mln1.output(input);
    INDArray out2 = mln2.output(input);

    assertEquals(out1, out2);

    for (int x = 0; x < 10; x++) {
        input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
        labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
        for (int i = 0; i < minibatch; i++) {
            for (int j = 0; j < timeSeriesLength; j++) {
                labels.putScalar(i, r.nextInt(nOut), j, 1.0);
            }
        }

        mln1.setInput(input);
        mln1.setLabels(labels);

        mln2.setInput(input);
        mln2.setLabels(labels);

        mln1.computeGradientAndScore();
        mln2.computeGradientAndScore();

        assertEquals(mln1.score(), mln2.score(), 1e-5);

        assertEquals(mln1.getFlattenedGradients(), mln2.getFlattenedGradients());

        mln1.fit(new DataSet(input, labels));
        mln2.fit(new DataSet(input, labels));

        assertEquals("Iteration: " + x, mln1.params(), mln2.params());
    }
}
 
Example #27
Source File: ValidateCudnnLSTM.java    From deeplearning4j with Apache License 2.0
@Test
public void validateImplMultiLayerTBPTT() throws Exception {

    Nd4j.getRandom().setSeed(12345);
    int minibatch = 10;
    int inputSize = 3;
    int lstmLayerSize = 4;
    int timeSeriesLength = 23;
    int tbpttLength = 5;
    int nOut = 2;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .inferenceWorkspaceMode(WorkspaceMode.NONE).trainingWorkspaceMode(WorkspaceMode.NONE)
                    .seed(12345L)
                    .dist(new NormalDistribution(0, 2)).list()
                    .layer(0, new LSTM.Builder().nIn(inputSize).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize)
                                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                    .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                    .backpropType(BackpropType.TruncatedBPTT)
                    .tBPTTLength(tbpttLength).build();

    MultiLayerNetwork mln1 = new MultiLayerNetwork(conf.clone());
    mln1.init();

    MultiLayerNetwork mln2 = new MultiLayerNetwork(conf.clone());
    mln2.init();


    assertEquals(mln1.params(), mln2.params());

    Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
    f.setAccessible(true);

    Layer l0 = mln1.getLayer(0);
    Layer l1 = mln1.getLayer(1);
    f.set(l0, null);
    f.set(l1, null);
    assertNull(f.get(l0));
    assertNull(f.get(l1));

    l0 = mln2.getLayer(0);
    l1 = mln2.getLayer(1);
    assertTrue(f.get(l0) instanceof CudnnLSTMHelper);
    assertTrue(f.get(l1) instanceof CudnnLSTMHelper);

    Random r = new Random(12345);
    for (int x = 0; x < 1; x++) {
        INDArray input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
        INDArray labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
        for (int i = 0; i < minibatch; i++) {
            for (int j = 0; j < timeSeriesLength; j++) {
                labels.putScalar(i, r.nextInt(nOut), j, 1.0);
            }
        }

        DataSet ds = new DataSet(input, labels);
        mln1.fit(ds);
        mln2.fit(ds);
    }


    assertEquals(mln1.params(), mln2.params());
}
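The test above trains with truncated BPTT: each 23-step sequence is split into segments of tbpttLength = 5, and a parameter update is performed per segment rather than once per full sequence. tBPTTLength(...) sets both the forward and the backward truncation length at once; they can also be set independently. A minimal sketch reusing the variable names from the example above (imports as before):

MultiLayerConfiguration tbpttConf = new NeuralNetConfiguration.Builder().updater(new NoOp())
                .seed(12345L)
                .dist(new NormalDistribution(0, 2)).list()
                .layer(0, new LSTM.Builder().nIn(inputSize).nOut(lstmLayerSize)
                                .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                .backpropType(BackpropType.TruncatedBPTT)
                .tBPTTForwardLength(tbpttLength).tBPTTBackwardLength(tbpttLength)
                .build();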
 
Example #28
Source File: ValidateCudnnLSTM.java    From deeplearning4j with Apache License 2.0
@Test
public void validateImplMultiLayerRnnTimeStep() throws Exception {

    for(WorkspaceMode wsm : new WorkspaceMode[]{WorkspaceMode.NONE, WorkspaceMode.ENABLED}) {
        Nd4j.getRandom().setSeed(12345);
        int minibatch = 10;
        int inputSize = 3;
        int lstmLayerSize = 4;
        int timeSeriesLength = 3;
        int tbpttLength = 5;
        int nOut = 2;

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp())
                .inferenceWorkspaceMode(WorkspaceMode.NONE).trainingWorkspaceMode(WorkspaceMode.NONE)
                .cacheMode(CacheMode.NONE).seed(12345L)
                .dist(new NormalDistribution(0, 2)).list()
                .layer(0, new LSTM.Builder().nIn(inputSize).nOut(lstmLayerSize)
                        .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                .layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize)
                        .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
                .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build())
                .backpropType(BackpropType.TruncatedBPTT)
                .tBPTTLength(tbpttLength).build();

        MultiLayerNetwork mln1 = new MultiLayerNetwork(conf.clone());
        mln1.init();

        MultiLayerNetwork mln2 = new MultiLayerNetwork(conf.clone());
        mln2.init();


        assertEquals(mln1.params(), mln2.params());

        Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
        f.setAccessible(true);

        Layer l0 = mln1.getLayer(0);
        Layer l1 = mln1.getLayer(1);
        f.set(l0, null);
        f.set(l1, null);
        assertNull(f.get(l0));
        assertNull(f.get(l1));

        l0 = mln2.getLayer(0);
        l1 = mln2.getLayer(1);
        assertTrue(f.get(l0) instanceof CudnnLSTMHelper);
        assertTrue(f.get(l1) instanceof CudnnLSTMHelper);

        Random r = new Random(12345);
        for (int x = 0; x < 5; x++) {
            INDArray input = Nd4j.rand(new int[]{minibatch, inputSize, timeSeriesLength});

            INDArray step1 = mln1.rnnTimeStep(input);
            INDArray step2 = mln2.rnnTimeStep(input);

            assertEquals("Step: " + x, step1, step2);
        }

        assertEquals(mln1.params(), mln2.params());

        //Also check fit (mainly for workspaces sanity check):
        INDArray in = Nd4j.rand(new int[]{minibatch, inputSize, 3 * tbpttLength});
        INDArray label = TestUtils.randomOneHotTimeSeries(minibatch, nOut, 3 * tbpttLength);
        for( int i=0; i<3; i++ ){
            mln1.fit(in, label);
            mln2.fit(in, label);
        }
    }
}
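rnnTimeStep(...) does stateful inference: each call consumes the next chunk of a sequence (in the example above, three time steps at a time) and stores the LSTM state internally, so successive calls behave as if the whole sequence had been processed in one go. A minimal sketch of the usual one-step-at-a-time inference loop, reusing the variables above; rnnClearPreviousState() resets the stored state before a new sequence:

mln1.rnnClearPreviousState();                                // forget any previously stored state
for (int t = 0; t < timeSeriesLength; t++) {
    INDArray step = Nd4j.rand(new int[]{minibatch, inputSize, 1});
    INDArray stepOut = mln1.rnnTimeStep(step);               // output for this single time step only
}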
 
Example #29
Source File: RnnOutputLayerSpace.java    From deeplearning4j with Apache License 2.0
@Override
public RnnOutputLayer getValue(double[] values) {
    //Map a point in the hyperparameter search space to a concrete RnnOutputLayer configuration
    RnnOutputLayer.Builder b = new RnnOutputLayer.Builder();
    setLayerOptionsBuilder(b, values);
    return b.build();
}
 
Example #30
Source File: RnnOutputLayerSpace.java    From deeplearning4j with Apache License 2.0
protected void setLayerOptionsBuilder(RnnOutputLayer.Builder builder, double[] values) {
    super.setLayerOptionsBuilder(builder, values);
}
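getValue(double[]) and setLayerOptionsBuilder(...) come from the Arbiter hyperparameter-optimization module: getValue maps a point in the search space to a concrete RnnOutputLayer, delegating the options shared with other layer spaces to the parent class. Outside of a hyperparameter search the layer is configured directly, as in the earlier examples. A minimal, self-contained sketch (sizes and updater are illustrative assumptions; imports as in the examples above), showing that an RnnOutputLayer produces one output vector per time step, i.e. activations of shape [miniBatch, nOut, timeSeriesLength]:

int nIn = 5, hidden = 8, nOut = 3, tsLength = 7, miniBatch = 2;

MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new Adam(1e-3))
                .list()
                .layer(0, new LSTM.Builder().nIn(nIn).nOut(hidden).activation(Activation.TANH).build())
                .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                                .activation(Activation.SOFTMAX).nIn(hidden).nOut(nOut).build())
                .build();

MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();

INDArray features = Nd4j.rand(new int[]{miniBatch, nIn, tsLength});
INDArray out = net.output(features);                         // shape: [miniBatch, nOut, tsLength]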