Java Code Examples for org.nd4j.linalg.factory.Nd4j#rand()

The following examples show how to use org.nd4j.linalg.factory.Nd4j#rand(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
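
Before the project examples, here is a minimal self-contained sketch of the rand() overloads that appear in the snippets below (rows/columns, int[] and long[] shapes, DataType plus shape, and an explicit Random instance). The class name RandOverloadSketch is invented for illustration, and the exact set of overloads varies between ND4J releases, so treat the signatures as illustrative rather than authoritative.

import java.util.Arrays;

import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.rng.Random;
import org.nd4j.linalg.factory.Nd4j;

public class RandOverloadSketch {
    public static void main(String[] args) {
        // Fix the global seed so the uniform samples are reproducible, as many of the tests below do
        Nd4j.getRandom().setSeed(12345);

        // Rows/columns overload: a 3x4 matrix of uniform values in [0, 1)
        INDArray a = Nd4j.rand(3, 4);

        // Shape-array overloads, as used in several examples below
        INDArray b = Nd4j.rand(new int[]{1, 100});
        INDArray c = Nd4j.rand(new long[]{1, 13, 10});

        // Data type plus shape (availability depends on the ND4J version)
        INDArray d = Nd4j.rand(DataType.FLOAT, 2, 3, 12);

        // Overload taking an explicit Random instance (see the RandomTests example below)
        Random rng = Nd4j.getRandomFactory().getNewRandomInstance(119, 100000);
        INDArray e = Nd4j.rand(1, 10000, rng);

        System.out.println(Arrays.toString(a.shape()) + " " + Arrays.toString(d.shape()));
    }
}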
Example 1
Source File: SameDiffTests.java    From nd4j with Apache License 2.0
@Test
public void validateMeanDiff() {
    Nd4j.getRandom().setSeed(12345);

    INDArray arr = Nd4j.rand(3, 4);

    SameDiff sd = SameDiff.create();
    SDVariable v = sd.var("in", arr);
    SDVariable mean = sd.mean("mean", v);

    INDArray out = sd.execAndEndResult();
    assertEquals(out, arr.mean(Integer.MAX_VALUE));

    sd.execBackwards();
    INDArray dLdIn = sd.grad("in").getArr();

    //If L = mean(in)
    //then dL/dIn = 1/N

    assertEquals(Nd4j.valueArrayOf(arr.shape(), 1.0 / arr.length()), dLdIn);
}
 
Example 2
Source File: SpecialWorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testMmapedWorkspace_Path_Limits_1() throws Exception {
    if (!Nd4j.getEnvironment().isCPU())
        return;

    // build a very long file name suffix
    val builder = new StringBuilder("long_file_name_");
    for (int e = 0; e < 100; e++)
        builder.append("9");


    val tmpFile = Files.createTempFile("some", builder.toString());
    val mmap = WorkspaceConfiguration.builder()
            .initialSize(200 * 1024L * 1024L) // 200 MB
            .tempFilePath(tmpFile.toAbsolutePath().toString())
            .policyLocation(LocationPolicy.MMAP)
            .policyLearning(LearningPolicy.NONE)
            .build();

    try (val ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(mmap, "M2")) {
        val x = Nd4j.rand(DataType.FLOAT, 1024);
    }
}
 
Example 3
Source File: WritableTest.java    From DataVec with Apache License 2.0
@Test
public void testWritableEqualityReflexive() {
    assertEquals(new IntWritable(1), new IntWritable(1));
    assertEquals(new LongWritable(1), new LongWritable(1));
    assertEquals(new DoubleWritable(1), new DoubleWritable(1));
    assertEquals(new FloatWritable(1), new FloatWritable(1));
    assertEquals(new Text("Hello"), new Text("Hello"));
    assertEquals(new BytesWritable("Hello".getBytes()),new BytesWritable("Hello".getBytes()));
    INDArray ndArray = Nd4j.rand(new int[]{1, 100});

    assertEquals(new NDArrayWritable(ndArray), new NDArrayWritable(ndArray));
    assertEquals(new NullWritable(), new NullWritable());
    assertEquals(new BooleanWritable(true), new BooleanWritable(true));
    byte b = 0;
    assertEquals(new ByteWritable(b), new ByteWritable(b));
}
 
Example 4
Source File: NearestNeighborTest.java    From deeplearning4j with Apache License 2.0
@Test
public void vpTreeTest() throws Exception {
    INDArray matrix = Nd4j.rand(new int[] {400,10});
    INDArray rowVector = matrix.getRow(70);
    INDArray resultArr = Nd4j.zeros(400,1);
    Executor executor = Executors.newSingleThreadExecutor();
    VPTree vpTree = new VPTree(matrix);
    System.out.println("Ran!");
}
 
Example 5
Source File: RnnDataFormatTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testGraveBiLSTM() {
    try {

        Nd4j.getRandom().setSeed(12345);
        Nd4j.getEnvironment().allowHelpers(helpers);
        String msg = "Helpers: " + helpers + ", lastTimeStep: " + lastTimeStep + ", maskZeros: " + maskZeros;
        System.out.println(" --- " + msg + " ---");

        INDArray inNCW = Nd4j.rand(DataType.FLOAT, 2, 3, 12);

        INDArray labelsNWC = lastTimeStep ? TestUtils.randomOneHot(2, 10) : TestUtils.randomOneHot(2 * 12, 10).reshape(2, 12, 10);

        TestCase tc = TestCase.builder()
                .msg(msg)
                .net1(getGravesBidirectionalLstmNet(RNNFormat.NCW, true, lastTimeStep, maskZeros))
                .net2(getGravesBidirectionalLstmNet(RNNFormat.NCW, false, lastTimeStep, maskZeros))
                .net3(getGravesBidirectionalLstmNet(RNNFormat.NWC, true, lastTimeStep, maskZeros))
                .net4(getGravesBidirectionalLstmNet(RNNFormat.NWC, false, lastTimeStep, maskZeros))
                .inNCW(inNCW)
                .labelsNCW(lastTimeStep ? labelsNWC : labelsNWC.permute(0, 2, 1))
                .labelsNWC(labelsNWC)
                .testLayerIdx(1)
                .build();

        TestCase.testHelper(tc);


    } finally {
        Nd4j.getEnvironment().allowHelpers(true);
    }
}
 
Example 6
Source File: ConvDataFormatTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testDepthwiseConv2d() {
    try {
        for (boolean helpers : new boolean[]{false, true}) {
            for (ConvolutionMode cm : new ConvolutionMode[]{ConvolutionMode.Truncate, ConvolutionMode.Same}) {
                Nd4j.getRandom().setSeed(12345);
                Nd4j.getEnvironment().allowHelpers(helpers);
                String msg = helpers ? "With helpers (" + cm + ")" : "No helpers (" + cm + ")";
                System.out.println(" --- " + msg + " ---");

                INDArray inNCHW = Nd4j.rand(this.dataType, 2, 3, 12, 12);
                INDArray labels = TestUtils.randomOneHot(2, 10);

                TestCase tc = TestCase.builder()
                        .msg(msg)
                        .net1(getDepthwiseConv2dNet(CNN2DFormat.NCHW, true, cm))
                        .net2(getDepthwiseConv2dNet(CNN2DFormat.NCHW, false, cm))
                        .net3(getDepthwiseConv2dNet(CNN2DFormat.NHWC, true, cm))
                        .net4(getDepthwiseConv2dNet(CNN2DFormat.NHWC, false, cm))
                        .inNCHW(inNCHW)
                        .labelsNCHW(labels)
                        .labelsNHWC(labels)
                        .testLayerIdx(1)
                        .helpers(helpers)
                        .build();

                testHelper(tc);
            }
        }
    } finally {
        Nd4j.getEnvironment().allowHelpers(true);
    }
}
 
Example 7
Source File: EvaluationCalibrationTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testEvaluationCalibration3dMasking() {
    INDArray prediction = Nd4j.rand(DataType.FLOAT, 2, 3, 10);
    INDArray label = Nd4j.rand(DataType.FLOAT, 2, 3, 10);

    List<INDArray> rowsP = new ArrayList<>();
    List<INDArray> rowsL = new ArrayList<>();

    //Check "DL4J-style" 2d per timestep masking [minibatch, seqLength] mask shape
    INDArray mask2d = Nd4j.randomBernoulli(0.5, 2, 10);
    NdIndexIterator iter = new NdIndexIterator(2, 10);
    while (iter.hasNext()) {
        long[] idx = iter.next();
        if(mask2d.getDouble(idx[0], idx[1]) != 0.0) {
            INDArrayIndex[] idxs = new INDArrayIndex[]{NDArrayIndex.point(idx[0]), NDArrayIndex.all(), NDArrayIndex.point(idx[1])};
            rowsP.add(prediction.get(idxs));
            rowsL.add(label.get(idxs));
        }
    }
    INDArray p2d = Nd4j.vstack(rowsP);
    INDArray l2d = Nd4j.vstack(rowsL);

    EvaluationCalibration e3d_m2d = new EvaluationCalibration();
    EvaluationCalibration e2d_m2d = new EvaluationCalibration();
    e3d_m2d.eval(label, prediction, mask2d);
    e2d_m2d.eval(l2d, p2d);

    assertEquals(e3d_m2d, e2d_m2d);
}
 
Example 8
Source File: CuDNNGradientChecks.java    From deeplearning4j with Apache License 2.0
@Test
public void testDenseBatchNorm() {

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .dataType(DataType.DOUBLE)
            .seed(12345)
            .weightInit(WeightInit.XAVIER)
            .updater(new NoOp())
            .list()
            .layer(new DenseLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH).build())
            .layer(new BatchNormalization.Builder().nOut(5).build())
            .layer(new OutputLayer.Builder().nIn(5).nOut(5).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray in = Nd4j.rand(3, 5);
    INDArray labels = TestUtils.randomOneHot(3, 5);

    //Mean and variance vars are not gradient checkable; mean/variance "gradient" is used to implement running mean/variance calc
    //i.e., runningMean = decay * runningMean + (1-decay) * batchMean
    //However, numerical gradient will be 0 as forward pass doesn't depend on this "parameter"
    Set<String> excludeParams = new HashSet<>(Arrays.asList("1_mean", "1_var", "1_log10stdev"));
    boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, in, labels, null, null, false, -1, excludeParams, null);

    assertTrue(gradOK);

    TestUtils.testModelSerialization(net);
}
 
Example 9
Source File: RandomProjectionLSHTest.java    From deeplearning4j with Apache License 2.0
@Before
public void setUp() {
    Nd4j.getRandom().setSeed(12345);
    Nd4j.setDefaultDataTypes(DataType.DOUBLE, DataType.DOUBLE);
    rpLSH = new RandomProjectionLSH(hashLength, numTables, intDimensions, 0.1f);
    inputs = Nd4j.rand(DataType.DOUBLE, 100, intDimensions);
    e1 = Nd4j.ones(DataType.DOUBLE, 1, intDimensions);
}
 
Example 10
Source File: TestSetGetParameters.java    From deeplearning4j with Apache License 2.0
@Test
public void testSetParametersRNN() {
    //Set up an MLN, then do set(get) on parameters. Results should be identical to before doing this.

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list()
                    .layer(0, new GravesLSTM.Builder().nIn(9).nOut(10)
                                    .dist(new NormalDistribution(0, 1)).build())
                    .layer(1, new GravesLSTM.Builder().nIn(10).nOut(11)
                                    .dist(new NormalDistribution(0, 1)).build())
                    .layer(2, new RnnOutputLayer.Builder(LossFunction.MSE)
                                    .dist(new NormalDistribution(0, 1)).nIn(11).nOut(12).build())
                    .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray initParams = net.params().dup();
    Map<String, INDArray> initParams2 = net.paramTable();

    net.setParams(net.params());

    INDArray initParamsAfter = net.params();
    Map<String, INDArray> initParams2After = net.paramTable();

    for (String s : initParams2.keySet()) {
        assertTrue("Params differ: " + s, initParams2.get(s).equals(initParams2After.get(s)));
    }

    assertEquals(initParams, initParamsAfter);

    //Now, try the other way: get(set(random))
    INDArray randomParams = Nd4j.rand(initParams.dataType(), initParams.shape());
    net.setParams(randomParams.dup());

    assertEquals(net.params(), randomParams);
}
 
Example 11
Source File: RandomTests.java    From nd4j with Apache License 2.0
@Test
public void testStepOver4() throws Exception {
    Random random1 = Nd4j.getRandomFactory().getNewRandomInstance(119, 100000);
    Random random2 = Nd4j.getRandomFactory().getNewRandomInstance(119, 100000);

    for (int x = 0; x < 1000; x++) {
        INDArray z1 = Nd4j.rand(1, 10000, random1);
        INDArray z2 = Nd4j.rand(1, 10000, random2);

        assertEquals(z1, z2);
    }
}
 
Example 12
Source File: DataSetTest.java    From nd4j with Apache License 2.0
@Test
public void testGetRangeMask() {
    org.nd4j.linalg.dataset.api.DataSet ds = new DataSet();
    //Checking printing of masks
    int numExamples = 10;
    int inSize = 13;
    int labelSize = 5;
    int minTSLength = 10; //Lengths 10, 11, ..., 19

    Nd4j.getRandom().setSeed(12345);
    List<DataSet> list = new ArrayList<>(numExamples);
    for (int i = 0; i < numExamples; i++) {
        INDArray in = Nd4j.rand(new long[] {1, inSize, minTSLength + i});
        INDArray out = Nd4j.rand(new long[] {1, labelSize, minTSLength + i});
        list.add(new DataSet(in, out));
    }

    int from = 3;
    int to = 9;
    ds = DataSet.merge(list);
    org.nd4j.linalg.dataset.api.DataSet newDs = ds.getRange(from, to);
    //In general the feature mask need not equal the label mask, but in this example it should be
    assertEquals(newDs.getLabelsMaskArray(), newDs.getFeaturesMaskArray());
    //System.out.println(newDs);
    assertEquals(Nd4j.linspace(numExamples + from, numExamples + to - 1, to - from),
                    newDs.getLabelsMaskArray().sum(1));
}
 
Example 13
Source File: TestPythonTransformProcess.java    From deeplearning4j with Apache License 2.0
@Test(timeout = 60000L)
public void testNDArrayMixed() throws Exception{
    long[] shape = new long[]{3, 2};
    INDArray arr1 = Nd4j.rand(DataType.DOUBLE, shape);
    INDArray arr2 = Nd4j.rand(DataType.DOUBLE, shape);
    INDArray expectedOutput = arr1.add(arr2.castTo(DataType.DOUBLE));

    Builder schemaBuilder = new Builder();
    schemaBuilder
            .addColumnNDArray("col1", shape)
            .addColumnNDArray("col2", shape);

    Schema initialSchema = schemaBuilder.build();
    schemaBuilder.addColumnNDArray("col3", shape);
    Schema finalSchema = schemaBuilder.build();

    String pythonCode = "col3 = col1 + col2";
    TransformProcess tp = new TransformProcess.Builder(initialSchema).transform(
            PythonTransform.builder().code(pythonCode)
                    .outputSchema(finalSchema)
                    .build()
    ).build();

    List<Writable> inputs = Arrays.asList(
            (Writable)
            new NDArrayWritable(arr1),
            new NDArrayWritable(arr2)
    );

    List<Writable> outputs = tp.execute(inputs);
    assertEquals(arr1, ((NDArrayWritable)outputs.get(0)).get());
    assertEquals(arr2, ((NDArrayWritable)outputs.get(1)).get());
    assertEquals(expectedOutput,((NDArrayWritable)outputs.get(2)).get());

}
 
Example 14
Source File: TransferLearningComplex.java    From deeplearning4j with Apache License 2.0
@Test
public void testLessSimpleMergeBackProp() {

    NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.9))
                    .activation(Activation.IDENTITY);

    /*
            inCentre                inRight
               |                        |
         denseCentre0               denseRight0
               |                        |
               |------ mergeRight ------|
               |            |
             outCentre     outRight
    
    */

    ComputationGraphConfiguration conf = overallConf.graphBuilder().addInputs("inCentre", "inRight")
                    .addLayer("denseCentre0", new DenseLayer.Builder().nIn(2).nOut(2).build(), "inCentre")
                    .addLayer("outCentre", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(2).nOut(2).build(),"denseCentre0")
                    .addLayer("denseRight0", new DenseLayer.Builder().nIn(3).nOut(2).build(), "inRight")
                    .addVertex("mergeRight", new MergeVertex(), "denseCentre0", "denseRight0")
                    .addLayer("outRight", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(4).nOut(2).build(),"mergeRight")
                    .setOutputs("outCentre", "outRight").build();
    ComputationGraph modelToTune = new ComputationGraph(conf);
    modelToTune.init();
    modelToTune.getVertex("denseCentre0").setLayerAsFrozen();

    MultiDataSet randData = new MultiDataSet(new INDArray[] {Nd4j.rand(2, 2), Nd4j.rand(2, 3)},
                    new INDArray[] {Nd4j.rand(2, 2), Nd4j.rand(2, 2)});
    INDArray denseCentre0 = modelToTune.feedForward(randData.getFeatures(), false).get("denseCentre0");
    MultiDataSet otherRandData =
                    new MultiDataSet(new INDArray[] {denseCentre0, randData.getFeatures(1)}, randData.getLabels());

    ComputationGraph modelNow =
                    new TransferLearning.GraphBuilder(modelToTune).setFeatureExtractor("denseCentre0").build();
    assertTrue(modelNow.getLayer("denseCentre0") instanceof FrozenLayer);
    int n = 0;
    while (n < 5) {
        //confirm that the activations out of the frozen vertex are the same as the input to the other model
        modelToTune.fit(randData);
        modelNow.fit(randData);

        assertEquals(otherRandData.getFeatures(0),
                        modelNow.feedForward(randData.getFeatures(), false).get("denseCentre0"));
        assertEquals(otherRandData.getFeatures(0),
                        modelToTune.feedForward(randData.getFeatures(), false).get("denseCentre0"));

        assertEquals(modelToTune.getLayer("denseRight0").params(), modelNow.getLayer("denseRight0").params());

        assertEquals(modelToTune.getLayer("outRight").params(), modelNow.getLayer("outRight").params());

        assertEquals(modelToTune.getLayer("outCentre").params(), modelNow.getLayer("outCentre").params());
        n++;
    }

}
 
Example 15
Source File: TestConstraints.java    From deeplearning4j with Apache License 2.0
@Test
public void testLayerWeightsAndBiasConstraints() throws Exception {

    LayerConstraint[] constraints = new LayerConstraint[]{
            new MaxNormConstraint(0.5, 1),
            new MinMaxNormConstraint(0.3, 0.4, 1.0, 1),
            new NonNegativeConstraint(),
            new UnitNormConstraint(1)
    };

    for (LayerConstraint lc : constraints) {

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new Sgd(0.0))
                .dist(new NormalDistribution(0, 5))
                .biasInit(0.2)
                .list()
                .layer(new DenseLayer.Builder().nIn(12).nOut(10)
                        .constrainAllParameters(lc).build())
                .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(8).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        LayerConstraint exp = lc.clone();
        assertEquals(exp.toString(), net.getLayer(0).conf().getLayer().getConstraints().get(0).toString());

        INDArray input = Nd4j.rand(3, 12);
        INDArray labels = Nd4j.rand(3, 8);

        net.fit(input, labels);

        INDArray w0 = net.getParam("0_W");
        INDArray b0 = net.getParam("0_b");


        if (lc instanceof MaxNormConstraint) {
            assertTrue(w0.norm2(1).maxNumber().doubleValue() <= 0.5);
            assertTrue(b0.norm2(1).maxNumber().doubleValue() <= 0.5);

        } else if (lc instanceof MinMaxNormConstraint) {
            assertTrue(w0.norm2(1).minNumber().doubleValue() >= 0.3);
            assertTrue(w0.norm2(1).maxNumber().doubleValue() <= 0.4);
            assertTrue(b0.norm2(1).minNumber().doubleValue() >= 0.3);
            assertTrue(b0.norm2(1).maxNumber().doubleValue() <= 0.4);
        } else if (lc instanceof NonNegativeConstraint) {
            assertTrue(w0.minNumber().doubleValue() >= 0.0);
            assertTrue(b0.minNumber().doubleValue() >= 0.0);
        } else if (lc instanceof UnitNormConstraint) {
            assertEquals(1.0, w0.norm2(1).minNumber().doubleValue(), 1e-6);
            assertEquals(1.0, w0.norm2(1).maxNumber().doubleValue(), 1e-6);
            assertEquals(1.0, b0.norm2(1).minNumber().doubleValue(), 1e-6);
            assertEquals(1.0, b0.norm2(1).maxNumber().doubleValue(), 1e-6);
        }

        TestUtils.testModelSerialization(net);
    }
}
 
Example 16
Source File: CNNProcessorTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testCnnToFeedForwardPreProcessor2() {
    int[] nRows = {1, 5, 20};
    int[] nCols = {1, 5, 20};
    int[] nDepth = {1, 3};
    int[] nMiniBatchSize = {1, 5};
    for (int rows : nRows) {
        for (int cols : nCols) {
            for (int d : nDepth) {
                CnnToFeedForwardPreProcessor convProcessor = new CnnToFeedForwardPreProcessor(rows, cols, d);

                for (int miniBatch : nMiniBatchSize) {
                    long[] convActShape = new long[] {miniBatch, d, rows, cols};
                    INDArray rand = Nd4j.rand(convActShape);
                    INDArray convInput_c = Nd4j.create(DataType.FLOAT, convActShape, 'c');
                    INDArray convInput_f = Nd4j.create(DataType.FLOAT, convActShape, 'f');
                    convInput_c.assign(rand);
                    convInput_f.assign(rand);
                    assertEquals(convInput_c, convInput_f);

                    //Test forward pass:
                    INDArray ffAct_c = convProcessor.preProcess(convInput_c, -1, LayerWorkspaceMgr.noWorkspaces());
                    INDArray ffAct_f = convProcessor.preProcess(convInput_f, -1, LayerWorkspaceMgr.noWorkspaces());
                    long[] ffActShape = {miniBatch, d * rows * cols};
                    assertArrayEquals(ffActShape, ffAct_c.shape());
                    assertArrayEquals(ffActShape, ffAct_f.shape());
                    assertEquals(ffAct_c, ffAct_f);

                    //Check values:
                    //CNN reshaping (for each example) takes a 1d vector and converts it to 3d
                    // (4d total, for minibatch data)
                    //1d vector is assumed to be rows from channel 0 concatenated, followed by channel 1, etc
                    for (int ex = 0; ex < miniBatch; ex++) {
                        for (int r = 0; r < rows; r++) {
                            for (int c = 0; c < cols; c++) {
                                for (int depth = 0; depth < d; depth++) {
                                    int vectorPosition = depth * (rows * cols) + r * cols + c; //pos in vector after reshape
                                    double vecValue = ffAct_c.getDouble(ex, vectorPosition);
                                    double convValue = convInput_c.getDouble(ex, depth, r, c);
                                    assertEquals(convValue, vecValue, 0.0);
                                }
                            }
                        }
                    }

                    //Test backward pass:
                    //Idea is that backward pass should do opposite to forward pass
                    INDArray epsilon2_c = Nd4j.create(DataType.FLOAT, ffActShape, 'c');
                    INDArray epsilon2_f = Nd4j.create(DataType.FLOAT, ffActShape, 'f');
                    epsilon2_c.assign(ffAct_c);
                    epsilon2_f.assign(ffAct_c);
                    INDArray epsilon4_c = convProcessor.backprop(epsilon2_c, -1, LayerWorkspaceMgr.noWorkspaces());
                    INDArray epsilon4_f = convProcessor.backprop(epsilon2_f, -1, LayerWorkspaceMgr.noWorkspaces());
                    assertEquals(convInput_c, epsilon4_c);
                    assertEquals(convInput_c, epsilon4_f);
                }
            }
        }
    }
}
 
Example 17
Source File: GradientCheckTestsComputationGraph.java    From deeplearning4j with Apache License 2.0
@Test
public void testMultipleOutputsMergeVertex() {
    Nd4j.getRandom().setSeed(12345);
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
                    .dataType(DataType.DOUBLE)
                    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                    .dist(new NormalDistribution(0, 1))
                    .updater(new NoOp()).activation(Activation.TANH).graphBuilder().addInputs("i0", "i1", "i2")
                    .addLayer("d0", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i0")
                    .addLayer("d1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i1")
                    .addLayer("d2", new DenseLayer.Builder().nIn(2).nOut(2).build(), "i2")
                    .addVertex("m", new MergeVertex(), "d0", "d1", "d2")
                    .addLayer("D0", new DenseLayer.Builder().nIn(6).nOut(2).build(), "m")
                    .addLayer("D1", new DenseLayer.Builder().nIn(6).nOut(2).build(), "m")
                    .addLayer("D2", new DenseLayer.Builder().nIn(6).nOut(2).build(), "m")
                    .addLayer("out", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(6)
                                    .nOut(2).build(), "D0", "D1", "D2")
                    .setOutputs("out").build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    int[] minibatchSizes = {1, 3};
    for (int mb : minibatchSizes) {
        INDArray[] input = new INDArray[3];
        for (int i = 0; i < 3; i++) {
            input[i] = Nd4j.rand(mb, 2);
        }
        INDArray out = Nd4j.rand(mb, 2);


        String msg = "testMultipleOutputsMergeVertex() - minibatchSize = " + mb;
        if (PRINT_RESULTS) {
            System.out.println(msg);
//                for (int j = 0; j < graph.getNumLayers(); j++)
//                    System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(input)
                .labels(new INDArray[]{out}));

        assertTrue(msg, gradOK);
        TestUtils.testModelSerialization(graph);
    }
}
 
Example 18
Source File: ParallelInferenceTest.java    From deeplearning4j with Apache License 2.0
private void testInputMasking() throws Exception {
        Nd4j.getRandom().setSeed(12345);

        int nIn = 10;
        int tsLength = 16;

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .activation(Activation.TANH)
                .seed(12345)
                .list()
                .layer(new LSTM.Builder().nIn(nIn).nOut(5).build())
                .layer(new GlobalPoolingLayer(PoolingType.AVG))
                .layer(new OutputLayer.Builder().nIn(5).nOut(5).activation(Activation.SOFTMAX).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

//        InferenceMode[] inferenceModes = new InferenceMode[]{InferenceMode.SEQUENTIAL, InferenceMode.BATCHED, InferenceMode.INPLACE, InferenceMode.SEQUENTIAL};
//        int[] workers = new int[]{2, 2, 2, 1};
//        boolean[] randomTS = new boolean[]{true, false, true, false};

        Random r = new Random();
        for( InferenceMode m : InferenceMode.values()) {
            log.info("Testing inference mode: [{}]", m);
            for( int w : new int[]{1,2}) {
                for (boolean randomTSLength : new boolean[]{false, true}) {
                    final ParallelInference inf =
                            new ParallelInference.Builder(net)
                                    .inferenceMode(m)
                                    .batchLimit(5)
                                    .queueLimit(64)
                                    .workers(w).build();

                    List<INDArray> in = new ArrayList<>();
                    List<INDArray> inMasks = new ArrayList<>();
                    List<INDArray> exp = new ArrayList<>();
                    int nRuns = isIntegrationTests() ? 100 : 10;
                    for (int i = 0; i < nRuns; i++) {
                        int currTSLength = (randomTSLength ? 1 + r.nextInt(tsLength) : tsLength);
                        int currNumEx = 1 + r.nextInt(3);
                        INDArray inArr = Nd4j.rand(new int[]{currNumEx, nIn, currTSLength});
                        in.add(inArr);

                        INDArray inMask = null;
                        if(r.nextDouble() < 0.5){
                            inMask = Nd4j.ones(currNumEx, currTSLength);
                            for( int mb = 0; mb < currNumEx; mb++) {
                                if (currTSLength > 1) {
                                    int firstMaskedStep = 1 + r.nextInt(currTSLength);
                                    for (int j = firstMaskedStep; j < currTSLength; j++) {
                                        inMask.putScalar(mb, j, 0.0);
                                    }
                                }
                            }
                        }
                        inMasks.add(inMask);

                        INDArray out = net.output(inArr, false, inMask, null);
                        exp.add(out);
                    }

                    testParallelInference(inf, in, inMasks, exp);

                    inf.shutdown();
                }
            }
        }
    }
 
Example 19
Source File: OpExecutionerTestsC.java    From nd4j with Apache License 2.0
@Test
public void testNorm2_2() throws Exception {
    INDArray array = Nd4j.rand(127, 164, 100, 1, 1);

    double norm2 = array.norm2Number().doubleValue();
}
 
Example 20
Source File: ParallelInferenceTest.java    From deeplearning4j with Apache License 2.0
@Test(timeout = 30000L)
public void testParallelInferenceVariableSizeCNN2() throws Exception {
    //Variable size input for CNN model - for example, YOLO models
    //In these cases, we can't batch and have to execute the different size inputs separately

    Nd4j.getRandom().setSeed(12345);

    int nIn = 3;
    int[] defaultShape = new int[]{1, nIn, 16, 16};

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .activation(Activation.TANH)
            .seed(12345)
            .convolutionMode(ConvolutionMode.Same)
            .list()
            .layer(new ConvolutionLayer.Builder().nIn(nIn).nOut(5).build())
            .layer(new CnnLossLayer.Builder().activation(Activation.SOFTMAX).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    for( InferenceMode m : InferenceMode.values()) {
        for( int w : new int[]{1,2}) {

            final ParallelInference inf =
                    new ParallelInference.Builder(net)
                            .inferenceMode(m)
                            .batchLimit(20)
                            .queueLimit(64)
                            .workers(w).build();

            List<INDArray> arrs = new ArrayList<>();
            List<INDArray> exp = new ArrayList<>();
            Random r = new Random();
            int runs = isIntegrationTests() ? 500 : 20;
            for( int i=0; i<runs; i++ ){
                int[] shape = defaultShape;
                if(r.nextDouble() < 0.4){
                    shape = new int[]{r.nextInt(5)+1, nIn, 10, r.nextInt(10)+1};
                }

                INDArray in = Nd4j.rand(shape);
                arrs.add(in);
                INDArray out = net.output(in);
                exp.add(out);
            }
            testParallelInference(inf, arrs, exp);

            inf.shutdown();
        }
    }
}