Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#addiRowVector()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#addiRowVector(). They are drawn from open-source projects; the source file, originating project, and license are noted above each example.
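
In short, addiRowVector(vector) adds a row vector in place to every row of the calling array and returns it; the vector's length must match the array's column count. A minimal sketch of the semantics (values chosen purely for illustration):

INDArray matrix = Nd4j.create(new double[][]{{1, 2, 3}, {4, 5, 6}});
INDArray row = Nd4j.create(new double[]{10, 20, 30});
matrix.addiRowVector(row);   // matrix is now {{11, 22, 33}, {14, 25, 36}}
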
Example 1
Source File: LongTests.java    From nd4j with Apache License 2.0
@Test
public void testLongTadOp2() {

    INDArray hugeX = Nd4j.create(2300000, 1000).assign(1.0);
    hugeX.addiRowVector(Nd4j.create(1000).assign(2.0));

    for (int x = 0; x < hugeX.rows(); x++) {
        assertEquals("Failed at row " + x, 3000, hugeX.getRow(x).sumNumber().intValue());
    }
}
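
Each element becomes 1.0 + 2.0 = 3.0, so every 1000-column row sums to 3000. Note that the array holds 2,300,000 × 1000 = 2.3 billion elements, beyond the 32-bit integer range, which is the long-indexing behavior this test exercises.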
 
Example 2
Source File: Nd4jMatrix.java    From jstarcraft-ai with Apache License 2.0
@Override
public MathMatrix addRowVector(MathVector vector) {
    if (vector instanceof Nd4jVector) {
        Nd4jEnvironmentThread thread = EnvironmentThread.getThread(Nd4jEnvironmentThread.class);
        try (MemoryWorkspace workspace = thread.getSpace()) {
            INDArray thisArray = this.getArray();
            INDArray thatArray = Nd4jVector.class.cast(vector).getArray();
            thisArray.addiRowVector(thatArray);
            return this;
        }
    } else {
        return MathMatrix.super.addRowVector(vector);
    }
}
 
Example 3
Source File: InputValidationTests.java    From nd4j with Apache License 2.0
@Test
public void testInvalidRowVectorOp1() {
    INDArray first = Nd4j.create(10, 10);
    INDArray row = Nd4j.create(1, 5);
    try {
        first.addiRowVector(row);
        fail("Should have thrown IllegalStateException");
    } catch (IllegalStateException e) {
        //OK
    }
}
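
The shape check requires the vector length to equal the matrix's column count, so a length-5 row cannot be added to a 10×10 matrix. A corrected variant of the call above, for illustration:

INDArray first = Nd4j.create(10, 10);
INDArray row = Nd4j.create(1, 10);   // length matches first.columns()
first.addiRowVector(row);            // ok: adds a zero row vector to each row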
 
Example 4
Source File: CudaBroadcastTests.java    From nd4j with Apache License 2.0
@Test
public void testPinnedAddiRowVector() throws Exception {
    // simple way to stop test if we're not on CUDA backend here
    assertEquals("JcublasLevel1", Nd4j.getBlasWrapper().level1().getClass().getSimpleName());

    for (int iter = 0; iter < 100; iter++) {

        INDArray array1 = Nd4j.zeros(15, 15);

        for (int y = 0; y < 15; y++) {
            for (int x = 0; x < 15; x++) {
                assertEquals("Failed on iteration: ["+iter+"], y.x: ["+y+"."+x+"]", 0.0f, array1.getRow(y).getFloat(x), 0.01);
            }
        }
        INDArray array2 = Nd4j.create(new float[]{2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f});

        for (int i = 0; i < 30; i++) {
            array1.addiRowVector(array2);
        }

        //System.out.println("Array1: " + array1);
        //System.out.println("Array2: " + array2);

        for (int y = 0; y < 15; y++) {
            for (int x = 0; x < 15; x++) {
                assertEquals("Failed on iteration: ["+iter+"], y.x: ["+y+"."+x+"]", 60.0f, array1.getRow(y).getFloat(x), 0.01);
            }
        }
    }
}
 
Example 5
Source File: LongTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testLongTadOp2() {

    INDArray hugeX = Nd4j.create(2300000, 1000).assign(1.0);
    hugeX.addiRowVector(Nd4j.create(1000).assign(2.0));

    for (int x = 0; x < hugeX.rows(); x++) {
        assertEquals("Failed at row " + x, 3000, hugeX.getRow(x).sumNumber().intValue());
    }
}
 
Example 6
Source File: StandardizeStrategy.java    From deeplearning4j with Apache License 2.0
/**
 * Denormalize a data array
 *
 * @param array the data to denormalize
 * @param stats statistics of the data population
 */
@Override
public void revert(INDArray array, INDArray maskArray, DistributionStats stats) {
    if (array.rank() <= 2) {
        array.muliRowVector(filteredStd(stats));
        array.addiRowVector(stats.getMean());
    } else {
        Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(array, filteredStd(stats).castTo(array.dataType()), array, 1));
        Nd4j.getExecutioner().execAndReturn(new BroadcastAddOp(array, stats.getMean().castTo(array.dataType()), array, 1));
    }

    if (maskArray != null) {
        DataSetUtil.setMaskedValuesToZero(array, maskArray);
    }
}
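
revert applies the inverse of standardization, array = array * std + mean, row by row. For comparison, a sketch of the forward transform it undoes (illustrative only, not the library's preProcess implementation):

// x_norm = (x - mean) / std, applied to each row
array.subiRowVector(stats.getMean());
array.diviRowVector(filteredStd(stats));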
 
Example 7
Source File: InputValidationTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testInvalidRowVectorOp1() {
    INDArray first = Nd4j.create(10, 10);
    INDArray row = Nd4j.create(1, 5);
    try {
        first.addiRowVector(row);
        fail("Should have thrown IllegalStateException");
    } catch (IllegalStateException e) {
        //OK
    }
}
 
Example 8
Source File: SporadicTests.java    From nd4j with Apache License 2.0
@Test
public void testReduceX() throws Exception {
    CudaEnvironment.getInstance().getConfiguration().setMaximumGridSize(11);
    INDArray x = Nd4j.create(500, 500);
    INDArray exp_0 = Nd4j.linspace(1, 500, 500);
    INDArray exp_1 = Nd4j.create(500).assign(250.5);

    x.addiRowVector(Nd4j.linspace(1, 500, 500));

    assertEquals(exp_0, x.mean(0));
    assertEquals(exp_1, x.mean(1));

    assertEquals(250.5, x.meanNumber().doubleValue(), 1e-5);
}
 
Example 9
Source File: SporadicTests.java    From nd4j with Apache License 2.0
@Test
public void testIndexReduceX() throws Exception {
    CudaEnvironment.getInstance().getConfiguration().setMaximumGridSize(11);
    INDArray x = Nd4j.create(500, 500);
    INDArray exp_0 = Nd4j.create(500).assign(0);
    INDArray exp_1 = Nd4j.create(500).assign(499);

    x.addiRowVector(Nd4j.linspace(1, 500, 500));

    assertEquals(exp_0, Nd4j.argMax(x, 0));
    assertEquals(exp_1, Nd4j.argMax(x, 1));
}
 
Example 10
Source File: DeepFMInputLayer.java    From jstarcraft-rns with Apache License 2.0
@Override
public INDArray preOutput(boolean training, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);
    applyDropOutIfNecessary(training, workspaceMgr);
    INDArray W = getParamWithNoise(DefaultParamInitializer.WEIGHT_KEY, training, workspaceMgr);
    INDArray b = getParamWithNoise(DefaultParamInitializer.BIAS_KEY, training, workspaceMgr);

    INDArray ret = workspaceMgr.createUninitialized(ArrayType.ACTIVATIONS, input.size(0), W.size(1));
    ret.assign(0F);
    for (int row = 0; row < input.rows(); row++) {
        for (int column = 0; column < W.columns(); column++) {
            float value = 0F;
            int cursor = 0;
            for (int index = 0; index < input.columns(); index++) {
                value += W.getFloat(cursor + input.getInt(row, index), column);
                cursor += dimensionSizes[index];
            }
            ret.put(row, column, value);
        }
    }

    if (hasBias()) {
        ret.addiRowVector(b);
    }

    if (maskArray != null) {
        applyMask(ret);
    }

    return ret;
}
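
Here input holds category indices rather than dense features: for each row, the inner loop sums the weight entries selected by those indices (an embedding-style lookup over the field offsets in dimensionSizes), and addiRowVector then broadcasts the shared bias b across all rows of the activations.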
 
Example 11
Source File: RandomProjectionLSH.java    From deeplearning4j with Apache License 2.0
/**
 * This picks uniformly distributed random points on the surface of the unit sphere, using the method of:
 *
 * An efficient method for generating uniformly distributed points on the surface of an n-dimensional sphere
 * JS Hicks, RF Wheeling - Communications of the ACM, 1959
 * @param data a query to generate multiple probes for
 * @return a matrix of `numTables` probes for the query, one per row
 */
public INDArray entropy(INDArray data){

    INDArray data2 =
                Nd4j.getExecutioner().exec(new GaussianDistribution(Nd4j.create(numTables, inDimension), radius));

    INDArray norms = Nd4j.norm2(data2.dup(), -1);

    Preconditions.checkState(norms.rank() == 1 && norms.size(0) == numTables, "Expected norm2 to have shape [%s], is %ndShape", norms.size(0), norms);

    data2.diviColumnVector(norms);
    data2.addiRowVector(data);
    return data2;
}
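
The construction relies on the rotation invariance of the Gaussian: normalizing each row of i.i.d. Gaussian samples to unit L2 norm yields directions uniform on the sphere, and addiRowVector then recenters all numTables probes at the query point. A quick illustrative check of the normalization step (shapes chosen arbitrarily):

INDArray g = Nd4j.getExecutioner().exec(new GaussianDistribution(Nd4j.create(4, 3), 1.0));
g.diviColumnVector(g.norm2(1));   // each of the 4 rows now has L2 norm ~1.0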
 
Example 12
Source File: RowVectorOpsC.java    From deeplearning4j with Apache License 2.0
@Test
public void testAddi() {
    INDArray arr = Nd4j.linspace(1, 4, 4, DataType.DOUBLE).reshape(2, 2);
    arr.addiRowVector(Nd4j.create(new double[] {1, 2}));
    INDArray assertion = Nd4j.create(new double[][] {{2, 4}, {4, 6}});
    assertEquals(assertion, arr);
}
 
Example 13
Source File: EndlessTests.java    From nd4j with Apache License 2.0
@Test
public void testBroadcastForever(){
    INDArray arr = Nd4j.ones(100,100);
    INDArray arr2 = Nd4j.ones(1,100);

    for (int i = 0; i < RUN_LIMIT; i++ ) {
        arr.addiRowVector(arr2);
    }
}
 
Example 14
Source File: StandardizeStrategy.java    From nd4j with Apache License 2.0
/**
 * Denormalize a data array
 *
 * @param array the data to denormalize
 * @param stats statistics of the data population
 */
@Override
public void revert(INDArray array, INDArray maskArray, DistributionStats stats) {
    if (array.rank() <= 2) {
        array.muliRowVector(filteredStd(stats));
        array.addiRowVector(stats.getMean());
    } else {
        Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(array, filteredStd(stats), array, 1));
        Nd4j.getExecutioner().execAndReturn(new BroadcastAddOp(array, stats.getMean(), array, 1));
    }

    if (maskArray != null) {
        DataSetUtil.setMaskedValuesToZero(array, maskArray);
    }
}
 
Example 15
Source File: RnnOpValidation.java    From deeplearning4j with Apache License 2.0
@Test
public void testGRUCell(){
    Nd4j.getRandom().setSeed(12345);
    int mb = 2;
    int nIn = 3;
    int nOut = 4;

    SameDiff sd = SameDiff.create();
    SDVariable x = sd.constant(Nd4j.rand(DataType.FLOAT, mb, nIn));
    SDVariable hLast = sd.constant(Nd4j.rand(DataType.FLOAT, mb, nOut));
    SDVariable Wru = sd.constant(Nd4j.rand(DataType.FLOAT, (nIn+nOut), 2*nOut));
    SDVariable Wc = sd.constant(Nd4j.rand(DataType.FLOAT, (nIn+nOut), nOut));
    SDVariable bru = sd.constant(Nd4j.rand(DataType.FLOAT, 2*nOut));
    SDVariable bc = sd.constant(Nd4j.rand(DataType.FLOAT, nOut));

    double fb = 1.0;
    GRUWeights weights = GRUWeights.builder()
            .ruWeight(Wru)
            .cWeight(Wc)
            .ruBias(bru)
            .cBias(bc)
            .build();

    SDVariable[] v = sd.rnn().gruCell(x, hLast, weights);
    List<String> toExec = new ArrayList<>();
    for(SDVariable sdv : v){
        toExec.add(sdv.name());
    }

    //Test forward pass:
    Map<String,INDArray> m = sd.output(null, toExec);

    //Weights and bias order: [r, u], [c]

    //Reset gate:
    INDArray wr_x = Wru.getArr().get(NDArrayIndex.interval(0,nIn), NDArrayIndex.interval(0, nOut));           //Input weights
    INDArray wr_r = Wru.getArr().get(NDArrayIndex.interval(nIn,nIn+nOut), NDArrayIndex.interval(0, nOut));    //Recurrent weights
    INDArray br = bru.getArr().get(NDArrayIndex.interval(0, nOut));

    INDArray rExp = x.getArr().mmul(wr_x).addiRowVector(br);        //[mb,nIn]*[nIn, nOut] + [nOut]
    rExp.addi(hLast.getArr().mmul(wr_r));   //[mb,nOut]*[nOut,nOut]
    Transforms.sigmoid(rExp,false);

    INDArray rAct = m.get(toExec.get(0));
    assertEquals(rExp, rAct);

    //Update gate:
    INDArray wu_x = Wru.getArr().get(NDArrayIndex.interval(0,nIn), NDArrayIndex.interval(nOut, 2*nOut));           //Input weights
    INDArray wu_r = Wru.getArr().get(NDArrayIndex.interval(nIn,nIn+nOut), NDArrayIndex.interval(nOut, 2*nOut));    //Recurrent weights
    INDArray bu = bru.getArr().get(NDArrayIndex.interval(nOut, 2*nOut));

    INDArray uExp = x.getArr().mmul(wu_x).addiRowVector(bu);        //[mb,nIn]*[nIn, nOut] + [nOut]
    uExp.addi(hLast.getArr().mmul(wu_r));   //[mb,nOut]*[nOut,nOut]
    Transforms.sigmoid(uExp,false);

    INDArray uAct = m.get(toExec.get(1));
    assertEquals(uExp, uAct);

    //c = tanh(x * Wcx + Wcr * (hLast .* r))
    INDArray Wcx = Wc.getArr().get(NDArrayIndex.interval(0,nIn), NDArrayIndex.all());
    INDArray Wcr = Wc.getArr().get(NDArrayIndex.interval(nIn, nIn+nOut), NDArrayIndex.all());
    INDArray cExp = x.getArr().mmul(Wcx);
    cExp.addi(hLast.getArr().mul(rExp).mmul(Wcr));
    cExp.addiRowVector(bc.getArr());
    Transforms.tanh(cExp, false);

    assertEquals(cExp, m.get(toExec.get(2)));

    //h = u * hLast + (1-u) * c
    INDArray hExp = uExp.mul(hLast.getArr()).add(uExp.rsub(1.0).mul(cExp));
    assertEquals(hExp, m.get(toExec.get(3)));
}
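
For reference, the cell being checked is the standard GRU: r = sigmoid(x·Wrx + hLast·Wrr + br), u = sigmoid(x·Wux + hLast·Wur + bu), c = tanh(x·Wcx + (hLast ∘ r)·Wcr + bc), and h = u ∘ hLast + (1 − u) ∘ c, where ∘ denotes the element-wise product. Each addiRowVector call broadcasts the corresponding bias over the minibatch rows.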
 
Example 16
Source File: TestSimpleRnn.java    From deeplearning4j with Apache License 2.0
@Test
public void testSimpleRnn(){
    Nd4j.getRandom().setSeed(12345);

    int m = 3;
    int nIn = 5;
    int layerSize = 6;
    int tsLength = 7;
    INDArray in;
    if (rnnDataFormat == RNNFormat.NCW){
        in = Nd4j.rand(DataType.FLOAT, m, nIn, tsLength);
    }
    else{
        in = Nd4j.rand(DataType.FLOAT, m, tsLength, nIn);
    }


    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .updater(new NoOp())
            .weightInit(WeightInit.XAVIER)
            .activation(Activation.TANH)
            .list()
            .layer(new SimpleRnn.Builder().nIn(nIn).nOut(layerSize).dataFormat(rnnDataFormat).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray out = net.output(in);

    INDArray w = net.getParam("0_W");
    INDArray rw = net.getParam("0_RW");
    INDArray b = net.getParam("0_b");

    INDArray outLast = null;
    for( int i=0; i<tsLength; i++ ){
        INDArray inCurrent;
        if (rnnDataFormat == RNNFormat.NCW){
            inCurrent = in.get(all(), all(), point(i));
        }
        else{
            inCurrent = in.get(all(), point(i), all());
        }

        INDArray outExpCurrent = inCurrent.mmul(w);
        if(outLast != null){
            outExpCurrent.addi(outLast.mmul(rw));
        }

        outExpCurrent.addiRowVector(b);

        Transforms.tanh(outExpCurrent, false);

        INDArray outActCurrent;
        if (rnnDataFormat == RNNFormat.NCW){
            outActCurrent = out.get(all(), all(), point(i));
        }
        else{
            outActCurrent = out.get(all(), point(i), all());
        }
        assertEquals(String.valueOf(i), outExpCurrent, outActCurrent);

        outLast = outExpCurrent;
    }


    TestUtils.testModelSerialization(net);
}
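
The reference loop mirrors the SimpleRnn recurrence out_t = tanh(in_t·W + out_{t−1}·RW + b); addiRowVector broadcasts the shared bias b over the minibatch rows at every timestep.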
 
Example 17
Source File: CudaPairwiseTrainformsTests.java    From nd4j with Apache License 2.0
@Test
public void testPinnedAddiRowVector() throws Exception {
    // simple way to stop test if we're not on CUDA backend here
    assertEquals("JcublasLevel1", Nd4j.getBlasWrapper().level1().getClass().getSimpleName());

    INDArray array1 = Nd4j.create(new float[]{1.5f, 1.5f, 1.5f, 1.5f, 1.5f, 1.5f, 1.5f, 1.5f, 1.5f, 1.5f, 1.5f, 1.5f, 1.5f, 1.5f, 1.5f});
    INDArray array2 = Nd4j.create(new float[]{2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f});

    array1.addiRowVector(array2);

    System.out.println("Array1: " + array1);
    System.out.println("Array2: " + array2);

    assertEquals(3.5f, array1.getRow(0).getFloat(0), 0.01);

}
 
Example 18
Source File: AtomicAllocatorTest.java    From nd4j with Apache License 2.0
@Override
public void run() {
    log.info(this.getName() + "/"+ this.getId() + " started on device ["+AtomicAllocator.getInstance().getDeviceId()+"]");
    AtomicLong cnt = new AtomicLong(0);
    AtomicLong cntX = new AtomicLong(0);
    while(true) {
        INDArray array1 = Nd4j.zeros(15,15);
        INDArray array2 = Nd4j.create(new float[]{2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f, 2.0f});


        int idx = 0;
        long time1 = 0;
        long time2 = 0;
        for (int x = 0; x < 30; x++) {
            time1 = System.nanoTime();
            array1.addiRowVector(array2);
            time2 = System.nanoTime();
            cntX.incrementAndGet();
        }

        if (cnt.incrementAndGet() % 1000 == 0) {
            log.info("AddiRowVector execution time: [" + (time2 - time1) + "] ns on device ["+ allocator.getDeviceId(array1)+"]");

            for (int y = 0; y < 15; y++) {
                for (int x = 0; x < 15; x++) {
                    assertEquals(60.0f, array1.getRow(y).getFloat(x), 0.01);
                }
            }
            if (threadId == 0) {
                log.info("Total calls: " + cntX.get() * 4);
                log.info("Total memory allocated on device [0]: " + allocator.getTotalAllocatedDeviceMemory(0));
            }

            try {
                Thread.sleep(5000);
            } catch (Exception e) {
                throw  new RuntimeException(e);
            }
        }
    }
}
 
Example 19
Source File: BaseLayer.java    From deeplearning4j with Apache License 2.0
protected Pair<INDArray, INDArray> preOutputWithPreNorm(boolean training, boolean forBackprop, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(forBackprop);
    applyDropOutIfNecessary(training, workspaceMgr);
    INDArray W = getParamWithNoise(DefaultParamInitializer.WEIGHT_KEY, training, workspaceMgr);
    INDArray b = getParamWithNoise(DefaultParamInitializer.BIAS_KEY, training, workspaceMgr);
    INDArray g = (hasLayerNorm() ? getParam(DefaultParamInitializer.GAIN_KEY) : null);

    INDArray input = this.input.castTo(dataType);

    //Input validation:
    if (input.rank() != 2 || input.columns() != W.rows()) {
        if (input.rank() != 2) {
            throw new DL4JInvalidInputException("Input that is not a matrix; expected matrix (rank 2), got rank "
                    + input.rank() + " array with shape " + Arrays.toString(input.shape())
                    + ". Missing preprocessor or wrong input type? " + layerId());
        }
        throw new DL4JInvalidInputException(
                "Input size (" + input.columns() + " columns; shape = " + Arrays.toString(input.shape())
                        + ") is invalid: does not match layer input size (layer # inputs = "
                        + W.size(0) + ") " + layerId());
    }


    INDArray ret = workspaceMgr.createUninitialized(ArrayType.ACTIVATIONS, W.dataType(), input.size(0), W.size(1));
    input.castTo(ret.dataType()).mmuli(W, ret);     //TODO Can we avoid this cast? (It should be a no-op if not required, however)

    INDArray preNorm = ret;
    if(hasLayerNorm()){
        preNorm = (forBackprop ? ret.dup(ret.ordering()) : ret);
        Nd4j.getExecutioner().exec(new LayerNorm(preNorm, g, ret, true, 1));
    }

    if(hasBias()){
        ret.addiRowVector(b);
    }

    if (maskArray != null) {
        applyMask(ret);
    }

    return new Pair<>(ret, preNorm);
}
 
Example 20
Source File: CrashTest.java    From deeplearning4j with Apache License 2.0
protected void op(INDArray x, INDArray y, int i) {
    // broadcast along row & column
    INDArray row = Nd4j.ones(64);
    INDArray column = Nd4j.ones(1024, 1);

    x.addiRowVector(row);
    x.addiColumnVector(column);

    // ordinary scalar op
    x.addi(i * 2);

    // reduction along all dimensions
    float sum = x.sumNumber().floatValue();

    // index reduction
    Nd4j.getExecutioner().exec(new ArgMax(x));

    // ordinary elementwise transform
    Nd4j.getExecutioner().exec(new Sqrt(x, x));

    //  dup
    INDArray x1 = x.dup(x.ordering());
    INDArray x2 = x.dup(x.ordering());
    INDArray x3 = x.dup('c');
    INDArray x4 = x.dup('f');


    // vstack && hstack
    INDArray vstack = Nd4j.vstack(x, x1, x2, x3, x4);

    INDArray hstack = Nd4j.hstack(x, x1, x2, x3, x4);

    // reduce3 call
    Nd4j.getExecutioner().exec(new ManhattanDistance(x, x2));


    // flatten call
    INDArray flat = Nd4j.toFlattened(x, x1, x2, x3, x4);


    // reduction along dimension: row & column
    INDArray max_0 = x.max(0);
    INDArray max_1 = x.max(1);


    // index reduction along dimension: row & column
    INDArray imax_0 = Nd4j.argMax(x, 0);
    INDArray imax_1 = Nd4j.argMax(x, 1);


    // logisoftmax, softmax & softmax derivative
    Nd4j.getExecutioner().exec((CustomOp) new SoftMax(x));
    Nd4j.getExecutioner().exec((CustomOp) new LogSoftMax(x));


    // BooleanIndexing
    BooleanIndexing.replaceWhere(x, 5f, Conditions.lessThan(8f));

    // assign on view
    BooleanIndexing.assignIf(x, x1, Conditions.greaterThan(-1000000000f));

    // std along all dimensions
    float std = x.stdNumber().floatValue();

    // std along row & col
    INDArray xStd_0 = x.std(0);
    INDArray xStd_1 = x.std(1);

    // blas call
    float dot = (float) Nd4j.getBlasWrapper().dot(x, x1);

    // mmul
    for (boolean tA : paramsA) {
        for (boolean tB : paramsB) {

            INDArray xT = tA ? x.dup() : x.dup().transpose();
            INDArray yT = tB ? y.dup() : y.dup().transpose();

            Nd4j.gemm(xT, yT, tA, tB);
        }
    }

    // specially for views, checking here without dup and rollover
    Nd4j.gemm(x, y, false, false);

    log.debug("Iteration passed: " + i);
}