Java Code Examples for org.nd4j.linalg.factory.Nd4j#toFlattened()

The following examples show how to use org.nd4j.linalg.factory.Nd4j#toFlattened(). You can vote up the examples you find useful or vote down the ones you don't, and you can go to the original project or source file by following the link above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: CudaFloatDataBufferTest.java    From nd4j with Apache License 2.0 6 votes vote down vote up
@Test
public void testFlattened1() throws Exception {
    // Flatten 100 identical linspace(0..99) arrays and verify the pattern repeats.
    final int numArrays = 100;
    final int arrayLen = 100;

    List<INDArray> inputs = new ArrayList<>(numArrays);
    for (int i = 0; i < numArrays; i++) {
        inputs.add(Nd4j.linspace(0, arrayLen - 1, arrayLen));
    }

    INDArray flattened = Nd4j.toFlattened(inputs);

    assertEquals(numArrays * arrayLen, flattened.length());
    for (int i = 0; i < numArrays; i++) {
        for (int j = 0; j < arrayLen; j++) {
            // Element at (i * 100 + j) must equal j, since every input is the same linspace
            assertEquals("X: ["+i+"], Y: ["+j+"] failed: ", j, flattened.getFloat((i * arrayLen) + j), 0.01f);
        }
    }
}
 
Example 2
Source File: DefaultGradient.java    From deeplearning4j with Apache License 2.0 6 votes vote down vote up
/**
 * Lazily (re)builds {@code flattenedGradient} from the {@code gradients} map, honoring any
 * per-array flattening orders, and reshapes a rank-1 result to a 1-row matrix.
 * No-op (leaves {@code flattenedGradient} null) when there are no gradients at all.
 */
private void flattenGradient() {
    if (flatteningOrders != null) {
        //Arrays with non-default order get flattened to row vector first, then everything is flattened to f order
        //TODO revisit this, and make more efficient
        List<INDArray> toFlatten = new ArrayList<>();
        for (Map.Entry<String, INDArray> entry : gradients.entrySet()) {
            if (flatteningOrders.containsKey(entry.getKey())
                            && flatteningOrders.get(entry.getKey()) != DEFAULT_FLATTENING_ORDER) {
                //Specific flattening order for this array, that isn't the default
                toFlatten.add(Nd4j.toFlattened(flatteningOrders.get(entry.getKey()), entry.getValue()));
            } else {
                //default flattening order for this array
                toFlatten.add(entry.getValue());
            }
        }
        flattenedGradient = Nd4j.toFlattened(DEFAULT_FLATTENING_ORDER, toFlatten);
    } else if( !gradients.values().isEmpty() ){ //Edge case: can be empty for nets with 0 params
        //Standard case: flatten all to f order
        flattenedGradient = Nd4j.toFlattened(DEFAULT_FLATTENING_ORDER, gradients.values());
    }
    //Bug fix: flattenedGradient stays null for the empty-gradients edge case above;
    //guard the dereference to avoid a NullPointerException for 0-param nets
    if(flattenedGradient != null && flattenedGradient.rank() == 1){
        flattenedGradient = flattenedGradient.reshape('c', 1, flattenedGradient.length());
    }
}
 
Example 3
Source File: WeightInitIdentity.java    From deeplearning4j with Apache License 2.0 6 votes vote down vote up
/**
 * Fills the parameter view with a (possibly scaled) identity matrix and returns it
 * reshaped to the requested 2D shape with the requested ordering.
 */
private INDArray setIdentity2D(long[] shape, char order, INDArray paramView) {
    // Materialize the identity matrix directly when the default ordering matches,
    // otherwise copy it into an array of the requested order.
    INDArray identity = (order == Nd4j.order())
            ? Nd4j.eye(shape[0])
            : Nd4j.createUninitialized(shape, order).assign(Nd4j.eye(shape[0]));

    if (scale != null) {
        identity.muli(scale);
    }

    // Copy the (optionally scaled) identity into the view, then restore the 2D shape.
    paramView.assign(Nd4j.toFlattened(order, identity));
    return paramView.reshape(order, shape);
}
 
Example 4
Source File: FaceNetSmallV2Helper.java    From Java-Machine-Learning-for-Computer-Vision with MIT License 5 votes vote down vote up
/**
 * Wraps each primitive double[] as an INDArray and flattens them all into a single array.
 */
private static INDArray mergeAll(List<double[]> all) {
    INDArray[] wrapped = new INDArray[all.size()];
    for (int i = 0; i < wrapped.length; i++) {
        wrapped[i] = Nd4j.create(all.get(i));
    }
    return Nd4j.toFlattened(wrapped);
}
 
Example 5
Source File: MtcnnUtil.java    From mtcnn-java with Apache License 2.0 5 votes vote down vote up
/**
 * Appends {@code values} to {@code arr1}: dimension -1 flattens both into one vector,
 * any other dimension concatenates along that axis.
 */
public static INDArray append(INDArray arr1, INDArray values, int dimension) {
	return (dimension == -1)
			? Nd4j.toFlattened(arr1, values)
			: Nd4j.concat(dimension, arr1, values);
}
 
Example 6
Source File: CudaFloatDataBufferTest.java    From nd4j with Apache License 2.0 5 votes vote down vote up
@Test
public void testToFlattenedOrder() throws Exception {
    // A c-order and an f-order array with identical contents must flatten identically in 'f' order.
    // Cleaned up: removed commented-out assertions, the unused local they referenced,
    // and leftover debug printlns.
    INDArray concatC = Nd4j.linspace(1, 4, 4).reshape('c', 2, 2);
    INDArray concatF = Nd4j.create(new int[] {2, 2}, 'f');
    concatF.assign(concatC);

    INDArray test = Nd4j.toFlattened('f', concatC, concatF);
    // Column-major traversal of [[1,2],[3,4]] is 1,3,2,4 - repeated once per input array
    INDArray assertion = Nd4j.create(new double[] {1, 3, 2, 4, 1, 3, 2, 4});
    assertEquals(assertion, test);
}
 
Example 7
Source File: CudaFloatDataBufferTest.java    From nd4j with Apache License 2.0 5 votes vote down vote up
@Test
public void testToFlattenedWithOrder(){
    // Three differently-shaped matrices, each built in both 'c' and 'f' layout with equal contents.
    int[][] shapes = { {10, 3}, {2, 7}, {3, 3} };
    INDArray[] cOrder = new INDArray[shapes.length];
    INDArray[] fOrder = new INDArray[shapes.length];
    for (int i = 0; i < shapes.length; i++) {
        int len = ArrayUtil.prod(shapes[i]);
        cOrder[i] = Nd4j.linspace(1, len, len).reshape('c', shapes[i]);
        fOrder[i] = Nd4j.create(shapes[i], 'f').assign(cOrder[i]);
        assertEquals(cOrder[i], fOrder[i]);
    }

    // Flattening in 'c' order must be independent of each input's internal layout
    INDArray cc = Nd4j.toFlattened('c', cOrder[0], cOrder[1], cOrder[2]);
    INDArray cf = Nd4j.toFlattened('c', fOrder[0], fOrder[1], fOrder[2]);
    assertEquals(cc, cf);

    INDArray cMixed = Nd4j.toFlattened('c', cOrder[0], fOrder[1], fOrder[2]);
    assertEquals(cc, cMixed);

    // Same independence holds for 'f' order - and the 'f' result differs from the 'c' result
    INDArray fc = Nd4j.toFlattened('f', cOrder[0], cOrder[1], cOrder[2]);
    assertNotEquals(cc, fc);

    INDArray ff = Nd4j.toFlattened('f', fOrder[0], fOrder[1], fOrder[2]);
    assertEquals(fc, ff);

    INDArray fMixed = Nd4j.toFlattened('f', cOrder[0], fOrder[1], fOrder[2]);
    assertEquals(fc, fMixed);
}
 
Example 8
Source File: NDArrayTestsFortran.java    From nd4j with Apache License 2.0 5 votes vote down vote up
@Test
public void testAppendBias() {
    // appendBias should be equivalent to flattening the input with a trailing scalar 1
    INDArray input = Nd4j.linspace(1, 25, 25).transpose();
    INDArray expected = Nd4j.toFlattened(input, Nd4j.scalar(1));
    assertEquals(expected, Nd4j.appendBias(input));
}
 
Example 9
Source File: BaseWeightInitScheme.java    From nd4j with Apache License 2.0 5 votes vote down vote up
/**
 * Copies {@code outputArray} (flattened in this scheme's order) into {@code paramView}
 * and returns the view reshaped to the output's shape. Returns {@code outputArray}
 * unchanged when there is no distinct view to populate.
 *
 * @param outputArray freshly initialized weights
 * @param paramView   backing parameter view to populate; may be null
 * @return the populated view reshaped like {@code outputArray}, or {@code outputArray} itself
 * @throws RuntimeException if the flattened weights and the view differ in length
 */
protected INDArray handleParamsView(INDArray outputArray, INDArray paramView) {
    //minor optimization when the views are the same, just return
    if(paramView == null || paramView == outputArray)
        return outputArray;
    INDArray flat = Nd4j.toFlattened(order(), outputArray);
    if (flat.length() != paramView.length())
        //Fixed: message previously opened "(view length:" without ever closing the parenthesis
        throw new RuntimeException("ParamView length does not match initialized weights length (view length: "
                + paramView.length() + ", view shape: " + Arrays.toString(paramView.shape())
                + "; flattened length: " + flat.length() + ")");

    paramView.assign(flat);

    return paramView.reshape(order(), outputArray.shape());
}
 
Example 10
Source File: BaseWeightInitScheme.java    From deeplearning4j with Apache License 2.0 5 votes vote down vote up
/**
 * Copies {@code outputArray} (flattened in this scheme's order) into {@code paramView}
 * and returns the view reshaped to the output's shape. Returns {@code outputArray}
 * unchanged when there is no distinct view to populate.
 *
 * @param outputArray freshly initialized weights
 * @param paramView   backing parameter view to populate; may be null
 * @return the populated view reshaped like {@code outputArray}, or {@code outputArray} itself
 * @throws RuntimeException if the flattened weights and the view differ in length
 */
protected INDArray handleParamsView(INDArray outputArray, INDArray paramView) {
    //minor optimization when the views are the same, just return
    if(paramView == null || paramView == outputArray)
        return outputArray;
    INDArray flat = Nd4j.toFlattened(order(), outputArray);
    if (flat.length() != paramView.length())
        //Fixed: message previously opened "(view length:" without ever closing the parenthesis
        throw new RuntimeException("ParamView length does not match initialized weights length (view length: "
                + paramView.length() + ", view shape: " + Arrays.toString(paramView.shape())
                + "; flattened length: " + flat.length() + ")");

    paramView.assign(flat);

    return paramView.reshape(order(), outputArray.shape());
}
 
Example 11
Source File: MultiLayerNetwork.java    From deeplearning4j with Apache License 2.0 5 votes vote down vote up
/**
 * As per {@link #scoreExamples(DataSet, boolean)} - the outputs (example scores) for all DataSets in the iterator are concatenated
 */
public INDArray scoreExamples(DataSetIterator iter, boolean addRegularizationTerms) {
    // Score each DataSet in turn, then join the per-batch score vectors in 'f' order
    List<INDArray> perBatchScores = new ArrayList<>();
    while (iter.hasNext()) {
        INDArray batchScores = scoreExamples(iter.next(), addRegularizationTerms);
        perBatchScores.add(batchScores);
    }
    return Nd4j.toFlattened('f', perBatchScores);
}
 
Example 12
Source File: WeightInitUtil.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
/**
 * Initializes network weights in-place in {@code paramView} according to the given scheme,
 * then returns the view reshaped to {@code shape} with the given ordering.
 *
 * @param fanIn      number of inputs to the layer (used by most scaling schemes)
 * @param fanOut     number of outputs from the layer
 * @param shape      target weight shape; must be square 2D for IDENTITY
 * @param initScheme which initialization to apply
 * @param dist       distribution to sample from (DISTRIBUTION scheme only)
 * @param order      array ordering ('c' or 'f') for the returned reshape
 * @param paramView  flat parameter view that is filled in-place
 * @return {@code paramView} reshaped to {@code shape} in {@code order}
 * @throws IllegalStateException for IDENTITY with a non-square shape, or an unknown scheme
 */
public static INDArray initWeights(double fanIn, double fanOut, long[] shape, WeightInit initScheme,
                Distribution dist, char order, INDArray paramView) {
    switch (initScheme) {
        case DISTRIBUTION:
            // OrthogonalDistribution needs the 2D structure; all others sample the flat view
            if (dist instanceof OrthogonalDistribution) {
                dist.sample(paramView.reshape(order, shape));
            } else {
                dist.sample(paramView);
            }
            break;
        case RELU:
            Nd4j.randn(paramView).muli(FastMath.sqrt(2.0 / fanIn)); //N(0, 2/nIn)
            break;
        case RELU_UNIFORM:
            double u = Math.sqrt(6.0 / fanIn);
            Nd4j.rand(paramView, Nd4j.getDistributions().createUniform(-u, u)); //U(-sqrt(6/fanIn), sqrt(6/fanIn)
            break;
        case SIGMOID_UNIFORM:
            double r = 4.0 * Math.sqrt(6.0 / (fanIn + fanOut));
            Nd4j.rand(paramView, Nd4j.getDistributions().createUniform(-r, r));
            break;
        case UNIFORM:
            double a = 1.0 / Math.sqrt(fanIn);
            Nd4j.rand(paramView, Nd4j.getDistributions().createUniform(-a, a));
            break;
        case LECUN_UNIFORM:
            double b = 3.0 / Math.sqrt(fanIn);
            Nd4j.rand(paramView, Nd4j.getDistributions().createUniform(-b, b));
            break;
        case XAVIER:
            // N(0, 2/(fanIn+fanOut))
            Nd4j.randn(paramView).muli(FastMath.sqrt(2.0 / (fanIn + fanOut)));
            break;
        case XAVIER_UNIFORM:
            //As per Glorot and Bengio 2010: Uniform distribution U(-s,s) with s = sqrt(6/(fanIn + fanOut))
            //Eq 16: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
            double s = Math.sqrt(6.0) / Math.sqrt(fanIn + fanOut);
            Nd4j.rand(paramView, Nd4j.getDistributions().createUniform(-s, s));
            break;
        case LECUN_NORMAL:  //Fall through: these 3 are equivalent
        case NORMAL:
        case XAVIER_FAN_IN:
            // N(0, 1/fanIn)
            Nd4j.randn(paramView).divi(FastMath.sqrt(fanIn));
            break;
        case XAVIER_LEGACY:
            Nd4j.randn(paramView).divi(FastMath.sqrt(shape[0] + shape[1]));
            break;
        case ZERO:
            paramView.assign(0.0);
            break;
        case ONES:
            paramView.assign(1.0);
            break;
        case IDENTITY:
            if(shape.length != 2 || shape[0] != shape[1]){
                throw new IllegalStateException("Cannot use IDENTITY init with parameters of shape "
                        + Arrays.toString(shape) + ": weights must be a square matrix for identity");
            }
            INDArray ret;
            // Build the identity directly in the default order, or copy it into the requested order
            if(order == Nd4j.order()){
                ret = Nd4j.eye(shape[0]);
            } else {
                ret = Nd4j.createUninitialized(shape, order).assign(Nd4j.eye(shape[0]));
            }
            INDArray flat = Nd4j.toFlattened(order, ret);
            paramView.assign(flat);
            break;
        case VAR_SCALING_NORMAL_FAN_IN:
            Nd4j.exec(new TruncatedNormalDistribution(paramView, 0.0, Math.sqrt(1.0 / fanIn)));
            break;
        case VAR_SCALING_NORMAL_FAN_OUT:
            Nd4j.exec(new TruncatedNormalDistribution(paramView, 0.0, Math.sqrt(2.0 / (fanIn + fanOut))));
            break;
        case VAR_SCALING_NORMAL_FAN_AVG:
            Nd4j.exec(new TruncatedNormalDistribution(paramView, 0.0, Math.sqrt(2.0 / (fanIn + fanOut))));
            break;
        case VAR_SCALING_UNIFORM_FAN_IN:
            double scalingFanIn = 3.0 / Math.sqrt(fanIn);
            Nd4j.rand(paramView, Nd4j.getDistributions().createUniform(-scalingFanIn, scalingFanIn));
            break;
        case VAR_SCALING_UNIFORM_FAN_OUT:
            double scalingFanOut = 3.0 / Math.sqrt(fanOut);
            Nd4j.rand(paramView, Nd4j.getDistributions().createUniform(-scalingFanOut, scalingFanOut));
            break;
        case VAR_SCALING_UNIFORM_FAN_AVG:
            double scalingFanAvg = 3.0 / Math.sqrt((fanIn + fanOut) / 2);
            Nd4j.rand(paramView, Nd4j.getDistributions().createUniform(-scalingFanAvg, scalingFanAvg));
            break;
        default:
            throw new IllegalStateException("Illegal weight init value: " + initScheme);
    }

    return paramView.reshape(order, shape);
}
 
Example 13
Source File: CrashTest.java    From nd4j with Apache License 2.0 2 votes vote down vote up
// Crash/smoke test: exercises a broad sweep of ND4J operations (broadcast, reductions,
// transforms, stacking, flattening, boolean indexing, BLAS calls) on the given arrays.
// Results are mostly discarded - the point is that none of these calls crash.
// NOTE(review): row/column sizes (64 / 1024x1) imply x is expected to be 1024x64 - confirm with caller.
protected void op(INDArray x, INDArray y, int i) {
    // broadcast along row & column
    INDArray row = Nd4j.ones(64);
    INDArray column = Nd4j.ones(1024, 1);

    x.addiRowVector(row);
    x.addiColumnVector(column);

    // casual scalar
    x.addi(i * 2);

    // reduction along all dimensions
    float sum = x.sumNumber().floatValue();

    // index reduction
    Nd4j.getExecutioner().exec(new IMax(x), Integer.MAX_VALUE);

    // casual transform
    Nd4j.getExecutioner().exec(new Sqrt(x, x));

    //  dup in both native ordering and explicit 'c'/'f' orderings
    INDArray x1 = x.dup(x.ordering());
    INDArray x2 = x.dup(x.ordering());
    INDArray x3 = x.dup('c');
    INDArray x4 = x.dup('f');


    // vstack && hstack
    INDArray vstack = Nd4j.vstack(x, x1, x2, x3, x4);

    INDArray hstack = Nd4j.hstack(x, x1, x2, x3, x4);

    // reduce3 call
    Nd4j.getExecutioner().exec(new ManhattanDistance(x, x2));


    // flatten call
    INDArray flat = Nd4j.toFlattened(x, x1, x2, x3, x4);


    // reduction along dimension: row & column
    INDArray max_0 = x.max(0);
    INDArray max_1 = x.max(1);


    // index reduction along dimension: row & column
    INDArray imax_0 = Nd4j.argMax(x, 0);
    INDArray imax_1 = Nd4j.argMax(x, 1);


    // logisoftmax, softmax & softmax derivative
    Nd4j.getExecutioner().exec(new OldSoftMax(x));
    Nd4j.getExecutioner().exec(new SoftMaxDerivative(x));
    Nd4j.getExecutioner().exec(new LogSoftMax(x));


    // BooleanIndexing
    BooleanIndexing.replaceWhere(x, 5f, Conditions.lessThan(8f));

    // assign on view
    BooleanIndexing.assignIf(x, x1, Conditions.greaterThan(-1000000000f));

    // std var along all dimensions
    float std = x.stdNumber().floatValue();

    // std var along row & col
    INDArray xStd_0 = x.std(0);
    INDArray xStd_1 = x.std(1);

    // blas call
    float dot = (float) Nd4j.getBlasWrapper().dot(x, x1);

    // mmul over every transpose-flag combination
    for (boolean tA : paramsA) {
        for (boolean tB : paramsB) {

            INDArray xT = tA ? x.dup() : x.dup().transpose();
            INDArray yT = tB ? y.dup() : y.dup().transpose();

            Nd4j.gemm(xT, yT, tA, tB);
        }
    }

    // specially for views, checking here without dup and rollover
    Nd4j.gemm(x, y, false, false);

    log.debug("Iteration passed: " + i);
}
 
Example 14
Source File: CrashTest.java    From deeplearning4j with Apache License 2.0 2 votes vote down vote up
// Crash/smoke test: exercises a broad sweep of ND4J operations (broadcast, reductions,
// transforms, stacking, flattening, boolean indexing, BLAS calls) on the given arrays.
// Results are mostly discarded - the point is that none of these calls crash.
// NOTE(review): row/column sizes (64 / 1024x1) imply x is expected to be 1024x64 - confirm with caller.
protected void op(INDArray x, INDArray y, int i) {
    // broadcast along row & column
    INDArray row = Nd4j.ones(64);
    INDArray column = Nd4j.ones(1024, 1);

    x.addiRowVector(row);
    x.addiColumnVector(column);

    // casual scalar
    x.addi(i * 2);

    // reduction along all dimensions
    float sum = x.sumNumber().floatValue();

    // index reduction
    Nd4j.getExecutioner().exec(new ArgMax(x));

    // casual transform
    Nd4j.getExecutioner().exec(new Sqrt(x, x));

    //  dup in both native ordering and explicit 'c'/'f' orderings
    INDArray x1 = x.dup(x.ordering());
    INDArray x2 = x.dup(x.ordering());
    INDArray x3 = x.dup('c');
    INDArray x4 = x.dup('f');


    // vstack && hstack
    INDArray vstack = Nd4j.vstack(x, x1, x2, x3, x4);

    INDArray hstack = Nd4j.hstack(x, x1, x2, x3, x4);

    // reduce3 call
    Nd4j.getExecutioner().exec(new ManhattanDistance(x, x2));


    // flatten call
    INDArray flat = Nd4j.toFlattened(x, x1, x2, x3, x4);


    // reduction along dimension: row & column
    INDArray max_0 = x.max(0);
    INDArray max_1 = x.max(1);


    // index reduction along dimension: row & column
    INDArray imax_0 = Nd4j.argMax(x, 0);
    INDArray imax_1 = Nd4j.argMax(x, 1);


    // logisoftmax, softmax & softmax derivative
    Nd4j.getExecutioner().exec((CustomOp) new SoftMax(x));
    Nd4j.getExecutioner().exec((CustomOp) new LogSoftMax(x));


    // BooleanIndexing
    BooleanIndexing.replaceWhere(x, 5f, Conditions.lessThan(8f));

    // assign on view
    BooleanIndexing.assignIf(x, x1, Conditions.greaterThan(-1000000000f));

    // std var along all dimensions
    float std = x.stdNumber().floatValue();

    // std var along row & col
    INDArray xStd_0 = x.std(0);
    INDArray xStd_1 = x.std(1);

    // blas call
    float dot = (float) Nd4j.getBlasWrapper().dot(x, x1);

    // mmul over every transpose-flag combination
    for (boolean tA : paramsA) {
        for (boolean tB : paramsB) {

            INDArray xT = tA ? x.dup() : x.dup().transpose();
            INDArray yT = tB ? y.dup() : y.dup().transpose();

            Nd4j.gemm(xT, yT, tA, tB);
        }
    }

    // specially for views, checking here without dup and rollover
    Nd4j.gemm(x, y, false, false);

    log.debug("Iteration passed: " + i);
}