Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#ordering()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#ordering(). Follow the link above each example to view the original project or source file.
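For orientation before the examples: ordering() reports whether an array's backing buffer is laid out row-major ('c') or column-major ('f', Fortran), and the recurring pattern below is to dup() into a known order before reshaping or handing data to BLAS. A minimal sketch of the call itself (this and the short sketches after some examples assume the usual Nd4j/Shape/NDArrayIndex imports; printed values are illustrative):

INDArray c = Nd4j.create(new int[] {2, 3}, 'c');
System.out.println(c.ordering()); // 'c' (row-major)

INDArray f = Nd4j.create(new int[] {2, 3}, 'f');
System.out.println(f.ordering()); // 'f' (column-major)

// dup(order) copies the data into the requested layout
System.out.println(f.dup('c').ordering()); // 'c'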
Example 1
Source File: FeedForwardToCnnPreProcessor.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray preProcess(INDArray input, int miniBatchSize, LayerWorkspaceMgr workspaceMgr) {
    this.shape = input.shape();
    if (input.rank() == 4)
        return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, input);

    if (input.columns() != inputWidth * inputHeight * numChannels)
        throw new IllegalArgumentException("Invalid input: expect output columns must be equal to rows "
                + inputHeight + " x columns " + inputWidth + " x channels " + numChannels
                + " but was instead " + Arrays.toString(input.shape()));

    if (input.ordering() != 'c' || !Shape.hasDefaultStridesForShape(input))
        input = workspaceMgr.dup(ArrayType.ACTIVATIONS, input, 'c');

    return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS,
            input.reshape('c', input.size(0), numChannels, inputHeight, inputWidth));
}
 
Example 2
Source File: ImageFlatteningDataSetPreProcessor.java    From deeplearning4j with Apache License 2.0
@Override
public void preProcess(DataSet toPreProcess) {
    INDArray input = toPreProcess.getFeatures();
    if (input.rank() == 2)
        return; //No op: should usually never happen in a properly configured data pipeline

    //Assume input is standard rank 4 activations - i.e., CNN image data
    //First: we require input to be in c order. But c order (as declared in array order) isn't enough; also need strides to be correct
    if (input.ordering() != 'c' || !Shape.strideDescendingCAscendingF(input))
        input = input.dup('c');

    val inShape = input.shape(); //[miniBatch,depthOut,outH,outW]
    val outShape = new long[] {inShape[0], inShape[1] * inShape[2] * inShape[3]};

    INDArray reshaped = input.reshape('c', outShape);
    toPreProcess.setFeatures(reshaped);
}
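The ordering-plus-stride guard above exists because a view can carry one ordering flag while its buffer layout is unsuitable for a direct 'c'-order reshape. A hedged sketch of the same check applied to a transposed view (a transpose typically reports 'f' ordering, though view details vary across ND4J versions):

INDArray base = Nd4j.linspace(1, 24, 24).reshape('c', 4, 6);
INDArray t = base.transpose(); // a view over the same buffer, shape [6, 4]
if (t.ordering() != 'c' || !Shape.strideDescendingCAscendingF(t)) {
    t = t.dup('c'); // copy into contiguous c order before any reshape
}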
 
Example 3
Source File: Shape.java    From deeplearning4j with Apache License 2.0
/**
 * Ensures a rank-2 array has strides that GEMM can consume directly.
 * Used by the DL4J LSTM implementation.
 * @param input rank-2 input array
 * @return the input array itself, or a copy with mmul-compatible strides
 */
public static INDArray toMmulCompatible(INDArray input) {
    if (input.rank() != 2)
        throw new IllegalArgumentException("Input must be rank 2 (matrix)");
    //Same conditions as GemmParams.copyIfNecessary()
    boolean doCopy = false;
    if (input.ordering() == 'c' && (input.stride(0) != input.size(1) || input.stride(1) != 1))
        doCopy = true;
    else if (input.ordering() == 'f' && (input.stride(0) != 1 || input.stride(1) != input.size(0)))
        doCopy = true;

    if (doCopy)
        return Shape.toOffsetZeroCopyAnyOrder(input);
    else
        return input;
}
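A brief usage sketch under assumed shapes: toMmulCompatible copies a strided view into a GEMM-friendly layout and returns the input unchanged when it already is.

INDArray a = Nd4j.rand(4, 3).transpose();     // [3, 4] view with swapped strides
INDArray compat = Shape.toMmulCompatible(a);  // dup only if the strides require it
INDArray out = compat.mmul(Nd4j.rand(4, 5));  // [3, 5]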
 
Example 4
Source File: TimeSeriesUtils.java    From deeplearning4j with Apache License 2.0
/**
 * Reverse an input time series along the time dimension
 *
 * @param in Input activations to reverse, with shape [minibatch, size, timeSeriesLength]
 * @return Reversed activations
 */
public static INDArray reverseTimeSeries(INDArray in){
    if(in == null){
        return null;
    }

    if(in.ordering() != 'f' || in.isView() || !Shape.strideDescendingCAscendingF(in)){
        in = in.dup('f');
    }

    // Build a reversed index array: [timeSeriesLength-1, ..., 1, 0]
    int[] idxs = new int[(int) in.size(2)];
    int j = 0;
    for (int i = idxs.length - 1; i >= 0; i--) {
        idxs[j++] = i;
    }

    INDArray inReshape = in.reshape('f', in.size(0)*in.size(1), in.size(2));

    INDArray outReshape = Nd4j.pullRows(inReshape, 0, idxs, 'f');
    return outReshape.reshape('f', in.size(0), in.size(1), in.size(2));
}
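A short usage sketch (shapes arbitrary): for input of shape [minibatch, size, timeSeriesLength], the last time step becomes the first.

INDArray in = Nd4j.linspace(1, 30, 30).reshape('f', 2, 3, 5);
INDArray reversed = TimeSeriesUtils.reverseTimeSeries(in);
// reversed.getDouble(0, 0, 0) now equals in.getDouble(0, 0, 4)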
 
Example 5
Source File: Shape.java    From nd4j with Apache License 2.0
/** Are the elements in the buffer contiguous for this NDArray? */
public static boolean isContiguousInBuffer(INDArray in) {
    long length = in.length();
    long dLength = in.data().length();
    if (length == dLength)
        return true; //full buffer, always contiguous

    char order = in.ordering();

    long[] shape = in.shape();
    long[] stridesIfContiguous;
    if (order == 'f') {
        stridesIfContiguous = ArrayUtil.calcStridesFortran(shape);
    } else if (order == 'c') {
        stridesIfContiguous = ArrayUtil.calcStrides(shape);
    } else if (order == 'a') {
        stridesIfContiguous = new long[] {1, 1};
    } else {
        throw new RuntimeException("Invalid order: not c or f (is: " + order + ")");
    }

    return Arrays.equals(in.stride(), stridesIfContiguous);
}
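For intuition: in a c-ordered matrix a row view occupies one contiguous run of the buffer, while a column view is strided. A hedged sketch (view shapes differ slightly across ND4J versions, so treat the printed values as expected rather than guaranteed):

INDArray m = Nd4j.linspace(1, 12, 12).reshape('c', 3, 4);
System.out.println(Shape.isContiguousInBuffer(m.getRow(1)));    // expected: true
System.out.println(Shape.isContiguousInBuffer(m.getColumn(1))); // expected: false, elements sit 4 apart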
 
Example 6
Source File: FeedForwardToRnnPreProcessor.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray preProcess(INDArray input, int miniBatchSize, LayerWorkspaceMgr workspaceMgr) {
    //Need to reshape FF activations (2d) activations to 3d (for input into RNN layer)
    if (input.rank() != 2)
        throw new IllegalArgumentException(
                        "Invalid input: expect NDArray with rank 2 (i.e., activations for FF layer)");
    if (input.ordering() != 'f' || !Shape.hasDefaultStridesForShape(input))
        input = workspaceMgr.dup(ArrayType.ACTIVATIONS, input, 'f');

    val shape = input.shape();
    INDArray reshaped = input.reshape('f', miniBatchSize, shape[0] / miniBatchSize, shape[1]);
    if (rnnDataFormat == RNNFormat.NCW){
        reshaped = reshaped.permute(0, 2, 1);
    }
    return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, reshaped);
}
 
Example 7
Source File: GemvParameters.java    From nd4j with Apache License 2.0
private INDArray copyIfNecessary(INDArray arr) {
    //See also: Shape.toMmulCompatible - want same conditions here and there
    //Check if matrix values are contiguous in memory. If not: dup
    //Contiguous for c if: stride[0] == shape[1] and stride[1] == 1
    //Contiguous for f if: stride[0] == 1 and stride[1] == shape[0]
    if (arr.ordering() == 'c' && (arr.stride(0) != arr.size(1) || arr.stride(1) != 1))
        return arr.dup();
    else if (arr.ordering() == 'f' && (arr.stride(0) != 1 || arr.stride(1) != arr.size(0)))
        return arr.dup();
    else if (arr.elementWiseStride() < 1)
        return arr.dup();
    return arr;
}
 
Example 8
Source File: ConvolutionUtils.java    From deeplearning4j with Apache License 2.0
public static INDArray reshape3dMask(INDArray mask, LayerWorkspaceMgr workspaceMgr, ArrayType type){
    //Assume mask has shape [n,h,w] and will be broadcast along dimension
    if(mask.ordering() != 'c' || !Shape.hasDefaultStridesForShape(mask))
        mask = workspaceMgr.dup(type, mask, 'c');

    return mask.reshape('c', mask.length(), 1);
}
 
Example 9
Source File: Shape.java    From deeplearning4j with Apache License 2.0
public static boolean hasDefaultStridesForShape(INDArray input){
    if(input.rank() == 0)
        return true;
    if(!strideDescendingCAscendingF(input)){
        return false;
    }
    char order = input.ordering();
    long[] defaultStrides;
    if(order == 'f'){
        defaultStrides = ArrayUtil.calcStridesFortran(input.shape());
    } else {
        defaultStrides = ArrayUtil.calcStrides(input.shape());
    }
    return Arrays.equals(input.stride(), defaultStrides);
}
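A quick sketch of the check: a freshly created array has the default strides for its shape, while a sub-view usually inherits the parent's strides.

INDArray arr = Nd4j.linspace(1, 12, 12).reshape('c', 3, 4);
System.out.println(Shape.hasDefaultStridesForShape(arr)); // true

INDArray view = arr.get(NDArrayIndex.all(), NDArrayIndex.interval(0, 2));
System.out.println(Shape.hasDefaultStridesForShape(view)); // false: strides [4, 1], default for [3, 2] is [2, 1]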
 
Example 10
Source File: CudaAffinityManager.java    From nd4j with Apache License 2.0
/**
 * This method replicates the given INDArray and places it on the target device.
 *
 * @param deviceId target deviceId
 * @param array    INDArray to replicate
 * @return the replicated array, attached to the target device
 */
@Override
public synchronized INDArray replicateToDevice(Integer deviceId, INDArray array) {
    if (array == null)
        return null;

    if (array.isView())
        throw new UnsupportedOperationException("It's impossible to replicate View");

    val shape = array.shape();
    val stride = array.stride();
    val elementWiseStride = array.elementWiseStride();
    val ordering = array.ordering();
    val length = array.length();

    // we use this call to get device memory updated
    AtomicAllocator.getInstance().getPointer(array,
                    (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext());

    int currentDeviceId = getDeviceForCurrentThread();

    NativeOpsHolder.getInstance().getDeviceNativeOps().setDevice(new CudaPointer(deviceId));
    attachThreadToDevice(Thread.currentThread().getId(), deviceId);


    DataBuffer newDataBuffer = replicateToDevice(deviceId, array.data());
    DataBuffer newShapeBuffer = Nd4j.getShapeInfoProvider().createShapeInformation(shape, stride, 0,
                    elementWiseStride, ordering).getFirst();
    INDArray result = Nd4j.createArrayFromShapeBuffer(newDataBuffer, newShapeBuffer);

    attachThreadToDevice(Thread.currentThread().getId(), currentDeviceId);
    NativeOpsHolder.getInstance().getDeviceNativeOps().setDevice(new CudaPointer(currentDeviceId));


    return result;
}
 
Example 11
Source File: Nd4jTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testExpandDims(){
    final List<Pair<INDArray, String>> testMatricesC = NDArrayCreationUtil.getAllTestMatricesWithShape('c', 3, 5, 0xDEAD, DataType.DOUBLE);
    final List<Pair<INDArray, String>> testMatricesF = NDArrayCreationUtil.getAllTestMatricesWithShape('f', 7, 11, 0xBEEF, DataType.DOUBLE);

    final ArrayList<Pair<INDArray, String>> testMatrices = new ArrayList<>(testMatricesC);
    testMatrices.addAll(testMatricesF);

    for (Pair<INDArray, String> testMatrixPair : testMatrices) {
        final String recreation = testMatrixPair.getSecond();
        final INDArray testMatrix = testMatrixPair.getFirst();
        final char ordering = testMatrix.ordering();
        val shape = testMatrix.shape();
        final int rank = testMatrix.rank();
        for (int i = -rank; i <= rank; i++) {
            final INDArray expanded = Nd4j.expandDims(testMatrix, i);

            final String message = "Expanding in Dimension " + i + "; Shape before expanding: " + Arrays.toString(shape) + " "+ordering+" Order; Shape after expanding: " + Arrays.toString(expanded.shape()) +  " "+expanded.ordering()+"; Input Created via: " + recreation;

            val tmR = testMatrix.ravel();
            val expR = expanded.ravel();
            assertEquals(message, 1, expanded.shape()[i < 0 ? i + rank : i]);
            assertEquals(message, tmR, expR);
            assertEquals(message, ordering,  expanded.ordering());

            testMatrix.assign(Nd4j.rand(DataType.DOUBLE, shape));
            assertEquals(message, testMatrix.ravel(), expanded.ravel());
        }
    }
}
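A compact usage sketch of the method under test: expandDims inserts a size-1 dimension at the given axis.

INDArray m = Nd4j.create(3, 5);
System.out.println(Arrays.toString(Nd4j.expandDims(m, 0).shape())); // [1, 3, 5]
System.out.println(Arrays.toString(Nd4j.expandDims(m, 2).shape())); // [3, 5, 1]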
 
Example 12
Source File: JcublasLapack.java    From nd4j with Apache License 2.0
@Override
public void sgetrf(int M, int N, INDArray A, INDArray IPIV, INDArray INFO) {
    INDArray a = A;
    if (Nd4j.dataType() != DataBuffer.Type.FLOAT)
        log.warn("FLOAT getrf called in DOUBLE environment");

    if (A.ordering() == 'c')
        a = A.dup('f');


    if (Nd4j.getExecutioner() instanceof GridExecutioner)
        ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();

    // Get context for current thread
    CudaContext ctx = (CudaContext) allocator.getDeviceContext().getContext();

    // setup the solver handles for cuSolver calls
    cusolverDnHandle_t handle = ctx.getSolverHandle();
    cusolverDnContext solverDn = new cusolverDnContext(handle);

    // synchronized on the solver
    synchronized (handle) {
        int result = cusolverDnSetStream(new cusolverDnContext(handle), new CUstream_st(ctx.getOldStream()));
        if (result != 0)
            throw new BlasException("solverSetStream failed");

        // transfer the INDArray into GPU memory
        CublasPointer xAPointer = new CublasPointer(a, ctx);

        // this output - indicates how much memory we'll need for the real operation
        DataBuffer worksizeBuffer = Nd4j.getDataBufferFactory().createInt(1);

        int stat = cusolverDnSgetrf_bufferSize(solverDn, M, N, (FloatPointer) xAPointer.getDevicePointer(), M,
                        (IntPointer) worksizeBuffer.addressPointer() // we intentionally use host pointer here
        );

        if (stat != CUSOLVER_STATUS_SUCCESS) {
            throw new BlasException("cusolverDnSgetrf_bufferSize failed", stat);
        }

        int worksize = worksizeBuffer.getInt(0);
        // Now allocate memory for the workspace, the permutation matrix and a return code
        Pointer workspace = new Workspace(worksize * Nd4j.sizeOfDataType());

        // Do the actual LU decomp
        stat = cusolverDnSgetrf(solverDn, M, N, (FloatPointer) xAPointer.getDevicePointer(), M,
                        new CudaPointer(workspace).asFloatPointer(),
                        new CudaPointer(allocator.getPointer(IPIV, ctx)).asIntPointer(),
                        new CudaPointer(allocator.getPointer(INFO, ctx)).asIntPointer());

        // we do sync to make sure getrf is finished
        //ctx.syncOldStream();

        if (stat != CUSOLVER_STATUS_SUCCESS) {
            throw new BlasException("cusolverDnSgetrf failed", stat);
        }
    }
    allocator.registerAction(ctx, a);
    allocator.registerAction(ctx, INFO);
    allocator.registerAction(ctx, IPIV);

    if (a != A)
        A.assign(a);
}
 
Example 13
Source File: CpuLapack.java    From nd4j with Apache License 2.0
protected static int getLda(INDArray A) {
    // FIXME: int cast
    return A.ordering() == 'f' ? (int) A.rows() : (int) A.columns();
}
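For context: the leading dimension (lda) passed to LAPACK is the element distance between consecutive columns of a column-major matrix, which for a default-strided array equals the row count in 'f' order and the column count in 'c' order. A small illustration:

INDArray A = Nd4j.create(new int[] {5, 3}, 'f');
System.out.println(A.stride(1)); // 5: column-to-column distance == rows == lda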
 
Example 14
Source File: JcublasLapack.java    From deeplearning4j with Apache License 2.0
@Override
public void sgetrf(int M, int N, INDArray A, INDArray IPIV, INDArray INFO) {
    INDArray a = A;
    if (Nd4j.dataType() != DataType.FLOAT)
        log.warn("FLOAT getrf called in DOUBLE environment");

    if (A.ordering() == 'c')
        a = A.dup('f');

    if (Nd4j.getExecutioner() instanceof GridExecutioner)
        ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();

    // Get context for current thread
    val ctx = allocator.getDeviceContext();

    // setup the solver handles for cuSolver calls
    cusolverDnHandle_t handle = ctx.getSolverHandle();
    cusolverDnContext solverDn = new cusolverDnContext(handle);

    // synchronized on the solver
    synchronized (handle) {
        int result = cusolverDnSetStream(new cusolverDnContext(handle), new CUstream_st(ctx.getCublasStream()));
        if (result != 0)
            throw new BlasException("solverSetStream failed");

        // transfer the INDArray into GPU memory
        CublasPointer xAPointer = new CublasPointer(a, ctx);

        // this output - indicates how much memory we'll need for the real operation
        val worksizeBuffer = (BaseCudaDataBuffer) Nd4j.getDataBufferFactory().createInt(1);
        worksizeBuffer.lazyAllocateHostPointer();

        int stat = cusolverDnSgetrf_bufferSize(solverDn, M, N, (FloatPointer) xAPointer.getDevicePointer(), M,
                (IntPointer) worksizeBuffer.addressPointer() // we intentionally use host pointer here
        );

        if (stat != CUSOLVER_STATUS_SUCCESS) {
            throw new BlasException("cusolverDnSgetrf_bufferSize failed", stat);
        }

        int worksize = worksizeBuffer.getInt(0);
        // Now allocate memory for the workspace, the permutation matrix and a return code
        Pointer workspace = new Workspace(worksize * Nd4j.sizeOfDataType());

        // Do the actual LU decomp
        stat = cusolverDnSgetrf(solverDn, M, N, (FloatPointer) xAPointer.getDevicePointer(), M,
                new CudaPointer(workspace).asFloatPointer(),
                new CudaPointer(allocator.getPointer(IPIV, ctx)).asIntPointer(),
                new CudaPointer(allocator.getPointer(INFO, ctx)).asIntPointer());

        // we do sync to make sure getrf is finished
        //ctx.syncOldStream();

        if (stat != CUSOLVER_STATUS_SUCCESS) {
            throw new BlasException("cusolverDnSgetrf failed", stat);
        }
    }
    allocator.registerAction(ctx, a);
    allocator.registerAction(ctx, INFO);
    allocator.registerAction(ctx, IPIV);

    if (a != A)
        A.assign(a);
}
 
Example 15
Source File: GanCnnInputPreProcessor.java    From dl4j-tutorials with MIT License
@Override
public INDArray preProcess(INDArray input, int miniBatchSize, LayerWorkspaceMgr workspaceMgr) {
	// [1 , numChannels * 2, inputHeight, inputWidth]
	this.shape = input.shape();
	// System.out.println("input = " + input);
	// System.out.println("input.sumNumber() = " + input.sumNumber());
	if (printLog) {
		System.out.println("this.shape = " + Arrays.toString(this.shape));
	}
	// Input: 4d activations (CNN)
	// Output: 4d activations (CNN)
	if (input.rank() != 4) {
		throw new IllegalArgumentException(
				"Invalid input: expect CNN activations with rank 4 (received input with shape " + Arrays.toString(input.shape()) + ")");
	}

	if (input.ordering() != 'c' || !Shape.hasDefaultStridesForShape(input)) {
		input = input.dup('c');
		// input = workspaceMgr.dup(ArrayType.ACTIVATIONS, input, 'c');
	}

	// Merge the two stacked CNN inputs into a single CNN input
	INDArray newInput = Nd4j.zeros(shape[0], shape[1] / 2, shape[2], shape[3]);
	for (int i = 0; i < shape[0]; i++) {
		// [numChannels * 2, inputHeight, inputWidth]: z + r
		INDArray multyImage = input.get(NDArrayIndex.point(i), NDArrayIndex.all());
		// System.out.println("multyImage.sumNumber() = " + multyImage.sumNumber());
		// [numChannels * 1, inputHeight, inputWidth]
		INDArray newMultyImage = newInput.getRow(i);

		int newRowIndex = 0;
		for (int j = 0; j < shape[1] / 2; j++) {
			// [inputHeight, inputWidth]
			INDArray rImageWH = null;
			if (j == 0) {
				// Step 1: read rImageWH and check whether it is empty
				// "z-input", "r-input"
				rImageWH = multyImage.get(NDArrayIndex.point(j + shape[1] / 2), NDArrayIndex.all());
				// System.out.println("rImageWH.sumNumber() = " + rImageWH.sumNumber());
				double firstPixelValue = rImageWH.getDouble(0, 0);
				if (firstPixelValue != -9999) {
					this.isRInputEmpty = false;
				} else {
					this.isRInputEmpty = true;
				}
			}

			if (!this.isRInputEmpty) {
				if (rImageWH == null) {
					rImageWH = multyImage.get(NDArrayIndex.point(j + shape[1] / 2), NDArrayIndex.all());
				}
				// System.out.println("newRowIndex = " + newRowIndex);
				newMultyImage.putRow(newRowIndex, rImageWH);
				// System.out.println("newMultyImage.sumNumber() = " + newMultyImage.sumNumber());
			} else {
				INDArray zImageWH = multyImage.get(NDArrayIndex.point(j), NDArrayIndex.all());
				newMultyImage.putRow(newRowIndex, zImageWH);
			}
			newRowIndex++;
		}

		newInput.putRow(i, newMultyImage);
	}
	// System.out.println("newInput = " + newInput);
	// System.out.println("newInput.sumNumber() = " + newInput.sumNumber());

	// return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, newInput);
	if (save) {
		ImageUtils.save("/myself/tmp/dl4j/gan/data/train/0/0.jpg", newInput.dup().mul(255));
	}
	return newInput;
}
 
Example 16
Source File: Flatten.java    From deeplearning4j with Apache License 2.0
/**
 * Creates a Flatten op that flattens the given inputs into the provided
 * output array, using the output array's ordering.
 */
public Flatten(INDArray output, INDArray... inputs) {
    this(output.ordering(), inputs);

    outputArguments.add(output);
}
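A hedged usage sketch of this constructor (the output shape and the Nd4j.exec call follow the general DynamicCustomOp pattern and are assumptions, not taken from this source file):

INDArray out = Nd4j.create(DataType.FLOAT, 8);
Nd4j.exec(new Flatten(out, Nd4j.rand(2, 2), Nd4j.rand(2, 2))); // out now holds both inputs, flattened in out's ('c') order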
 
Example 17
Source File: BlasBufferUtil.java    From deeplearning4j with Apache License 2.0
/**
 * Return the proper stride
 * through a vector
 * relative to the ordering of the array
 * This is for incX/incY parameters in BLAS.
 *
 * @param arr the array to get the stride for
 * @return the stride wrt the ordering
 * for the given array
 */
public static int getStrideForOrdering(INDArray arr) {
    if (arr.ordering() == NDArrayFactory.FORTRAN) {
        return getBlasStride(arr);
    } else {
        return arr.stride(1);
    }
}
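For context, the value returned here feeds the incX/incY arguments of BLAS vector routines: the step between consecutive vector elements in the buffer. A hedged sketch (vector view representations vary a little across ND4J versions):

INDArray col = Nd4j.linspace(1, 12, 12).reshape('c', 4, 3).getColumn(0);
System.out.println(Arrays.toString(col.stride())); // step of 3 between elements, i.e. incX = 3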
 
Example 18
Source File: BlasBufferUtil.java    From nd4j with Apache License 2.0
/**
 * Return the proper stride
 * through a vector
 * relative to the ordering of the array
 * This is for incX/incY parameters in BLAS.
 *
 * @param arr the array to get the stride for
 * @return the stride wrt the ordering
 * for the given array
 */
public static int getStrideForOrdering(INDArray arr) {
    if (arr.ordering() == NDArrayFactory.FORTRAN) {
        return getBlasStride(arr);
    } else {
        if (arr instanceof IComplexNDArray)
            return arr.stride(1) / 2;
        return arr.stride(1);
    }
}
 
Example 19
Source File: ProtectedCudaShapeInfoProviderTest.java    From nd4j with Apache License 2.0
@Test
public void testPurge2() throws Exception {
    INDArray arrayA = Nd4j.create(10, 10);
    DataBuffer shapeInfoA = arrayA.shapeInfoDataBuffer();

    INDArray arrayE = Nd4j.create(10, 10);
    DataBuffer shapeInfoE = arrayE.shapeInfoDataBuffer();

    int[] arrayShapeA = shapeInfoA.asInt();

    assertTrue(shapeInfoA == shapeInfoE);

    ShapeDescriptor descriptor = new ShapeDescriptor(arrayA.shape(), arrayA.stride(), 0, arrayA.elementWiseStride(), arrayA.ordering());
    ConstantProtector protector = ConstantProtector.getInstance();
    AllocationPoint pointA = AtomicAllocator.getInstance().getAllocationPoint(arrayA.shapeInfoDataBuffer());

    assertEquals(true, protector.containsDataBuffer(0, descriptor));

    Nd4j.getMemoryManager().purgeCaches();

    assertEquals(false, protector.containsDataBuffer(0, descriptor));

    INDArray arrayB = Nd4j.create(10, 10);
    DataBuffer shapeInfoB = arrayB.shapeInfoDataBuffer();

    assertFalse(shapeInfoA == shapeInfoB);

    AllocationPoint pointB = AtomicAllocator.getInstance().getAllocationPoint(arrayB.shapeInfoDataBuffer());

    assertArrayEquals(arrayShapeA, shapeInfoB.asInt());

    // pointers should be equal, due to offsets reset
    assertEquals(pointA.getPointers().getDevicePointer().address(), pointB.getPointers().getDevicePointer().address());
}