Java Code Examples for org.nd4j.linalg.factory.Nd4j#createArrayFromShapeBuffer()

The following examples show how to use org.nd4j.linalg.factory.Nd4j#createArrayFromShapeBuffer(). Each example comes from an open-source project; the source file and project are noted above it.
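At its core, createArrayFromShapeBuffer(DataBuffer data, DataBuffer shapeInfo) wraps an existing data buffer and a shape-information buffer into an INDArray without copying the underlying data. A minimal sketch of that pattern, modeled on the examples below (exact signatures vary between nd4j versions, so treat this as an illustration rather than a reference):

// Build a data buffer and a matching shape-info buffer, then wrap both into
// an INDArray; no data is copied, the array is a view over `data`.
DataBuffer data = Nd4j.createBuffer(new float[] {1f, 2f, 3f, 4f});
DataBuffer shapeInfo = Nd4j.getShapeInfoProvider()
        .createShapeInformation(new long[] {2, 2}, DataType.FLOAT).getFirst();
INDArray array = Nd4j.createArrayFromShapeBuffer(data, shapeInfo);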
Example 1
Source File: CompressionTests.java    From nd4j with Apache License 2.0
@Test
public void testBitmapEncoding3() throws Exception {
    INDArray initial = Nd4j.create(new double[] {0.0, -6e-4, 1e-3, -1e-3, 0.0, 0.0});
    INDArray exp_0 = Nd4j.create(new double[] {0.0, -1e-4, 0.0, 0.0, 0.0, 0.0});   // expected residual left in `initial`
    INDArray exp_1 = Nd4j.create(new double[] {0.0, -5e-4, 1e-3, -1e-3, 0.0, 0.0}); // expected decoded values

    DataBuffer ib = Nd4j.getDataBufferFactory().createInt(5);
    INDArray enc = Nd4j.createArrayFromShapeBuffer(ib, initial.shapeInfoDataBuffer());

    long elements = Nd4j.getExecutioner().bitmapEncode(initial, enc, 1e-3);
    log.info("Encoded: {}", Arrays.toString(enc.data().asInt()));
    assertArrayEquals(new int[] {6, 6, 981668463, 1, 655372}, enc.data().asInt());

    assertEquals(3, elements);

    assertEquals(exp_0, initial);

    INDArray target = Nd4j.create(6);

    Nd4j.getExecutioner().bitmapDecode(enc, target);
    log.info("Target: {}", Arrays.toString(target.data().asFloat()));
    assertEquals(exp_1, target);
}
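Two details worth noting in this test: the encode target is an INT buffer wrapped with the source array's own shapeInfoDataBuffer(), so the encoded array carries the same shape metadata while its data buffer holds the compressed bitmap; and bitmapEncode() modifies initial in place, leaving only the residual that did not fit the encoding (which is what the exp_0 assertion checks).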
 
Example 2
Source File: CpuLapack.java    From deeplearning4j with Apache License 2.0
public int dsyev(char jobz, char uplo, int N, INDArray A, INDArray R) {
    // workspace-size query: lwork == -1 asks LAPACK for the optimal work-array length
    DoublePointer dp = new DoublePointer(1);
    int status = LAPACKE_dsyev_work(getColumnOrder(A), (byte) jobz, (byte) uplo,
                    N, (DoublePointer) A.data().addressPointer(), getLda(A),
                    (DoublePointer) R.data().addressPointer(), dp, -1);
    if (status == 0) {
        int lwork = (int) dp.get();
        INDArray work = Nd4j.createArrayFromShapeBuffer(Nd4j.getDataBufferFactory().createDouble(lwork),
                        Nd4j.getShapeInfoProvider().createShapeInformation(new long[] {lwork}, A.dataType()).getFirst());

        status = LAPACKE_dsyev(getColumnOrder(A), (byte) jobz, (byte) uplo, N,
                    (DoublePointer) A.data().addressPointer(), getLda(A),
                    (DoublePointer) work.data().addressPointer());

        if (status == 0) {
            R.assign(work.get(NDArrayIndex.interval(0, N)));
        }
    }
    return status;
}
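The two-phase call is the standard LAPACK workspace-query pattern: the first LAPACKE_dsyev_work call with lwork = -1 only reports the optimal work-array size, and createArrayFromShapeBuffer() is then used to allocate a work array of exactly that length without going through the usual Nd4j.create() path.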
 
Example 3
Source File: JCublasNDArray.java    From nd4j with Apache License 2.0
@Override
public INDArray dup(char order) {
    if (this.isCompressed() && this.ordering() == order) {
        INDArray ret = Nd4j.createArrayFromShapeBuffer(data().dup(), this.shapeInfoDataBuffer());
        ret.markAsCompressed(true);
        return ret;
    }
    /*
    if (!isView() && ordering() == order && Shape.strideDescendingCAscendingF(this)) {
        AtomicAllocator allocator = AtomicAllocator.getInstance();
        INDArray array = Nd4j.createUninitialized(shape(), order);
    
        CudaContext context = allocator.getFlowController().prepareAction(array, this);
    
        Configuration configuration = CudaEnvironment.getInstance().getConfiguration();
    
        if (configuration.getMemoryModel() == Configuration.MemoryModel.IMMEDIATE && configuration.getFirstMemory() == AllocationStatus.DEVICE) {
            allocator.memcpyDevice(array.data(), allocator.getPointer(this.data, context), this.data.length() * this.data().getElementSize(), 0, context);
        } else if (configuration.getMemoryModel() == Configuration.MemoryModel.DELAYED || configuration.getFirstMemory() == AllocationStatus.HOST) {
            AllocationPoint pointSrc = allocator.getAllocationPoint(this);
            AllocationPoint pointDst = allocator.getAllocationPoint(array);
    
            if (pointSrc.getAllocationStatus() == AllocationStatus.HOST) {
                NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getPointers().getHostPointer(), pointSrc.getPointers().getHostPointer(), length * data.getElementSize(), CudaConstants.cudaMemcpyHostToHost, context.getOldStream());
            } else {
                // this code branch is possible only with DELAYED memoryModel and src point being allocated on device
                if (pointDst.getAllocationStatus() != AllocationStatus.DEVICE) {
                    allocator.getMemoryHandler().alloc(AllocationStatus.DEVICE, pointDst, pointDst.getShape(), false);
                }
    
                NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getPointers().getDevicePointer(), pointSrc.getPointers().getDevicePointer(), length * data.getElementSize(), CudaConstants.cudaMemcpyHostToDevice, context.getOldStream());
            }
        }
    
        allocator.getFlowController().registerAction(context, array, this);
    
        return array;
    } else */ return super.dup(order);
}
 
Example 4
Source File: AbstractCompressor.java    From deeplearning4j with Apache License 2.0
/**
 * This method creates a compressed INDArray from a Java float array, skipping the usual INDArray instantiation routines.
 *
 * @param data  the float values to compress
 * @param shape the shape of the resulting array
 * @param order the element ordering ('c' or 'f') of the resulting array
 * @return the compressed INDArray
 */
@Override
public INDArray compress(float[] data, int[] shape, char order) {
    FloatPointer pointer = new FloatPointer(data);

    DataBuffer shapeInfo = Nd4j.getShapeInfoProvider().createShapeInformation(ArrayUtil.toLongArray(shape), order, DataType.FLOAT).getFirst();
    DataBuffer buffer = compressPointer(DataTypeEx.FLOAT, pointer, data.length, 4);

    return Nd4j.createArrayFromShapeBuffer(buffer, shapeInfo);
}
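Callers do not usually invoke this override directly; compression is normally driven through the front-end compressor. A hypothetical caller-side sketch (assuming the BasicNDArrayCompressor front-end reachable via Nd4j.getCompressor(); the codec name is an assumption, not taken from the example above):

// Compress an existing array with a named codec, then restore it.
INDArray original = Nd4j.create(new float[] {1f, 2f, 3f, 4f});
INDArray compressed = Nd4j.getCompressor().compress(original, "GZIP");
INDArray restored = Nd4j.getCompressor().decompress(compressed);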
 
Example 5
Source File: AbstractCompressor.java    From nd4j with Apache License 2.0
/**
 * This method creates a compressed INDArray from a Java double array, skipping the usual INDArray instantiation routines.
 *
 * @param data  the double values to compress
 * @param shape the shape of the resulting array
 * @param order the element ordering ('c' or 'f') of the resulting array
 * @return the compressed INDArray
 */
@Override
public INDArray compress(double[] data, int[] shape, char order) {
    DoublePointer pointer = new DoublePointer(data);

    DataBuffer shapeInfo = Nd4j.getShapeInfoProvider().createShapeInformation(shape, order).getFirst();
    DataBuffer buffer = compressPointer(DataBuffer.TypeEx.DOUBLE, pointer, data.length, 8);

    return Nd4j.createArrayFromShapeBuffer(buffer, shapeInfo);
}
 
Example 6
Source File: JCublasNDArray.java    From nd4j with Apache License 2.0
@Override
public INDArray convertToDoubles() {
    if (data.dataType() == DataBuffer.Type.DOUBLE)
        return this;

    val factory = Nd4j.getNDArrayFactory();
    val buffer = Nd4j.createBuffer(new long[]{this.length()}, DataBuffer.Type.DOUBLE);

    factory.convertDataEx(convertType(data.dataType()), AtomicAllocator.getInstance().getHostPointer(this.data()), DataBuffer.TypeEx.DOUBLE, AtomicAllocator.getInstance().getHostPointer(buffer), buffer.length());

    AtomicAllocator.getInstance().getAllocationPoint(buffer).tickHostWrite();

    return Nd4j.createArrayFromShapeBuffer(buffer, this.shapeInformation);
}
 
Example 7
Source File: CpuThreshold.java    From deeplearning4j with Apache License 2.0
@Override
public DataBuffer compress(DataBuffer buffer) {
    INDArray temp = Nd4j.createArrayFromShapeBuffer(buffer, Nd4j.getShapeInfoProvider().createShapeInformation(new long[]{1, buffer.length()}, buffer.dataType()).getFirst());
    MatchCondition condition = new MatchCondition(temp, Conditions.absGreaterThanOrEqual(threshold));
    int cntAbs = Nd4j.getExecutioner().exec(condition).getInt(0);


    //log.info("density ratio: {}", String.format("%.2f", cntAbs * 100.0f / buffer.length()));

    if (cntAbs < 2)
        return null;

    long originalLength = buffer.length() * Nd4j.sizeOfDataType(buffer.dataType());
    int compressedLength = cntAbs + 4;
    // the first 4 ints hold the header
    IntPointer pointer = new IntPointer(compressedLength);
    pointer.put(0, cntAbs);
    pointer.put(1, (int) buffer.length());
    pointer.put(2, Float.floatToIntBits(threshold));
    pointer.put(3, 0);

    CompressionDescriptor descriptor = new CompressionDescriptor();
    descriptor.setCompressedLength(compressedLength * 4); // sizeOf(INT)
    descriptor.setOriginalLength(originalLength);
    descriptor.setOriginalElementSize(Nd4j.sizeOfDataType(buffer.dataType()));
    descriptor.setNumberOfElements(buffer.length());

    descriptor.setCompressionAlgorithm(getDescriptor());
    descriptor.setCompressionType(getCompressionType());



    CompressedDataBuffer cbuff = new CompressedDataBuffer(pointer, descriptor);

    Nd4j.getNDArrayFactory().convertDataEx(getBufferTypeEx(buffer), buffer.addressPointer(), DataTypeEx.THRESHOLD, pointer, buffer.length());

    Nd4j.getAffinityManager().tagLocation(buffer, AffinityManager.Location.HOST);

    return cbuff;
}
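The layout follows from compressedLength = cntAbs + 4: the four leading ints form the header (element count, original length, the float bits of the threshold, and a reserved slot), and the convertDataEx() call with DataTypeEx.THRESHOLD fills the remaining cntAbs ints with the encoded elements.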
 
Example 8
Source File: BaseNDArray.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray migrate(boolean detachOnNoWs){
    WorkspaceUtils.assertValidArray(this, "Cannot migrate INDArray to new workspace");

    MemoryWorkspace current = Nd4j.getMemoryManager().getCurrentWorkspace();

    if (current == null) {
        if(detachOnNoWs){
            return detach();
        } else {
            return this;
        }
    }

    INDArray copy = null;

    if (!this.isView()) {
        Nd4j.getExecutioner().commit();
        DataBuffer buffer = Nd4j.createBuffer(this.dataType(), this.length(), false);
        Nd4j.getMemoryManager().memcpy(buffer, this.data());

        copy = Nd4j.createArrayFromShapeBuffer(buffer, this.shapeInfoDataBuffer());
    } else {
        copy = this.dup(this.ordering());
        Nd4j.getExecutioner().commit();
    }

    return copy;
}
 
Example 9
Source File: CpuFlexibleThreshold.java    From deeplearning4j with Apache License 2.0
@Override
public DataBuffer compress(DataBuffer buffer) {
    INDArray temp = Nd4j.createArrayFromShapeBuffer(buffer, Nd4j.getShapeInfoProvider().createShapeInformation(new long[]{1, buffer.length()}, buffer.dataType()).getFirst());
    double max = temp.amaxNumber().doubleValue();

    int cntAbs = temp.scan(Conditions.absGreaterThanOrEqual(max - (max * threshold))).intValue();

    long originalLength = buffer.length() * Nd4j.sizeOfDataType(buffer.dataType());
    int compressedLength = cntAbs + 4;
    // the first 4 ints hold the header
    IntPointer pointer = new IntPointer(compressedLength);
    pointer.put(0, cntAbs);
    pointer.put(1, (int) buffer.length());
    pointer.put(2, Float.floatToIntBits(threshold)); // please note, this value will be overwritten anyway
    pointer.put(3, 0);

    CompressionDescriptor descriptor = new CompressionDescriptor();
    descriptor.setCompressedLength(compressedLength * 4); // sizeOf(INT)
    descriptor.setOriginalLength(originalLength);
    descriptor.setOriginalElementSize(Nd4j.sizeOfDataType(buffer.dataType()));
    descriptor.setNumberOfElements(buffer.length());

    descriptor.setCompressionAlgorithm(getDescriptor());
    descriptor.setCompressionType(getCompressionType());

    CompressedDataBuffer cbuff = new CompressedDataBuffer(pointer, descriptor);

    Nd4j.getNDArrayFactory().convertDataEx(getBufferTypeEx(buffer), buffer.addressPointer(), DataTypeEx.FTHRESHOLD, pointer, buffer.length());

    Nd4j.getAffinityManager().tagLocation(buffer, AffinityManager.Location.HOST);

    return cbuff;
}
 
Example 10
Source File: CudaAffinityManager.java    From nd4j with Apache License 2.0
/**
 * This method replicates the given INDArray and places the copy on the target device.
 *
 * @param deviceId target deviceId
 * @param array    INDArray to replicate
 * @return the replicated INDArray, backed by memory on the target device
 */
@Override
public synchronized INDArray replicateToDevice(Integer deviceId, INDArray array) {
    if (array == null)
        return null;

    if (array.isView())
        throw new UnsupportedOperationException("It's impossible to replicate View");

    val shape = array.shape();
    val stride = array.stride();
    val elementWiseStride = array.elementWiseStride();
    val ordering = array.ordering();
    val length = array.length();

    // we use this call to get device memory updated
    AtomicAllocator.getInstance().getPointer(array,
                    (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext());

    int currentDeviceId = getDeviceForCurrentThread();

    NativeOpsHolder.getInstance().getDeviceNativeOps().setDevice(new CudaPointer(deviceId));
    attachThreadToDevice(Thread.currentThread().getId(), deviceId);


    DataBuffer newDataBuffer = replicateToDevice(deviceId, array.data());
    DataBuffer newShapeBuffer = Nd4j.getShapeInfoProvider().createShapeInformation(shape, stride, 0,
                    elementWiseStride, ordering).getFirst();
    INDArray result = Nd4j.createArrayFromShapeBuffer(newDataBuffer, newShapeBuffer);

    attachThreadToDevice(Thread.currentThread().getId(), currentDeviceId);
    NativeOpsHolder.getInstance().getDeviceNativeOps().setDevice(new CudaPointer(currentDeviceId));


    return result;
}
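Note the save/restore dance around the copy: the calling thread is temporarily attached to the target device so that both the replicated data buffer and the INDArray wrapped around it are allocated there, and it is re-attached to its original device before returning.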
 
Example 11
Source File: AbstractCompressor.java    From deeplearning4j with Apache License 2.0
/**
 * This method creates a compressed INDArray from a Java double array, skipping the usual INDArray instantiation routines.
 *
 * @param data  the double values to compress
 * @param shape the shape of the resulting array
 * @param order the element ordering ('c' or 'f') of the resulting array
 * @return the compressed INDArray
 */
@Override
public INDArray compress(double[] data, int[] shape, char order) {
    DoublePointer pointer = new DoublePointer(data);

    DataBuffer shapeInfo = Nd4j.getShapeInfoProvider().createShapeInformation(ArrayUtil.toLongArray(shape), order, DataType.DOUBLE).getFirst();
    DataBuffer buffer = compressPointer(DataTypeEx.DOUBLE, pointer, data.length, 8);

    return Nd4j.createArrayFromShapeBuffer(buffer, shapeInfo);
}
 
Example 12
Source File: BaseNDArray.java    From nd4j with Apache License 2.0
@Override
public INDArray convertToDoubles() {
    if (data.dataType() == DataBuffer.Type.DOUBLE)
        return this;

    val factory = Nd4j.getNDArrayFactory();
    val buffer = Nd4j.createBuffer(new long[]{this.length()}, DataBuffer.Type.DOUBLE);

    factory.convertDataEx(convertType(data.dataType()), this.data().addressPointer(), DataBuffer.TypeEx.DOUBLE, buffer.addressPointer(), buffer.length());

    return Nd4j.createArrayFromShapeBuffer(buffer, this.shapeInformation);
}
 
Example 13
Source File: BaseNDArray.java    From nd4j with Apache License 2.0
@Override
public INDArray convertToFloats() {
    if (data.dataType() == DataBuffer.Type.FLOAT)
        return this;

    val factory = Nd4j.getNDArrayFactory();
    val buffer = Nd4j.createBuffer(new long[]{this.length()}, DataBuffer.Type.FLOAT);

    factory.convertDataEx(convertType(data.dataType()), this.data().addressPointer(), DataBuffer.TypeEx.FLOAT, buffer.addressPointer(), buffer.length());

    return Nd4j.createArrayFromShapeBuffer(buffer, this.shapeInformation);
}
 
Example 14
Source File: JcublasLapack.java    From deeplearning4j with Apache License 2.0
public int dsyev(char _jobz, char _uplo, int N, INDArray A, INDArray R) {
    int status = -1;

    int jobz = _jobz == 'V' ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR;
    int uplo = _uplo == 'L' ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER;

    INDArray a = A;

    if (A.ordering() == 'c')
        a = A.dup('f');

    if (A.rows() > Integer.MAX_VALUE) {
        throw new RuntimeException("Rows overflow");
    }

    int M = (int) A.rows();

    if (Nd4j.getExecutioner() instanceof GridExecutioner)
        ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();

    // Get context for current thread
    val ctx = allocator.getDeviceContext();

    // setup the solver handles for cuSolver calls
    cusolverDnHandle_t handle = ctx.getSolverHandle();
    cusolverDnContext solverDn = new cusolverDnContext(handle);

    // synchronized on the solver
    synchronized (handle) {
        status = cusolverDnSetStream(new cusolverDnContext(handle), new CUstream_st(ctx.getCublasStream()));
        if (status == 0) {
            // transfer the INDArray into GPU memory
            CublasPointer xAPointer = new CublasPointer(a, ctx);
            CublasPointer xRPointer = new CublasPointer(R, ctx);

            // this output - indicates how much memory we'll need for the real operation
            val worksizeBuffer = (BaseCudaDataBuffer) Nd4j.getDataBufferFactory().createInt(1);
            worksizeBuffer.lazyAllocateHostPointer();

            status = cusolverDnDsyevd_bufferSize(
                    solverDn, jobz, uplo, M,
                    (DoublePointer) xAPointer.getDevicePointer(), M,
                    (DoublePointer) xRPointer.getDevicePointer(),
                    (IntPointer) worksizeBuffer.addressPointer());

            if (status == CUSOLVER_STATUS_SUCCESS) {
                int worksize = worksizeBuffer.getInt(0);

                // allocate memory for the workspace, the non-converging row buffer and a return code
                Pointer workspace = new Workspace(worksize * 8);        //8 = double width

                INDArray INFO = Nd4j.createArrayFromShapeBuffer(Nd4j.getDataBufferFactory().createInt(1),
                        Nd4j.getShapeInfoProvider().createShapeInformation(new long[]{1, 1}, A.dataType()).getFirst());


                // Do the actual decomp
                status = cusolverDnDsyevd(solverDn, jobz, uplo, M,
                        (DoublePointer) xAPointer.getDevicePointer(), M,
                        (DoublePointer) xRPointer.getDevicePointer(),
                        new CudaPointer(workspace).asDoublePointer(), worksize,
                        new CudaPointer(allocator.getPointer(INFO, ctx)).asIntPointer());

                allocator.registerAction(ctx, INFO);
                if (status == 0) status = INFO.getInt(0);
            }
        }
    }
    if (status == 0) {
        allocator.registerAction(ctx, R);
        allocator.registerAction(ctx, a);

        if (a != A)
            A.assign(a);
    }
    return status;
}
 
Example 15
Source File: JCublasNDArray.java    From nd4j with Apache License 2.0
@Override
public INDArray dup() {
    if (this.isCompressed() && this.ordering() == Nd4j.order().charValue()) {
        INDArray ret = Nd4j.createArrayFromShapeBuffer(data().dup(), this.shapeInfoDataBuffer());
        ret.markAsCompressed(true);
        return ret;
    }
    /*
        Special case for cuda: if this is not a view and the shapes match,
        the buffer can be copied directly on the device (see the disabled branch below).
    */
    /*
    if (!isView() && ordering() == Nd4j.order() && Shape.strideDescendingCAscendingF(this)) {
        AtomicAllocator allocator = AtomicAllocator.getInstance();
        INDArray array = Nd4j.createUninitialized(shape(), ordering());
    
        CudaContext context = allocator.getFlowController().prepareAction(array, this);
    
        Configuration configuration = CudaEnvironment.getInstance().getConfiguration();
    
        if (configuration.getMemoryModel() == Configuration.MemoryModel.IMMEDIATE && configuration.getFirstMemory() == AllocationStatus.DEVICE) {
    //                log.info("Path 0");
            allocator.memcpyDevice(array.data(), allocator.getPointer(this.data, context), this.data.length() * this.data().getElementSize(), 0, context);
        } else if (configuration.getMemoryModel() == Configuration.MemoryModel.DELAYED || configuration.getFirstMemory() == AllocationStatus.HOST) {
            AllocationPoint pointSrc = allocator.getAllocationPoint(this);
            AllocationPoint pointDst = allocator.getAllocationPoint(array);
    
            if (pointSrc.getAllocationStatus() == AllocationStatus.HOST) {
    //                    log.info("Path A");
                NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getPointers().getHostPointer(), pointSrc.getPointers().getHostPointer(), length * data.getElementSize(), CudaConstants.cudaMemcpyHostToHost, context.getOldStream());
            } else {
    //                    log.info("Path B. SRC dId: [{}], DST dId: [{}], cId: [{}]", pointSrc.getDeviceId(), pointDst.getDeviceId(), allocator.getDeviceId());
                // this code branch is possible only with DELAYED memoryModel and src point being allocated on device
                if (pointDst.getAllocationStatus() != AllocationStatus.DEVICE) {
                    allocator.getMemoryHandler().alloc(AllocationStatus.DEVICE, pointDst, pointDst.getShape(), false);
                }
    
                NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getPointers().getDevicePointer(), pointSrc.getPointers().getHostPointer(), length * data.getElementSize(), CudaConstants.cudaMemcpyHostToDevice, context.getOldStream());
            }
        }
    
        allocator.getFlowController().registerAction(context, array, this);
        return array;
    } else */ return super.dup();
}
 
Example 16
Source File: BaseNDArray.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray leverage() {
    WorkspaceUtils.assertValidArray(this, "Cannot leverage INDArray to new workspace");
    if (!isAttached())
        return this;

    MemoryWorkspace workspace = Nd4j.getMemoryManager().getCurrentWorkspace();
    if (workspace == null) {
        return this.detach();
    }

    MemoryWorkspace parentWorkspace = workspace.getParentWorkspace();

    if (this.data.getParentWorkspace() == parentWorkspace)
        return this;

    // if there's no parent ws - just detach
    if (parentWorkspace == null)
        return this.detach();
    else {
        Nd4j.getExecutioner().commit();

        // temporary set parent ws as current ws
        Nd4j.getMemoryManager().setCurrentWorkspace(parentWorkspace);

        INDArray copy = null;
        if (!this.isView()) {
            Nd4j.getExecutioner().commit();
            DataBuffer buffer = Nd4j.createBuffer(this.length(), false);
            Nd4j.getMemoryManager().memcpy(buffer, this.data());

            copy = Nd4j.createArrayFromShapeBuffer(buffer, this.shapeInfoDataBuffer());
        } else {
            copy = this.dup(this.ordering());
            Nd4j.getExecutioner().commit();
        }

        // restore current ws
        Nd4j.getMemoryManager().setCurrentWorkspace(workspace);
        return copy;
    }
}
 
Example 17
Source File: NDArray.java    From nd4j with Apache License 2.0
/**
 * This method does a direct array copy. It cannot be used on views or arrays with mixed orders.
 *
 * PLEASE NOTE: YOU SHOULD NEVER USE THIS METHOD UNLESS YOU ARE 100% CLEAR ABOUT WHAT IT DOES
 *
 * @return a copy of this array backed by a freshly allocated buffer
 */
@Override
public INDArray unsafeDuplication() {
    WorkspaceUtils.assertValidArray(this, "Cannot duplicate array");
    if (isView())
        return this.dup(this.ordering());

    DataBuffer rb = Nd4j.getMemoryManager().getCurrentWorkspace() == null ? Nd4j.getDataBufferFactory().createSame(this.data, false) : Nd4j.getDataBufferFactory().createSame(this.data, false, Nd4j.getMemoryManager().getCurrentWorkspace());

    INDArray ret = Nd4j.createArrayFromShapeBuffer(rb, this.shapeInfoDataBuffer());

    val perfD = PerformanceTracker.getInstance().helperStartTransaction();

    Pointer.memcpy(ret.data().addressPointer(), this.data().addressPointer(), this.data().length() * this.data().getElementSize());

    PerformanceTracker.getInstance().helperRegisterTransaction(0, perfD, this.data().length() * this.data().getElementSize(), MemcpyDirection.HOST_TO_HOST);

    return ret;
}
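A hypothetical caller-side sketch (unsafeDuplication() is overridden here, so it is declared on an ND4J supertype; for views the method above simply falls back to dup(ordering())):

// Raw copy of a non-view array: same shape info, freshly copied data buffer.
INDArray source = Nd4j.create(new float[] {1f, 2f, 3f, 4f});
INDArray copy = source.unsafeDuplication();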
 
Example 18
Source File: IntDataBufferTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testBasicSerde1() throws Exception {


    DataBuffer dataBuffer = Nd4j.createBuffer(new int[] {1, 2, 3, 4, 5});
    DataBuffer shapeBuffer = Nd4j.getShapeInfoProvider().createShapeInformation(new long[] {1, 5}, DataType.INT).getFirst();
    INDArray intArray = Nd4j.createArrayFromShapeBuffer(dataBuffer, shapeBuffer);

    File tempFile = File.createTempFile("test", "test");
    tempFile.deleteOnExit();

    Nd4j.saveBinary(intArray, tempFile);

    InputStream stream = new FileInputStream(tempFile);
    BufferedInputStream bis = new BufferedInputStream(stream);
    DataInputStream dis = new DataInputStream(bis);

    INDArray loaded = Nd4j.read(dis);

    assertEquals(DataType.INT, loaded.data().dataType());
    assertEquals(DataType.LONG, loaded.shapeInfoDataBuffer().dataType());

    assertEquals(intArray.data().length(), loaded.data().length());

    assertArrayEquals(intArray.data().asInt(), loaded.data().asInt());
}
 
Example 19
Source File: JCublasNDArray.java    From nd4j with Apache License 2.0
@Override
    public INDArray leverageTo(String id) {
        if (!isAttached()) {
//            log.info("Skipping detached");
            return this;
        }

        if (!Nd4j.getWorkspaceManager().checkIfWorkspaceExists(id)) {
//            log.info("Skipping non-existent");
            return this;
        }

        WorkspaceUtils.assertValidArray(this, "Cannot leverage INDArray to new workspace");

        MemoryWorkspace current = Nd4j.getMemoryManager().getCurrentWorkspace();

        MemoryWorkspace target = Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(id);

        if (current == target) {
//            log.info("Skipping equals A");
            return this;
        }

        if (this.data.getParentWorkspace() == target) {
//            log.info("Skipping equals B");
            return this;
        }

        Nd4j.getMemoryManager().setCurrentWorkspace(target);

//        log.info("Leveraging...");

        INDArray copy = null;
        if (!this.isView()) {
        //if (1 < 0) {
            Nd4j.getExecutioner().commit();

            DataBuffer buffer = Nd4j.createBuffer(this.lengthLong(), false);

            AllocationPoint pointDst = AtomicAllocator.getInstance().getAllocationPoint(buffer);
            AllocationPoint pointSrc = AtomicAllocator.getInstance().getAllocationPoint(this.data);

            CudaContext context = AtomicAllocator.getInstance().getFlowController().prepareAction(pointDst, pointSrc);
/*
            if (NativeOpsHolder.getInstance().getDeviceNativeOps().memsetAsync(pointDst.getDevicePointer(), 0, 1, 0, context.getOldStream()) == 0)
                throw new ND4JIllegalStateException("memsetAsync 1 failed");

            context.syncOldStream();

            if (NativeOpsHolder.getInstance().getDeviceNativeOps().memsetAsync(pointSrc.getDevicePointer(), 0, 1, 0, context.getOldStream()) == 0)
                throw new ND4JIllegalStateException("memsetAsync 2 failed");

            context.syncOldStream();
*/

            MemcpyDirection direction = MemcpyDirection.DEVICE_TO_DEVICE;
            val perfD = PerformanceTracker.getInstance().helperStartTransaction();

            if (pointSrc.isActualOnDeviceSide()) {
                if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getDevicePointer(), this.lengthLong() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyDeviceToDevice, context.getOldStream()) == 0)
                    throw new ND4JIllegalStateException("memcpyAsync failed");
            } else {
                if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getHostPointer(), this.lengthLong() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyHostToDevice, context.getOldStream()) == 0)
                    throw new ND4JIllegalStateException("memcpyAsync failed");

                direction = MemcpyDirection.HOST_TO_DEVICE;
            }

            context.syncOldStream();

            PerformanceTracker.getInstance().helperRegisterTransaction(pointDst.getDeviceId(), perfD, pointSrc.getNumberOfBytes(), direction);

            copy = Nd4j.createArrayFromShapeBuffer(buffer, this.shapeInfoDataBuffer());

            // tag buffer as valid on device side
            pointDst.tickHostRead();
            pointDst.tickDeviceWrite();

            AtomicAllocator.getInstance().getFlowController().registerAction(context, pointDst, pointSrc);
        } else {
            copy = this.dup(this.ordering());

            Nd4j.getExecutioner().commit();
        }

        Nd4j.getMemoryManager().setCurrentWorkspace(current);

        return copy;
    }
 
Example 20
Source File: DefaultOpExecutioner.java    From nd4j with Apache License 2.0
@Override
public INDArray bitmapEncode(INDArray indArray, double threshold) {
    // one int stores 16 encoded elements (2 bits each); the extra ints leave room for the header
    DataBuffer buffer = Nd4j.getDataBufferFactory().createInt(indArray.length() / 16 + 5);

    INDArray ret = Nd4j.createArrayFromShapeBuffer(buffer, indArray.shapeInfoDataBuffer());

    bitmapEncode(indArray, ret, threshold);

    return ret;
}
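Putting Example 1 and Example 20 together, a hedged round-trip sketch (the calls mirror those shown above; recall that bitmapEncode() leaves the residual in its input array):

// Encode with a threshold, then decode into a fresh target array.
INDArray source = Nd4j.create(new double[] {0.0, 1e-3, -1e-3, 5e-4, 0.0});
INDArray encoded = Nd4j.getExecutioner().bitmapEncode(source, 1e-3);
INDArray target = Nd4j.create(5);
Nd4j.getExecutioner().bitmapDecode(encoded, target);
// `target` now holds the thresholded values; `source` holds the residual.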