Java Code Examples for org.nd4j.linalg.api.buffer.DataBuffer#getElementSize()

The following examples show how to use org.nd4j.linalg.api.buffer.DataBuffer#getElementSize(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: NumpyArray.java    From deeplearning4j with Apache License 2.0 6 votes vote down vote up
public NumpyArray(INDArray nd4jArray) {
    // Make sure the host-side copy of the data is current before exposing its raw address.
    Nd4j.getAffinityManager().ensureLocation(nd4jArray, AffinityManager.Location.HOST);

    DataBuffer dataBuffer = nd4jArray.data();
    address = dataBuffer.pointer().address();
    shape = nd4jArray.shape();

    // ND4J strides are expressed in elements; numpy expects them in bytes.
    long[] elementStrides = nd4jArray.stride();
    int elementSize = dataBuffer.getElementSize();
    strides = new long[elementStrides.length];
    for (int dim = 0; dim < elementStrides.length; dim++) {
        strides[dim] = elementStrides[dim] * elementSize;
    }

    dtype = nd4jArray.dataType();
    this.nd4jArray = nd4jArray;

    // Cache the array under a key derived from its memory layout so later lookups can reuse it.
    String cacheKey = address + "_" + nd4jArray.length() + "_" + dtype + "_" + ArrayUtils.toString(strides);
    arrayCache.put(cacheKey, nd4jArray);
}
 
Example 2
Source File: BaseCudaDataBuffer.java    From nd4j with Apache License 2.0 5 votes vote down vote up
public BaseCudaDataBuffer(@NonNull DataBuffer underlyingBuffer, long length, long offset) {
    //this(length, underlyingBuffer.getElementSize(), offset);
    this.allocationMode = AllocationMode.LONG_SHAPE;
    initTypeAndSize();
    this.wrappedDataBuffer = underlyingBuffer;
    this.originalBuffer = underlyingBuffer.originalDataBuffer() == null ? underlyingBuffer
                    : underlyingBuffer.originalDataBuffer();
    this.length = length;
    this.offset = offset;
    this.originalOffset = offset;
    this.trackingPoint = underlyingBuffer.getTrackingPoint();
    this.elementSize = (byte) underlyingBuffer.getElementSize();
    this.allocationPoint = ((BaseCudaDataBuffer) underlyingBuffer).allocationPoint;

    if (underlyingBuffer.dataType() == Type.DOUBLE) {
        this.pointer = new CudaPointer(allocationPoint.getPointers().getHostPointer(), originalBuffer.length()).asDoublePointer();
        indexer = DoubleIndexer.create((DoublePointer) pointer);
    } else if (underlyingBuffer.dataType() == Type.FLOAT) {
        this.pointer = new CudaPointer(allocationPoint.getPointers().getHostPointer(), originalBuffer.length()).asFloatPointer();
        indexer = FloatIndexer.create((FloatPointer) pointer);
    } else if (underlyingBuffer.dataType() == Type.INT) {
        this.pointer = new CudaPointer(allocationPoint.getPointers().getHostPointer(), originalBuffer.length()).asIntPointer();
        indexer = IntIndexer.create((IntPointer) pointer);
    } else if (underlyingBuffer.dataType() == Type.HALF) {
        this.pointer = new CudaPointer(allocationPoint.getPointers().getHostPointer(), originalBuffer.length()).asShortPointer();
        indexer = HalfIndexer.create((ShortPointer) pointer);
    } else if (underlyingBuffer.dataType() == Type.LONG) {
        this.pointer = new CudaPointer(allocationPoint.getPointers().getHostPointer(), originalBuffer.length()).asLongPointer();
        indexer = LongIndexer.create((LongPointer) pointer);
    }
}
 
Example 3
Source File: CudaZeroHandler.java    From nd4j with Apache License 2.0 5 votes vote down vote up
/**
 * PLEASE NOTE: This method always returns pointer within OS memory space
 *
 * Synchronizes with the owning device first, then wraps the host pointer with a
 * JavaCPP pointer typed according to the buffer's data type.
 *
 * @param buffer buffer whose host pointer is requested
 * @return a typed pointer positioned at the buffer's byte offset
 */
@Override
public org.bytedeco.javacpp.Pointer getHostPointer(DataBuffer buffer) {
    AllocationPoint dstPoint = ((BaseCudaDataBuffer) buffer).getAllocationPoint();

    if (dstPoint.getPointers().getHostPointer() == null) {
        // Dump allocation state before failing so the cause is diagnosable from logs.
        log.info("DevicePointer: " + dstPoint.getPointers().getDevicePointer());
        log.info("HostPointer: " + dstPoint.getPointers().getHostPointer());
        log.info("AllocStatus: " + dstPoint.getAllocationStatus());
        throw new RuntimeException("pointer is null");
    }

    // Make any pending device-side work on this allocation visible on the host.
    synchronizeThreadDevice(Thread.currentThread().getId(), dstPoint.getDeviceId(), dstPoint);

    // length is passed along purely for CudaPointer constructor compatibility;
    // the byte offset positions the pointer at this buffer's view window.
    CudaPointer hostPtr = new CudaPointer(dstPoint.getPointers().getHostPointer(), buffer.length(),
                    buffer.offset() * buffer.getElementSize());

    switch (buffer.dataType()) {
        case DOUBLE:
            return hostPtr.asDoublePointer();
        case FLOAT:
            return hostPtr.asFloatPointer();
        case INT:
            return hostPtr.asIntPointer();
        case HALF:
            return hostPtr.asShortPointer();
        case LONG:
            return hostPtr.asLongPointer();
        default:
            return hostPtr;
    }
}
 
Example 4
Source File: NumpyArray.java    From deeplearning4j with Apache License 2.0 5 votes vote down vote up
private void setND4JArray() {
    // Total element count is the product of all dimensions.
    long size = 1;
    for (long dim : shape) {
        size *= dim;
    }

    String cacheKey = address + "_" + size + "_" + dtype + "_" + ArrayUtils.toString(strides);
    nd4jArray = arrayCache.get(cacheKey);

    if (nd4jArray != null) {
        // Cache hit: only reshape when the cached view's shape differs from ours.
        if (!Arrays.equals(nd4jArray.shape(), shape)) {
            nd4jArray = nd4jArray.reshape(shape);
        }
    } else {
        // Cache miss: wrap the raw memory at `address` in an ND4J buffer (no copy).
        Pointer ptr = nativeOps.pointerForAddress(address);
        ptr = ptr.limit(size);
        ptr = ptr.capacity(size);
        DataBuffer buff = Nd4j.createBuffer(ptr, size, dtype);

        // Our strides are in bytes; ND4J expects element strides.
        int elementSize = buff.getElementSize();
        long[] elementStrides = new long[strides.length];
        for (int i = 0; i < strides.length; i++) {
            elementStrides[i] = strides[i] / elementSize;
        }

        nd4jArray = Nd4j.create(buff, shape, elementStrides, 0, Shape.getOrder(shape, elementStrides, 1), dtype);
        arrayCache.put(cacheKey, nd4jArray);
    }
    // Ensure the resulting array's data is resident on the host.
    Nd4j.getAffinityManager().ensureLocation(nd4jArray, AffinityManager.Location.HOST);
}
 
Example 5
Source File: CompressionDescriptor.java    From deeplearning4j with Apache License 2.0 5 votes vote down vote up
/**
 * Create a  compression descriptor from the given
 * data buffer elements
 * @param buffer the databuffer to create
 *               the compression descriptor from
 */
public CompressionDescriptor(DataBuffer buffer) {
    this.originalLength = buffer.length() * buffer.getElementSize();
    this.numberOfElements = buffer.length();
    this.originalElementSize = buffer.getElementSize();
    this.originalDataType = buffer.dataType();
}
 
Example 6
Source File: CudaZeroHandler.java    From nd4j with Apache License 2.0 4 votes vote down vote up
/**
 * PLEASE NOTE: Specific implementation, on systems without special devices can return HostPointer here
 *
 * Resolves the device-side pointer for {@code buffer}, first copying host data to the
 * device when the device-side copy is stale.
 *
 * @param buffer buffer whose device pointer is requested
 * @param context CUDA context used for any host-to-device relocation
 * @return a typed JavaCPP pointer positioned at the buffer's byte offset
 */
@Override
public org.bytedeco.javacpp.Pointer getDevicePointer(DataBuffer buffer, CudaContext context) {
    // TODO: It would be awesome to get rid of typecasting here
    //getCudaContext().syncOldStream();
    AllocationPoint dstPoint = ((BaseCudaDataBuffer) buffer).getAllocationPoint();

    //log.info("getDevicePointer called");
    /*
    if (configuration.getMemoryModel() == Configuration.MemoryModel.DELAYED && dstPoint.getAllocationStatus() == AllocationStatus.HOST) {
    
        // if we have constant buffer (aka shapeInfo or other constant stuff)
        if (buffer.isConstant()) {
            Nd4j.getConstantHandler().moveToConstantSpace(buffer);
        } else {
            PointersPair pair = memoryProvider.malloc(dstPoint.getShape(), dstPoint, AllocationStatus.DEVICE);
    
            if (pair != null) {
                Integer deviceId = getDeviceId();
    
                dstPoint.getPointers().setDevicePointer(pair.getDevicePointer());
                dstPoint.setAllocationStatus(AllocationStatus.DEVICE);
    
                deviceAllocations.get(deviceId).put(dstPoint.getObjectId(), dstPoint.getObjectId());
    
                zeroAllocations.get(dstPoint.getBucketId()).remove(dstPoint.getObjectId());
                deviceMemoryTracker.addToAllocation(Thread.currentThread().getId(), deviceId, AllocationUtils.getRequiredMemory(dstPoint.getShape()));
    
    
                dstPoint.tickHostWrite();
            }
        }
    }
    */
    // here's the place, where we do care about promotion. but we only care about promotion of original  buffers
    // NOTE(review): the `1 < 0` clause below is always false, so this promotion path is
    // permanently disabled — presumably intentional (kept for reference); confirm before
    // removing or re-enabling it.
    if (dstPoint.getAllocationStatus() == AllocationStatus.HOST && buffer.offset() == 0 && 1 < 0) {
        if (dstPoint.getDeviceTicks() > configuration.getMinimumRelocationThreshold()) {
            // at this point we know, that this request is done within some existent context
            long requiredMemory = AllocationUtils.getRequiredMemory(dstPoint.getShape());
            if (deviceMemoryTracker.reserveAllocationIfPossible(Thread.currentThread().getId(), getDeviceId(),
                            requiredMemory) && pingDeviceForFreeMemory(getDeviceId(), requiredMemory)) {
                // so, memory is reserved
                promoteObject(buffer);
            }
        }
    }


    // if that's device state, we probably might want to update device memory state
    if (dstPoint.getAllocationStatus() == AllocationStatus.DEVICE) {
        if (!dstPoint.isActualOnDeviceSide()) {
            // Host copy is newer: push the data to the device before handing out the pointer.
            relocate(AllocationStatus.HOST, AllocationStatus.DEVICE, dstPoint, dstPoint.getShape(), context);
        } else {
            //  log.info("Buffer is actual on device side: " + dstPoint.getShape());
        }
    } //else log.info("Not on [DEVICE]");


    //  we update memory use counter, to announce that it's somehow used on device
    dstPoint.tickDeviceRead();

    // return pointer with offset if needed. length is specified for constructor compatibility purposes
    CudaPointer p = new CudaPointer(dstPoint.getPointers().getDevicePointer(), buffer.length(),
                    (buffer.offset() * buffer.getElementSize()));
    // Re-type the raw pointer according to the buffer's element type.
    switch (buffer.dataType()) {
        case DOUBLE:
            return p.asDoublePointer();
        case FLOAT:
            return p.asFloatPointer();
        case INT:
            return p.asIntPointer();
        case HALF:
            return p.asShortPointer();
        case LONG:
            return p.asLongPointer();
        default:
            return p;
    }
}
 
Example 7
Source File: BaseCudaDataBuffer.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
/**
 * Creates a view of an existing CUDA buffer: no new data is allocated; this buffer
 * shares the underlying native storage, starting at {@code offset} elements from the
 * beginning and spanning {@code length} elements.
 *
 * @param underlyingBuffer the buffer to create a view of; must not be closed
 * @param length view length, in elements
 * @param offset view start, in elements from the beginning of the underlying buffer
 * @throws IllegalStateException if the underlying buffer was already released
 */
public BaseCudaDataBuffer(@NonNull DataBuffer underlyingBuffer, long length, long offset) {
    if (underlyingBuffer.wasClosed())
        throw new IllegalStateException("You can't use DataBuffer once it was released");

    //this(length, underlyingBuffer.getElementSize(), offset);
    this.allocationMode = AllocationMode.MIXED_DATA_TYPES;
    initTypeAndSize();
    this.wrappedDataBuffer = underlyingBuffer;
    // Resolve the root buffer so chained views all reference the real allocation.
    this.originalBuffer = underlyingBuffer.originalDataBuffer() == null ? underlyingBuffer
                    : underlyingBuffer.originalDataBuffer();
    this.length = length;
    this.offset = offset;
    this.originalOffset = offset;
    this.elementSize = (byte) underlyingBuffer.getElementSize();

    // in case of view creation, we initialize underlying buffer regardless of anything
    ((BaseCudaDataBuffer) underlyingBuffer).lazyAllocateHostPointer();

    // we're creating view of the native DataBuffer; length/offset are converted to bytes here
    ptrDataBuffer = ((BaseCudaDataBuffer) underlyingBuffer).ptrDataBuffer.createView(length * underlyingBuffer.getElementSize(), offset * underlyingBuffer.getElementSize());
    this.allocationPoint = new AllocationPoint(ptrDataBuffer, length);
    val hostPointer = allocationPoint.getHostPointer();

    // Register this view with the deallocator service so native resources get reclaimed.
    Nd4j.getDeallocatorService().pickObject(this);

    // Wrap the shared host pointer with a JavaCPP pointer/indexer matching the data type.
    switch (underlyingBuffer.dataType()) {
        case DOUBLE:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asDoublePointer();
            indexer = DoubleIndexer.create((DoublePointer) pointer);
            break;
        case FLOAT:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asFloatPointer();
            indexer = FloatIndexer.create((FloatPointer) pointer);
            break;
        case UINT32:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asIntPointer();
            indexer = UIntIndexer.create((IntPointer) pointer);
            break;
        case INT:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asIntPointer();
            indexer = IntIndexer.create((IntPointer) pointer);
            break;
        case BFLOAT16:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asShortPointer();
            indexer = Bfloat16Indexer.create((ShortPointer) pointer);
            break;
        case HALF:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asShortPointer();
            indexer = HalfIndexer.create((ShortPointer) pointer);
            break;
        case UINT64: //Fall through
        case LONG:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asLongPointer();
            indexer = LongIndexer.create((LongPointer) pointer);
            break;
        case UINT16:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asShortPointer();
            indexer = UShortIndexer.create((ShortPointer) pointer);
            break;
        case SHORT:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asShortPointer();
            indexer = ShortIndexer.create((ShortPointer) pointer);
            break;
        case BOOL:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asBooleanPointer();
            indexer = BooleanIndexer.create((BooleanPointer) pointer);
            break;
        case BYTE:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asBytePointer();
            indexer = ByteIndexer.create((BytePointer) pointer);
            break;
        case UBYTE:
            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asBytePointer();
            indexer = UByteIndexer.create((BytePointer) pointer);
            break;
        case UTF8:
            // String buffers have variable-length elements, so a sub-view cannot be expressed.
            Preconditions.checkArgument(offset == 0, "String array can't be a view");

            this.pointer = new CudaPointer(hostPointer, originalBuffer.length()).asBytePointer();
            indexer = ByteIndexer.create((BytePointer) pointer);
            break;
        default:
            throw new UnsupportedOperationException();
    }
}
 
Example 8
Source File: NumpyArray.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
@Override
public INDArray toJava(PythonObject pythonObject) {
    log.info("Converting PythonObject to INDArray...");

    // Verify the object really is a numpy.ndarray before touching its internals.
    PyObject numpyModule = PyImport_ImportModule("numpy");
    PyObject ndarrayType = PyObject_GetAttrString(numpyModule, "ndarray");
    boolean isNdarray = PyObject_IsInstance(pythonObject.getNativePythonObject(), ndarrayType) == 1;
    Py_DecRef(ndarrayType);
    Py_DecRef(numpyModule);
    if (!isNdarray) {
        throw new PythonException("Object is not a numpy array! Use Python.ndarray() to convert object to a numpy array.");
    }

    PyArrayObject npArray = new PyArrayObject(pythonObject.getNativePythonObject());

    long[] shape = new long[PyArray_NDIM(npArray)];
    SizeTPointer shapePointer = PyArray_SHAPE(npArray);
    if (shapePointer != null)
        shapePointer.get(shape, 0, shape.length);

    long[] strides = new long[shape.length];
    SizeTPointer stridesPointer = PyArray_STRIDES(npArray);
    if (stridesPointer != null)
        stridesPointer.get(strides, 0, strides.length);

    // Map the numpy type code onto the corresponding ND4J DataType.
    int npdtype = PyArray_TYPE(npArray);
    DataType dtype;
    switch (npdtype) {
        case NPY_DOUBLE:
            dtype = DataType.DOUBLE;
            break;
        case NPY_FLOAT:
            dtype = DataType.FLOAT;
            break;
        case NPY_SHORT:
            dtype = DataType.SHORT;
            break;
        case NPY_INT:
            dtype = DataType.INT32;
            break;
        case NPY_LONG:
            dtype = DataType.INT64;
            break;
        case NPY_UINT:
            dtype = DataType.UINT32;
            break;
        case NPY_BYTE:
            dtype = DataType.INT8;
            break;
        case NPY_UBYTE:
            dtype = DataType.UINT8;
            break;
        case NPY_BOOL:
            dtype = DataType.BOOL;
            break;
        case NPY_HALF:
            dtype = DataType.FLOAT16;
            break;
        case NPY_LONGLONG:
            dtype = DataType.INT64;
            break;
        case NPY_USHORT:
            dtype = DataType.UINT16;
            break;
        case NPY_ULONG:
        case NPY_ULONGLONG:
            dtype = DataType.UINT64;
            break;
        default:
            throw new PythonException("Unsupported array data type: " + npdtype);
    }

    // Element count is the product of all dimensions.
    long size = 1;
    for (long dim : shape) {
        size *= dim;
    }

    long address = PyArray_DATA(npArray).address();
    String key = address + "_" + size + "_" + dtype;
    DataBuffer buff = cache.get(key);
    if (buff == null) {
        // Wrap the numpy data in-place (no copy); keep the buffer outside any workspace.
        try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) {
            Pointer ptr = NativeOpsHolder.getInstance().getDeviceNativeOps().pointerForAddress(address);
            ptr = ptr.limit(size);
            ptr = ptr.capacity(size);
            buff = Nd4j.createBuffer(ptr, size, dtype);
            cache.put(key, buff);
        }
    }

    // numpy strides are in bytes; ND4J expects element strides.
    int elemSize = buff.getElementSize();
    long[] nd4jStrides = new long[strides.length];
    for (int i = 0; i < strides.length; i++) {
        nd4jStrides[i] = strides[i] / elemSize;
    }

    INDArray ret = Nd4j.create(buff, shape, nd4jStrides, 0, Shape.getOrder(shape, nd4jStrides, 1), dtype);
    Nd4j.getAffinityManager().tagLocation(ret, AffinityManager.Location.HOST);
    log.info("Done.");
    return ret;
}
 
Example 9
Source File: NoOp.java    From nd4j with Apache License 2.0 3 votes vote down vote up
@Override
public DataBuffer compress(DataBuffer buffer) {
    // "No-op" compression: record the descriptor and copy the bytes unchanged.
    CompressionDescriptor descriptor = new CompressionDescriptor(buffer, this);

    long byteLength = buffer.length() * buffer.getElementSize();
    BytePointer pointer = new BytePointer(byteLength);
    CompressedDataBuffer compressed = new CompressedDataBuffer(pointer, descriptor);

    Nd4j.getMemoryManager().memcpy(compressed, buffer);
    return compressed;
}
 
Example 10
Source File: NoOp.java    From deeplearning4j with Apache License 2.0 3 votes vote down vote up
@Override
public DataBuffer compress(DataBuffer buffer) {
    // NoOp "compression" simply mirrors the source bytes into a CompressedDataBuffer.
    CompressionDescriptor descriptor = new CompressionDescriptor(buffer, this);
    BytePointer target = new BytePointer(buffer.getElementSize() * buffer.length());
    CompressedDataBuffer result = new CompressedDataBuffer(target, descriptor);
    Nd4j.getMemoryManager().memcpy(result, buffer);
    return result;
}
 
Example 11
Source File: CompressionDescriptor.java    From nd4j with Apache License 2.0 2 votes vote down vote up
/**
 * Create a  compression descriptor from the given
 * data buffer elements
 * @param buffer the databuffer to create
 *               the compression descriptor from
 */
public CompressionDescriptor(DataBuffer buffer) {
    this.originalLength = buffer.length() * buffer.getElementSize();
    this.numberOfElements = buffer.length();
    this.originalElementSize = buffer.getElementSize();
}