Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#isView()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#isView(). Each example is taken from an open source project; the source file and license are noted above the code.
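
Before diving into the examples, a minimal sketch of what isView() actually reports: slicing operations such as getRow() typically return views that share the parent array's buffer, while dup() materializes a standalone copy. (The snippet below is illustrative and assumes a recent ND4J release; exact view semantics can vary between versions.)

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class IsViewDemo {
    public static void main(String[] args) {
        INDArray matrix = Nd4j.linspace(1, 12, 12).reshape(3, 4);

        INDArray row = matrix.getRow(1);    // shares the parent's buffer
        System.out.println(row.isView());   // typically true

        INDArray copy = row.dup();          // detached copy with its own buffer
        System.out.println(copy.isView());  // false
    }
}
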
Example 1
Source File: JCublasNDArrayFactory.java    From deeplearning4j with Apache License 2.0
/**
 * This method converts a single/double precision data buffer to a half-precision data buffer.
 *
 * @param typeSrc source data type
 * @param source  array whose buffer should be converted
 * @param typeDst target data type
 * @return the source array, backed by the converted buffer
 */
@Override
public INDArray convertDataEx(DataTypeEx typeSrc, INDArray source, DataTypeEx typeDst) {
    if (source.isView())
        throw new UnsupportedOperationException("Impossible to compress View. Consider using dup() before. ");

    DataBuffer buffer = convertDataEx(typeSrc, source.data(), typeDst);
    source.setData(buffer);

    if (buffer instanceof CompressedDataBuffer)
        source.markAsCompressed(true);
    else
        source.markAsCompressed(false);

    return source;
}
 
Example 2
Source File: JCublasNDArrayFactory.java    From nd4j with Apache License 2.0
/**
 * This method converts a single/double precision data buffer to a half-precision data buffer.
 *
 * @param typeSrc source data type
 * @param source  array whose buffer should be converted
 * @param typeDst target data type
 * @return the source array, backed by the converted buffer
 */
@Override
public INDArray convertDataEx(DataBuffer.TypeEx typeSrc, INDArray source, DataBuffer.TypeEx typeDst) {
    if (source.isView())
        throw new UnsupportedOperationException("Impossible to compress View. Consider using dup() before. ");

    DataBuffer buffer = convertDataEx(typeSrc, source.data(), typeDst);
    source.setData(buffer);

    if (buffer instanceof CompressedDataBuffer)
        source.markAsCompressed(true);
    else
        source.markAsCompressed(false);

    return source;
}
 
Example 3
Source File: TimeSeriesUtils.java    From deeplearning4j with Apache License 2.0
/**
 * Reverse an input time series along the time dimension
 *
 * @param in Input activations to reverse, with shape [minibatch, size, timeSeriesLength]
 * @return Reversed activations
 */
public static INDArray reverseTimeSeries(INDArray in){
    if(in == null){
        return null;
    }

    if(in.ordering() != 'f' || in.isView() || !Shape.strideDescendingCAscendingF(in)){
        in = in.dup('f');
    }

    int[] idxs = new int[(int) in.size(2)];
    int j=0;
    for( int i=idxs.length-1; i>=0; i--){
        idxs[j++] = i;
    }

    INDArray inReshape = in.reshape('f', in.size(0)*in.size(1), in.size(2));

    INDArray outReshape = Nd4j.pullRows(inReshape, 0, idxs, 'f');
    return outReshape.reshape('f', in.size(0), in.size(1), in.size(2));
}
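
For context, a hypothetical call site for the utility above (assuming deeplearning4j's TimeSeriesUtils is on the classpath; the shapes are illustrative):

// Activations with shape [minibatch=2, size=3, timeSeriesLength=4]
INDArray activations = Nd4j.arange(24).reshape('c', 2, 3, 4);
INDArray reversed = TimeSeriesUtils.reverseTimeSeries(activations);
// step t in 'reversed' corresponds to step (timeSeriesLength - 1 - t) in 'activations'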
 
Example 4
Source File: RowVectorSerializer.java    From deeplearning4j with Apache License 2.0
@Override
public void serialize(INDArray array, JsonGenerator jsonGenerator, SerializerProvider serializerProvider)
                throws IOException {
    if (array.isView()) {
        array = array.dup();
    }
    double[] dArr = array.data().asDouble();
    jsonGenerator.writeObject(dArr);
}
 
Example 5
Source File: VectorSerializer.java    From nd4j with Apache License 2.0
@Override
public void serialize(INDArray indArray, JsonGenerator jsonGenerator, SerializerProvider serializerProvider)
                throws IOException {
    if (indArray.isView())
        indArray = indArray.dup(indArray.ordering());
    jsonGenerator.writeStartObject();
    DataBuffer view = indArray.data();
    jsonGenerator.writeArrayFieldStart("dataBuffer");
    for (int i = 0; i < view.length(); i++) {
        jsonGenerator.writeNumber(view.getDouble(i));
    }

    jsonGenerator.writeEndArray();

    jsonGenerator.writeArrayFieldStart("shapeField");
    for (int i = 0; i < indArray.rank(); i++) {
        jsonGenerator.writeNumber(indArray.size(i));
    }
    jsonGenerator.writeEndArray();

    jsonGenerator.writeArrayFieldStart("strideField");
    for (int i = 0; i < indArray.rank(); i++)
        jsonGenerator.writeNumber(indArray.stride(i));
    jsonGenerator.writeEndArray();

    jsonGenerator.writeNumberField("offsetField", indArray.offset());
    jsonGenerator.writeStringField("typeField", indArray instanceof IComplexNDArray ? "complex" : "real");
    jsonGenerator.writeNumberField("rankField", indArray.rank());
    jsonGenerator.writeNumberField("numElements", view.length());
    jsonGenerator.writeStringField("orderingField", String.valueOf(indArray.ordering()));
    jsonGenerator.writeEndObject();
}
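
A serializer like the one above is typically registered with Jackson through a module; a minimal sketch (the no-argument VectorSerializer constructor is an assumption here, and checked exceptions are omitted):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;

ObjectMapper mapper = new ObjectMapper();
SimpleModule module = new SimpleModule();
module.addSerializer(INDArray.class, new VectorSerializer()); // assumed no-arg constructor
mapper.registerModule(module);
String json = mapper.writeValueAsString(Nd4j.create(new double[]{1, 2, 3}));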
 
Example 6
Source File: RowVectorSerializer.java    From nd4j with Apache License 2.0
@Override
public void serialize(INDArray array, JsonGenerator jsonGenerator, SerializerProvider serializerProvider)
                throws IOException {
    if (array.isView()) {
        array = array.dup();
    }
    double[] dArr = array.data().asDouble();
    jsonGenerator.writeObject(dArr);
}
 
Example 7
Source File: AbstractCompressor.java    From nd4j with Apache License 2.0
/**
 * In-place compression of an INDArray.
 *
 * @param array the array to compress in place
 */
@Override
public void compressi(INDArray array) {
    // TODO: lift this restriction
    if (array.isView())
        throw new UnsupportedOperationException("Impossible to apply inplace compression on View");

    array.setData(compress(array.data()));
    array.markAsCompressed(true);
}
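
The view restriction above leads to a common guard pattern at call sites: materialize a copy before any in-place operation. A sketch (the slicing is illustrative):

import org.nd4j.linalg.indexing.NDArrayIndex;

INDArray big = Nd4j.rand(100, 50);
INDArray slice = big.get(NDArrayIndex.interval(0, 10), NDArrayIndex.all()); // a view
INDArray safe = slice.isView() ? slice.dup() : slice; // owns its buffer, safe for in-place ops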
 
Example 8
Source File: CpuMemoryManager.java    From nd4j with Apache License 2.0
@Override
public void memset(INDArray array) {
    if (array.isView()) {
        array.assign(0.0);
        return;
    }

    Pointer.memset(array.data().addressPointer(), 0, array.data().length() * Nd4j.sizeOfDataType(array.data().dataType()));
}
 
Example 9
Source File: TimeSeriesUtils.java    From deeplearning4j with Apache License 2.0
/**
 * Reverse an input time series along the time dimension
 *
 * @param in Input activations to reverse, with shape [minibatch, size, timeSeriesLength]
 * @return Reversed activations
 */
public static INDArray reverseTimeSeries(INDArray in, LayerWorkspaceMgr workspaceMgr, ArrayType arrayType){
    if(in == null){
        return null;
    }

    if(in.ordering() != 'f' || in.isView() || !Shape.strideDescendingCAscendingF(in)){
        in = workspaceMgr.dup(arrayType, in, 'f');
    }

    if (in.size(2) > Integer.MAX_VALUE)
        throw new ND4JArraySizeException();
    int[] idxs = new int[(int) in.size(2)];
    int j=0;
    for( int i=idxs.length-1; i>=0; i--){
        idxs[j++] = i;
    }

    INDArray inReshape = in.reshape('f', in.size(0)*in.size(1), in.size(2));

    INDArray outReshape = workspaceMgr.create(arrayType, in.dataType(), new long[]{inReshape.size(0), idxs.length}, 'f');
    Nd4j.pullRows(inReshape, outReshape, 0, idxs);
    return workspaceMgr.leverageTo(arrayType, outReshape.reshape('f', in.size(0), in.size(1), in.size(2)));

    /*
    INDArray out = Nd4j.createUninitialized(in.shape(), 'f');
    CustomOp op = DynamicCustomOp.builder("reverse")
            .addIntegerArguments(new int[]{0,1})
            .addInputs(in)
            .addOutputs(out)
            .callInplace(false)
            .build();
    Nd4j.getExecutioner().exec(op);
    return out;
    */
}
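
A hedged sketch of how this overload might be invoked outside of a real workspace setup (LayerWorkspaceMgr.noWorkspaces() and ArrayType.ACTIVATIONS are standard deeplearning4j API, but the shapes are illustrative):

LayerWorkspaceMgr mgr = LayerWorkspaceMgr.noWorkspaces();
INDArray in = Nd4j.rand(new int[]{2, 3, 4}); // [minibatch, size, timeSeriesLength]
INDArray reversed = TimeSeriesUtils.reverseTimeSeries(in, mgr, ArrayType.ACTIVATIONS);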
 
Example 10
Source File: CudaMemoryManager.java    From nd4j with Apache License 2.0
/**
 * This method detaches off-heap memory from the passed INDArray instances, and optionally stores it in a cache for future reuse.
 * PLEASE NOTE: cache options depend on the specific implementation.
 *
 * @param arrays arrays whose off-heap memory should be released
 */
@Override
public void collect(INDArray... arrays) {
    // We basically want to free the off-heap memory without touching the INDArray itself,
    // so we don't care when the GC releases the object: the memory is already cached.

    Nd4j.getExecutioner().commit();

    int cnt = -1;
    AtomicAllocator allocator = AtomicAllocator.getInstance();
    for (INDArray array : arrays) {
        cnt++;
        // we don't collect views, since they don't have their own memory
        if (array == null || array.isView())
            continue;

        AllocationPoint point = allocator.getAllocationPoint(array);

        if (point.getAllocationStatus() == AllocationStatus.HOST)
            allocator.getMemoryHandler().free(point, AllocationStatus.HOST);
        else if (point.getAllocationStatus() == AllocationStatus.DEVICE) {
            allocator.getMemoryHandler().free(point, AllocationStatus.DEVICE);
            allocator.getMemoryHandler().free(point, AllocationStatus.HOST);
        } else if (point.getAllocationStatus() == AllocationStatus.DEALLOCATED) {
            // do nothing
        } else
            throw new RuntimeException(
                            "Unknown AllocationStatus: " + point.getAllocationStatus() + " for argument: " + cnt);

        point.setAllocationStatus(AllocationStatus.DEALLOCATED);
    }
}
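
Call sites normally reach collect() through the global memory manager rather than instantiating CudaMemoryManager directly; a minimal sketch:

INDArray temp = Nd4j.rand(1000, 1000);
// ... use temp ...
Nd4j.getMemoryManager().collect(temp); // frees off-heap memory; 'temp' must not be used afterwards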
 
Example 11
Source File: CpuMemoryManager.java    From deeplearning4j with Apache License 2.0
@Override
public void memset(INDArray array) {
    if (array.isView()) {
        array.assign(0.0);
        return;
    }

    Pointer.memset(array.data().addressPointer(), 0, array.data().length() * Nd4j.sizeOfDataType(array.data().dataType()));
}
 
Example 12
Source File: ND4JConverters.java    From konduit-serving with Apache License 2.0
public SerializedNDArray convert(INDArray from){
    if(from.isView() || from.ordering() != 'c' || !Shape.hasDefaultStridesForShape(from))
        from = from.dup('c');

    NDArrayType type = ND4JUtil.typeNd4jToNDArrayType(from.dataType());
    long[] shape = from.shape();
    ByteBuffer bb = from.data().asNio();

    return new SerializedNDArray(type, shape, bb);
}
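
The guard in convert() matters because asNio() exposes the backing buffer directly; for a view, that buffer may cover elements outside the view, and non-default strides would scramble the serialized layout. A sketch of the situation being avoided (shapes are illustrative):

INDArray parent = Nd4j.rand(4, 4);   // 16 elements in the backing buffer
INDArray view = parent.getRow(0);    // logically 4 elements, shared buffer
// serializing the view's buffer directly could leak or misorder elements, hence:
INDArray safe = view.dup('c');       // contiguous 4-element buffer matching the view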
 
Example 13
Source File: Nd4jMatrix.java    From jstarcraft-ai with Apache License 2.0
@Override
@Deprecated
// TODO: to be merged with dotProduct
public MathMatrix accumulateProduct(MathVector rowVector, MathVector columnVector, MathCalculator mode) {
    if (rowVector instanceof Nd4jVector && columnVector instanceof Nd4jVector) {
        Nd4jEnvironmentThread thread = EnvironmentThread.getThread(Nd4jEnvironmentThread.class);
        try (MemoryWorkspace workspace = thread.getSpace()) {
            INDArray leftArray = Nd4jVector.class.cast(rowVector).getArray();
            // TODO: this needs a better approach; otherwise there is a performance problem.
            if (leftArray.isView()) {
                // Copy here, because gemm does not support view vectors.
                leftArray = leftArray.dup();
            }
            if (leftArray.rows() == 1) {
                leftArray = leftArray.transpose();
            }
            INDArray rightArray = Nd4jVector.class.cast(columnVector).getArray();
            if (rightArray.isView()) {
                // Copy here, because gemm does not support view vectors.
                rightArray = rightArray.dup();
            }
            if (rightArray.columns() == 1) {
                rightArray = rightArray.transpose();
            }
            INDArray dataArray = this.getArray();
            INDArray cacheArray = Nd4j.zeros(dataArray.shape(), dataArray.ordering());
            leftArray.mmul(rightArray, cacheArray);
            dataArray.addi(cacheArray);
            return this;
        }
    } else {
        return MathMatrix.super.accumulateProduct(rowVector, columnVector, mode);
    }
}
 
Example 14
Source File: CudaMemoryManager.java    From deeplearning4j with Apache License 2.0
@Override
public void memset(INDArray array) {
    if (array.isView()) {
        array.assign(0.0);

        // we don't want any mGRID activations here
        Nd4j.getExecutioner().commit();
        return;
    }

    // we want to be sure we have no trails left in mGRID
    Nd4j.getExecutioner().push();

    AllocationPoint point = AtomicAllocator.getInstance().getAllocationPoint(array);

    if (point.getAllocationStatus() == AllocationStatus.DEVICE) {
        CudaContext context = AtomicAllocator.getInstance().getDeviceContext();
        NativeOpsHolder.getInstance().getDeviceNativeOps().memsetAsync(AtomicAllocator.getInstance().getPointer(array, context),0, array.data().length() * Nd4j.sizeOfDataType(array.data().dataType()),0, context.getOldStream());

        // we also memset host pointer
        Pointer.memset(AtomicAllocator.getInstance().getHostPointer(array), 0, array.data().length() * Nd4j.sizeOfDataType(array.data().dataType()));

        // better safe than sorry
        context.getOldStream().synchronize();
        point.tickDeviceWrite();
        point.tickHostRead();
    } else if (point.getAllocationStatus() == AllocationStatus.HOST) {
        Nd4j.getExecutioner().commit();

        // just casual memset
        Pointer.memset(AtomicAllocator.getInstance().getHostPointer(array), 0, array.data().length() * Nd4j.sizeOfDataType(array.data().dataType()));
        point.tickHostWrite();
    }
}
 
Example 15
Source File: Nd4jApacheAdapterUtils.java    From gatk-protected with BSD 3-Clause "New" or "Revised" License
/**
 * Converts a rank-2 INDArray to an Apache Commons Math matrix.
 *
 * @param matrix rank-2 INDArray
 * @return Apache matrix
 */
public static RealMatrix convertINDArrayToApacheMatrix(@Nonnull final INDArray matrix) {
    Utils.validateArg(matrix.rank() == 2, "Input rank is not 2 (not matrix)");
    final int[] shape = matrix.shape();
    final INDArray concreteMatrix = matrix.isView() ? matrix.dup() : matrix;
    final double[] data = concreteMatrix.data().asDouble();
    final char ordering = concreteMatrix.ordering();
    if (ordering == 'c') {
        return new BlockRealMatrix(monoToBiDiArrayRowMajor(data, shape[0], shape[1]));
    } else { /* ordering == 'f' */
        return new BlockRealMatrix(monoToBiDiArrayColumnMajor(data, shape[0], shape[1]));
    }
}
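
A hypothetical round trip through the converter above (assuming Apache Commons Math 3 and these utilities are on the classpath):

import org.apache.commons.math3.linear.RealMatrix;

INDArray nd = Nd4j.rand(3, 4);
RealMatrix apache = Nd4jApacheAdapterUtils.convertINDArrayToApacheMatrix(nd);
// entries match regardless of the INDArray's ordering
assert apache.getEntry(1, 2) == nd.getDouble(1, 2);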
 
Example 16
Source File: Nd4jVector.java    From jstarcraft-ai with Apache License 2.0
@Override
@Deprecated
// TODO: to be merged with dotProduct
public MathVector accumulateProduct(MathVector leftVector, MathMatrix rightMatrix, boolean transpose, MathCalculator mode) {
    if (leftVector instanceof Nd4jVector && rightMatrix instanceof Nd4jMatrix) {
        Nd4jEnvironmentThread thread = EnvironmentThread.getThread(Nd4jEnvironmentThread.class);
        try (MemoryWorkspace workspace = thread.getSpace()) {
            INDArray leftArray = Nd4jVector.class.cast(leftVector).getArray();
            if (leftArray.isView()) {
                // Copy here, because gemm does not support view vectors.
                leftArray = leftArray.dup();
            }
            if (leftArray.columns() == 1) {
                leftArray = leftArray.transpose();
            }
            INDArray rightArray = transpose ? Nd4jMatrix.class.cast(rightMatrix).getArray().transpose() : Nd4jMatrix.class.cast(rightMatrix).getArray();
            INDArray dataArray = this.getArray();
            INDArray cacheArray = Nd4j.zeros(dataArray.shape(), dataArray.ordering());
            leftArray.mmul(rightArray, cacheArray);
            dataArray.addi(cacheArray);
            // Nd4j.getBlasWrapper().level3().gemm(leftArray, rightArray, dataArray, false,
            // false, one, zero);
            return this;
        }
    } else {
        return MathVector.super.accumulateProduct(leftVector, rightMatrix, transpose, mode);
    }
}
 
Example 17
Source File: AssignRequestMessage.java    From nd4j with Apache License 2.0
public AssignRequestMessage(@NonNull Integer key, @NonNull INDArray array) {
    this();
    this.key = key;
    this.payload = array.isView() ? array.dup(array.ordering()) : array;
}
 
Example 18
Source File: CudnnConvolutionHelper.java    From deeplearning4j with Apache License 2.0
/**
 * @param poolingType Used when preparing data for subsampling layers ONLY; null for convolution layers.
 * @return forward-pass arguments, including any manually padded input
 */
public static CudnnForwardArgs getCudnnForwardArgs(INDArray input, int[] kernel, int[] strides, int[] padding, int[] dilation,
                                                   ConvolutionMode convolutionMode, PoolingType poolingType, CNN2DFormat format){
    INDArray origInput = input;

    //Check if we need to dup the input: views, non-contiguous, etc. CuDNN also seems to have issues if strides
    // are non-default for C order - even if they *should* be OK otherwise
    if(input.isView() || !Shape.hasDefaultStridesForShape(input)){
        input = input.dup('c');
    }

    boolean nchw = format == CNN2DFormat.NCHW;
    int hIdx = nchw ? 2 : 1;
    int wIdx = nchw ? 3 : 2;

    val inH = input.size(hIdx);
    val inW = input.size(wIdx);

    boolean manualPadBottom = false;
    boolean manualPadRight = false;

    int[] outSize;
    if (convolutionMode == ConvolutionMode.Same) {
        outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, null, convolutionMode, dilation, format); //Also performs validation
        padding = ConvolutionUtils.getSameModeTopLeftPadding(outSize, new int[] {(int) inH, (int) inW}, kernel, strides, dilation);
        int[] padBottomRight = ConvolutionUtils.getSameModeBottomRightPadding(outSize, new int[] {(int) inH, (int) inW}, kernel, strides, dilation);
        if(!Arrays.equals(padding, padBottomRight)){
            /*
            CuDNN - even as of 7.1 (CUDA 9.1) still doesn't have support for proper SAME mode padding (i.e., asymmetric
            padding) - padding can *only* be specified as the same amount for both the top/bottom, and for left/right.
            In SAME mode padding, sometimes these are the same - but often they are not.
            Note that when they differ, the bottom or right padding will be exactly 1 more than the top or left padding.
            As per TF, we'll manually pad here: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/conv_ops.cc#L571-L607
             */
            manualPadBottom = (padding[0] != padBottomRight[0]);
            manualPadRight = (padding[1] != padBottomRight[1]);

            //NCHW format
            long[] newShape;
            if(nchw){
                newShape = new long[]{input.size(0), input.size(1),
                        input.size(2) + (manualPadBottom ? 1 : 0),
                        input.size(3) + (manualPadRight ? 1 : 0)};
            } else {
                newShape = new long[]{input.size(0),
                        input.size(1) + (manualPadBottom ? 1 : 0),
                        input.size(2) + (manualPadRight ? 1 : 0),
                        input.size(3)};
            }
            INDArray newInput;
            if(poolingType == null || poolingType != PoolingType.MAX){
                newInput = Nd4j.create(input.dataType(), newShape);
            } else {
                //For max pooling, we don't want to include the padding in the maximum values. But CuDNN doesn't know
                // that these values are padding and hence should be excluded. Instead: we'll use -infinity so that,
                // if the 'real' (non-padding) values are all < 0, we take the real value, not the padding value
                newInput = Nd4j.valueArrayOf(newShape, Double.NEGATIVE_INFINITY, input.dataType());
            }

            if(nchw){
                newInput.put(new INDArrayIndex[]{all(), all(), interval(0,input.size(2)),
                        interval(0, input.size(3))}, input);
            } else {
                newInput.put(new INDArrayIndex[]{all(), interval(0,input.size(1)),
                        interval(0, input.size(2)), all()}, input);
            }

            input = newInput;
            //Now: we've manually applied the "extra" bottom/right padding only - if required. Consequently, we
            // now have the same amount of padding required for top/bottom, and left/right - which we'll let
            // CuDNN handle
        }
    } else {
        outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, padding, convolutionMode, dilation, format); //Also performs validation
    }

    return new CudnnForwardArgs(manualPadBottom, manualPadRight, input, origInput, padding, outSize);
}
 
Example 19
Source File: JCublasNDArrayFactory.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray sort(INDArray x, boolean descending) {
    if (x.isScalar())
        return x;

    Nd4j.getExecutioner().push();

    CudaContext context = AtomicAllocator.getInstance().getFlowController().prepareAction(x);

    Pointer ptr = AtomicAllocator.getInstance().getHostPointer(x.shapeInfoDataBuffer());

    PointerPointer extraz = new PointerPointer(ptr, // 0
            context.getOldStream(), // 1
            AtomicAllocator.getInstance().getDeviceIdPointer(), // 2
            null, // 3
            context.getBufferReduction(), // 4
            context.getBufferScalar(), // 5
            null, // 6
            ptr, // 7
            AtomicAllocator.getInstance().getHostPointer(x.shapeInfoDataBuffer()), // 8
            ptr, // 9
            ptr, // 10
            ptr, // 11
            ptr, // 12
            ptr, // 13
            ptr, // 14
            ptr, // special pointer for IsMax  // 15
            ptr, // special pointer for IsMax  // 16
            ptr, // special pointer for IsMax // 17
            new CudaPointer(0));

    // we're sending > 10m elements to radixSort
    boolean isRadix = !x.isView() && (x.length() > 1024 * 1024 * 10);
    INDArray tmpX = x;

    // we need to guarantee all threads are finished here
    if (isRadix)
        Nd4j.getExecutioner().commit();


    nativeOps.sort(extraz,
                null,
                (LongPointer) x.shapeInfoDataBuffer().addressPointer(),
                AtomicAllocator.getInstance().getPointer(tmpX, context),
                (LongPointer) AtomicAllocator.getInstance().getPointer(tmpX.shapeInfoDataBuffer(), context),
                descending
        );

    if (nativeOps.lastErrorCode() != 0)
        throw new RuntimeException(nativeOps.lastErrorMessage());

    AtomicAllocator.getInstance().getFlowController().registerAction(context, x);

    return x;
}
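
This factory method backs the public sorting API; typical user code goes through Nd4j.sort instead (a sketch):

INDArray arr = Nd4j.create(new double[]{3, 1, 2});
Nd4j.sort(arr, true); // sorts in place, descending: [3, 2, 1]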
 
Example 20
Source File: ArrowSerde.java    From deeplearning4j with Apache License 2.0
/**
 * Create a {@link Buffer}
 * representing the location metadata of the actual data
 * contents for the ndarray's {@link DataBuffer}
 * @param bufferBuilder the buffer builder in use
 * @param arr the array to add the underlying data for
 * @return the offset added
 */
public static int addDataForArr(FlatBufferBuilder bufferBuilder, INDArray arr) {
    DataBuffer toAdd = arr.isView() ? arr.dup().data() : arr.data();
    int offset = DataBufferStruct.createDataBufferStruct(bufferBuilder,toAdd);
    return Buffer.createBuffer(bufferBuilder, offset, toAdd.length() * toAdd.getElementSize());
}
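
Finally, a hedged sketch of how addDataForArr() might be used when building a flatbuffer (FlatBufferBuilder is the standard Google FlatBuffers builder; the surrounding Arrow serialization steps are omitted):

import com.google.flatbuffers.FlatBufferBuilder;

FlatBufferBuilder builder = new FlatBufferBuilder();
int bufferOffset = ArrowSerde.addDataForArr(builder, Nd4j.rand(2, 2));
// 'bufferOffset' is then referenced from the enclosing Tensor/Arrow structures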