Java Code Examples for org.nd4j.linalg.api.shape.Shape#wholeArrayDimension()

The following examples show how to use org.nd4j.linalg.api.shape.Shape#wholeArrayDimension(). The originating project, source file, and license are noted above each example.
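A quick sketch before the examples: as the code below shows, these executioners substitute a single Integer.MAX_VALUE entry for a full-rank axis list, and Shape.wholeArrayDimension(...) reports true for that sentinel, signalling a whole-array reduction. The following minimal sketch uses illustrative assumed values (rank 3, reducing every axis) and assumes Shape is imported from org.nd4j.linalg.api.shape:

int rank = 3;                              // rank of a hypothetical input array
int[] dimension = {0, 1, 2};               // reduce along every dimension

// A full-rank axis list is swapped for the whole-array sentinel.
if (dimension.length == rank)
    dimension = new int[] {Integer.MAX_VALUE};

// The sentinel is what wholeArrayDimension() recognizes, so callers know
// the result collapses to a scalar-like {1, 1} shape.
boolean wholeArray = Shape.wholeArrayDimension(dimension); // true here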
Example 1
Source File: CudaGridExecutioner.java    From nd4j with Apache License 2.0
protected void buildZ(IndexAccumulation op, int... dimension) {
    Arrays.sort(dimension);

    // normalize negative axes to absolute indices
    for (int i = 0; i < dimension.length; i++) {
        if (dimension[i] < 0)
            dimension[i] += op.x().rank();
    }

    //do op along all dimensions
    if (dimension.length == op.x().rank())
        dimension = new int[] {Integer.MAX_VALUE};


    long[] retShape = Shape.wholeArrayDimension(dimension) ? new long[] {1, 1}
            : ArrayUtil.removeIndex(op.x().shape(), dimension);
    //ensure vector is proper shape
    if (retShape.length == 1) {
        if (dimension[0] == 0)
            retShape = new long[] {1, retShape[0]};
        else
            retShape = new long[] {retShape[0], 1};
    } else if (retShape.length == 0) {
        retShape = new long[] {1, 1};
    }

    if(op.z() == null || op.z() == op.x()){
        INDArray ret = null;
        if (Math.abs(op.zeroDouble()) < Nd4j.EPS_THRESHOLD) {
            ret = Nd4j.zeros(retShape);
        } else {
            ret = Nd4j.valueArrayOf(retShape, op.zeroDouble());
        }

        op.setZ(ret);
    } else if(!Arrays.equals(retShape, op.z().shape())){
        throw new IllegalStateException("Z array shape does not match expected return type for op " + op
                + ": expected shape " + Arrays.toString(retShape) + ", z.shape()=" + Arrays.toString(op.z().shape()));
    }
}
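As a hedged walk-through of the shape logic above, consider a rank-2 reduction with assumed shapes (a 3 x 4 input reduced along dimension 0); the values below are illustrative, not output from the project:

long[] xShape = {3, 4};                    // hypothetical 3 x 4 input
int[] dimension = {0};                     // reduce along dimension 0

// ArrayUtil.removeIndex({3, 4}, 0) -> {4}: a rank-1 result shape.
long[] retShape = ArrayUtil.removeIndex(xShape, dimension);

// The vector branch pads it back to rank 2: dimension 0 was reduced,
// so the result is laid out as a row vector {1, 4}.
if (retShape.length == 1)
    retShape = (dimension[0] == 0) ? new long[] {1, retShape[0]}
                                   : new long[] {retShape[0], 1};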
 
Example 2
Source File: CudaGridExecutioner.java    From deeplearning4j with Apache License 2.0
protected void buildZ(IndexAccumulation op, int... dimension) {
    Arrays.sort(dimension);

    for (int i = 0; i < dimension.length; i++) {
        if (dimension[i] < 0)
            dimension[i] += op.x().rank();
    }

    //do op along all dimensions
    if (dimension.length == op.x().rank())
        dimension = new int[] {Integer.MAX_VALUE};


    long[] retShape = Shape.wholeArrayDimension(dimension) ? new long[] {1, 1}
            : ArrayUtil.removeIndex(op.x().shape(), dimension);
    //ensure vector is proper shape
    if (retShape.length == 1) {
        if (dimension[0] == 0)
            retShape = new long[] {1, retShape[0]};
        else
            retShape = new long[] {retShape[0], 1};
    } else if (retShape.length == 0) {
        retShape = new long[] {1, 1};
    }

    if(op.z() == null || op.z() == op.x()){
        INDArray ret = Nd4j.createUninitialized(retShape);

        op.setZ(ret);
    } else if(!Arrays.equals(retShape, op.z().shape())){
        throw new IllegalStateException("Z array shape does not match expected return type for op " + op
                + ": expected shape " + Arrays.toString(retShape) + ", z.shape()=" + Arrays.toString(op.z().shape()));
    }
}
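The only substantive difference from Example 1 is how z is allocated: the nd4j version pre-fills the result with the op's neutral element, while this deeplearning4j version allocates uninitialized memory, presumably because the index-accumulation kernel overwrites every cell anyway. A minimal side-by-side sketch, with an assumed result shape:

long[] retShape = {1, 4};                  // assumed result shape

// Example 1 (nd4j): pre-fill z with the op's neutral element.
INDArray zFilled = Nd4j.zeros(retShape);

// Example 2 (deeplearning4j): raw allocation; the kernel writes every
// cell, so pre-filling would be wasted work.
INDArray zRaw = Nd4j.createUninitialized(retShape);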
 
Example 3
Source File: CudaGridExecutioner.java    From nd4j with Apache License 2.0
protected void buildZ(Accumulation op, int... dimension) {
    Arrays.sort(dimension);

    for (int i = 0; i < dimension.length; i++) {
        if (dimension[i] < 0)
            dimension[i] += op.x().rank();
    }

    //do op along all dimensions
    if (dimension.length == op.x().rank())
        dimension = new int[] {Integer.MAX_VALUE};


    long[] retShape = Shape.wholeArrayDimension(dimension) ? new long[] {1, 1}
            : ArrayUtil.removeIndex(op.x().shape(), dimension);
    //ensure vector is proper shape
    if (retShape.length == 1) {
        if (dimension[0] == 0)
            retShape = new long[] {1, retShape[0]};
        else
            retShape = new long[] {retShape[0], 1};
    } else if (retShape.length == 0) {
        retShape = new long[] {1, 1};
    }

    /*
    if(op.x().isVector() && op.x().length() == ArrayUtil.prod(retShape))
        return op.noOp();
    */

    INDArray ret = null;
    if (op.z() == null || op.z() == op.x()) {
        if (op.isComplexAccumulation()) {
            // tensorssAlongDimension (with the double "s") is the historical
            // nd4j name for the number of tensors along the given dimension
            val xT = op.x().tensorssAlongDimension(dimension);
            val yT = op.y().tensorssAlongDimension(dimension);

            ret = Nd4j.create(xT, yT);
        } else {
            if (Math.abs(op.zeroDouble()) < Nd4j.EPS_THRESHOLD) {
                ret = Nd4j.zeros(retShape);
            } else {
                ret = Nd4j.valueArrayOf(retShape, op.zeroDouble());
            }
        }

        op.setZ(ret);
    } else {
        // compare length
        if (op.z().lengthLong() != ArrayUtil.prodLong(retShape))
            throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + Arrays.toString(retShape) + "]");

        if (op.x().data().dataType() == DataBuffer.Type.DOUBLE) {
            op.z().assign(op.zeroDouble());
        } else if (op.x().data().dataType() == DataBuffer.Type.FLOAT) {
            op.z().assign(op.zeroFloat());
        } else if (op.x().data().dataType() == DataBuffer.Type.HALF) {
            op.z().assign(op.zeroHalf());
        }

        ret = op.z();
    }
}
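The complex-accumulation branch above sizes z as one cell per pair of tensors along the reduced dimension (an all-pairs reduction such as a distance matrix). A hedged sketch of that count, with assumed shapes; tensorssAlongDimension, with the double "s", is the method name this nd4j version actually exposes:

INDArray x = Nd4j.create(3, 4);            // hypothetical 3 x 4 input
INDArray y = Nd4j.create(5, 4);            // hypothetical 5 x 4 input

// Each row is one tensor along dimension 1: 3 TADs for x, 5 for y.
int xT = x.tensorssAlongDimension(1);      // historical double-"s" name
int yT = y.tensorssAlongDimension(1);

// An all-pairs accumulation gets one output cell per (x TAD, y TAD)
// pair, hence the 3 x 5 result created with Nd4j.create(xT, yT).
INDArray z = Nd4j.create(xT, yT);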
 
Example 4
Source File: CudaExecutioner.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray exec(ReduceOp op) {
    checkForCompression(op);

    if(op instanceof BaseReduceOp && ((BaseReduceOp)op).isEmptyReduce()){
        //Edge case for TF import compatibility: [x,y].reduce(empty) = [x,y]
        //Note that "empty" axis is NOT the same as length 0, as in INDArray.sum(new int[0]), which means "all dimensions"
        if(op.z() != null){
            Preconditions.checkState(op.x().equalShapes(op.z()), "For empty reductions, result (z) array must have same shape as x shape." +
                    " Got: x=%ndShape, z=%ndShape", op.x(), op.z());
            op.z().assign(op.x());
            return op.z();
        } else {
            op.setZ(op.x().dup());
            return op.z();
        }
    }

    val dimension = op.dimensions().toIntVector();

    if (extraz.get() == null)
        extraz.set(new PointerPointer(32));

    val maxShape = Shape.getMaxShape(op.x(),op.y());

    val wholeDims = Shape.wholeArrayDimension(dimension) || op.x().rank() == dimension.length || dimension.length == 0;
    val retShape = Shape.reductionShape(op.y() == null ? op.x() : op.x().length() > op.y().length() ? op.x() : op.y(), dimension, true, op.isKeepDims());

    if (op.x().isVector() && op.x().length() == ArrayUtil.prod(retShape) && ArrayUtil.prodLong(retShape) > 1 && op.y() == null)
        return op.noOp();

    val dtype = op.resultType();
    INDArray ret = null;
    if (op.z() == null || op.z() == op.x()) {
        if (op.isComplexAccumulation()) {
            val xT = op.x().tensorsAlongDimension(dimension);
            val yT = op.y().tensorsAlongDimension(dimension);

            // allocate one output cell per (x TAD, y TAD) pair
            ret = Nd4j.createUninitialized(dtype, new long[] {xT, yT});
        } else {
            if (op.y() != null) {
                //2 options here: either pairwise, equal sizes - OR every X TAD vs. entirety of Y
                if (op.x().length() == op.y().length()) {
                    //Pairwise
                    if (!wholeDims && op.x().tensorsAlongDimension(dimension) != op.y().tensorsAlongDimension(dimension)) {
                        throw new ND4JIllegalStateException("Number of TADs along dimension don't match: (x shape = " +
                                Arrays.toString(op.x().shape()) + ", y shape = " + Arrays.toString(op.y().shape()) +
                                ", dimension = " + Arrays.toString(dimension) + ")");
                    }
                } else {
                    if (dimension.length == 0)
                        throw new ND4JIllegalStateException("TAD vs TAD comparison requires dimension (or other comparison mode was supposed to be used?)");

                    //Every X TAD vs. entirety of Y
                    val xTADSize = op.x().length() / op.x().tensorsAlongDimension(dimension);

                    if (xTADSize != op.y().length()) {
                        throw new ND4JIllegalStateException("Size of TADs along dimension don't match for pairwise execution:" +
                                " (x TAD size = " + xTADSize + ", y size = " + op.y().length());
                    }
                }
            }

            // in case of regular accumulation we don't care about array state before op
            ret = Nd4j.create(dtype, retShape);
        }
        op.setZ(ret);
    } else {
        // compare length

        if (op.z().length() != (retShape.length == 0 ? 1 : ArrayUtil.prodLong(retShape)))
            throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + Arrays.toString(retShape) + "]");
    }

    long st = profilingConfigurableHookIn(op);
    naiveExec(op, dimension);

    profilingConfigurableHookOut(op, null, st);

    return op.z();
}
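The empty-reduce guard at the top of this method is worth a concrete illustration: an axis that is "empty" in the TF-import sense leaves x unchanged, whereas an empty Java array (new int[0]) means "reduce over all dimensions", as the source comment notes. A minimal hedged sketch of the latter, with assumed values:

INDArray x = Nd4j.ones(3, 4);              // assumed 3 x 4 input of ones

// An empty int[] axis reduces every dimension, so this yields a scalar
// holding 12.0, not a copy of x.
INDArray total = x.sum(new int[0]);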
 
Example 5
Source File: CudaGridExecutioner.java    From deeplearning4j with Apache License 2.0
protected void buildZ(ReduceOp op, int... dimension) {
    Arrays.sort(dimension);

    for (int i = 0; i < dimension.length; i++) {
        if (dimension[i] < 0)
            dimension[i] += op.x().rank();
    }

    //do op along all dimensions
    if (dimension.length == op.x().rank())
        dimension = new int[] {Integer.MAX_VALUE};


    long[] retShape = Shape.wholeArrayDimension(dimension) ? new long[] {1, 1}
            : ArrayUtil.removeIndex(op.x().shape(), dimension);
    //ensure vector is proper shape
    if (retShape.length == 1) {
        if (dimension[0] == 0)
            retShape = new long[] {1, retShape[0]};
        else
            retShape = new long[] {retShape[0], 1};
    } else if (retShape.length == 0) {
        retShape = new long[] {1, 1};
    }

    /*
    if(op.x().isVector() && op.x().length() == ArrayUtil.prod(retShape))
        return op.noOp();
    */

    INDArray ret = null;
    if (op.z() == null || op.z() == op.x()) {
        if (op.isComplexAccumulation()) {
            val xT = op.x().tensorsAlongDimension(dimension);
            val yT = op.y().tensorsAlongDimension(dimension);

            ret = Nd4j.create(xT, yT);
        } else {
            ret = Nd4j.zeros(retShape);
        }

        op.setZ(ret);
    } else {
        // compare length
        if (op.z().length() != ArrayUtil.prodLong(retShape))
            throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + Arrays.toString(retShape) + "]");

        ret = op.z();
    }
}
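Across all five examples the caller-supplied z follows the same contract: it is accepted only if its length matches the product of the expected result shape. A final hedged sketch of that validation, with assumed shapes and a plain IllegalStateException standing in for the project's ND4JIllegalStateException:

long[] retShape = {3, 1};                  // assumed expected result shape
INDArray z = Nd4j.create(3, 1);            // caller-supplied result buffer

// Reject z unless its length matches the reduction output.
if (z.length() != ArrayUtil.prodLong(retShape))
    throw new IllegalStateException("Shape of target array for reduction ["
            + Arrays.toString(z.shape()) + "] doesn't match expected ["
            + Arrays.toString(retShape) + "]");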