Java Code Examples for org.nd4j.linalg.api.buffer.DataType#DOUBLE

The following examples show how to use org.nd4j.linalg.api.buffer.DataType#DOUBLE, drawn from the deeplearning4j project. Each example lists its source file and license.
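
Before the examples, here is a minimal sketch of the places DataType.DOUBLE typically shows up in user code: array creation, querying a buffer's type, and casting. All of these calls appear in the examples below; the demo class itself is hypothetical.

import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class DataTypeDoubleDemo {
    public static void main(String[] args) {
        // Create a 2x3 array backed by a double-precision buffer
        INDArray a = Nd4j.create(DataType.DOUBLE, 2, 3);
        System.out.println(a.dataType());   // DOUBLE

        // Branch on the buffer's data type, as many of the examples below do
        if (a.data().dataType() == DataType.DOUBLE) {
            // double-precision code path
        }

        // Cast to single precision
        INDArray f = a.castTo(DataType.FLOAT);
        System.out.println(f.dataType());   // FLOAT
    }
}
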
Example 1
Source File: BaseLevel1.java    From deeplearning4j with Apache License 2.0
/**
 * Computes a vector-vector dot product.
 *
 * @param n the number of elements accessed in each vector
 * @param alpha a scaling factor (not used by this implementation)
 * @param X an INDArray
 * @param Y an INDArray
 * @return the vector-vector dot product of X and Y
 */
@Override
public double dot(long n, double alpha, INDArray X, INDArray Y) {
    if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
        OpProfiler.getInstance().processBlasCall(false, X, Y);

    if (X.data().dataType() == DataType.DOUBLE) {
        DefaultOpExecutioner.validateDataType(DataType.DOUBLE, X, Y);
        return ddot(n, X, BlasBufferUtil.getBlasStride(X), Y, BlasBufferUtil.getBlasStride(Y));
    } else if (X.data().dataType() == DataType.FLOAT) {
        DefaultOpExecutioner.validateDataType(DataType.FLOAT, X, Y);
        return sdot(n, X, BlasBufferUtil.getBlasStride(X), Y, BlasBufferUtil.getBlasStride(Y));
    } else {
        DefaultOpExecutioner.validateDataType(DataType.HALF, X, Y);
        return hdot(n, X, BlasBufferUtil.getBlasStride(X), Y, BlasBufferUtil.getBlasStride(Y));
    }
}
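
A hedged usage sketch for the method above (assuming the standard Nd4j.getBlasWrapper().level1() accessor; note that alpha is accepted but, as documented above, not used by this dot implementation):

INDArray x = Nd4j.linspace(DataType.DOUBLE, 1, 4, 1);          // [1, 2, 3, 4]
INDArray y = Nd4j.linspace(DataType.DOUBLE, 1, 4, 1);
double d = Nd4j.getBlasWrapper().level1().dot(4, 1.0, x, y);   // 1 + 4 + 9 + 16 = 30.0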
 
Example 2
Source File: BaseLapack.java    From deeplearning4j with Apache License 2.0
@Override
public int syev(char jobz, char uplo, INDArray A, INDArray V) {

    if (A.rows() != A.columns()) {
        throw new Error("syev: A must be square.");
    }
    if (A.rows() != V.length()) {
        throw new Error("syev: V must be the length of the matrix dimension.");
    }

    if (A.rows() > Integer.MAX_VALUE || A.columns() > Integer.MAX_VALUE)
        throw new ND4JArraySizeException();

    int status = -1;
    if (A.data().dataType() == DataType.DOUBLE) {
        status = dsyev(jobz, uplo, (int) A.rows(), A, V);
    } else if (A.data().dataType() == DataType.FLOAT) {
        status = ssyev(jobz, uplo, (int) A.rows(), A, V);
    } else {
        throw new UnsupportedOperationException();
    }

    return status;
}
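
A hedged usage sketch (assuming the lapack() accessor on the BLAS wrapper; following LAPACK convention, jobz = 'V' requests eigenvectors and uplo = 'U' reads the upper triangle, with A overwritten by the eigenvectors and V receiving the eigenvalues):

INDArray A = Nd4j.create(new double[][]{{2, 1}, {1, 2}});   // symmetric 2x2
INDArray V = Nd4j.create(DataType.DOUBLE, 2);               // receives the eigenvalues
int status = Nd4j.getBlasWrapper().lapack().syev('V', 'U', A, V);
// status == 0 indicates success; here the eigenvalues are 1 and 3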
 
Example 3
Source File: BaseNDArrayList.java    From deeplearning4j with Apache License 2.0
@Override
public boolean add(X aX) {
    if(container == null) {
        container = Nd4j.create(10);
    }
    else if(size == container.length()) {
        growCapacity(size * 2);
    }
    if(DataTypeUtil.getDtypeFromContext() == DataType.DOUBLE)
        container.putScalar(size, aX.doubleValue());
    else {
        container.putScalar(size, aX.floatValue());
    }

    size++;
    return true;
}
 
Example 4
Source File: BaseLevel2.java    From deeplearning4j with Apache License 2.0
/**
 * tbmv computes a matrix-vector product using a triangular banded matrix:
 * x := A*x, or x := A'*x.
 *
 * @param order  the matrix storage order (row- or column-major)
 * @param Uplo   whether A is an upper or lower triangular band matrix
 * @param TransA whether to use A or its transpose
 * @param Diag   whether A is unit triangular
 * @param A      the triangular band matrix
 * @param X      the vector to multiply, overwritten with the result
 */
@Override
public void tbmv(char order, char Uplo, char TransA, char Diag, INDArray A, INDArray X) {
    if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
        OpProfiler.getInstance().processBlasCall(false, A, X);

    if (X.length() > Integer.MAX_VALUE || A.columns() > Integer.MAX_VALUE || A.size(0) > Integer.MAX_VALUE) {
        throw new ND4JArraySizeException();
    }

    if (X.data().dataType() == DataType.DOUBLE) {
        DefaultOpExecutioner.validateDataType(DataType.DOUBLE, A, X);
        dtbmv(order, Uplo, TransA, Diag, (int) X.length(), (int) A.columns(), A, (int) A.size(0), X, X.stride(-1));
    } else {
        DefaultOpExecutioner.validateDataType(DataType.FLOAT, A, X);
        stbmv(order, Uplo, TransA, Diag, (int) X.length(), (int) A.columns(), A, (int) A.size(0), X, X.stride(-1));
    }
}
 
Example 5
Source File: MiscOpValidation.java    From deeplearning4j with Apache License 2.0
@Test
public void testConfusionMatrix(){
    DataType dt = DataType.DOUBLE;

    for(boolean withMax : new boolean[]{true, false}){

        SameDiff sd = SameDiff.create();

        SDVariable labels = sd.constant("labels", Nd4j.createFromArray(1, 2, 4));
        SDVariable predictions = sd.constant("predictions", Nd4j.createFromArray(2, 2, 4));

        INDArray exp = Nd4j.create(new double[][]{
                {0, 0, 0, 0, 0},
                {0, 0, 1, 0, 0},
                {0, 0, 1, 0, 0},
                {0, 0, 0, 0, 0},
                {0, 0, 0, 0, 1}}).castTo(DataType.FLOAT);

        SDVariable confMatrix;
        if(withMax){
            confMatrix = sd.math().confusionMatrix(labels, predictions, 5).castTo(DataType.FLOAT);
        } else {
            confMatrix = sd.math().confusionMatrix("cm", labels, predictions, DataType.FLOAT);
        }

        SDVariable loss = confMatrix.castTo(DataType.DOUBLE).std(true);

        String err = OpValidation.validate(new TestCase(sd)
                .gradientCheck(false)   //Not gradient checkable
                .expected(confMatrix, exp));

        assertNull(err);
    }
}
 
Example 6
Source File: DeviceLocalNDArrayTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testDtypes(){
    for(DataType globalDType : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}){
        Nd4j.setDefaultDataTypes(globalDType, globalDType);
        for(DataType arrayDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}){
            INDArray arr = Nd4j.linspace(arrayDtype, 1, 10, 1);
            DeviceLocalNDArray dl = new DeviceLocalNDArray(arr);
            INDArray get = dl.get();
            assertEquals(arr, get);
        }
    }
}
 
Example 7
Source File: NotEqualsCondition.java    From deeplearning4j with Apache License 2.0
@Override
public Boolean apply(Number input) {
    if (Nd4j.dataType() == DataType.DOUBLE)
        return input.doubleValue() != value.doubleValue();
    else
        return input.floatValue() != value.floatValue();
}
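
Conditions such as this one are usually consumed through the boolean-indexing helpers rather than called directly; a hedged sketch (assuming the Conditions.notEquals factory and BooleanIndexing.replaceWhere from the org.nd4j.linalg.indexing packages):

INDArray arr = Nd4j.create(new double[]{1, 5, 5, 2});
// Zero out every element that is not equal to 5 -> [0, 5, 5, 0]
BooleanIndexing.replaceWhere(arr, 0.0, Conditions.notEquals(5));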
 
Example 8
Source File: BaseBlasWrapper.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray ger(float alpha, INDArray x, INDArray y, INDArray a) {
    LinAlgExceptions.assertVector(x, y);
    LinAlgExceptions.assertMatrix(a);

    if (x.data().dataType() == DataType.DOUBLE) {
        return ger((double) alpha, x, y, a);
    }

    level2().ger('N', alpha, x, y, a);
    return a;
}
 
Example 9
Source File: BaseLevel1.java    From deeplearning4j with Apache License 2.0
/**
 * Performs a plane rotation of points (Givens rotation).
 *
 * @param N the number of elements in X and Y
 * @param X the first input vector, rotated in place
 * @param Y the second input vector, rotated in place
 * @param c the cosine of the rotation angle
 * @param s the sine of the rotation angle
 */
@Override
public void rot(long N, INDArray X, INDArray Y, double c, double s) {

    if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
        OpProfiler.getInstance().processBlasCall(false, X, Y);

    if (X.data().dataType() == DataType.DOUBLE) {
        DefaultOpExecutioner.validateDataType(DataType.DOUBLE, X, Y);
        // Y is passed with its own BLAS stride; the original snippet reused X's stride here
        drot(N, X, BlasBufferUtil.getBlasStride(X), Y, BlasBufferUtil.getBlasStride(Y), c, s);
    } else {
        DefaultOpExecutioner.validateDataType(DataType.FLOAT, X, Y);
        srot(N, X, BlasBufferUtil.getBlasStride(X), Y, BlasBufferUtil.getBlasStride(Y), (float) c, (float) s);
    }
}
 
Example 10
Source File: PythonUtils.java    From deeplearning4j with Apache License 2.0
private static NumpyArray jsonToNumpyArray(JSONObject map) {
    String dtypeName = (String) map.get("dtype");
    DataType dtype;
    if (dtypeName.equals("float64")) {
        dtype = DataType.DOUBLE;
    } else if (dtypeName.equals("float32")) {
        dtype = DataType.FLOAT;
    } else if (dtypeName.equals("int16")) {
        dtype = DataType.SHORT;
    } else if (dtypeName.equals("int32")) {
        dtype = DataType.INT;
    } else if (dtypeName.equals("int64")) {
        dtype = DataType.LONG;
    } else {
        throw new RuntimeException("Unsupported array type " + dtypeName + ".");
    }
    List shapeList = map.getJSONArray("shape").toList();
    long[] shape = new long[shapeList.size()];
    for (int i = 0; i < shape.length; i++) {
        shape[i] = ((Number) shapeList.get(i)).longValue();
    }

    List strideList = map.getJSONArray("strides").toList();   // the original read "shape" here again, presumably a copy-paste slip
    long[] stride = new long[strideList.size()];
    for (int i = 0; i < stride.length; i++) {
        stride[i] = ((Number) strideList.get(i)).longValue();
    }
    long address = ((Number) map.get("address")).longValue();
    NumpyArray numpyArray = new NumpyArray(address, shape, stride, dtype, true);
    return numpyArray;
}
 
Example 11
Source File: PythonUtils.java    From deeplearning4j with Apache License 2.0
public static NumpyArray mapToNumpyArray(Map map) {
    String dtypeName = (String) map.get("dtype");
    DataType dtype;
    if (dtypeName.equals("float64")) {
        dtype = DataType.DOUBLE;
    } else if (dtypeName.equals("float32")) {
        dtype = DataType.FLOAT;
    } else if (dtypeName.equals("int16")) {
        dtype = DataType.SHORT;
    } else if (dtypeName.equals("int32")) {
        dtype = DataType.INT;
    } else if (dtypeName.equals("int64")) {
        dtype = DataType.LONG;
    } else {
        throw new RuntimeException("Unsupported array type " + dtypeName + ".");
    }
    List shapeList = (List) map.get("shape");
    long[] shape = new long[shapeList.size()];
    for (int i = 0; i < shape.length; i++) {
        shape[i] = (Long) shapeList.get(i);
    }

    List strideList = (List) map.get("strides");   // the original read "shape" here again, presumably a copy-paste slip
    long[] stride = new long[strideList.size()];
    for (int i = 0; i < stride.length; i++) {
        stride[i] = (Long) strideList.get(i);
    }
    long address = (Long) map.get("address");
    NumpyArray numpyArray = new NumpyArray(address, shape, stride, dtype, true);
    return numpyArray;
}
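
Both converters expect the same map layout; a hedged sketch of a well-formed input (the "strides" key reflects the copy-paste fix noted above, and nativeDataAddress is a placeholder for a real pointer to the numpy buffer):

Map<String, Object> map = new HashMap<>();
map.put("dtype", "float64");                 // mapped to DataType.DOUBLE
map.put("shape", Arrays.asList(2L, 3L));
map.put("strides", Arrays.asList(3L, 1L));
map.put("address", nativeDataAddress);       // long address of the native data
NumpyArray arr = PythonUtils.mapToNumpyArray(map);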
 
Example 12
Source File: NumpyArray.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray toJava(PythonObject pythonObject) {
    log.info("Converting PythonObject to INDArray...");
    PyObject np = PyImport_ImportModule("numpy");
    PyObject ndarray = PyObject_GetAttrString(np, "ndarray");
    if (PyObject_IsInstance(pythonObject.getNativePythonObject(), ndarray) != 1) {
        Py_DecRef(ndarray);
        Py_DecRef(np);
        throw new PythonException("Object is not a numpy array! Use Python.ndarray() to convert object to a numpy array.");
    }
    Py_DecRef(ndarray);
    Py_DecRef(np);
    PyArrayObject npArr = new PyArrayObject(pythonObject.getNativePythonObject());
    long[] shape = new long[PyArray_NDIM(npArr)];
    SizeTPointer shapePtr = PyArray_SHAPE(npArr);
    if (shapePtr != null)
        shapePtr.get(shape, 0, shape.length);
    long[] strides = new long[shape.length];
    SizeTPointer stridesPtr = PyArray_STRIDES(npArr);
    if (stridesPtr != null)
        stridesPtr.get(strides, 0, strides.length);
    int npdtype = PyArray_TYPE(npArr);

    DataType dtype;
    switch (npdtype) {
        case NPY_DOUBLE:
            dtype = DataType.DOUBLE;
            break;
        case NPY_FLOAT:
            dtype = DataType.FLOAT;
            break;
        case NPY_SHORT:
            dtype = DataType.SHORT;
            break;
        case NPY_INT:
            dtype = DataType.INT32;
            break;
        case NPY_LONG:
            dtype = DataType.INT64;
            break;
        case NPY_UINT:
            dtype = DataType.UINT32;
            break;
        case NPY_BYTE:
            dtype = DataType.INT8;
            break;
        case NPY_UBYTE:
            dtype = DataType.UINT8;
            break;
        case NPY_BOOL:
            dtype = DataType.BOOL;
            break;
        case NPY_HALF:
            dtype = DataType.FLOAT16;
            break;
        case NPY_LONGLONG:
            dtype = DataType.INT64;
            break;
        case NPY_USHORT:
            dtype = DataType.UINT16;
            break;
        case NPY_ULONG:
        case NPY_ULONGLONG:
            dtype = DataType.UINT64;
            break;
        default:
            throw new PythonException("Unsupported array data type: " + npdtype);
    }
    long size = 1;
    for (int i = 0; i < shape.length; i++) {
        size *= shape[i];
    }

    INDArray ret;
    long address = PyArray_DATA(npArr).address();
    String key = address + "_" + size + "_" + dtype;
    DataBuffer buff = cache.get(key);
    if (buff == null) {
        try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) {
            Pointer ptr = NativeOpsHolder.getInstance().getDeviceNativeOps().pointerForAddress(address);
            ptr = ptr.limit(size);
            ptr = ptr.capacity(size);
            buff = Nd4j.createBuffer(ptr, size, dtype);
            cache.put(key, buff);
        }
    }
    int elemSize = buff.getElementSize();
    long[] nd4jStrides = new long[strides.length];
    for (int i = 0; i < strides.length; i++) {
        nd4jStrides[i] = strides[i] / elemSize;
    }
    ret = Nd4j.create(buff, shape, nd4jStrides, 0, Shape.getOrder(shape, nd4jStrides, 1), dtype);
    Nd4j.getAffinityManager().tagLocation(ret, AffinityManager.Location.HOST);
    log.info("Done.");
    return ret;
}
 
Example 13
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testMultiLayerNetworkTypeConversion() {

    for (DataType dt : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(dt, dt);

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .weightInit(WeightInit.XAVIER)
                .updater(new Adam(0.01))
                .dataType(DataType.DOUBLE)
                .list()
                .layer(new DenseLayer.Builder().activation(Activation.TANH).nIn(10).nOut(10).build())
                .layer(new DenseLayer.Builder().activation(Activation.TANH).nIn(10).nOut(10).build())
                .layer(new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inD = Nd4j.rand(DataType.DOUBLE, 1, 10);
        INDArray lD = Nd4j.create(DataType.DOUBLE, 1, 10);
        net.fit(inD, lD);

        INDArray outDouble = net.output(inD);
        net.setInput(inD);
        net.setLabels(lD);
        net.computeGradientAndScore();
        double scoreDouble = net.score();
        INDArray grads = net.getFlattenedGradients();
        INDArray u = net.getUpdater().getStateViewArray();
        assertEquals(DataType.DOUBLE, net.params().dataType());
        assertEquals(DataType.DOUBLE, grads.dataType());
        assertEquals(DataType.DOUBLE, u.dataType());

        MultiLayerNetwork netFloat = net.convertDataType(DataType.FLOAT);
        netFloat.initGradientsView();
        assertEquals(DataType.FLOAT, netFloat.params().dataType());
        assertEquals(DataType.FLOAT, netFloat.getFlattenedGradients().dataType());
        assertEquals(DataType.FLOAT, netFloat.getUpdater(true).getStateViewArray().dataType());
        INDArray inF = inD.castTo(DataType.FLOAT);
        INDArray lF = lD.castTo(DataType.FLOAT);
        INDArray outFloat = netFloat.output(inF);
        netFloat.setInput(inF);
        netFloat.setLabels(lF);
        netFloat.computeGradientAndScore();
        double scoreFloat = netFloat.score();
        INDArray gradsFloat = netFloat.getFlattenedGradients();
        INDArray uFloat = netFloat.getUpdater().getStateViewArray();

        assertEquals(scoreDouble, scoreFloat, 1e-6);
        assertEquals(outDouble.castTo(DataType.FLOAT), outFloat);
        assertEquals(grads.castTo(DataType.FLOAT), gradsFloat);
        INDArray uCast = u.castTo(DataType.FLOAT);
        assertTrue(uCast.equalsWithEps(uFloat, 1e-4));

        MultiLayerNetwork netFP16 = net.convertDataType(DataType.HALF);
        netFP16.initGradientsView();
        assertEquals(DataType.HALF, netFP16.params().dataType());
        assertEquals(DataType.HALF, netFP16.getFlattenedGradients().dataType());
        assertEquals(DataType.HALF, netFP16.getUpdater(true).getStateViewArray().dataType());

        INDArray inH = inD.castTo(DataType.HALF);
        INDArray lH = lD.castTo(DataType.HALF);
        INDArray outHalf = netFP16.output(inH);
        netFP16.setInput(inH);
        netFP16.setLabels(lH);
        netFP16.computeGradientAndScore();
        double scoreHalf = netFP16.score();
        INDArray gradsHalf = netFP16.getFlattenedGradients();
        INDArray uHalf = netFP16.getUpdater().getStateViewArray();

        assertEquals(scoreDouble, scoreHalf, 1e-4);
        boolean outHalfEq = outDouble.castTo(DataType.HALF).equalsWithEps(outHalf, 1e-3);
        assertTrue(outHalfEq);
        boolean gradsHalfEq = grads.castTo(DataType.HALF).equalsWithEps(gradsHalf, 1e-3);
        assertTrue(gradsHalfEq);
        INDArray uHalfCast = u.castTo(DataType.HALF);
        assertTrue(uHalfCast.equalsWithEps(uHalf, 1e-4));
    }
}
 
Example 14
Source File: ConvDataFormatTests.java    From deeplearning4j with Apache License 2.0
@Parameterized.Parameters(name = "{0}")
public static Object[] params(){
    return new DataType[]{DataType.FLOAT, DataType.DOUBLE};
}
 
Example 15
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testDtypesModelVsGlobalDtypeRnn() {
    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            for (int outputLayer = 0; outputLayer < 3; outputLayer++) {
                assertEquals(globalDtype, Nd4j.dataType());
                assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

                String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", outputLayer=" + outputLayer;

                Layer ol;
                Layer secondLast;
                switch (outputLayer) {
                    case 0:
                        ol = new RnnOutputLayer.Builder().nOut(5).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build();
                        secondLast = new SimpleRnn.Builder().nOut(5).activation(Activation.TANH).build();
                        break;
                    case 1:
                        ol = new RnnLossLayer.Builder().activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build();
                        secondLast = new SimpleRnn.Builder().nOut(5).activation(Activation.TANH).build();
                        break;
                    case 2:
                        ol = new OutputLayer.Builder().nOut(5).build();
                        secondLast = new LastTimeStep(new SimpleRnn.Builder().nOut(5).activation(Activation.TANH).build());
                        break;
                    default:
                        throw new RuntimeException();
                }

                MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .dataType(networkDtype)
                        .convolutionMode(ConvolutionMode.Same)
                        .updater(new Adam(1e-2))
                        .list()
                        .layer(new LSTM.Builder().nIn(5).nOut(5).activation(Activation.TANH).build())
                        .layer(new GravesLSTM.Builder().nIn(5).nOut(5).activation(Activation.TANH).build())
                        .layer(new DenseLayer.Builder().nOut(5).build())
                        .layer(new GravesBidirectionalLSTM.Builder().nIn(5).nOut(5).activation(Activation.TANH).build())
                        .layer(new Bidirectional(new LSTM.Builder().nIn(5).nOut(5).activation(Activation.TANH).build()))
                        .layer(new TimeDistributed(new DenseLayer.Builder().nIn(10).nOut(5).activation(Activation.TANH).build()))
                        .layer(new SimpleRnn.Builder().nIn(5).nOut(5).build())
                        .layer(new MaskZeroLayer.Builder().underlying(new SimpleRnn.Builder().nIn(5).nOut(5).build()).maskValue(0.0).build())
                        .layer(secondLast)
                        .layer(ol)
                        .build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                net.initGradientsView();
                assertEquals(msg, networkDtype, net.params().dataType());
                assertEquals(msg, networkDtype, net.getFlattenedGradients().dataType());
                assertEquals(msg, networkDtype, net.getUpdater(true).getStateViewArray().dataType());

                INDArray in = Nd4j.rand(networkDtype, 2, 5, 2);
                INDArray label;
                if (outputLayer == 2) {
                    label = TestUtils.randomOneHot(2, 5).castTo(networkDtype);
                } else {
                    label = TestUtils.randomOneHotTimeSeries(2, 5, 2).castTo(networkDtype);
                }

                INDArray out = net.output(in);
                assertEquals(msg, networkDtype, out.dataType());
                List<INDArray> ff = net.feedForward(in);
                for (int i = 0; i < ff.size(); i++) {
                    assertEquals(msg, networkDtype, ff.get(i).dataType());
                }

                net.setInput(in);
                net.setLabels(label);
                net.computeGradientAndScore();

                net.fit(new DataSet(in, label, Nd4j.ones(networkDtype, 2, 2), outputLayer == 2 ? null : Nd4j.ones(networkDtype, 2, 2)));

                logUsedClasses(net);

                //Now, test mismatched dtypes for input/labels:
                for (DataType inputLabelDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
                    INDArray in2 = in.castTo(inputLabelDtype);
                    INDArray label2 = label.castTo(inputLabelDtype);
                    net.output(in2);
                    net.setInput(in2);
                    net.setLabels(label2);
                    net.computeGradientAndScore();

                    net.fit(new DataSet(in2, label2));
                }
            }
        }
    }
}
 
Example 16
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testCapsNetDtypes() {
    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            assertEquals(globalDtype, Nd4j.dataType());
            assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

            String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype;

            int primaryCapsDim = 2;
            int primaryCapsChannel = 8;
            int capsule = 5;
            int minibatchSize = 8;
            int routing = 1;
            int capsuleDim = 4;
            int height = 6;
            int width = 6;
            int inputDepth = 4;

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .dataType(networkDtype)
                    .seed(123)
                    .updater(new NoOp())
                    .weightInit(new WeightInitDistribution(new UniformDistribution(-6, 6)))
                    .list()
                    .layer(new PrimaryCapsules.Builder(primaryCapsDim, primaryCapsChannel)
                            .kernelSize(3, 3)
                            .stride(2, 2)
                            .build())
                    .layer(new CapsuleLayer.Builder(capsule, capsuleDim, routing).build())
                    .layer(new CapsuleStrengthLayer.Builder().build())
                    .layer(new ActivationLayer.Builder(new ActivationSoftmax()).build())
                    .layer(new LossLayer.Builder(new LossNegativeLogLikelihood()).build())
                    .setInputType(InputType.convolutional(height, width, inputDepth))
                    .build();

            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();

            INDArray in = Nd4j.rand(networkDtype, minibatchSize, inputDepth * height * width).mul(10)
                    .reshape(-1, inputDepth, height, width);
            INDArray label = Nd4j.zeros(networkDtype, minibatchSize, capsule);
            for (int i = 0; i < minibatchSize; i++) {
                label.putScalar(new int[]{i, i % capsule}, 1.0);
            }

            INDArray out = net.output(in);
            assertEquals(msg, networkDtype, out.dataType());
            List<INDArray> ff = net.feedForward(in);
            for (int i = 0; i < ff.size(); i++) {
                String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).conf().getLayer().getClass().getSimpleName());
                assertEquals(s, networkDtype, ff.get(i).dataType());
            }

            net.setInput(in);
            net.setLabels(label);
            net.computeGradientAndScore();

            net.fit(new DataSet(in, label));

            logUsedClasses(net);

            //Now, test mismatched dtypes for input/labels:
            for (DataType inputLabelDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
                INDArray in2 = in.castTo(inputLabelDtype);
                INDArray label2 = label.castTo(inputLabelDtype);
                net.output(in2);
                net.setInput(in2);
                net.setLabels(label2);
                net.computeGradientAndScore();

                net.fit(new DataSet(in2, label2));
            }
        }
    }
}
 
Example 17
Source File: EmbeddingLayerTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testEmbeddingSequenceLayerWithMasking() {
    //Idea: have masking on the input with an embedding and dense layers on input
    //Ensure that the parameter gradients for the inputs don't depend on the inputs when inputs are masked

    int[] miniBatchSizes = {1, 3};
    int nIn = 2;
    Random r = new Random(12345);

    int numInputClasses = 10;
    int timeSeriesLength = 5;

    for (DataType maskDtype : new DataType[]{DataType.FLOAT, DataType.DOUBLE, DataType.INT}) {
        for (DataType inLabelDtype : new DataType[]{DataType.FLOAT, DataType.DOUBLE, DataType.INT}) {
            for(int inputRank : new int[]{2, 3}) {
                for (int nExamples : miniBatchSizes) {
                    Nd4j.getRandom().setSeed(12345);

                    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                            .updater(new Sgd(0.1)).seed(12345).list()
                            .layer(0, new EmbeddingSequenceLayer.Builder().hasBias(true).activation(Activation.TANH).nIn(numInputClasses)
                                    .nOut(5).build())
                            .layer(1, new DenseLayer.Builder().activation(Activation.TANH).nIn(5).nOut(4).build())
                            .layer(2, new LSTM.Builder().activation(Activation.TANH).nIn(4).nOut(3).build())
                            .layer(3, new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(3)
                                    .nOut(4).build())
                            .setInputType(InputType.recurrent(1)).build();

                    MultiLayerNetwork net = new MultiLayerNetwork(conf);
                    net.init();

                    MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder()
                            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                            .updater(new Sgd(0.1)).seed(12345).list()
                            .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(numInputClasses).nOut(5)
                                    .build())
                            .layer(1, new DenseLayer.Builder().activation(Activation.TANH).nIn(5).nOut(4).build())
                            .layer(2, new LSTM.Builder().activation(Activation.TANH).nIn(4).nOut(3).build())
                            .layer(3, new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(3)
                                    .nOut(4).build())
                            .setInputType(InputType.recurrent(1)).build();

                    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
                    net2.init();

                    net2.setParams(net.params().dup());

                    INDArray inEmbedding = Nd4j.zeros(inLabelDtype, inputRank == 2 ? new long[]{nExamples, timeSeriesLength} : new long[]{nExamples, 1, timeSeriesLength});
                    INDArray inDense = Nd4j.zeros(inLabelDtype, nExamples, numInputClasses, timeSeriesLength);

                    INDArray labels = Nd4j.zeros(inLabelDtype, nExamples, 4, timeSeriesLength);

                    for (int i = 0; i < nExamples; i++) {
                        for (int j = 0; j < timeSeriesLength; j++) {
                            int inIdx = r.nextInt(numInputClasses);
                            inEmbedding.putScalar(inputRank == 2 ? new int[]{i, j} : new int[]{i, 0, j}, inIdx);
                            inDense.putScalar(new int[]{i, inIdx, j}, 1.0);

                            int outIdx = r.nextInt(4);
                            labels.putScalar(new int[]{i, outIdx, j}, 1.0);
                        }
                    }

                    INDArray inputMask = Nd4j.zeros(maskDtype, nExamples, timeSeriesLength);
                    for (int i = 0; i < nExamples; i++) {
                        for (int j = 0; j < timeSeriesLength; j++) {
                            inputMask.putScalar(new int[]{i, j}, (r.nextBoolean() ? 1.0 : 0.0));
                        }
                    }

                    net.setLayerMaskArrays(inputMask, null);
                    net2.setLayerMaskArrays(inputMask, null);
                    List<INDArray> actEmbedding = net.feedForward(inEmbedding, false);
                    List<INDArray> actDense = net2.feedForward(inDense, false);
                    for (int i = 2; i < actEmbedding.size(); i++) { //Start from layer 2: EmbeddingSequence is 3d, first dense is 2d (before reshape)
                        assertEquals(actDense.get(i), actEmbedding.get(i));
                    }

                    net.setLabels(labels);
                    net2.setLabels(labels);
                    net.computeGradientAndScore();
                    net2.computeGradientAndScore();

                    assertEquals(net2.score(), net.score(), 1e-5);

                    Map<String, INDArray> gradients = net.gradient().gradientForVariable();
                    Map<String, INDArray> gradients2 = net2.gradient().gradientForVariable();
                    assertEquals(gradients.keySet(), gradients2.keySet());
                    for (String s : gradients.keySet()) {
                        assertEquals(gradients2.get(s), gradients.get(s));
                    }
                }
            }
        }
    }
}
 
Example 18
Source File: RegressionTest100b6.java    From deeplearning4j with Apache License 2.0
@Test
public void testCustomLayer() throws Exception {

    for (DataType dtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {

        String dtypeName = dtype.toString().toLowerCase();

        File f = Resources.asFile("regression_testing/100b6/CustomLayerExample_100b6_" + dtypeName + ".bin");
        MultiLayerNetwork net = MultiLayerNetwork.load(f, true);

        DenseLayer l0 = (DenseLayer) net.getLayer(0).conf().getLayer();
        assertEquals(new ActivationTanH(), l0.getActivationFn());
        assertEquals(new L2Regularization(0.03), TestUtils.getL2Reg(l0));
        assertEquals(new RmsProp(0.95), l0.getIUpdater());

        CustomLayer l1 = (CustomLayer) net.getLayer(1).conf().getLayer();
        assertEquals(new ActivationTanH(), l1.getActivationFn());
        assertEquals(new ActivationSigmoid(), l1.getSecondActivationFunction());
        assertEquals(new RmsProp(0.95), l1.getIUpdater());

        INDArray outExp;
        File f2 = Resources
                .asFile("regression_testing/100b6/CustomLayerExample_Output_100b6_" + dtypeName + ".bin");
        try (DataInputStream dis = new DataInputStream(new FileInputStream(f2))) {
            outExp = Nd4j.read(dis);
        }

        INDArray in;
        File f3 = Resources.asFile("regression_testing/100b6/CustomLayerExample_Input_100b6_" + dtypeName + ".bin");
        try (DataInputStream dis = new DataInputStream(new FileInputStream(f3))) {
            in = Nd4j.read(dis);
        }

        assertEquals(dtype, in.dataType());
        assertEquals(dtype, outExp.dataType());
        assertEquals(dtype, net.params().dataType());
        assertEquals(dtype, net.getFlattenedGradients().dataType());
        assertEquals(dtype, net.getUpdater().getStateViewArray().dataType());

        INDArray outAct = net.output(in);
        assertEquals(dtype, outAct.dataType());

        assertEquals(dtype, net.getLayerWiseConfigurations().getDataType());
        assertEquals(dtype, net.params().dataType());
        boolean eq = outExp.equalsWithEps(outAct, 0.01);
        assertTrue("Test for dtype: " + dtypeName + " - " + outExp + " vs " + outAct, eq);
    }
}
 
Example 19
Source File: NDArrayList.java    From deeplearning4j with Apache License 2.0
/**
 * Initialize with the desired size.
 * This sets list.size() equal to the
 * passed-in size.
 * @param size the initial size of the array
 */
public NDArrayList(int size) {
    this(DataType.DOUBLE, size);
}
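
A hedged usage sketch (assuming NDArrayList supports the usual java.util.List operations, as the add(...) override in example 3 suggests, and exposes its backing INDArray via an array() accessor):

NDArrayList list = new NDArrayList(3);   // DOUBLE-backed, list.size() == 3
list.add(4.0);                           // grows the backing container as needed
INDArray backing = list.array();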