Java Code Examples for org.nd4j.linalg.api.buffer.DataType#HALF

The following examples show how to use org.nd4j.linalg.api.buffer.DataType#HALF. All of them are taken from the deeplearning4j project; the originating source file is listed above each example.
Example 1
Source File: TensorflowConversion.java    From deeplearning4j with Apache License 2.0
private DataType typeFor(int tensorflowType) {
    switch(tensorflowType) {
        case DT_DOUBLE: return DataType.DOUBLE;
        case DT_FLOAT: return DataType.FLOAT;
        case DT_HALF: return DataType.HALF;
        case DT_INT16: return DataType.SHORT;
        case DT_INT32: return DataType.INT;
        case DT_INT64: return DataType.LONG;
        case DT_STRING: return DataType.UTF8;
        case DT_INT8: return DataType.BYTE;
        case DT_UINT8: return DataType.UBYTE;
        case DT_UINT16: return DataType.UINT16;
        case DT_UINT32: return DataType.UINT32;
        case DT_UINT64: return DataType.UINT64;
        case DT_BFLOAT16: return DataType.BFLOAT16;
        case DT_BOOL: return DataType.BOOL;
        default: throw new IllegalArgumentException("Illegal type " + tensorflowType);
    }
}
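
The resolved type is typically used to allocate a matching ND4J array. A minimal, self-contained sketch (the class name and main method are illustrative scaffolding, not part of TensorflowConversion):

import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class HalfTypeDemo {
    public static void main(String[] args) {
        // DataType.HALF is ND4J's IEEE 754 half-precision (FP16) type,
        // the counterpart of TensorFlow's DT_HALF
        INDArray arr = Nd4j.create(DataType.HALF, 2, 3);
        System.out.println(arr.dataType());   // prints HALF
    }
}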
 
Example 2
Source File: SpecialTests.java    From deeplearning4j with Apache License 2.0
@Test
public void reproduceWorkspaceCrash_3(){
    val conf = WorkspaceConfiguration.builder().build();

    val ws = Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(conf, "WS");
    val dtypes = new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF, DataType.LONG, DataType.INT, DataType.SHORT, DataType.BYTE, DataType.UBYTE, DataType.BOOL};
    for (val dX : dtypes) {
        for (val dZ : dtypes) {
            try(val ws2 = ws.notifyScopeEntered()) {
                val array = Nd4j.create(dX, 2, 5).assign(1);
                //log.info("Trying to cast {} to {}", dX, dZ);
                val casted = array.castTo(dZ);
                val exp = Nd4j.create(dZ, 2, 5).assign(1);
                assertEquals(exp, casted);

                Nd4j.getExecutioner().commit();
            }
        }
    }
}
 
Example 3
Source File: BaseLevel1.java    From deeplearning4j with Apache License 2.0
/**
 * Scales a vector by a scalar, in place: x = alpha * x (BLAS SCAL).
 *
 * @param N     the number of elements in the vector
 * @param alpha the scalar to multiply by
 * @param X     the vector to scale
 */
@Override
public void scal(long N, double alpha, INDArray X) {
    if (Nd4j.getExecutioner().getProfilingMode() == OpExecutioner.ProfilingMode.ALL)
        OpProfiler.getInstance().processBlasCall(false, X);

    if (X.data().dataType() == DataType.DOUBLE)
        dscal(N, alpha, X, BlasBufferUtil.getBlasStride(X));
    else if (X.data().dataType() == DataType.FLOAT)
        sscal(N, (float) alpha, X, BlasBufferUtil.getBlasStride(X));
    else if (X.data().dataType() == DataType.HALF)
        Nd4j.getExecutioner().exec(new ScalarMultiplication(X, alpha));
}
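
Note the HALF branch: there is no native half-precision BLAS scal routine, so the call falls back to a ScalarMultiplication op rather than dscal/sscal. A short usage sketch, assuming the default backend exposes this level-1 implementation:

INDArray x = Nd4j.create(DataType.HALF, 4).assign(1);
// for a HALF array this dispatches to ScalarMultiplication, not native BLAS
Nd4j.getBlasWrapper().level1().scal(x.length(), 2.0, x);   // x is now all 2.0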
 
Example 4
Source File: BasicWorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testDtypeLeverage(){

    for(DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        for (DataType arrayDType : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            Nd4j.setDefaultDataTypes(globalDtype, globalDtype);

            WorkspaceConfiguration configOuter = WorkspaceConfiguration.builder().initialSize(10 * 1024L * 1024L)
                    .policyAllocation(AllocationPolicy.OVERALLOCATE).policyLearning(LearningPolicy.NONE).build();
            WorkspaceConfiguration configInner = WorkspaceConfiguration.builder().initialSize(10 * 1024L * 1024L)
                    .policyAllocation(AllocationPolicy.OVERALLOCATE).policyLearning(LearningPolicy.NONE).build();

            try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(configOuter, "ws")) {
                INDArray arr = Nd4j.create(arrayDType, 3, 4);
                try (MemoryWorkspace wsInner = Nd4j.getWorkspaceManager().getAndActivateWorkspace(configOuter, "wsInner")) {
                    INDArray leveraged = arr.leverageTo("ws");
                    assertTrue(leveraged.isAttached());
                    assertEquals(arrayDType, leveraged.dataType());

                    INDArray detached = leveraged.detach();
                    assertFalse(detached.isAttached());
                    assertEquals(arrayDType, detached.dataType());
                }
            }
        }
    }
    Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread();
}
 
Example 5
Source File: CustomOpsTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testSizeTypes(){
    List<DataType> failed = new ArrayList<>();
    for(DataType dt : new DataType[]{DataType.LONG, DataType.INT, DataType.SHORT, DataType.BYTE,
            DataType.UINT64, DataType.UINT32, DataType.UINT16, DataType.UBYTE,
            DataType.DOUBLE, DataType.FLOAT, DataType.HALF, DataType.BFLOAT16}) {

        INDArray in = Nd4j.create(DataType.FLOAT, 100);
        INDArray out = Nd4j.scalar(dt, 0);
        INDArray e = Nd4j.scalar(dt, 100);

        DynamicCustomOp op = DynamicCustomOp.builder("size")
                .addInputs(in)
                .addOutputs(out)
                .build();

        try {
            Nd4j.exec(op);

            assertEquals(e, out);
        } catch (Throwable t){
            failed.add(dt);
        }
    }

    if(!failed.isEmpty()){
        fail("Failed datatypes: " + failed.toString());
    }
}
 
Example 6
Source File: JsonSerdeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testNDArrayTextSerializer() throws Exception {
    for(char order : new char[]{'c', 'f'}) {
        Nd4j.factory().setOrder(order);
        for (DataType globalDT : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            Nd4j.setDefaultDataTypes(globalDT, globalDT);

            Nd4j.getRandom().setSeed(12345);
            INDArray in = Nd4j.rand(DataType.DOUBLE, 3, 4).muli(20).subi(10);

            val om = new ObjectMapper();

            for (DataType dt : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF, DataType.LONG, DataType.INT, DataType.SHORT,
                    DataType.BYTE, DataType.UBYTE, DataType.BOOL, DataType.UTF8}) {

                INDArray arr;
                if(dt == DataType.UTF8){
                    arr = Nd4j.create("aaaaa", "bbbb", "ccc", "dd", "e", "f", "g", "h", "i", "j", "k", "l").reshape('c', 3, 4);
                } else {
                    arr = in.castTo(dt);
                }

                TestClass tc = new TestClass(arr);

                String s = om.writeValueAsString(tc);
                //System.out.println(dt);
                //System.out.println(s);
                //System.out.println("\n\n\n");

                TestClass deserialized = om.readValue(s, TestClass.class);
                assertEquals(dt.toString(), tc, deserialized);
            }
        }
    }
}
 
Example 7
Source File: DataTypeUtil.java    From deeplearning4j with Apache License 2.0
/**
 * Resolve a DataType from its string name.
 *
 * @param dType the data type name: "double", "float", "int" or "half"
 * @return the matching DataType, or FLOAT for unrecognized names
 */
public static DataType getDtypeFromContext(String dType) {
    switch (dType) {
        case "double":
            return DataType.DOUBLE;
        case "float":
            return DataType.FLOAT;
        case "int":
            return DataType.INT;
        case "half":
            return DataType.HALF;
        default:
            return DataType.FLOAT;
    }
}
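
Note the default branch: any name other than "double", "float", "int" or "half" silently falls back to FLOAT. A quick usage sketch:

DataType half = DataTypeUtil.getDtypeFromContext("half");      // DataType.HALF
DataType fallback = DataTypeUtil.getDtypeFromContext("fp16");  // DataType.FLOAT (default branch)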
 
Example 8
Source File: CudnnBatchNormalizationHelper.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray getVarCache(DataType dataType) {
    INDArray ret;
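    // varCache is assumed to hold cuDNN's saved inverse standard deviation, 1/sqrt(var + eps),
    // so the expression below recovers the variance as 1/cache^2 - eps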
    if(dataType == DataType.HALF){
        INDArray vc = varCache.castTo(DataType.HALF);
        ret = vc.mul(vc).rdivi(1.0).subi(eps);
    } else {
        ret = varCache.mul(varCache).rdivi(1.0).subi(eps);
    }
    if(dataType == DataType.HALF){
        //Buffer is FP32
        return ret.castTo(DataType.HALF);
    }
    return ret;
}
 
Example 9
Source File: ArrayOptionsHelper.java    From deeplearning4j with Apache License 2.0
public static DataType convertToDataType(org.tensorflow.framework.DataType dataType) {
    switch (dataType) {
        case DT_UINT16:
            return DataType.UINT16;
        case DT_UINT32:
            return DataType.UINT32;
        case DT_UINT64:
            return DataType.UINT64;
        case DT_BOOL:
            return DataType.BOOL;
        case DT_BFLOAT16:
            return DataType.BFLOAT16;
        case DT_FLOAT:
            return DataType.FLOAT;
        case DT_INT32:
            return DataType.INT;
        case DT_INT64:
            return DataType.LONG;
        case DT_INT8:
            return DataType.BYTE;
        case DT_INT16:
            return DataType.SHORT;
        case DT_DOUBLE:
            return DataType.DOUBLE;
        case DT_UINT8:
            return DataType.UBYTE;
        case DT_HALF:
            return DataType.HALF;
        case DT_STRING:
            return DataType.UTF8;
        default:
            throw new UnsupportedOperationException("Unknown TF data type: [" + dataType.name() + "]");
    }
}
 
Example 10
Source File: CudnnBatchNormalizationHelper.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray getMeanCache(DataType dataType) {
    if(dataType == DataType.HALF){
        //Buffer is FP32
        return meanCache.castTo(DataType.HALF);
    }
    return meanCache;
}
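
Both cache getters follow the same mixed-precision pattern: the statistics live in an FP32 master buffer for accuracy, and are cast down only when the network itself runs in FP16. A minimal sketch of that pattern (variable names are illustrative):

INDArray fp32Cache = Nd4j.create(DataType.FLOAT, 10);      // master copy stays FP32
INDArray forHalfNet = fp32Cache.castTo(DataType.HALF);     // cast on demand for an FP16 network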
 
Example 11
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testEmbeddingDtypes() {
    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            for (boolean frozen : new boolean[]{false, true}) {
                for (int test = 0; test < 3; test++) {
                    assertEquals(globalDtype, Nd4j.dataType());
                    assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

                    String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test;

                    ComputationGraphConfiguration.GraphBuilder conf = new NeuralNetConfiguration.Builder()
                            .dataType(networkDtype)
                            .seed(123)
                            .updater(new NoOp())
                            .weightInit(new WeightInitDistribution(new UniformDistribution(-6, 6)))
                            .graphBuilder()
                            .addInputs("in")
                            .setOutputs("out");

                    INDArray input;
                    if (test == 0) {
                        if (frozen) {
                            conf.layer("0", new FrozenLayer(new EmbeddingLayer.Builder().nIn(5).nOut(5).build()), "in");
                        } else {
                            conf.layer("0", new EmbeddingLayer.Builder().nIn(5).nOut(5).build(), "in");
                        }
                        input = Nd4j.rand(networkDtype, 10, 1).muli(5).castTo(DataType.INT);
                        conf.setInputTypes(InputType.feedForward(1));
                    } else if (test == 1) {
                        if (frozen) {
                            conf.layer("0", new FrozenLayer(new EmbeddingSequenceLayer.Builder().nIn(5).nOut(5).build()), "in");
                        } else {
                            conf.layer("0", new EmbeddingSequenceLayer.Builder().nIn(5).nOut(5).build(), "in");
                        }
                        conf.layer("gp", new GlobalPoolingLayer.Builder(PoolingType.PNORM).pnorm(2).poolingDimensions(2).build(), "0");
                        input = Nd4j.rand(networkDtype, 10, 1, 5).muli(5).castTo(DataType.INT);
                        conf.setInputTypes(InputType.recurrent(1));
                    } else {
                        conf.layer("0", new RepeatVector.Builder().repetitionFactor(5).nOut(5).build(), "in");
                        conf.layer("gp", new GlobalPoolingLayer.Builder(PoolingType.SUM).build(), "0");
                        input = Nd4j.rand(networkDtype, 10, 5);
                        conf.setInputTypes(InputType.feedForward(5));
                    }

                    conf.appendLayer("el", new ElementWiseMultiplicationLayer.Builder().nOut(5).build())
                            .appendLayer("ae", new AutoEncoder.Builder().nOut(5).build())
                            .appendLayer("prelu", new PReLULayer.Builder().nOut(5).inputShape(5).build())
                            .appendLayer("out", new OutputLayer.Builder().nOut(10).build());

                    ComputationGraph net = new ComputationGraph(conf.build());
                    net.init();

                    INDArray label = Nd4j.zeros(networkDtype, 10, 10);

                    INDArray out = net.outputSingle(input);
                    assertEquals(msg, networkDtype, out.dataType());
                    Map<String, INDArray> ff = net.feedForward(input, false);
                    for (Map.Entry<String, INDArray> e : ff.entrySet()) {
                        if (e.getKey().equals("in"))
                            continue;
                        String s = msg + " - layer: " + e.getKey();
                        assertEquals(s, networkDtype, e.getValue().dataType());
                    }

                    net.setInput(0, input);
                    net.setLabels(label);
                    net.computeGradientAndScore();

                    net.fit(new DataSet(input, label));

                    logUsedClasses(net);

                    //Now, test mismatched dtypes for input/labels:
                    for (DataType inputLabelDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
                        INDArray in2 = input.castTo(inputLabelDtype);
                        INDArray label2 = label.castTo(inputLabelDtype);
                        net.output(in2);
                        net.setInput(0, in2);
                        net.setLabels(label2);
                        net.computeGradientAndScore();

                        net.fit(new DataSet(in2, label2));
                    }
                }
            }
        }
    }
}
 
Example 12
Source File: ExecDebuggingListener.java    From deeplearning4j with Apache License 2.0
private static String createString(INDArray arr){
    StringBuilder sb = new StringBuilder();

    if(arr.isEmpty()){
        sb.append("Nd4j.empty(DataType.").append(arr.dataType()).append(");");
    } else {
        sb.append("Nd4j.createFromArray(");

        DataType dt = arr.dataType();
        switch (dt){
            case DOUBLE:
                double[] dArr = arr.dup().data().asDouble();
                sb.append(Arrays.toString(dArr).replaceAll("[\\[\\]]", ""));
                break;
            case FLOAT:
            case HALF:
            case BFLOAT16:
                float[] fArr = arr.dup().data().asFloat();
                sb.append(Arrays.toString(fArr)
                        .replaceAll(",", "f,")
                        .replaceAll("]", "f")
                        .replaceAll("[\\[\\]]", ""));
                break;
            case LONG:
            case UINT32:
            case UINT64:
                long[] lArr = arr.dup().data().asLong();
                sb.append(Arrays.toString(lArr)
                        .replaceAll(",", "L,")
                        .replaceAll("]", "L")
                        .replaceAll("[\\[\\]]", ""));
                break;
            case INT:
            case SHORT:
            case UBYTE:
            case BYTE:
            case UINT16:
            case BOOL:
                int[] iArr = arr.dup().data().asInt();
                sb.append(Arrays.toString(iArr).replaceAll("[\\[\\]]", ""));
                break;
            case UTF8:
                break;
            case COMPRESSED:
            case UNKNOWN:
                break;
        }

        sb.append(").reshape(").append(Arrays.toString(arr.shape()).replaceAll("[\\[\\]]", ""))
                .append(")");

        if(dt == DataType.HALF || dt == DataType.BFLOAT16 || dt == DataType.UINT32 || dt == DataType.UINT64 ||
                dt == DataType.SHORT || dt == DataType.UBYTE || dt == DataType.BYTE || dt == DataType.UINT16 || dt == DataType.BOOL){
            sb.append(".cast(DataType.").append(arr.dataType()).append(")");
        }
    }

    return sb.toString();
}
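
Note why .cast(DataType.HALF) is appended for HALF arrays: the float literals emitted by the FLOAT/HALF/BFLOAT16 branch produce a FLOAT array, so a trailing cast restores the original type. For a 2x2 HALF array the generated string would look roughly like this (values are illustrative):

// Nd4j.createFromArray(1.0f, 2.0f, 3.0f, 4.0f).reshape(2, 2).cast(DataType.HALF)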
 
Example 13
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testDtypesModelVsGlobalDtypeCnn1d() {
    //Nd4jCpu.Environment.getInstance().setUseMKLDNN(false);

    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            for (int outputLayer = 0; outputLayer < 3; outputLayer++) {
                assertEquals(globalDtype, Nd4j.dataType());
                assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

                String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", outputLayer=" + outputLayer;

                Layer ol;
                Layer secondLast;
                switch (outputLayer) {
                    case 0:
                        ol = new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build();
                        secondLast = new GlobalPoolingLayer(PoolingType.MAX);
                        break;
                    case 1:
                        ol = new RnnOutputLayer.Builder().activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).nOut(5).build();
                        secondLast = new Convolution1D.Builder().kernelSize(2).nOut(5).build();
                        break;
                    case 2:
                        ol = new RnnLossLayer.Builder().activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build();
                        secondLast = new Convolution1D.Builder().kernelSize(2).nOut(5).build();
                        break;
                    default:
                        throw new RuntimeException();
                }


                MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .trainingWorkspaceMode(WorkspaceMode.NONE)
                        .inferenceWorkspaceMode(WorkspaceMode.NONE)
                        .dataType(networkDtype)
                        .convolutionMode(ConvolutionMode.Same)
                        .updater(new Adam(1e-2))
                        .list()
                        .layer(new Convolution1D.Builder().kernelSize(2).stride(1).nOut(3).activation(Activation.TANH).build())
                        .layer(new Subsampling1DLayer.Builder().poolingType(PoolingType.MAX).kernelSize(5).stride(1).build())
                        .layer(new Cropping1D.Builder(1).build())
                        .layer(new ZeroPadding1DLayer(1))
                        .layer(new Upsampling1D.Builder(2).build())
                        .layer(secondLast)
                        .layer(ol)
                        .setInputType(InputType.recurrent(5, 10))
                        .build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                net.initGradientsView();
                assertEquals(msg, networkDtype, net.params().dataType());
                assertEquals(msg, networkDtype, net.getFlattenedGradients().dataType());
                assertEquals(msg, networkDtype, net.getUpdater(true).getStateViewArray().dataType());

                INDArray in = Nd4j.rand(networkDtype, 2, 5, 10);
                INDArray label;
                if (outputLayer == 0) {
                    //OutputLayer
                    label = TestUtils.randomOneHot(2, 10).castTo(networkDtype);
                } else {
                    //RnnOutputLayer, RnnLossLayer
                    label = Nd4j.rand(networkDtype, 2, 5, 20);   //Longer sequence due to upsampling
                }

                INDArray out = net.output(in);
                assertEquals(msg, networkDtype, out.dataType());
                List<INDArray> ff = net.feedForward(in);
                for (int i = 0; i < ff.size(); i++) {
                    String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).conf().getLayer().getClass().getSimpleName());
                    assertEquals(s, networkDtype, ff.get(i).dataType());
                }

                net.setInput(in);
                net.setLabels(label);
                net.computeGradientAndScore();

                net.fit(new DataSet(in, label));

                logUsedClasses(net);

                //Now, test mismatched dtypes for input/labels:
                for (DataType inputLabelDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
                    System.out.println(msg + " - " + inputLabelDtype);
                    INDArray in2 = in.castTo(inputLabelDtype);
                    INDArray label2 = label.castTo(inputLabelDtype);
                    net.output(in2);
                    net.setInput(in2);
                    net.setLabels(label2);
                    net.computeGradientAndScore();

                    net.fit(new DataSet(in2, label2));
                }
            }
        }
    }
}
 
Example 14
Source File: Shape.java    From deeplearning4j with Apache License 2.0
public static boolean isR(@NonNull DataType x) {
    return x == DataType.FLOAT || x == DataType.HALF || x == DataType.DOUBLE || x == DataType.BFLOAT16;
}
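
That is, isR answers "is this a real-valued (floating-point) type?", and half precision counts. A quick check:

boolean halfIsReal = Shape.isR(DataType.HALF);   // true
boolean intIsReal = Shape.isR(DataType.INT);     // false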
 
Example 15
Source File: TestDataTypes.java    From deeplearning4j with Apache License 2.0
@Test
public void testDataTypesSimple() throws Exception {

    Map<DataType, INDArray> outMapTrain = new HashMap<>();
    Map<DataType, INDArray> outMapTest = new HashMap<>();
    for(DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for(DataType netDType : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            log.info("Starting test: global dtype = {}, net dtype = {}", globalDtype, netDType);
            assertEquals(globalDtype, Nd4j.dataType());
            assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .updater(new Sgd(1e-2))
                    .dataType(netDType)
                    .convolutionMode(ConvolutionMode.Same)
                    .activation(Activation.TANH)
                    .seed(12345)
                    .weightInit(WeightInit.XAVIER)
                    .list()
                    .layer(new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0).nOut(3).build())
                    .layer(new SubsamplingLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0).build())
                    .layer(new BatchNormalization.Builder().eps(1e-3).build())
                    .layer(new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0).nOut(3).build())
                    .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
                    .setInputType(InputType.convolutionalFlat(28, 28, 1))
                    .build();

            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();


            Field f1 = org.deeplearning4j.nn.layers.convolution.ConvolutionLayer.class.getDeclaredField("helper");
            f1.setAccessible(true);

            Field f2 = org.deeplearning4j.nn.layers.convolution.subsampling.SubsamplingLayer.class.getDeclaredField("helper");
            f2.setAccessible(true);

            Field f3 = org.deeplearning4j.nn.layers.normalization.BatchNormalization.class.getDeclaredField("helper");
            f3.setAccessible(true);

            assertNotNull(f1.get(net.getLayer(0)));
            assertNotNull(f2.get(net.getLayer(1)));
            assertNotNull(f3.get(net.getLayer(2)));
            assertNotNull(f1.get(net.getLayer(3)));

            DataSet ds = new MnistDataSetIterator(32, true, 12345).next();

            //Simple sanity checks:
            //System.out.println("STARTING FIT");
            net.fit(ds);
            net.fit(ds);

            //System.out.println("STARTING OUTPUT");
            INDArray outTrain = net.output(ds.getFeatures(), false);
            INDArray outTest = net.output(ds.getFeatures(), true);

            outMapTrain.put(netDType, outTrain.castTo(DataType.DOUBLE));
            outMapTest.put(netDType, outTest.castTo(DataType.DOUBLE));
        }
    }

    Nd4j.setDataType(DataType.DOUBLE);
    INDArray fp64Train = outMapTrain.get(DataType.DOUBLE);
    INDArray fp32Train = outMapTrain.get(DataType.FLOAT).castTo(DataType.DOUBLE);
    INDArray fp16Train = outMapTrain.get(DataType.HALF).castTo(DataType.DOUBLE);

    boolean eq64_32 = fp64Train.equalsWithEps(fp32Train, 1e-3);
    boolean eq64_16 = fp64Train.equalsWithEps(fp16Train, 1e-2);

    if(!eq64_32){
        System.out.println("FP64/32");
        System.out.println("fp64Train:\n" + fp64Train);
        System.out.println("fp32Train:\n" + fp32Train);
    }

    if(!eq64_16){
        System.out.println("FP64/16");
        System.out.println("fp64Train:\n" + fp64Train);
        System.out.println("fp16Train:\n" + fp16Train);
    }

    assertTrue(eq64_32);
    assertTrue(eq64_16);
}
 
Example 16
Source File: DTypeTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testComputationGraphTypeConversion() {

    for (DataType dt : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(dt, dt);

        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .weightInit(WeightInit.XAVIER)
                .updater(new Adam(0.01))
                .dataType(DataType.DOUBLE)
                .graphBuilder()
                .addInputs("in")
                .layer("l0", new DenseLayer.Builder().activation(Activation.TANH).nIn(10).nOut(10).build(), "in")
                .layer("l1", new DenseLayer.Builder().activation(Activation.TANH).nIn(10).nOut(10).build(), "l0")
                .layer("out", new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build(), "l1")
                .setOutputs("out")
                .build();

        ComputationGraph net = new ComputationGraph(conf);
        net.init();

        INDArray inD = Nd4j.rand(DataType.DOUBLE, 1, 10);
        INDArray lD = Nd4j.create(DataType.DOUBLE, 1, 10);
        net.fit(new DataSet(inD, lD));

        INDArray outDouble = net.outputSingle(inD);
        net.setInput(0, inD);
        net.setLabels(lD);
        net.computeGradientAndScore();
        double scoreDouble = net.score();
        INDArray grads = net.getFlattenedGradients();
        INDArray u = net.getUpdater().getStateViewArray();
        assertEquals(DataType.DOUBLE, net.params().dataType());
        assertEquals(DataType.DOUBLE, grads.dataType());
        assertEquals(DataType.DOUBLE, u.dataType());


        ComputationGraph netFloat = net.convertDataType(DataType.FLOAT);
        netFloat.initGradientsView();
        assertEquals(DataType.FLOAT, netFloat.params().dataType());
        assertEquals(DataType.FLOAT, netFloat.getFlattenedGradients().dataType());
        assertEquals(DataType.FLOAT, netFloat.getUpdater(true).getStateViewArray().dataType());
        INDArray inF = inD.castTo(DataType.FLOAT);
        INDArray lF = lD.castTo(DataType.FLOAT);
        INDArray outFloat = netFloat.outputSingle(inF);
        netFloat.setInput(0, inF);
        netFloat.setLabels(lF);
        netFloat.computeGradientAndScore();
        double scoreFloat = netFloat.score();
        INDArray gradsFloat = netFloat.getFlattenedGradients();
        INDArray uFloat = netFloat.getUpdater().getStateViewArray();

        assertEquals(scoreDouble, scoreFloat, 1e-6);
        assertEquals(outDouble.castTo(DataType.FLOAT), outFloat);
        assertEquals(grads.castTo(DataType.FLOAT), gradsFloat);
        INDArray uCast = u.castTo(DataType.FLOAT);
        assertTrue(uCast.equalsWithEps(uFloat, 1e-4));

        ComputationGraph netFP16 = net.convertDataType(DataType.HALF);
        netFP16.initGradientsView();
        assertEquals(DataType.HALF, netFP16.params().dataType());
        assertEquals(DataType.HALF, netFP16.getFlattenedGradients().dataType());
        assertEquals(DataType.HALF, netFP16.getUpdater(true).getStateViewArray().dataType());

        INDArray inH = inD.castTo(DataType.HALF);
        INDArray lH = lD.castTo(DataType.HALF);
        INDArray outHalf = netFP16.outputSingle(inH);
        netFP16.setInput(0, inH);
        netFP16.setLabels(lH);
        netFP16.computeGradientAndScore();
        double scoreHalf = netFP16.score();
        INDArray gradsHalf = netFP16.getFlattenedGradients();
        INDArray uHalf = netFP16.getUpdater().getStateViewArray();

        assertEquals(scoreDouble, scoreHalf, 1e-4);
        boolean outHalfEq = outDouble.castTo(DataType.HALF).equalsWithEps(outHalf, 1e-3);
        assertTrue(outHalfEq);
        boolean gradsHalfEq = grads.castTo(DataType.HALF).equalsWithEps(gradsHalf, 1e-3);
        assertTrue(gradsHalfEq);
        INDArray uHalfCast = u.castTo(DataType.HALF);
        assertTrue(uHalfCast.equalsWithEps(uHalf, 1e-4));
    }
}
 
Example 17
Source File: BaseCudaDataBuffer.java    From deeplearning4j with Apache License 2.0
public void actualizePointerAndIndexer() {
    val cptr = ptrDataBuffer.primaryBuffer();

    // skip update if pointers are equal
    if (cptr != null && pointer != null && cptr.address() == pointer.address())
        return;

    val t = dataType();
    if (t == DataType.BOOL) {
        pointer = new PagedPointer(cptr, length).asBoolPointer();
        setIndexer(BooleanIndexer.create((BooleanPointer) pointer));
    } else if (t == DataType.UBYTE) {
        pointer = new PagedPointer(cptr, length).asBytePointer();
        setIndexer(UByteIndexer.create((BytePointer) pointer));
    } else if (t == DataType.BYTE) {
        pointer = new PagedPointer(cptr, length).asBytePointer();
        setIndexer(ByteIndexer.create((BytePointer) pointer));
    } else if (t == DataType.UINT16) {
        pointer = new PagedPointer(cptr, length).asShortPointer();
        setIndexer(UShortIndexer.create((ShortPointer) pointer));
    } else if (t == DataType.SHORT) {
        pointer = new PagedPointer(cptr, length).asShortPointer();
        setIndexer(ShortIndexer.create((ShortPointer) pointer));
    } else if (t == DataType.UINT32) {
        pointer = new PagedPointer(cptr, length).asIntPointer();
        setIndexer(UIntIndexer.create((IntPointer) pointer));
    } else if (t == DataType.INT) {
        pointer = new PagedPointer(cptr, length).asIntPointer();
        setIndexer(IntIndexer.create((IntPointer) pointer));
    } else if (t == DataType.UINT64) {
        pointer = new PagedPointer(cptr, length).asLongPointer();
        setIndexer(LongIndexer.create((LongPointer) pointer));
    } else if (t == DataType.LONG) {
        pointer = new PagedPointer(cptr, length).asLongPointer();
        setIndexer(LongIndexer.create((LongPointer) pointer));
    } else if (t == DataType.BFLOAT16) {
        pointer = new PagedPointer(cptr, length).asShortPointer();
        setIndexer(Bfloat16Indexer.create((ShortPointer) pointer));
    } else if (t == DataType.HALF) {
        pointer = new PagedPointer(cptr, length).asShortPointer();
        setIndexer(HalfIndexer.create((ShortPointer) pointer));
    } else if (t == DataType.FLOAT) {
        pointer = new PagedPointer(cptr, length).asFloatPointer();
        setIndexer(FloatIndexer.create((FloatPointer) pointer));
    } else if (t == DataType.DOUBLE) {
        pointer = new PagedPointer(cptr, length).asDoublePointer();
        setIndexer(DoubleIndexer.create((DoublePointer) pointer));
    } else if (t == DataType.UTF8) {
        pointer = new PagedPointer(cptr, length()).asBytePointer();
        setIndexer(ByteIndexer.create((BytePointer) pointer));
    } else
        throw new IllegalArgumentException("Unknown datatype: " + dataType());
}
 
Example 18
Source File: CudaHalfDataBuffer.java    From deeplearning4j with Apache License 2.0
@Override
public DataType dataType() {
    return DataType.HALF;
}
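
On the CUDA backend, any INDArray created with DataType.HALF is backed by this buffer class. A quick way to confirm the buffer type, assuming the nd4j-cuda backend is on the classpath:

INDArray arr = Nd4j.create(DataType.HALF, 3);
System.out.println(arr.data().dataType());   // HALF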
 
Example 19
Source File: LocallyConnectedLayerTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testLocallyConnected(){
    for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
        for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            assertEquals(globalDtype, Nd4j.dataType());
            assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

            for (int test = 0; test < 2; test++) {
                String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test;

                ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder()
                        .dataType(networkDtype)
                        .seed(123)
                        .updater(new NoOp())
                        .weightInit(WeightInit.XAVIER)
                        .convolutionMode(ConvolutionMode.Same)
                        .graphBuilder();

                INDArray[] in;
                INDArray label;
                switch (test){
                    case 0:
                        b.addInputs("in")
                                .addLayer("1", new LSTM.Builder().nOut(5).build(), "in")
                                .addLayer("2", new LocallyConnected1D.Builder().kernelSize(2).nOut(4).build(), "1")
                                .addLayer("out", new RnnOutputLayer.Builder().nOut(10).build(), "2")
                                .setOutputs("out")
                                .setInputTypes(InputType.recurrent(5, 4));
                        in = new INDArray[]{Nd4j.rand(networkDtype, 2, 5, 4)};
                        label = TestUtils.randomOneHotTimeSeries(2, 10, 4).castTo(networkDtype);
                        break;
                    case 1:
                        b.addInputs("in")
                                .addLayer("1", new ConvolutionLayer.Builder().kernelSize(2,2).nOut(5).convolutionMode(ConvolutionMode.Same).build(), "in")
                                .addLayer("2", new LocallyConnected2D.Builder().kernelSize(2,2).nOut(5).build(), "1")
                                .addLayer("out", new OutputLayer.Builder().nOut(10).build(), "2")
                                .setOutputs("out")
                                //.setInputTypes(InputType.convolutional(28, 28, 1));
                                //in = new INDArray[]{Nd4j.rand(networkDtype, 2, 1, 28, 28)};
                                .setInputTypes(InputType.convolutional(8, 8, 1));
                        in = new INDArray[]{Nd4j.rand(networkDtype, 2, 1, 8, 8)};
                        label = TestUtils.randomOneHot(2, 10).castTo(networkDtype);
                        break;
                    default:
                        throw new RuntimeException();
                }

                ComputationGraph net = new ComputationGraph(b.build());
                net.init();

                INDArray out = net.outputSingle(in);
                assertEquals(msg, networkDtype, out.dataType());
                Map<String, INDArray> ff = net.feedForward(in, false);
                for (Map.Entry<String, INDArray> e : ff.entrySet()) {
                    if (e.getKey().equals("in"))
                        continue;
                    String s = msg + " - layer: " + e.getKey();
                    assertEquals(s, networkDtype, e.getValue().dataType());
                }

                net.setInputs(in);
                net.setLabels(label);
                net.computeGradientAndScore();

                net.fit(new MultiDataSet(in, new INDArray[]{label}));
            }
        }
    }
}
 
Example 20
Source File: LossFunctionTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testWeightedLossFunctionDTypes(){

    for(DataType activationsDt : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}){
        for(DataType weightsDt : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}){
            for( boolean rank1W : new boolean[]{false, true}) {

                INDArray preOut = Nd4j.rand(activationsDt, 2, 3);
                INDArray l = Nd4j.rand(activationsDt, 2, 3);

                INDArray w = Nd4j.createFromArray(1.0f, 2.0f, 3.0f).castTo(weightsDt);
                if(!rank1W){
                    w = w.reshape(1, 3);
                }

                ILossFunction lf = null;
                for (int i = 0; i < 10; i++) {
                    switch (i) {
                        case 0:
                            lf = new LossBinaryXENT(w);
                            break;
                        case 1:
                            lf = new LossL1(w);
                            break;
                        case 2:
                            lf = new LossL2(w);
                            break;
                        case 3:
                            lf = new LossMAE(w);
                            break;
                        case 4:
                            lf = new LossMAPE(w);
                            break;
                        case 5:
                            lf = new LossMCXENT(w);
                            break;
                        case 6:
                            lf = new LossMSE(w);
                            break;
                        case 7:
                            lf = new LossMSLE(w);
                            break;
                        case 8:
                            lf = new LossNegativeLogLikelihood(w);
                            break;
                        case 9:
                            lf = new LossSparseMCXENT(w);
                            l = Nd4j.createFromArray(1,2).reshape(2, 1).castTo(activationsDt);
                            break;
                        default:
                            throw new RuntimeException();
                    }

                    //Check score
                    lf.computeScore(l, preOut, new ActivationSoftmax(), null, true);

                    //Check backward
                    lf.computeGradient(l, preOut, new ActivationSoftmax(), null);
                }
            }
        }
    }

}