org.nd4j.linalg.api.memory.MemoryWorkspace Java Examples

The following examples show how to use org.nd4j.linalg.api.memory.MemoryWorkspace. Each example is taken from an open-source project; the source file, project, and license are listed above each snippet.
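All of the examples below follow the same scoping pattern: a MemoryWorkspace is obtained from Nd4j.getWorkspaceManager() (or from Nd4j.getMemoryManager() for out-of-workspace scopes), activated with try-with-resources, and every INDArray created inside the scope is allocated from workspace memory (it is "attached"). Arrays that must outlive the scope are detach()-ed or leveraged to an outer workspace. The snippet below is a minimal sketch of that pattern, not taken from any of the listed projects; the workspace id "MY_WS" and the array shapes are illustrative only.

WorkspaceConfiguration config = WorkspaceConfiguration.builder()
        .initialSize(10 * 1024L * 1024L)               // reserve 10 MB up front
        .policyAllocation(AllocationPolicy.STRICT)
        .policyLearning(LearningPolicy.NONE)
        .build();

try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(config, "MY_WS")) {
    INDArray scoped = Nd4j.create(DataType.FLOAT, 100);   // allocated inside the workspace: scoped.isAttached() == true
    INDArray kept = scoped.sum(0).detach();                // detach anything that must survive the scope
}

// Allocations that must never land in a workspace can be wrapped like this:
try (MemoryWorkspace scope = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) {
    INDArray plain = Nd4j.create(DataType.FLOAT, 10);
}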
Example #1
Source File: WorkspaceProviderTests.java    From nd4j with Apache License 2.0
@Test
public void testReallocate2() throws Exception {
    MemoryWorkspace workspace =
                    Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(reallocateDelayedConfiguration, "WS_1");

    for (int i = 1; i <= 10; i++) {
        try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(reallocateDelayedConfiguration,
                        "WS_1")) {
            INDArray array = Nd4j.create(100 * i);
        }

        if (i >= 3)
            assertEquals("Failed on iteration " + i, 100 * i * Nd4j.sizeOfDataType(), workspace.getCurrentSize());
        else
            assertEquals(0, workspace.getCurrentSize());
    }
}
 
Example #2
Source File: Nd4jMatrix.java    From jstarcraft-ai with Apache License 2.0
@Override
@Deprecated
// TODO: to be integrated with dotProduct
public MathMatrix accumulateProduct(MathMatrix leftMatrix, boolean leftTranspose, MathMatrix rightMatrix, boolean rightTranspose, MathCalculator mode) {
    if (leftMatrix instanceof Nd4jMatrix && rightMatrix instanceof Nd4jMatrix) {
        Nd4jEnvironmentThread thread = EnvironmentThread.getThread(Nd4jEnvironmentThread.class);
        try (MemoryWorkspace workspace = thread.getSpace()) {
            INDArray leftArray = leftTranspose ? Nd4jMatrix.class.cast(leftMatrix).getArray().transpose() : Nd4jMatrix.class.cast(leftMatrix).getArray();
            INDArray rightArray = rightTranspose ? Nd4jMatrix.class.cast(rightMatrix).getArray().transpose() : Nd4jMatrix.class.cast(rightMatrix).getArray();
            INDArray dataArray = this.getArray();
            INDArray cacheArray = Nd4j.zeros(dataArray.shape(), dataArray.ordering());
            leftArray.mmul(rightArray, cacheArray);
            dataArray.addi(cacheArray);
            return this;
        }
    } else {
        return MathMatrix.super.accumulateProduct(leftMatrix, leftTranspose, rightMatrix, rightTranspose, mode);
    }
}
 
Example #3
Source File: SpecialWorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testAlignment_1() {
    WorkspaceConfiguration initialConfig = WorkspaceConfiguration.builder().initialSize(10 * 1024L * 1024L)
            .policyAllocation(AllocationPolicy.STRICT).policyLearning(LearningPolicy.NONE).build();
    MemoryWorkspace workspace = Nd4j.getWorkspaceManager().getAndActivateWorkspace(initialConfig, "WS132143452343");

    for( int j=0; j<100; j++ ){

        try(MemoryWorkspace ws = workspace.notifyScopeEntered()) {

            for (int x = 0; x < 10; x++) {
                //System.out.println("Start iteration (" + j + "," + x + ")");
                INDArray arr = Nd4j.linspace(1,10,10, DataType.DOUBLE).reshape(1,10);
                INDArray sum = arr.sum(true, 1);
                Nd4j.create(DataType.BOOL, x+1);        //NOTE: no crash if set to FLOAT/HALF, No crash if removed entirely; same crash for BOOL/UBYTE
                //System.out.println("End iteration (" + j + "," + x + ")");
            }
        }
    }
}
 
Example #4
Source File: FloatDataBufferTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testReallocationWorkspace() {
    WorkspaceConfiguration initialConfig = WorkspaceConfiguration.builder().initialSize(10 * 1024L * 1024L)
                    .policyAllocation(AllocationPolicy.STRICT).policyLearning(LearningPolicy.NONE).build();
    MemoryWorkspace workspace = Nd4j.getWorkspaceManager().getAndActivateWorkspace(initialConfig, "SOME_ID");

    DataBuffer buffer = Nd4j.createBuffer(new float[] {1, 2, 3, 4});
    assertTrue(buffer.isAttached());
    float[] old = buffer.asFloat();
    assertEquals(4, buffer.capacity());
    buffer.reallocate(6);
    assertEquals(6, buffer.capacity());
    float[] newBuf = buffer.asFloat();
    assertArrayEquals(old, newBuf, 1e-4F);
    workspace.close();
}
 
Example #5
Source File: BaseOptimizer.java    From deeplearning4j with Apache License 2.0
@Override
public void updateGradientAccordingToParams(Gradient gradient, Model model, int batchSize, LayerWorkspaceMgr workspaceMgr) {
    if (model instanceof ComputationGraph) {
        ComputationGraph graph = (ComputationGraph) model;
        if (computationGraphUpdater == null) {
            try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) {
                computationGraphUpdater = new ComputationGraphUpdater(graph);
            }
        }
        computationGraphUpdater.update(gradient, getIterationCount(model), getEpochCount(model), batchSize, workspaceMgr);
    } else {
        if (updater == null) {
            try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) {
                updater = UpdaterCreator.getUpdater(model);
            }
        }
        Layer layer = (Layer) model;

        updater.update(layer, gradient, getIterationCount(model), getEpochCount(model), batchSize, workspaceMgr);
    }
}
 
Example #6
Source File: BaseLayer.java    From deeplearning4j with Apache License 2.0
/**
 * Get the parameter, after applying any weight noise (such as DropConnect) if necessary.
 * Note that during training, this will store the post-noise parameters, as these should be used
 * for both forward pass and backprop, for a single iteration.
 * Consequently, the parameters (post noise) should be cleared after each training iteration.
 *
 * @param param    Parameter key
 * @param training If true: during training
 * @return The parameter, after applying any noise
 */
protected INDArray getParamWithNoise(String param, boolean training, LayerWorkspaceMgr workspaceMgr){
    INDArray p;
    if(layerConf().getWeightNoise() != null){
        if(training && weightNoiseParams.size() > 0 && weightNoiseParams.containsKey(param) ){
            //Re-use these weights for both forward pass and backprop - don't want to use 2 different params here
            //These should be cleared during  backprop
            return weightNoiseParams.get(param);
        } else {
            try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) {
                p = layerConf().getWeightNoise().getParameter(this, param, getIterationCount(), getEpochCount(), training, workspaceMgr);
            }
        }

        if(training){
            //Store for re-use in backprop
            weightNoiseParams.put(param, p);
        }
    } else {
        return getParam(param);
    }

    return p;
}
 
Example #7
Source File: WorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testSimpleOutputWorkspace() {
    final MemoryWorkspace workspace = Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread("ExternalTestWorkspace");

    final INDArray input = Nd4j.rand(1, 30);

    final ComputationGraphConfiguration computationGraphConfiguration = new NeuralNetConfiguration.Builder()
            .graphBuilder()
            .addInputs("state")
            .addLayer("value_output", new OutputLayer.Builder().nIn(30).nOut(1).activation(Activation.IDENTITY)
                    .lossFunction(LossFunctions.LossFunction.MSE).build(), "state")
            .setOutputs("value_output")
            .build();

    final ComputationGraph computationGraph = new ComputationGraph(computationGraphConfiguration);
    computationGraph.init();

    try (final MemoryWorkspace ws = workspace.notifyScopeEntered()) {
        computationGraph.output(false, ws, input);
    }
}
 
Example #8
Source File: BasicWorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
@Ignore
public void testMmap2() throws Exception {
    // we don't support MMAP on cuda yet
    if (Nd4j.getExecutioner().getClass().getName().toLowerCase().contains("cuda"))
        return;

    File tmp = File.createTempFile("tmp", "fdsfdf");
    tmp.deleteOnExit();
    Nd4jWorkspace.fillFile(tmp, 100000);

    WorkspaceConfiguration mmap = WorkspaceConfiguration.builder()
            .policyLocation(LocationPolicy.MMAP)
            .tempFilePath(tmp.getAbsolutePath())
            .build();

    MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(mmap, "M3");

    INDArray mArray = Nd4j.create(DOUBLE, 100);
    mArray.assign(10f);

    assertEquals(1000f, mArray.sumNumber().floatValue(), 1e-5);

    ws.notifyScopeLeft();
}
 
Example #9
Source File: Nd4jVector.java    From jstarcraft-ai with Apache License 2.0
@Override
@Deprecated
// TODO: to be integrated with dotProduct
public MathVector accumulateProduct(MathVector leftVector, MathMatrix rightMatrix, boolean transpose, MathCalculator mode) {
    if (leftVector instanceof Nd4jVector && rightMatrix instanceof Nd4jMatrix) {
        Nd4jEnvironmentThread thread = EnvironmentThread.getThread(Nd4jEnvironmentThread.class);
        try (MemoryWorkspace workspace = thread.getSpace()) {
            INDArray leftArray = Nd4jVector.class.cast(leftVector).getArray();
            if (leftArray.isView()) {
// Copy here because gemm does not support view vectors.
                leftArray = leftArray.dup();
            }
            if (leftArray.columns() == 1) {
                leftArray = leftArray.transpose();
            }
            INDArray rightArray = transpose ? Nd4jMatrix.class.cast(rightMatrix).getArray().transpose() : Nd4jMatrix.class.cast(rightMatrix).getArray();
            INDArray dataArray = this.getArray();
            INDArray cacheArray = Nd4j.zeros(dataArray.shape(), dataArray.ordering());
            leftArray.mmul(rightArray, cacheArray);
            dataArray.addi(cacheArray);
            // Nd4j.getBlasWrapper().level3().gemm(leftArray, rightArray, dataArray, false,
            // false, one, zero);
            return this;
        }
    } else {
        return MathVector.super.accumulateProduct(leftVector, rightMatrix, transpose, mode);
    }
}
 
Example #10
Source File: Nd4jMatrix.java    From jstarcraft-ai with Apache License 2.0
@Override
public MathMatrix addColumnVector(MathVector vector) {
    if (vector instanceof Nd4jVector) {
        Nd4jEnvironmentThread thread = EnvironmentThread.getThread(Nd4jEnvironmentThread.class);
        try (MemoryWorkspace workspace = thread.getSpace()) {
            INDArray thisArray = this.getArray();
            INDArray thatArray = Nd4jVector.class.cast(vector).getArray();
            thisArray.addiColumnVector(thatArray);
            return this;
        }
    } else {
        return MathMatrix.super.addColumnVector(vector);
    }
}
 
Example #11
Source File: Nd4jMatrix.java    From jstarcraft-ai with Apache License 2.0
@Override
public MathMatrix multiplyMatrix(MathMatrix matrix, boolean transpose) {
    if (matrix instanceof Nd4jMatrix) {
        Nd4jEnvironmentThread thread = EnvironmentThread.getThread(Nd4jEnvironmentThread.class);
        try (MemoryWorkspace workspace = thread.getSpace()) {
            INDArray thisArray = this.getArray();
            INDArray thatArray = Nd4jMatrix.class.cast(matrix).getArray();
            thisArray.muli(transpose ? thatArray.transposei() : thatArray);
            return this;
        }
    } else {
        return MathMatrix.super.multiplyMatrix(matrix, transpose);
    }
}
 
Example #12
Source File: BasicWorkspaceTests.java    From nd4j with Apache License 2.0
@Test
public void testOutOfScope1() throws Exception {
    try (Nd4jWorkspace wsOne =
                    (Nd4jWorkspace) Nd4j.getWorkspaceManager().getAndActivateWorkspace(basicConfig, "EXT")) {
        INDArray array1 = Nd4j.create(new float[] {1f, 2f, 3f, 4f, 5f});

        long reqMemory = 5 * Nd4j.sizeOfDataType();
        assertEquals(reqMemory + reqMemory % 8, wsOne.getHostOffset());

        INDArray array2;

        try (MemoryWorkspace workspace = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) {
            array2 = Nd4j.create(new float[] {1f, 2f, 3f, 4f, 5f});
        }
        assertFalse(array2.isAttached());

        log.info("Current workspace: {}", Nd4j.getMemoryManager().getCurrentWorkspace());
        assertTrue(wsOne == Nd4j.getMemoryManager().getCurrentWorkspace());

        INDArray array3 = Nd4j.create(new float[] {1f, 2f, 3f, 4f, 5f});

        reqMemory = 5 * Nd4j.sizeOfDataType();
        assertEquals((reqMemory + reqMemory % 8) * 2, wsOne.getHostOffset());

        array1.addi(array2);

        assertEquals(30.0f, array1.sumNumber().floatValue(), 0.01f);
    }
}
 
Example #13
Source File: LocallyConnected2D.java    From deeplearning4j with Apache License 2.0
@Override
public void initializeParameters(Map<String, INDArray> params) {
    try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        for (Map.Entry<String, INDArray> e : params.entrySet()) {
            if (ConvolutionParamInitializer.BIAS_KEY.equals(e.getKey())) {
                e.getValue().assign(0);
            } else {
                double fanIn = nIn * kernel[0] * kernel[1];
                double fanOut = nOut * kernel[0] * kernel[1] / ((double) stride[0] * stride[1]);
                WeightInitUtil.initWeights(fanIn, fanOut, e.getValue().shape(), weightInit, null, 'c',
                                e.getValue());
            }
        }
    }
}
 
Example #14
Source File: VariationalAutoencoder.java    From deeplearning4j with Apache License 2.0
@Override
public void fit() {
    if (input == null) {
        throw new IllegalStateException("Cannot fit layer: layer input is null (not set) " + layerId());
    }

    if (solver == null) {
        try (MemoryWorkspace workspace = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) {
            solver = new Solver.Builder().model(this).configure(conf()).listeners(getListeners()).build();
        }
    }
    this.optimizer = solver.getOptimizer();
    solver.optimize(LayerWorkspaceMgr.noWorkspaces());      //TODO FIXME
}
 
Example #15
Source File: ShiftVertex.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoForward())
        throw new IllegalStateException("Cannot do forward pass: inputs not set (ShiftVertex " + vertexName
                        + " idx " + vertexIndex + ")");

    if (inputs.length > 1)
        throw new IllegalArgumentException(
                        "ShiftVertex (name " + vertexName + " idx " + vertexIndex + ") only supports 1 input.");

    try(MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATIONS)){
        return inputs[0].add(shiftFactor);
    }
}
 
Example #16
Source File: BaseScalarOp.java    From deeplearning4j with Apache License 2.0
public BaseScalarOp(INDArray x, INDArray z, Number set) {
    super(x, null, z);
    if (x.isCompressed())
        Nd4j.getCompressor().decompressi(x);

    try(MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) {
        this.scalarValue = Nd4j.scalar(x.dataType(), set);
    }
}
 
Example #17
Source File: BasicWorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testDtypeLeverage(){

    for(DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        for (DataType arrayDType : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            Nd4j.setDefaultDataTypes(globalDtype, globalDtype);

            WorkspaceConfiguration configOuter = WorkspaceConfiguration.builder().initialSize(10 * 1024L * 1024L)
                    .policyAllocation(AllocationPolicy.OVERALLOCATE).policyLearning(LearningPolicy.NONE).build();
            WorkspaceConfiguration configInner = WorkspaceConfiguration.builder().initialSize(10 * 1024L * 1024L)
                    .policyAllocation(AllocationPolicy.OVERALLOCATE).policyLearning(LearningPolicy.NONE).build();

            try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(configOuter, "ws")) {
                INDArray arr = Nd4j.create(arrayDType, 3, 4);
                try (MemoryWorkspace wsInner = Nd4j.getWorkspaceManager().getAndActivateWorkspace(configOuter, "wsInner")) {
                    INDArray leveraged = arr.leverageTo("ws");
                    assertTrue(leveraged.isAttached());
                    assertEquals(arrayDType, leveraged.dataType());

                    INDArray detached = leveraged.detach();
                    assertFalse(detached.isAttached());
                    assertEquals(arrayDType, detached.dataType());
                }
            }
        }
    }
    Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread();
}
 
Example #18
Source File: BaseCudaDataBuffer.java    From nd4j with Apache License 2.0
public BaseCudaDataBuffer(int[] data, boolean copy, long offset, MemoryWorkspace workspace) {
    this(data.length, 4, false, workspace);
    this.offset = offset;
    this.originalOffset = offset;
    this.length = data.length - offset;
    this.underlyingLength = data.length;
    set(data, this.length, offset, offset);
}
 
Example #19
Source File: WorkspaceProviderTests.java    From nd4j with Apache License 2.0
@Test
public void testMultithreading1() throws Exception {
    final List<MemoryWorkspace> workspaces = new CopyOnWriteArrayList<>();
    Nd4j.getWorkspaceManager().setDefaultWorkspaceConfiguration(basicConfiguration);

    Thread[] threads = new Thread[20];
    for (int x = 0; x < threads.length; x++) {
        threads[x] = new Thread(new Runnable() {
            @Override
            public void run() {
                MemoryWorkspace workspace = Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread();
                workspaces.add(workspace);
            }
        });

        threads[x].start();
    }

    for (int x = 0; x < threads.length; x++) {
        threads[x].join();
    }

    for (int x = 0; x < threads.length; x++) {
        for (int y = 0; y < threads.length; y++) {
            if (x == y)
                continue;

            assertFalse(workspaces.get(x) == workspaces.get(y));
        }
    }

    assertNull(Nd4j.getMemoryManager().getCurrentWorkspace());
}
 
Example #20
Source File: WorkspaceUtils.java    From nd4j with Apache License 2.0
/**
 * Assert that no workspaces are currently open
 *
 * @param msg Message to include in the exception, if required
 */
public static void assertNoWorkspacesOpen(String msg) throws ND4JWorkspaceException {
    if (Nd4j.getWorkspaceManager().anyWorkspaceActiveForCurrentThread()) {
        List<MemoryWorkspace> l = Nd4j.getWorkspaceManager().getAllWorkspacesForCurrentThread();
        List<String> workspaces = new ArrayList<>(l.size());
        for (MemoryWorkspace ws : l) {
            if(ws.isScopeActive()) {
                workspaces.add(ws.getId());
            }
        }
        throw new ND4JWorkspaceException(msg + " - Open/active workspaces: " + workspaces);
    }
}
 
Example #21
Source File: CudaWorkspaceManager.java    From nd4j with Apache License 2.0
@Override
public MemoryWorkspace createNewWorkspace() {
    ensureThreadExistense();

    MemoryWorkspace workspace = new CudaWorkspace(defaultConfiguration);

    backingMap.get().put(workspace.getId(), workspace);
    pickReference(workspace);

    return workspace;
}
 
Example #22
Source File: CpuWorkspaceManager.java    From nd4j with Apache License 2.0
@Override
public MemoryWorkspace createNewWorkspace(@NonNull WorkspaceConfiguration configuration, @NonNull String id) {
    ensureThreadExistense();

    MemoryWorkspace workspace = new CpuWorkspace(configuration, id);

    backingMap.get().put(id, workspace);
    pickReference(workspace);

    return workspace;
}
 
Example #23
Source File: MultiLayerNetwork.java    From deeplearning4j with Apache License 2.0
private Pair<Gradient,INDArray> calculateGradientsHelper(INDArray features, INDArray label, INDArray fMask,
                                                         INDArray labelMask){
    setInput(features);
    setLabels(label);
    setLayerMaskArrays(fMask, labelMask);

    LayerWorkspaceMgr mgr;
    if(layerWiseConfigurations.getTrainingWorkspaceMode() == WorkspaceMode.NONE){
        mgr = LayerWorkspaceMgr.noWorkspaces();
    } else {
        mgr = LayerWorkspaceMgr.builder()
                .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG)
                .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG)
                .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG)
                .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG)
                .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG)
                .with(ArrayType.RNN_BP_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG)
                .build();

        if(layerWiseConfigurations.getCacheMode() != null){
            //For now: store cache mode activations in activations workspace
            mgr.setWorkspace(ArrayType.FF_CACHE, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG);
        }
    }
    mgr.setHelperWorkspacePointers(helperWorkspaces);

    //Calculate activations (which are stored in each layer, and used in backprop)
    try(MemoryWorkspace ws = mgr.notifyScopeEntered(ArrayType.ACTIVATIONS)) {
        //First: do a feed-forward through the network
        //Note that we don't actually need to do the full forward pass through the output layer right now; but we do
        // need the input to the output layer to be set (such that backprop can be done)
        List<INDArray> activations = ffToLayerActivationsInWs(layers.length - 2, FwdPassType.STANDARD, false, input, mask, fMask);
        if (!trainingListeners.isEmpty()) {
            //TODO: We possibly do want output layer activations in some cases here...
            for (TrainingListener tl : trainingListeners) {
                tl.onForwardPass(this, activations);
            }
        }
        INDArray inputToOutputLayer = activations.get(activations.size() - 1);
        if (layerWiseConfigurations.getInputPreProcess(layers.length - 1) != null) {
            inputToOutputLayer = layerWiseConfigurations.getInputPreProcess(layers.length - 1)
                    .preProcess(inputToOutputLayer, getInputMiniBatchSize(), mgr);
            //Validate activations location
        }
        getOutputLayer().setInput(inputToOutputLayer, mgr);

        Pair<Gradient,INDArray> p = calcBackpropGradients(null, true, false, true);
        if(p.getSecond() != null){
            p.setSecond(p.getSecond().detach());
        }
        return p;
    }
}
 
Example #24
Source File: DefaultDataBufferFactory.java    From nd4j with Apache License 2.0
@Override
public DataBuffer createHalf(long length, boolean initialize, MemoryWorkspace workspace) {
    throw new UnsupportedOperationException("FP16 isn't supported for CPU yet");
}
 
Example #25
Source File: CudaByteDataBuffer.java    From deeplearning4j with Apache License 2.0
public CudaByteDataBuffer(float[] data, boolean copy, long offset, MemoryWorkspace workspace) {
    super(data, copy, offset, workspace);
}
 
Example #26
Source File: BaseCudaDataBuffer.java    From nd4j with Apache License 2.0
public BaseCudaDataBuffer(long length, int elementSize, boolean initialize, @NonNull MemoryWorkspace workspace) {
    this.allocationMode = AllocationMode.LONG_SHAPE;
    initTypeAndSize();

    this.attached = true;
    this.parentWorkspace = workspace;

    this.allocationPoint = AtomicAllocator.getInstance().allocateMemory(this, new AllocationShape(length, this.elementSize, dataType()), initialize);
    this.length = length;
    //allocationPoint.attachBuffer(this);
    //this.elementSize = elementSize;
    this.trackingPoint = allocationPoint.getObjectId();
    this.offset = 0;
    this.originalOffset = 0;


    if (dataType() == Type.DOUBLE) {
        this.attached = true;
        this.parentWorkspace = workspace;

        this.pointer = new CudaPointer(allocationPoint.getPointers().getHostPointer(), length, 0).asDoublePointer();
        indexer = DoubleIndexer.create((DoublePointer) pointer);
    } else if (dataType() == Type.FLOAT) {
        this.attached = true;
        this.parentWorkspace = workspace;

        this.pointer = new CudaPointer(allocationPoint.getPointers().getHostPointer(), length, 0).asFloatPointer();
        indexer = FloatIndexer.create((FloatPointer) pointer);
    } else if (dataType() == Type.INT) {
        this.attached = true;
        this.parentWorkspace = workspace;

        this.pointer = new CudaPointer(allocationPoint.getPointers().getHostPointer(), length, 0).asIntPointer();
        indexer = IntIndexer.create((IntPointer) pointer);
    } else if (dataType() == Type.HALF) {
        this.attached = true;
        this.parentWorkspace = workspace;

        // FIXME: proper pointer and proper indexer should be used here
        this.pointer = new CudaPointer(allocationPoint.getPointers().getHostPointer(), length, 0).asShortPointer();
        indexer = HalfIndexer.create((ShortPointer) pointer);
    } else if (dataType() == Type.LONG) {
        this.attached = true;
        this.parentWorkspace = workspace;

        // FIXME: proper pointer and proper indexer should be used here
        this.pointer = new CudaPointer(allocationPoint.getPointers().getHostPointer(), length, 0).asLongPointer();
        indexer = LongIndexer.create((LongPointer) pointer);
    }

    workspaceGenerationId = workspace.getGenerationId();
}
 
Example #27
Source File: IntBuffer.java    From deeplearning4j with Apache License 2.0
public IntBuffer(int[] ints, boolean copy, MemoryWorkspace workspace) {
    super(ints, copy, workspace);
}
 
Example #28
Source File: CudaUInt64DataBuffer.java    From deeplearning4j with Apache License 2.0
public CudaUInt64DataBuffer(float[] data, boolean copy, MemoryWorkspace workspace) {
    super(data, copy, 0, workspace);
}
 
Example #29
Source File: DefaultDataBufferFactory.java    From nd4j with Apache License 2.0
@Override
public DataBuffer createDouble(double[] data, MemoryWorkspace workspace) {
    return createDouble(data, true, workspace);
}
 
Example #30
Source File: BasicWorkspaceManager.java    From deeplearning4j with Apache License 2.0
@Override
public List<MemoryWorkspace> getAllWorkspacesForCurrentThread() {
    ensureThreadExistense();
    return new ArrayList<>(backingMap.get().values());
}