Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#detach()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#detach(). Each example is taken from an open-source project; the source file and license are listed above the code.
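As a quick orientation before the project examples, here is a minimal, self-contained sketch of the usual pattern (it is not taken from any of the projects below; the workspace id "DEMO_WS" and the configuration values are purely illustrative). detach() returns a copy of a workspace-attached array in ordinary off-workspace memory, so the result can safely outlive the workspace scope.

import org.nd4j.linalg.api.memory.MemoryWorkspace;
import org.nd4j.linalg.api.memory.conf.WorkspaceConfiguration;
import org.nd4j.linalg.api.memory.enums.LearningPolicy;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class DetachSketch {
    public static void main(String[] args) {
        WorkspaceConfiguration conf = WorkspaceConfiguration.builder()
                .initialSize(10 * 1024L * 1024L)
                .policyLearning(LearningPolicy.NONE).build();

        INDArray escaped = null;
        try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(conf, "DEMO_WS")) {
            INDArray inWs = Nd4j.linspace(1, 4, 4);   // allocated while the workspace is active, so it is attached
            escaped = inWs.detach();                  // copy the data out of the workspace into regular memory
        }

        // The workspace memory may be reused once the scope closes, but the detached copy remains valid
        System.out.println(escaped.isAttached());     // false
        System.out.println(escaped);
    }
}
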
Example 1
Source File: BaseWorkspaceMgr.java    From nd4j with Apache License 2.0
@Override
public INDArray leverageTo(@NonNull T arrayType, @NonNull INDArray array) {
    if(array == null || !array.isAttached()){
        return array;
    }
    validateConfig(arrayType);
    enforceExistsAndActive(arrayType);

    if(!DISABLE_LEVERAGE){
        if(scopeOutOfWs.contains(arrayType)){
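            //This array type is configured to live outside of workspaces, so return a detached (workspace-free) copy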
            return array.detach();
        }
        return array.leverageTo(getWorkspaceName(arrayType), true);
    } else {
        if(array.isAttached()){
            if(!array.data().getParentWorkspace().getId().equals(getWorkspaceName(arrayType))){
                throw new IllegalStateException("Array of type " + arrayType + " is leveraged from " + array.data().getParentWorkspace().getId()
                        + " to " + getWorkspaceName(arrayType) + " but WorkspaceMgn.leverageTo() is currently disabled");
            }
        }
        return array;
    }
}
 
Example 2
Source File: BaseWorkspaceMgr.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray leverageTo(@NonNull T arrayType, @NonNull INDArray array) {
    if(array == null || !array.isAttached()){
        return array;
    }
    validateConfig(arrayType);
    enforceExistsAndActive(arrayType);

    if(!DISABLE_LEVERAGE){
        if(scopeOutOfWs.contains(arrayType)){
            return array.detach();
        }
        return array.leverageTo(getWorkspaceName(arrayType), true);
    } else {
        if(array.isAttached()){
            if(!array.data().getParentWorkspace().getId().equals(getWorkspaceName(arrayType))){
                throw new IllegalStateException("Array of type " + arrayType + " is leveraged from " + array.data().getParentWorkspace().getId()
                        + " to " + getWorkspaceName(arrayType) + " but WorkspaceMgn.leverageTo() is currently disabled");
            }
        }
        return array;
    }
}
 
Example 3
Source File: SpecialWorkspaceTests.java    From nd4j with Apache License 2.0
@Test
public void testViewDetach_1() throws Exception {
    WorkspaceConfiguration configuration = WorkspaceConfiguration.builder().initialSize(10000000).overallocationLimit(3.0)
            .policyAllocation(AllocationPolicy.OVERALLOCATE).policySpill(SpillPolicy.REALLOCATE)
            .policyLearning(LearningPolicy.FIRST_LOOP).policyReset(ResetPolicy.BLOCK_LEFT).build();

    Nd4jWorkspace workspace =
            (Nd4jWorkspace) Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(configuration, "WS109");

    INDArray row = Nd4j.linspace(1, 10, 10);
    INDArray exp = Nd4j.create(1, 10).assign(2.0);
    INDArray result = null;
    try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(configuration, "WS109")) {
        INDArray matrix = Nd4j.create(10, 10);
        for (int e = 0; e < matrix.rows(); e++)
            matrix.getRow(e).assign(row);


        INDArray column = matrix.getColumn(1);
        assertTrue(column.isView());
        assertTrue(column.isAttached());
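        //detach() copies the column view out of the workspace into a standalone (non-view) array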
        result = column.detach();
    }

    assertFalse(result.isView());
    assertFalse(result.isAttached());
    assertEquals(exp, result);
}
 
Example 4
Source File: BasicWorkspaceTests.java    From nd4j with Apache License 2.0
@Test
public void testDetach1() throws Exception {
    INDArray array = null;
    INDArray copy = null;
    try (Nd4jWorkspace wsI =
                    (Nd4jWorkspace) Nd4j.getWorkspaceManager().getAndActivateWorkspace(basicConfig, "ITER")) {
        array = Nd4j.create(new float[] {1f, 2f, 3f, 4f, 5f});

        // although this array is allocated while the workspace is active, the workspace is still empty at this point, so the allocation is external
        assertTrue(array.isInScope());
        assertTrue(array.isAttached());
        long reqMemory = 5 * Nd4j.sizeOfDataType();
        assertEquals(reqMemory + reqMemory % 8, wsI.getHostOffset());

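        //detach() returns a copy living outside the workspace; the original array stays attached and the workspace offset is unchanged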
        copy = array.detach();

        assertTrue(array.isInScope());
        assertTrue(array.isAttached());
        assertEquals(reqMemory + reqMemory % 8, wsI.getHostOffset());

        assertFalse(copy.isAttached());
        assertTrue(copy.isInScope());
        assertEquals(reqMemory + reqMemory % 8, wsI.getHostOffset());
    }

    assertEquals(15.0f, copy.sumNumber().floatValue(), 0.01f);
    assertFalse(array == copy);
}
 
Example 5
Source File: SpecialWorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testViewDetach_1() {
    WorkspaceConfiguration configuration = WorkspaceConfiguration.builder().initialSize(10000000).overallocationLimit(3.0)
            .policyAllocation(AllocationPolicy.OVERALLOCATE).policySpill(SpillPolicy.REALLOCATE)
            .policyLearning(LearningPolicy.FIRST_LOOP).policyReset(ResetPolicy.BLOCK_LEFT).build();

    Nd4jWorkspace workspace =
            (Nd4jWorkspace) Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(configuration, "WS109");

    INDArray row = Nd4j.linspace(1, 10, 10);
    INDArray exp = Nd4j.create(10).assign(2.0);
    INDArray result = null;
    try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(configuration, "WS109")) {
        INDArray matrix = Nd4j.create(10, 10);
        for (int e = 0; e < matrix.rows(); e++)
            matrix.getRow(e).assign(row);


        INDArray column = matrix.getColumn(1);
        assertTrue(column.isView());
        assertTrue(column.isAttached());
        result = column.detach();
    }

    assertFalse(result.isView());
    assertFalse(result.isAttached());
    assertEquals(exp, result);
}
 
Example 6
Source File: BasicWorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testDetach1() {
    INDArray array = null;
    INDArray copy = null;
    try (Nd4jWorkspace wsI =
                    (Nd4jWorkspace) Nd4j.getWorkspaceManager().getAndActivateWorkspace(basicConfig, "ITER")) {
        array = Nd4j.create(new double[] {1f, 2f, 3f, 4f, 5f});

        // although this array is allocated while the workspace is active, the workspace is still empty at this point, so the allocation is external
        assertTrue(array.isInScope());
        assertTrue(array.isAttached());

        long reqMemory = 5 * Nd4j.sizeOfDataType(array.dataType());
        assertEquals(reqMemory + reqMemory % 8, wsI.getPrimaryOffset());

        copy = array.detach();

        assertTrue(array.isInScope());
        assertTrue(array.isAttached());
        assertEquals(reqMemory + reqMemory % 8, wsI.getPrimaryOffset());

        assertFalse(copy.isAttached());
        assertTrue(copy.isInScope());
        assertEquals(reqMemory + reqMemory % 8, wsI.getPrimaryOffset());
    }

    assertEquals(15.0f, copy.sumNumber().floatValue(), 0.01f);
    assertFalse(array == copy);
}
 
Example 7
Source File: BasicWorkspaceTests.java    From deeplearning4j with Apache License 2.0
@Test
public void testDtypeLeverage(){

    for(DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
        for (DataType arrayDType : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
            Nd4j.setDefaultDataTypes(globalDtype, globalDtype);

            WorkspaceConfiguration configOuter = WorkspaceConfiguration.builder().initialSize(10 * 1024L * 1024L)
                    .policyAllocation(AllocationPolicy.OVERALLOCATE).policyLearning(LearningPolicy.NONE).build();
            WorkspaceConfiguration configInner = WorkspaceConfiguration.builder().initialSize(10 * 1024L * 1024L)
                    .policyAllocation(AllocationPolicy.OVERALLOCATE).policyLearning(LearningPolicy.NONE).build();

            try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(configOuter, "ws")) {
                INDArray arr = Nd4j.create(arrayDType, 3, 4);
                try (MemoryWorkspace wsInner = Nd4j.getWorkspaceManager().getAndActivateWorkspace(configOuter, "wsInner")) {
                    INDArray leveraged = arr.leverageTo("ws");
                    assertTrue(leveraged.isAttached());
                    assertEquals(arrayDType, leveraged.dataType());

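                    //detaching moves the array out of the workspace without changing its data type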
                    INDArray detached = leveraged.detach();
                    assertFalse(detached.isAttached());
                    assertEquals(arrayDType, detached.dataType());
                }
            }
        }
    }
    Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread();
}
 
Example 8
Source File: BidirectionalLayer.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    INDArray out1 = fwd.activate(training, workspaceMgr);
    INDArray out2 = bwd.activate(training, workspaceMgr);
    boolean permute = getRNNDataFormat() == RNNFormat.NWC && out1.rank() == 3;
    if(permute){
        out1 = out1.permute(0, 2, 1);
        out2 = out2.permute(0, 2, 1);
    }
    //Reverse the output time series. Note: when using LastTimeStepLayer, output can be rank 2
    out2 = out2.rank() == 2 ? out2 : TimeSeriesUtils.reverseTimeSeries(out2, workspaceMgr, ArrayType.FF_WORKING_MEM);
    INDArray ret;
    switch (layerConf.getMode()){
        case ADD:
            ret = out1.addi(out2);
            break;
        case MUL:
            //TODO may be more efficient ways than this...
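                //store detached copies of both outputs so they remain valid beyond the current workspace scope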
            this.outFwd = out1.detach();
            this.outBwd = out2.detach();
            ret = workspaceMgr.dup(ArrayType.ACTIVATIONS, out1).muli(out2);
            break;
        case AVERAGE:
            ret = out1.addi(out2).muli(0.5);
            break;
        case CONCAT:
            ret = Nd4j.concat(1, out1, out2);
            ret = workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, ret);
            break;
        default:
            throw new RuntimeException("Unknown mode: " + layerConf.getMode());
    }
    if (permute){
        ret = ret.permute(0, 2, 1);
    }
    return ret;
}
 
Example 9
Source File: SameDiffLayer.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);

    try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        if (sameDiff == null) {
            doInit();
        }
    }

    org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) layerConf();
    bl.validateInput(input);

    Map<String,INDArray> phMap = new HashMap<>();
    phMap.put(INPUT_KEY, input);
    if(maskArray != null){
        phMap.put(MASK_KEY, maskArray);
    } else {
        phMap.put(MASK_KEY, layerConf().onesMaskForInput(input));
    }

    //Configure memory management for SameDiff instance - use DL4J workspaces
    String wsNameWorking = workspaceMgr.getWorkspaceName(ArrayType.FF_WORKING_MEM);
    String wsNameOutput = workspaceMgr.getWorkspaceName(ArrayType.ACTIVATIONS);
    WorkspaceConfiguration confWorking = workspaceMgr.getConfiguration(ArrayType.FF_WORKING_MEM);
    WorkspaceConfiguration confOutput = workspaceMgr.getConfiguration(ArrayType.ACTIVATIONS);
    boolean actScopedOut = workspaceMgr.isScopedOut(ArrayType.ACTIVATIONS);
    Preconditions.checkState(actScopedOut || wsNameOutput != null, "Activations must have a workspace or must be scoped out");
    SessionMemMgr mmgr = new DL4JSameDiffMemoryMgr(wsNameWorking, wsNameOutput, confWorking, confOutput);

    InferenceSession is = sameDiff.getSessions().get(Thread.currentThread().getId());
    if(is == null){
        is = new InferenceSession(sameDiff);
        sameDiff.getSessions().put(Thread.currentThread().getId(), is);
    }
    is.setMmgr(mmgr);

    Map<String,INDArray> out = sameDiff.output(phMap, outputKey);
    INDArray result = out.get(outputKey);

    //Edge case - identity activation
    //TODO there may be a cleaner way to do this...
    if(!actScopedOut && !result.data().getParentWorkspace().getId().equals(wsNameOutput)){
        result = workspaceMgr.dup(ArrayType.ACTIVATIONS, result);
    } else if(actScopedOut && result.isAttached()){
        result = result.detach();
    }


    //Clear placeholders and op inputs to ensure no out-of-scope arrays are still referenced anywhere
    sameDiff.clearPlaceholders(true);
    sameDiff.clearOpInputs();

    return result;
}
 
Example 10
Source File: SameDiffOutputLayer.java    From deeplearning4j with Apache License 2.0
private INDArray activateHelper(boolean activations, LayerWorkspaceMgr workspaceMgr){
    assertInputSet(false);

    //Check where the output occurs. If it's a simple loss layer (no params) this could
    // just be the input!
    if(activations && INPUT_KEY.equals(layerConf().activationsVertexName())){
        return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, input);
    }

    //TODO optimize
    try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        if (sameDiff == null) {
            doInit();
        }
    }

    //Configure memory management for SameDiff instance - use DL4J workspaces
    String wsNameWorking = workspaceMgr.getWorkspaceName(ArrayType.FF_WORKING_MEM);
    String wsNameOutput = workspaceMgr.getWorkspaceName(ArrayType.ACTIVATIONS);
    WorkspaceConfiguration confWorking = workspaceMgr.getConfiguration(ArrayType.FF_WORKING_MEM);
    WorkspaceConfiguration confOutput = workspaceMgr.getConfiguration(ArrayType.ACTIVATIONS);
    boolean actScopedOut = workspaceMgr.isScopedOut(ArrayType.ACTIVATIONS);
    Preconditions.checkState(actScopedOut || wsNameOutput != null, "Activations must have a workspace or must be scoped out");
    SessionMemMgr mmgr = new DL4JSameDiffMemoryMgr(wsNameWorking, wsNameOutput, confWorking, confOutput);

    InferenceSession is = sameDiff.getSessions().get(Thread.currentThread().getId());
    if(is == null){
        is = new InferenceSession(sameDiff);
        sameDiff.getSessions().put(Thread.currentThread().getId(), is);
    }
    is.setMmgr(mmgr);

    Map<String,INDArray> phMap = new HashMap<>();
    phMap.put(INPUT_KEY, input);
    if(!activations && layerConf().labelsRequired() && labels != null) {
        phMap.put(LABELS_KEY, labels);
    }

    String s = activations ? layerConf().activationsVertexName() : outputVar.name();

    INDArray out = sameDiff.outputSingle(phMap, s);

    //Clear placeholders and op inputs to ensure no out-of-scope arrays are still referenced anywhere
    sameDiff.clearPlaceholders(true);
    sameDiff.clearOpInputs();

    //Edge case: vertex is just an Identity function, for example
    //TODO there may be a cleaner way to do this...
    if(!actScopedOut && !out.data().getParentWorkspace().getId().equals(wsNameOutput)){
        out = workspaceMgr.dup(ArrayType.ACTIVATIONS, out);
    } else if(actScopedOut && out.isAttached()){
        out = out.detach();
    }

    return out;
}
 
Example 11
Source File: SameDiffOutputLayer.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    Preconditions.checkState(!layerConf().labelsRequired() || labels != null, "Cannot execute backprop: Labels are not set. " +
            "If labels are not required for this SameDiff output layer, override SameDiffOutputLayer.labelsRequired()" +
            " to return false instead");
    Gradient g = new DefaultGradient();

    INDArray dLdIn;
    try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        if (sameDiff == null) {
            //Usually doInit will be called in forward pass; not necessarily the case in output layers
            // (for efficiency, we skip output layer forward pass in MultiLayerNetwork/ComputationGraph)
            doInit();
        }
        if(sameDiff.getFunction("grad") == null)
            sameDiff.createGradFunction(INPUT_KEY);
    }

    //Configure memory management for SameDiff instance - use DL4J workspaces
    Map<Long,InferenceSession> sessionMap = sameDiff.getFunction("grad").getSessions();
    if(!sessionMap.containsKey(Thread.currentThread().getId())){
        sessionMap.put(Thread.currentThread().getId(), new InferenceSession(sameDiff.getFunction("grad")));
    }
    String wsNameWorking = workspaceMgr.getWorkspaceName(ArrayType.BP_WORKING_MEM);
    String wsNameActGrad = workspaceMgr.getWorkspaceName(ArrayType.ACTIVATION_GRAD);
    WorkspaceConfiguration confWorking = workspaceMgr.getConfiguration(ArrayType.BP_WORKING_MEM);
    WorkspaceConfiguration confOutput = workspaceMgr.getConfiguration(ArrayType.ACTIVATION_GRAD);

    boolean actGradScopedOut = workspaceMgr.isScopedOut(ArrayType.ACTIVATION_GRAD);
    Preconditions.checkState(actGradScopedOut || wsNameActGrad != null, "Activation gradients must have a workspace or be scoped out");
    SessionMemMgr mmgr = new DL4JSameDiffMemoryMgr(wsNameWorking, wsNameActGrad, confWorking, confOutput);
    sessionMap.get(Thread.currentThread().getId()).setMmgr(mmgr);

    if(!sameDiff.hasGradientFunction()) {
        //Create when scoped out, to ensure any arrays are not in WS
        sameDiff.createGradFunction(INPUT_KEY);
    }

    List<String> gradVarNames = new ArrayList<>();
    gradVarNames.addAll(paramTable.keySet());
    gradVarNames.add(INPUT_KEY);

    Map<String,INDArray> phMap = new HashMap<>();
    phMap.put(INPUT_KEY, input);
    phMap.put(LABELS_KEY, labels);

    Map<String,INDArray> grads = sameDiff.calculateGradients(phMap, gradVarNames);
    for(String s : paramTable.keySet() ){
        INDArray sdGrad = grads.get(s);
        INDArray dl4jGrad = gradTable.get(s);
        dl4jGrad.assign(sdGrad);                                            //TODO OPTIMIZE THIS
        g.gradientForVariable().put(s, dl4jGrad);
        if(sdGrad.closeable()){
            sdGrad.close();
        }
    }

    dLdIn = grads.get(INPUT_KEY);

    //Clear placeholders and op inputs to ensure no out-of-scope arrays are still referenced anywhere
    sameDiff.clearPlaceholders(true);
    sameDiff.clearOpInputs();

    //TODO there may be a cleaner way to do this...
    if(!actGradScopedOut && !dLdIn.data().getParentWorkspace().getId().equals(wsNameActGrad)){
        dLdIn = workspaceMgr.dup(ArrayType.ACTIVATION_GRAD, dLdIn);
    } else if(actGradScopedOut && dLdIn.isAttached()){
        dLdIn = dLdIn.detach();
    }

    return new Pair<>(g, dLdIn);
}
 
Example 12
Source File: SameDiffGraphVertex.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        if (sameDiff == null) {
            doInit();
        }
    }

    Map<String,INDArray> phMap = new HashMap<>();
    config.validateInput(inputs);
    for(int i=0; i<inputs.length; i++ ){
        String name = config.getVertexParams().getInputs().get(i);
        final String maskName = name + "_mask";
        phMap.put(name, inputs[i]);
        if(maskArrays != null && maskArrays[i] != null) {
            phMap.put(maskName, maskArrays[i]);
        }else{
            phMap.put(maskName, createMask(dataType, inputs[i].shape()));
        }
    }


    //Configure memory management for SameDiff instance - use DL4J workspaces
    String wsNameWorking = workspaceMgr.getWorkspaceName(ArrayType.FF_WORKING_MEM);
    String wsNameOutput = workspaceMgr.getWorkspaceName(ArrayType.ACTIVATIONS);
    WorkspaceConfiguration confWorking = workspaceMgr.getConfiguration(ArrayType.FF_WORKING_MEM);
    WorkspaceConfiguration confOutput = workspaceMgr.getConfiguration(ArrayType.ACTIVATIONS);
    boolean actScopedOut = workspaceMgr.isScopedOut(ArrayType.ACTIVATIONS);
    Preconditions.checkState(actScopedOut || wsNameOutput != null, "Activations must have a workspace or must be scoped out");
    SessionMemMgr mmgr = new DL4JSameDiffMemoryMgr(wsNameWorking, wsNameOutput, confWorking, confOutput);

    InferenceSession is = sameDiff.getSessions().get(Thread.currentThread().getId());
    if(is == null){
        is = new InferenceSession(sameDiff);
        sameDiff.getSessions().put(Thread.currentThread().getId(), is);
    }
    is.setMmgr(mmgr);

    INDArray result = sameDiff.outputSingle(phMap, outputKey);

    //Edge case: "vertex" is just an identity activation, for example
    //TODO there may be a cleaner way to do this...
    if(!actScopedOut && !result.data().getParentWorkspace().getId().equals(wsNameOutput)){
        result = workspaceMgr.dup(ArrayType.ACTIVATIONS, result);
    } else if(actScopedOut && result.isAttached()){
        result = result.detach();
    }

    //Clear placeholders and op inputs to ensure no out-of-scope arrays are still referenced anywhere
    sameDiff.clearPlaceholders(true);
    sameDiff.clearOpInputs();
    return workspaceMgr.dup(ArrayType.ACTIVATIONS, result);
}