org.nd4j.common.primitives.Pair Java Examples

The following examples show how to use org.nd4j.common.primitives.Pair. Each example is drawn from an open source project; the source file and project are noted above each example.
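All of the project examples below revolve around the same small API, so here first is a minimal self-contained sketch of Pair on its own. It is restricted to calls that actually appear in the examples (the constructor, the Pair.of and Pair.makePair factories, getFirst()/getSecond(), the getValue() alias, and the setFirst()/setSecond() mutators); the class name and the key/value strings are illustrative only.

import org.nd4j.common.primitives.Pair;

public class PairBasics {
    public static void main(String[] args) {
        // Direct construction, plus the static factories used below
        // (Pair.of in Examples #4 and #27, Pair.makePair in Example #22)
        Pair<String, Integer> a = new Pair<>("epochs", 10);
        Pair<String, Integer> b = Pair.of("batchSize", 32);
        Pair<String, Integer> c = Pair.makePair("seed", 12345);

        // First/second accessors, and the Map.Entry-style getValue() alias seen in Example #28
        System.out.println(a.getFirst() + " = " + a.getSecond());   // epochs = 10
        System.out.println(b.getFirst() + " = " + b.getValue());    // batchSize = 32

        // Pairs are mutable: setFirst/setSecond replace an element in place (see Examples #12 and #25)
        c.setSecond(54321);
        System.out.println(c.getFirst() + " = " + c.getSecond());   // seed = 54321
    }
}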
Example #1
Source File: OCNNOutputLayer.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    Pair<Gradient, INDArray> pair = getGradientsAndDelta(preOutput2d(true, workspaceMgr), workspaceMgr); //Returns Gradient and delta^(this), not Gradient and epsilon^(this-1)
    //nIn - e.g. 150
    long inputShape = ((org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) this.getConf().getLayer()).getNIn();
    INDArray delta = pair.getSecond();
    //Created as [nIn, minibatch] in 'f' order, then broadcast-assigned from delta and transposed to [minibatch, nIn] - e.g. 4 x 150
    INDArray epsilonNext = workspaceMgr.createUninitialized(ArrayType.ACTIVATION_GRAD, input.dataType(), new long[]{inputShape, delta.length()}, 'f');
    epsilonNext = epsilonNext.assign(delta.broadcast(epsilonNext.shape())).transpose();

    //Normally we would clear weightNoiseParams here - but we want to reuse them for forward + backward + score
    // So this is instead done in MultiLayerNetwork/CompGraph backprop methods

    return new Pair<>(pair.getFirst(), epsilonNext);
}
 
Example #2
Source File: TestGraphNodes.java    From deeplearning4j with Apache License 2.0
@Test
public void testStackNode() {
    Nd4j.getRandom().setSeed(12345);
    GraphVertex unstack = new StackVertex(null, "", -1, Nd4j.dataType());

    INDArray in1 = Nd4j.rand(5, 2);
    INDArray in2 = Nd4j.rand(5, 2);
    INDArray in3 = Nd4j.rand(5, 2);
    unstack.setInputs(in1, in2, in3);
    INDArray out = unstack.doForward(false, LayerWorkspaceMgr.noWorkspaces());
    assertEquals(in1, out.get(NDArrayIndex.interval(0, 5), NDArrayIndex.all()));
    assertEquals(in2, out.get(NDArrayIndex.interval(5, 10), NDArrayIndex.all()));
    assertEquals(in3, out.get(NDArrayIndex.interval(10, 15), NDArrayIndex.all()));

    unstack.setEpsilon(out);
    Pair<Gradient, INDArray[]> b = unstack.doBackward(false, LayerWorkspaceMgr.noWorkspaces());

    assertEquals(in1, b.getSecond()[0]);
    assertEquals(in2, b.getSecond()[1]);
    assertEquals(in3, b.getSecond()[2]);
}
 
Example #3
Source File: CloseValidationMemoryMgr.java    From deeplearning4j with Apache License 2.0
@Override
public void release(INDArray array) {
    Preconditions.checkState(released.containsKey(array), "Attempting to release an array that was not allocated by" +
            " this memory manager: id=%s", array.getId());
    if (released.get(array)) {
        //Already released
        InferenceSession is = sd.getSessions().get(Thread.currentThread().getId());
        IdentityDependencyTracker<INDArray, InferenceSession.Dep> arrayUseTracker = is.getArrayUseTracker();
        DependencyList<INDArray, InferenceSession.Dep> dl = arrayUseTracker.getDependencies(array);
        System.out.println(dl);
        if (dl.getDependencies() != null) {
            for (InferenceSession.Dep d : dl.getDependencies()) {
                System.out.println(d + ": " + arrayUseTracker.isSatisfied(d));
            }
        }
        if (dl.getOrDependencies() != null) {
            for (Pair<InferenceSession.Dep, InferenceSession.Dep> p : dl.getOrDependencies()) {
                System.out.println(p + " - (" + arrayUseTracker.isSatisfied(p.getFirst()) + "," + arrayUseTracker.isSatisfied(p.getSecond()));
            }
        }
    }
    Preconditions.checkState(!released.get(array), "Attempting to release an array that was already deallocated by" +
            " an earlier release call to this memory manager: id=%s", array.getId());
    log.trace("Released array: id = {}", array.getId());
    released.put(array, true);
}
 
Example #4
Source File: GraphRunner.java    From deeplearning4j with Apache License 2.0
private static GraphRunner getRunner(TensorDataType from,TensorDataType to) {
    Pair<TensorDataType,TensorDataType> key = Pair.of(from,to);
    if(!recastGraphDefs.containsKey(key)) {
        byte[] graphForDataType = graphForDataType(from,to);
        GraphRunner graphRunner = GraphRunner.builder()
                .graphBytes(graphForDataType)
                .inputNames(Arrays.asList("input"))
                .outputNames(Arrays.asList("cast_output"))
                .build();

        recastGraphDefs.put(key,graphRunner);
        return graphRunner;
    }

    return recastGraphDefs.get(key);
}
 
Example #5
Source File: ElementWiseMultiplicationLayer.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    //If this layer is layer L, then epsilon for this layer is ((w^(L+1)*(delta^(L+1))^T))^T (or equivalent)
    INDArray z = preOutput(true, workspaceMgr); //Note: preOutput(INDArray) can't be used here, as it does a setInput(input) and resets the 'appliedDropout' flag
    INDArray delta = layerConf().getActivationFn().backprop(z, epsilon).getFirst(); //TODO handle activation function params

    if (maskArray != null) {
        applyMask(delta);
    }

    INDArray input = this.input.castTo(dataType);

    Gradient ret = new DefaultGradient();

    INDArray weightGrad = gradientViews.get(ElementWiseParamInitializer.WEIGHT_KEY);
    weightGrad.subi(weightGrad); //Zero the gradient view in place before accumulating

    weightGrad.addi(input.mul(delta).sum(0));

    INDArray biasGrad = gradientViews.get(ElementWiseParamInitializer.BIAS_KEY);
    delta.sum(biasGrad, 0); //biasGrad is initialized/zeroed first

    ret.gradientForVariable().put(ElementWiseParamInitializer.WEIGHT_KEY, weightGrad);
    ret.gradientForVariable().put(ElementWiseParamInitializer.BIAS_KEY, biasGrad);

    //epsilonNext is a 2d matrix
    INDArray epsilonNext = delta.mulRowVector(params.get(ElementWiseParamInitializer.WEIGHT_KEY));
    epsilonNext = workspaceMgr.leverageTo(ArrayType.ACTIVATION_GRAD, epsilonNext);

    epsilonNext = backpropDropOutIfPresent(epsilonNext);
    return new Pair<>(ret, epsilonNext);
}
 
Example #6
Source File: GenerateRestClients.java    From konduit-serving with Apache License 2.0
private static Map<String, List<Pair<String, String>>> getJsonNameMappings() throws IOException {
    String resourcePath = "META-INF/konduit-serving/JsonNameMapping";
    try(BufferedReader bufferedReader = new BufferedReader(new FileReader(new ClassPathResource(resourcePath).getFile()))) {
        Map<String, List<Pair<String, String>>> mappings = new LinkedHashMap<>();
        while (true) {
            String line = bufferedReader.readLine();
            if(line == null) {
                break;
            } else {
                line = line.trim();
            }

            String[] splits = line.split(",");
            if(splits.length > 2) {
                String key = splits[2]; // Super class
                Pair<String, String> value = new Pair<>(splits[0],  splits[1]); // (Type, sub type class)
                if(mappings.containsKey(key)) {
                    mappings.get(key).add(value);
                } else {
                    mappings.put(key, new ArrayList<>(Collections.singleton(value)));
                }
            }
        }

        return mappings;
    } catch (FileNotFoundException exception) {
        log.error("Couldn't find file: {}. Installing 'konduit-serving-meta' module might fix this.", resourcePath);
        System.exit(1);
    }

    return null;
}
 
Example #7
Source File: SubsamplingLayerTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testSubSampleLayerMaxBackprop() throws Exception {
    INDArray expectedContainedEpsilonInput =
                    Nd4j.create(new double[] {1., 1., 1., 1., 1., 1., 1., 1.}, new int[] {1, 2, 2, 2}).castTo(Nd4j.defaultFloatingPointType());

    INDArray expectedContainedEpsilonResult = Nd4j.create(new double[] {0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 1.,
                    0., 0., 1., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0.},
                    new int[] {1, 2, 4, 4}).castTo(Nd4j.defaultFloatingPointType());

    INDArray input = getContainedData();

    Layer layer = getSubsamplingLayer(SubsamplingLayer.PoolingType.MAX);
    layer.activate(input, false, LayerWorkspaceMgr.noWorkspaces());

    Pair<Gradient, INDArray> containedOutput = layer.backpropGradient(expectedContainedEpsilonInput, LayerWorkspaceMgr.noWorkspaces());
    assertEquals(expectedContainedEpsilonResult, containedOutput.getSecond());
    assertEquals(null, containedOutput.getFirst().getGradientFor("W"));
    assertEquals(expectedContainedEpsilonResult.shape().length, containedOutput.getSecond().shape().length);

    INDArray input2 = getData();
    layer.activate(input2, false, LayerWorkspaceMgr.noWorkspaces());
    long depth = input2.size(1);

    epsilon = Nd4j.ones(5, depth, featureMapHeight, featureMapWidth);

    Pair<Gradient, INDArray> out = layer.backpropGradient(epsilon, LayerWorkspaceMgr.noWorkspaces());
    assertEquals(input.shape().length, out.getSecond().shape().length);
    assertEquals(depth, out.getSecond().size(1)); // channels retained
}
 
Example #8
Source File: LossMAPE.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Double, INDArray> computeGradientAndScore(INDArray labels,
                INDArray preOutput, IActivation activationFn, INDArray mask, boolean average) {
    //TODO: probably a more efficient way to do this...

    return new Pair<>(computeScore(labels, preOutput, activationFn, mask, average),
                    computeGradient(labels, preOutput, activationFn, mask));
}
 
Example #9
Source File: DuplicateToTimeSeriesVertex.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<INDArray, MaskState> feedForwardMaskArrays(INDArray[] maskArrays, MaskState currentMaskState,
                int minibatchSize) {
    //Present for all time steps, or as per the corresponding input mask (if present)
    INDArray[] allMasks = graph.getInputMaskArrays();
    if (allMasks == null || allMasks[inputVertexIndex] == null) {
        //No mask
        return null;
    }
    return new Pair<>(allMasks[inputVertexIndex], MaskState.Active);
}
 
Example #10
Source File: ScaleVertex.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray[]> doBackward(boolean tbptt, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoBackward())
        throw new IllegalStateException("Cannot do backward pass: errors not set (ScaleVertex " + vertexName
                        + " idx " + vertexIndex + ")");

    try(MemoryWorkspace ws = workspaceMgr.notifyScopeBorrowed(ArrayType.ACTIVATION_GRAD)){
        return new Pair<>(null, new INDArray[] {epsilon.mul(scaleFactor)});
    }
}
 
Example #11
Source File: DataManager.java    From deeplearning4j with Apache License 2.0
public static <C> Pair<IDQN, C> load(File file, Class<C> cClass) throws IOException {
    log.info("Deserializing: " + file.getName());

    C conf = null;
    IDQN dqn = null;
    try (ZipFile zipFile = new ZipFile(file)) {
        ZipEntry config = zipFile.getEntry("configuration.json");
        InputStream stream = zipFile.getInputStream(config);
        BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
        String line = "";
        StringBuilder js = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            js.append(line).append("\n");
        }
        String json = js.toString();

        reader.close();
        stream.close();

        conf = new ObjectMapper().readValue(json, cClass);

        ZipEntry dqnzip = zipFile.getEntry("dqn.bin");
        InputStream dqnstream = zipFile.getInputStream(dqnzip);
        File tmpFile = File.createTempFile("restore", "dqn");
        Files.copy(dqnstream, Paths.get(tmpFile.getAbsolutePath()), StandardCopyOption.REPLACE_EXISTING);
        dqn = new DQN(ModelSerializer.restoreMultiLayerNetwork(tmpFile));
        dqnstream.close();
    }

    return new Pair<IDQN, C>(dqn, conf);
}
 
Example #12
Source File: LayerVertex.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray[]> doBackward(boolean tbptt, LayerWorkspaceMgr workspaceMgr) {
    if (!canDoBackward()) {
        if(inputs == null || inputs[0] == null){
            throw new IllegalStateException("Cannot do backward pass: inputs not set. Layer: \"" + vertexName
                    + "\" (idx " + vertexIndex + "), numInputs: " + getNumInputArrays());
        } else {
            throw new IllegalStateException("Cannot do backward pass: all epsilons not set. Layer \"" + vertexName
                    + "\" (idx " + vertexIndex + "), numInputs :" + getNumInputArrays() + "; numOutputs: "
                    + getNumOutputConnections());
        }
    }

    //Edge case: output layer - never did forward pass hence layer.setInput was never called...
    if(!setLayerInput){
        applyPreprocessorAndSetInput(workspaceMgr);
    }

    Pair<Gradient, INDArray> pair;
    if (tbptt && layer instanceof RecurrentLayer) {
        //Truncated BPTT for recurrent layers
        pair = ((RecurrentLayer) layer).tbpttBackpropGradient(epsilon,
                        graph.getConfiguration().getTbpttBackLength(), workspaceMgr);
    } else {
        //Normal backprop
        pair = layer.backpropGradient(epsilon, workspaceMgr); //epsTotal may be null for OutputLayers
    }

    if (layerPreProcessor != null) {
        INDArray eps = pair.getSecond();
        eps = layerPreProcessor.backprop(eps, graph.batchSize(), workspaceMgr);
        pair.setSecond(eps);
    }

    //Layers always have single activations input -> always have single epsilon output during backprop
    return new Pair<>(pair.getFirst(), new INDArray[] {pair.getSecond()});
}
 
Example #13
Source File: ZeroPadding3DLayer.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    val inShape = input.shape();

    INDArray epsNext = epsilon.get(NDArrayIndex.all(), NDArrayIndex.all(),
            NDArrayIndex.interval(padding[0], padding[0] + inShape[2]),
            NDArrayIndex.interval(padding[2], padding[2] + inShape[3]),
            NDArrayIndex.interval(padding[4], padding[4] + inShape[4]));

    epsNext = workspaceMgr.leverageTo(ArrayType.ACTIVATION_GRAD, epsNext);
    return new Pair<>((Gradient) new DefaultGradient(), epsNext);
}
 
Example #14
Source File: TimeSeriesGeneratorTest.java    From deeplearning4j with Apache License 2.0
@Test
public void tsGeneratorTest() throws InvalidKerasConfigurationException {
    INDArray data = Nd4j.create(50, 10);
    INDArray targets = Nd4j.create(50, 10);

    int length = 10;
    int samplingRate = 2;
    int stride = 1;
    int startIndex = 0;
    int endIndex = 49;
    int batchSize = 1;

    boolean shuffle = false;
    boolean reverse = false;

    TimeSeriesGenerator gen = new TimeSeriesGenerator(data, targets, length,
            samplingRate, stride, startIndex, endIndex, shuffle, reverse, batchSize);

    assertEquals(length, gen.getLength());
    assertEquals(startIndex + length, gen.getStartIndex());
    assertEquals(reverse, gen.isReverse());
    assertEquals(shuffle, gen.isShuffle());
    assertEquals(endIndex, gen.getEndIndex());
    assertEquals(batchSize, gen.getBatchSize());
    assertEquals(samplingRate, gen.getSamplingRate());
    assertEquals(stride, gen.getStride());

    Pair<INDArray, INDArray> next = gen.next(0); //Just checks that a batch can be fetched without throwing
}
 
Example #15
Source File: EmbeddingLayer.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    //If this layer is layer L, then epsilon is (w^(L+1)*(d^(L+1))^T) (or equivalent)
    INDArray z = preOutput(true, workspaceMgr);
    INDArray delta = layerConf().getActivationFn().backprop(z, epsilon).getFirst(); //TODO handle activation function params

    if (maskArray != null) {
        delta.muliColumnVector(maskArray.castTo(dataType));
    }

    INDArray weightGradients = gradientViews.get(DefaultParamInitializer.WEIGHT_KEY);
    weightGradients.assign(0);

    long[] indexes = new long[(int) input.length()];
    for (int i = 0; i < indexes.length; i++) {
        indexes[i] = input.getInt(i, 0);
    }

    INDArray indices = Nd4j.createFromArray(indexes);
    Nd4j.scatterUpdate(org.nd4j.linalg.api.ops.impl.scatter.ScatterUpdate.UpdateOp.ADD, weightGradients, indices, delta, DIM_1);

    Gradient ret = new DefaultGradient();
    ret.gradientForVariable().put(DefaultParamInitializer.WEIGHT_KEY, weightGradients);

    if(hasBias()) {
        INDArray biasGradientsView = gradientViews.get(DefaultParamInitializer.BIAS_KEY);
        delta.sum(biasGradientsView, 0); //biasGradientView is initialized/zeroed first in sum op
        ret.gradientForVariable().put(DefaultParamInitializer.BIAS_KEY, biasGradientsView);
    }

    return new Pair<>(ret, null); //Don't bother returning epsilons: no layer below this one...
}
 
Example #16
Source File: PipelineImageTransform.java    From deeplearning4j with Apache License 2.0
public PipelineImageTransform(long seed, boolean shuffle, ImageTransform... transforms) {
    super(null); // for perf reasons we ignore java Random, create our own

    List<Pair<ImageTransform, Double>> pipeline = new LinkedList<>();
    for (int i = 0; i < transforms.length; i++) {
        pipeline.add(new Pair<>(transforms[i], 1.0));
    }

    this.imageTransforms = pipeline;
    this.shuffle = shuffle;
    this.rng = Nd4j.getRandom();
    rng.setSeed(seed);
}
 
Example #17
Source File: ContextLabelTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testBasicLabel() {
    String labeledSentence = "<NEGATIVE> This sucks really bad </NEGATIVE> .";
    Pair<String, MultiDimensionalMap<Integer, Integer, String>> ret =
                    ContextLabelRetriever.stringWithLabels(labeledSentence, tokenizerFactory);
    //Two label values: "NEGATIVE" and "none"
    assertEquals(2, ret.getSecond().size());
    List<String> vals = new ArrayList<>(ret.getSecond().values());
    assertEquals(true, vals.contains("NEGATIVE"));
    assertEquals(true, vals.contains("none"));
    assertEquals("This sucks really bad .", ret.getFirst());
}
 
Example #18
Source File: TestBertIterator.java    From deeplearning4j with Apache License 2.0
private TestSentencePairsHelper(int minibatchSize) throws IOException {
    sentencesLeft = new ArrayList<>();
    sentencesRight = new ArrayList<>();
    sentencePairs = new ArrayList<>();
    labels = new ArrayList<>();
    tokenizedSentencesLeft = new ArrayList<>();
    tokenizedSentencesRight = new ArrayList<>();
    tokenizer = new BertWordPieceTokenizerFactory(pathToVocab, false, false, c);
    sentencesLeft.add(shortSentence);
    sentencesRight.add(longSentence);
    sentencePairs.add(new Pair<>(shortSentence, longSentence));
    labels.add("positive");
    if (minibatchSize > 1) {
        sentencesLeft.add(longSentence);
        sentencesRight.add(shortSentence);
        sentencePairs.add(new Pair<>(longSentence, shortSentence));
        labels.add("negative");
        if (minibatchSize > 2) {
            sentencesLeft.add(sentenceA);
            sentencesRight.add(sentenceB);
            sentencePairs.add(new Pair<>(sentenceA, sentenceB));
            labels.add("positive");
        }
    }
    for (int i = 0; i < minibatchSize; i++) {
        List<String> tokensL = tokenizer.create(sentencesLeft.get(i)).getTokens();
        List<String> tokensR = tokenizer.create(sentencesRight.get(i)).getTokens();
        if (i == 0) {
            shortL = tokensL.size();
            longL = tokensR.size();
        }
        if (i == 2) {
            sentenceALen = tokensL.size();
            sentenceBLen = tokensR.size();
        }
        tokenizedSentencesLeft.add(tokensL);
        tokenizedSentencesRight.add(tokensR);
    }
    pairSentenceProvider = new CollectionLabeledPairSentenceProvider(sentencesLeft, sentencesRight, labels, null);
}
 
Example #19
Source File: LogFileWriter.java    From deeplearning4j with Apache License 2.0
/**
 * Write marker for final static data
 * @return
 * @throws IOException
 */
public long writeFinishStaticMarker() throws IOException {
    Preconditions.checkState(endStaticInfoOffset < 0, "Final static information has already been written");
    Pair<Integer, FlatBufferBuilder> encoded = encodeStaticHeader(UIInfoType.START_EVENTS);
    long out = append(encoded.getSecond(), null);
    endStaticInfoOffset = file.length();
    return out;
}
 
Example #20
Source File: TestImageTransform.java    From deeplearning4j with Apache License 2.0
@Test
public void testProbabilisticPipelineTransform() throws Exception {
    ImageWritable writable = makeRandomImage(0, 0, 3);
    Frame frame = writable.getFrame();

    ImageTransform randCrop = new RandomCropTransform(frame.imageHeight / 2, frame.imageWidth / 2);
    ImageTransform flip = new FlipImageTransform();
    List<Pair<ImageTransform, Double>> pipeline = new LinkedList<>();
    pipeline.add(new Pair<>(randCrop, 1.0));
    pipeline.add(new Pair<>(flip, 0.5));
    ImageTransform transform = new PipelineImageTransform(pipeline, true);

    for (int i = 0; i < 100; i++) {
        ImageWritable w = transform.transform(writable);
        Frame f = w.getFrame();
        assertTrue(f.imageHeight == frame.imageHeight / 2);
        assertTrue(f.imageWidth == frame.imageWidth / 2);
        assertEquals(f.imageChannels, frame.imageChannels);
    }
    assertEquals(null, transform.transform(null));

    transform = new PipelineImageTransform(new EqualizeHistTransform());
    writable = transform.transform(writable);
    float[] transformed = transform.query(new float[] {88, 99});
    assertEquals(88, transformed[0], 0);
    assertEquals(99, transformed[1], 0);
}
 
Example #21
Source File: LongIndexToKey.java    From deeplearning4j with Apache License 2.0
@Override
public long getNumRecords() throws IOException {
    long max = -1;
    for (Pair<Long, Long> p : readerIndices) {
        max = Math.max(max, p.getSecond());
    }

    if (max <= 0) {
        throw new IllegalStateException("Invalid number of keys found: " + max);
    }

    return max + 1; //Zero indexed
}
 
Example #22
Source File: CpuWorkspaceDeallocator.java    From deeplearning4j with Apache License 2.0
public CpuWorkspaceDeallocator(@NonNull CpuWorkspace workspace) {
    this.pointersPair = workspace.workspace();
    this.pinnedPointers = workspace.pinnedPointers();
    this.externalPointers = workspace.externalPointers();
    this.location = workspace.getWorkspaceConfiguration().getPolicyLocation();

    if (workspace.mappedFileSize() > 0)
        this.mmapInfo = Pair.makePair(workspace.mmap, workspace.mappedFileSize());
}
 
Example #23
Source File: ConstantProtector.java    From deeplearning4j with Apache License 2.0
public void purgeProtector() {
    protector = new CopyOnWriteArrayList<>();
    deviceCache = new ArrayList<>();

    int numDevices = Nd4j.getAffinityManager().getNumberOfDevices();

    for (int i = 0; i < numDevices; i++) {
        deviceCache.add(i, new ConcurrentHashMap<LongShapeDescriptor, Pair<DataBuffer, long[]>>());
    }
}
 
Example #24
Source File: ActivationSoftSign.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<INDArray, INDArray> backprop(INDArray in, INDArray epsilon) {
    assertShape(in, epsilon);

    Nd4j.getExecutioner().execAndReturn(new SoftSignBp(in, epsilon, in));

    return new Pair<>(in, null);
}
 
Example #25
Source File: Convolution1DLayer.java    From deeplearning4j with Apache License 2.0
@Override
protected Pair<INDArray, INDArray> preOutput4d(boolean training, boolean forBackprop, LayerWorkspaceMgr workspaceMgr) {
    Pair<INDArray,INDArray> preOutput = super.preOutput(true, forBackprop, workspaceMgr);
    INDArray p3d = preOutput.getFirst();
    INDArray p = preOutput.getFirst().reshape(p3d.size(0), p3d.size(1), p3d.size(2), 1);
    preOutput.setFirst(p);
    return preOutput;
}
 
Example #26
Source File: SequenceMergeFunction.java    From deeplearning4j with Apache License 2.0
@Override
public List<List<Writable>> apply(Pair<T, Iterable<List<List<Writable>>>> t2) {
    List<List<List<Writable>>> sequences = new ArrayList<>();
    for (List<List<Writable>> l : t2.getSecond()) {
        sequences.add(l);
    }

    return sequenceMerge.mergeSequences(sequences);
}
 
Example #27
Source File: ExtractKeysFunction.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<List<Writable>, List<Writable>> apply(List<Writable> writables) {

    List<Writable> keyValues;
    if (columnIndexes.length == 1) {
        keyValues = Collections.singletonList(writables.get(columnIndexes[0]));
    } else {
        keyValues = new ArrayList<>(columnIndexes.length);
        for (int i : columnIndexes) {
            keyValues.add(writables.get(i));
        }
    }

    return Pair.of(keyValues, writables);
}
 
Example #28
Source File: KDTreeTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testTree() {
    KDTree tree = new KDTree(2);
    INDArray half = Nd4j.create(new double[] {0.5, 0.5}, new long[]{1,2}).castTo(DataType.FLOAT);
    INDArray one = Nd4j.create(new double[] {1, 1}, new long[]{1,2}).castTo(DataType.FLOAT);
    tree.insert(half);
    tree.insert(one);
    Pair<Double, INDArray> pair = tree.nn(Nd4j.create(new double[] {0.5, 0.5}, new long[]{1,2}).castTo(DataType.FLOAT));
    assertEquals(half, pair.getValue());
}
 
Example #29
Source File: ConvolutionLayer.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<INDArray, MaskState> feedForwardMaskArray(INDArray maskArray, MaskState currentMaskState, int minibatchSize) {
    if (maskArray == null) {
        //For same mode (with stride 1): output activations size is always same size as input activations size -> mask array is same size
        return new Pair<>(maskArray, currentMaskState);
    }

    INDArray outMask = ConvolutionUtils.cnn2dMaskReduction(maskArray, layerConf().getKernelSize(), layerConf().getStride(),
            layerConf().getPadding(), layerConf().getDilation(), layerConf().getConvolutionMode());
    return new Pair<>(outMask, currentMaskState);
}
 
Example #30
Source File: LBFGS.java    From deeplearning4j with Apache License 2.0
@Override
public void setupSearchState(Pair<Gradient, Double> pair) {
    super.setupSearchState(pair);
    INDArray params = (INDArray) searchState.get(PARAMS_KEY);
    searchState.put("s", new LinkedList<INDArray>()); // holds parameters differences
    searchState.put("y", new LinkedList<INDArray>()); // holds gradients differences
    searchState.put("rho", new LinkedList<Double>());
    searchState.put("oldparams", params.dup());

}