org.deeplearning4j.nn.conf.layers.PoolingType Java Examples

The following examples show how to use org.deeplearning4j.nn.conf.layers.PoolingType. Each example is taken from an open source project; the source file and project it comes from are noted above the listing.
Example #1
Source File: KerasPoolingUtils.java    From deeplearning4j with Apache License 2.0
/**
 * Map Keras pooling layers to DL4J pooling types.
 *
 * @param className name of the Keras pooling class
 * @param conf      Keras layer configuration
 * @return DL4J pooling type
 * @throws UnsupportedKerasConfigurationException Unsupported Keras config
 */
public static PoolingType mapPoolingType(String className, KerasLayerConfiguration conf)
        throws UnsupportedKerasConfigurationException {
    PoolingType poolingType;
    if (className.equals(conf.getLAYER_CLASS_NAME_MAX_POOLING_2D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_MAX_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_MAX_POOLING_3D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_MAX_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_MAX_POOLING_2D())) {
        poolingType = PoolingType.MAX;
    } else if (className.equals(conf.getLAYER_CLASS_NAME_AVERAGE_POOLING_2D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_AVERAGE_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_AVERAGE_POOLING_3D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_AVERAGE_POOLING_1D()) ||
            className.equals(conf.getLAYER_CLASS_NAME_GLOBAL_AVERAGE_POOLING_2D())) {
        poolingType = PoolingType.AVG;
    } else {
        throw new UnsupportedKerasConfigurationException("Unsupported Keras pooling layer " + className);
    }
    return poolingType;
}
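
A minimal usage sketch (illustrative only; it assumes the no-argument Keras2LayerConfiguration constructor from the Keras model-import module, and must run in a context that declares the checked UnsupportedKerasConfigurationException):

KerasLayerConfiguration conf = new Keras2LayerConfiguration();   // assumed no-arg constructor
PoolingType max = KerasPoolingUtils.mapPoolingType(conf.getLAYER_CLASS_NAME_MAX_POOLING_2D(), conf);            // PoolingType.MAX
PoolingType avg = KerasPoolingUtils.mapPoolingType(conf.getLAYER_CLASS_NAME_GLOBAL_AVERAGE_POOLING_1D(), conf); // PoolingType.AVG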
 
Example #2
Source File: SubsamplingHelper.java    From deeplearning4j with Apache License 2.0
INDArray activate(INDArray input, boolean training, int[] kernel, int[] strides, int[] pad, PoolingType poolingType,
                  ConvolutionMode convolutionMode, int[] dilation, CNN2DFormat format, LayerWorkspaceMgr workspaceMgr);
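
Implementations of this helper interface (see the CuDNN and MKL-DNN examples below) return null when they cannot handle a particular configuration, for example dilated kernels or PNORM pooling, which the calling subsampling layer treats as a signal to fall back to the built-in implementation.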
 
Example #3
Source File: CNN3DTestCases.java    From deeplearning4j with Apache License 2.0
/**
 * A simple synthetic CNN 3D test case using Convolution3D and Subsampling3D (max pooling) layers
 * followed by a softmax output layer
 */
public static TestCase getCnn3dTestCaseSynthetic(){
    return new TestCase() {
        {
            testName = "Cnn3dSynthetic";
            testType = TestType.RANDOM_INIT;
            testPredictions = true;
            testTrainingCurves = true;
            testGradients = true;
            testParamsPostTraining = true;
            testEvaluation = true;
            testOverfitting = false;
        }

        @Override
        public ModelType modelType() {
            return ModelType.MLN;
        }

        public Object getConfiguration() throws Exception {
            int nChannels = 3; // Number of input channels
            int outputNum = 10; // The number of possible outcomes
            int seed = 123;

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .seed(seed)
                    .l2(0.0005)
                    .weightInit(WeightInit.XAVIER)
                    .updater(new Nesterovs(0.01, 0.9))
                    .convolutionMode(ConvolutionMode.Same)
                    .list()
                    .layer(new Convolution3D.Builder(3,3,3)
                            .dataFormat(Convolution3D.DataFormat.NCDHW)
                            .nIn(nChannels)
                            .stride(2, 2, 2)
                            .nOut(8)
                            .activation(Activation.IDENTITY)
                            .build())
                    .layer(new Subsampling3DLayer.Builder(PoolingType.MAX)
                            .kernelSize(2, 2, 2)
                            .stride(2, 2, 2)
                            .build())
                    .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                            .nOut(outputNum)
                            .activation(Activation.SOFTMAX)
                            .build())
                    .setInputType(InputType.convolutional3D(8,8,8,nChannels))
                    .build();

            return conf;
        }

        @Override
        public MultiDataSet getGradientsTestData() throws Exception {
            Nd4j.getRandom().setSeed(12345);
            //NCDHW format
            INDArray arr = Nd4j.rand(new int[]{2, 3, 8, 8, 8});
            INDArray labels = org.deeplearning4j.integration.TestUtils.randomOneHot(2, 10);
            return new org.nd4j.linalg.dataset.MultiDataSet(arr, labels);
        }

        @Override
        public MultiDataSetIterator getTrainingData() throws Exception {
            return new SingletonMultiDataSetIterator(getGradientsTestData());
        }

        @Override
        public MultiDataSetIterator getEvaluationTestData() throws Exception {
            return getTrainingData();
        }

        @Override
        public List<Pair<INDArray[],INDArray[]>> getPredictionsTestData() throws Exception {
            MultiDataSet mds = getGradientsTestData();
            return Collections.singletonList(new Pair<>(mds.getFeatures(), null));
        }

        @Override
        public IEvaluation[] getNewEvaluations(){
            return new IEvaluation[]{new Evaluation()};
        }

    };
}
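
A minimal sketch of exercising this configuration directly (assuming TestCase exposes the getConfiguration() method overridden above, and running in a context that handles the declared Exception); the input shape matches InputType.convolutional3D(8, 8, 8, 3):

TestCase tc = getCnn3dTestCaseSynthetic();
MultiLayerConfiguration conf = (MultiLayerConfiguration) tc.getConfiguration();
MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
INDArray features = Nd4j.rand(new int[]{2, 3, 8, 8, 8});   // [minibatch, channels, depth, height, width] (NCDHW)
INDArray out = net.output(features);                        // shape [2, 10], softmax class probabilities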
 
Example #4
Source File: CudnnConvolutionHelper.java    From deeplearning4j with Apache License 2.0
/**
 * @param poolingType Used when preparing data for subsampling layers ONLY; null for convolution layers
 * @return Forward-pass arguments: the (possibly manually padded) input, the original input, the padding passed to CuDNN, and the output size
 */
public static CudnnForwardArgs getCudnnForwardArgs(INDArray input, int[] kernel, int[] strides, int[] padding, int[] dilation,
                                                   ConvolutionMode convolutionMode, PoolingType poolingType, CNN2DFormat format){
    INDArray origInput = input;

    //Check if we need to dup the input: views, non-contiguous, etc. CuDNN also seems to have issues if strides
    // are non-default for C order - even if they *should* be OK otherwise
    if(input.isView() || !Shape.hasDefaultStridesForShape(input)){
        input = input.dup('c');
    }

    boolean nchw = format == CNN2DFormat.NCHW;
    int hIdx = nchw ? 2 : 1;
    int wIdx = nchw ? 3 : 2;

    val inH = input.size(hIdx);
    val inW = input.size(wIdx);

    boolean manualPadBottom = false;
    boolean manualPadRight = false;

    int[] outSize;
    if (convolutionMode == ConvolutionMode.Same) {
        outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, null, convolutionMode, dilation, format); //Also performs validation
        padding = ConvolutionUtils.getSameModeTopLeftPadding(outSize, new int[] {(int) inH, (int) inW}, kernel, strides, dilation);
        int[] padBottomRight = ConvolutionUtils.getSameModeBottomRightPadding(outSize, new int[] {(int) inH, (int) inW}, kernel, strides, dilation);
        if(!Arrays.equals(padding, padBottomRight)){
            /*
            CuDNN - even as of 7.1 (CUDA 9.1) still doesn't have support for proper SAME mode padding (i.e., asymmetric
            padding) - padding can *only* be specified as the same amount for both the top/bottom, and for left/right.
            In SAME mode padding, sometimes these are the same - but often they are not.
            Note that when they differ, the bottom or right padding will be exactly 1 more than the top or left padding.
            As per TF, we'll manually pad here: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/conv_ops.cc#L571-L607
             */
            manualPadBottom = (padding[0] != padBottomRight[0]);
            manualPadRight = (padding[1] != padBottomRight[1]);

            //NCHW format
            long[] newShape;
            if(nchw){
                newShape = new long[]{input.size(0), input.size(1),
                        input.size(2) + (manualPadBottom ? 1 : 0),
                        input.size(3) + (manualPadRight ? 1 : 0)};
            } else {
                newShape = new long[]{input.size(0),
                        input.size(1) + (manualPadBottom ? 1 : 0),
                        input.size(2) + (manualPadRight ? 1 : 0),
                        input.size(3)};
            }
            INDArray newInput;
            if(poolingType == null || poolingType != PoolingType.MAX){
                newInput = Nd4j.create(input.dataType(), newShape);
            } else {
                //For max pooling, we don't want to include the padding in the maximum values. But CuDNN doesn't know
                // that these values are padding and hence should be excluded. Instead: we'll use -infinity so that,
                // if the 'real' (non-padding) values are all < 0, we take the real value, not the padding value
                newInput = Nd4j.valueArrayOf(newShape, Double.NEGATIVE_INFINITY, input.dataType());
            }

            if(nchw){
                newInput.put(new INDArrayIndex[]{all(), all(), interval(0,input.size(2)),
                        interval(0, input.size(3))}, input);
            } else {
                newInput.put(new INDArrayIndex[]{all(), interval(0,input.size(1)),
                        interval(0, input.size(2)), all()}, input);
            }

            input = newInput;
            //Now: we've manually applied the "extra" bottom/right padding only - if required. Consequently, we
            // now have the same amount of padding required for top/bottom, and left/right - which we'll let
            // CuDNN handle
        }
    } else {
        outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, padding, convolutionMode, dilation, format); //Also performs validation
    }

    return new CudnnForwardArgs(manualPadBottom, manualPadRight, input, origInput, padding, outSize);
}
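
For example, with inH = 7, a 2x2 kernel, stride 2 and dilation 1 in Same mode, the output height is ceil(7/2) = 4, so the total padding needed is (4 - 1)*2 + 2 - 7 = 1; getSameModeTopLeftPadding returns 0 for the top while the bottom needs 1, which is exactly the asymmetric case that triggers the manual padding branch above.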
 
Example #5
Source File: CudnnSubsamplingHelper.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(INDArray input, boolean training, int[] kernel, int[] strides, int[] pad,
                PoolingType poolingType, ConvolutionMode convolutionMode, int[] dilation, CNN2DFormat format, LayerWorkspaceMgr workspaceMgr) {
    if(dilation[0] != 1 || dilation[1] != 1){
        //CuDNN doesn't support dilated subsampling
        return null;
    }

    boolean nchw = format == CNN2DFormat.NCHW;
    int chIdx = nchw ? 1 : 3;
    int hIdx = nchw ? 2 : 1;
    int wIdx = nchw ? 3 : 2;

    val miniBatch = input.size(0);
    val inDepth = input.size(nchw ? 1 : 3);

    CudnnConvolutionHelper.CudnnForwardArgs args = getCudnnForwardArgs(input, kernel, strides, pad, dilation, convolutionMode, poolingType, format);
    input = args.getInput();
    val inH = input.size(nchw ? 2 : 1);
    val inW = input.size(nchw ? 3 : 2);
    val srcStride = input.stride();
    val outSize = args.getOutSize();
    int outH = outSize[0];
    int outW = outSize[1];


    int poolingMode;
    switch (poolingType) {
        case AVG:
            poolingMode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
            break;
        case MAX:
            poolingMode = CUDNN_POOLING_MAX;
            break;
        default:
            return null;
    }

    if (Nd4j.getExecutioner() instanceof GridExecutioner)
        ((GridExecutioner) Nd4j.getExecutioner()).flushQueue();

    checkCudnn(cudnnSetPooling2dDescriptor(cudnnContext.poolingDesc, poolingMode, CUDNN_PROPAGATE_NAN, kernel[0],
                    kernel[1], pad[0], pad[1], strides[0], strides[1]));
    checkCudnn(cudnnSetTensor4dDescriptorEx(cudnnContext.srcTensorDesc, dataType, (int) miniBatch, (int) inDepth, (int) inH, (int) inW,
            (int) srcStride[0], (int) srcStride[chIdx], (int) srcStride[hIdx], (int) srcStride[wIdx]));

    long[] outShape = nchw ? new long[] {miniBatch, inDepth, outH, outW} : new long[] {miniBatch, outH, outW, inDepth};
    INDArray reduced = workspaceMgr.createUninitialized(ArrayType.ACTIVATIONS, input.dataType(), outShape, 'c');

    val dstStride = reduced.stride();
    checkCudnn(cudnnSetTensor4dDescriptorEx(cudnnContext.dstTensorDesc, dataType, (int) miniBatch, (int) inDepth, (int) outH, (int) outW,
            (int) dstStride[0], (int) dstStride[chIdx], (int) dstStride[hIdx], (int) dstStride[wIdx]));

    Allocator allocator = AtomicAllocator.getInstance();
    CudaContext context = allocator.getFlowController().prepareAction(input, reduced);
    Pointer srcData = allocator.getPointer(input, context);
    Pointer dstData = allocator.getPointer(reduced, context);

    checkCudnn(cudnnSetStream(cudnnContext, new CUstream_st(context.getCublasStream())));
    checkCudnn(cudnnPoolingForward(cudnnContext, cudnnContext.poolingDesc, alpha, cudnnContext.srcTensorDesc,
                    srcData, beta, cudnnContext.dstTensorDesc, dstData));

    allocator.registerAction(context, reduced, input);

    if (CudaEnvironment.getInstance().getConfiguration().isDebug())
        context.syncOldStream();

    return reduced;
}
 
Example #6
Source File: RegressionTest100b4.java    From deeplearning4j with Apache License 2.0
@Test
public void testSyntheticBidirectionalRNNGraph() throws Exception {

    File f = Resources.asFile("regression_testing/100b4/SyntheticBidirectionalRNNGraph_100b4.bin");
    ComputationGraph net = ComputationGraph.load(f, true);

    Bidirectional l0 = (Bidirectional) net.getLayer("rnn1").conf().getLayer();

    LSTM l1 = (LSTM) l0.getFwd();
    assertEquals(16, l1.getNOut());
    assertEquals(new ActivationReLU(), l1.getActivationFn());
    assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l1));

    LSTM l2 = (LSTM) l0.getBwd();
    assertEquals(16, l2.getNOut());
    assertEquals(new ActivationReLU(), l2.getActivationFn());
    assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l2));

    Bidirectional l3 = (Bidirectional) net.getLayer("rnn2").conf().getLayer();

    SimpleRnn l4 = (SimpleRnn) l3.getFwd();
    assertEquals(16, l4.getNOut());
    assertEquals(new ActivationReLU(), l4.getActivationFn());
    assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l4));

    SimpleRnn l5 = (SimpleRnn) l3.getBwd();
    assertEquals(16, l5.getNOut());
    assertEquals(new ActivationReLU(), l5.getActivationFn());
    assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l5));

    MergeVertex mv = (MergeVertex) net.getVertex("concat");

    GlobalPoolingLayer gpl = (GlobalPoolingLayer) net.getLayer("pooling").conf().getLayer();
    assertEquals(PoolingType.MAX, gpl.getPoolingType());
    assertArrayEquals(new int[]{2}, gpl.getPoolingDimensions());
    assertTrue(gpl.isCollapseDimensions());

    OutputLayer outl = (OutputLayer) net.getLayer("out").conf().getLayer();
    assertEquals(3, outl.getNOut());
    assertEquals(new LossMCXENT(), outl.getLossFn());

    INDArray outExp;
    File f2 = Resources.asFile("regression_testing/100b4/SyntheticBidirectionalRNNGraph_Output_100b4.bin");
    try (DataInputStream dis = new DataInputStream(new FileInputStream(f2))) {
        outExp = Nd4j.read(dis);
    }

    INDArray in;
    File f3 = Resources.asFile("regression_testing/100b4/SyntheticBidirectionalRNNGraph_Input_100b4.bin");
    try (DataInputStream dis = new DataInputStream(new FileInputStream(f3))) {
        in = Nd4j.read(dis);
    }

    INDArray outAct = net.output(in)[0];

    assertEquals(outExp, outAct);
}
 
Example #7
Source File: MaskedReductionUtil.java    From deeplearning4j with Apache License 2.0
public static INDArray maskedPoolingEpsilonCnn(PoolingType poolingType, INDArray input, INDArray mask,
                INDArray epsilon2d, int pnorm, DataType dataType) {

    // [minibatch, channels, h=1, w=X] or [minibatch, channels, h=X, w=1] data
    // with a mask array of shape [minibatch, X]

    //If masking along height: broadcast dimensions are [0,2]
    //If masking along width: broadcast dimensions are [0,3]

    mask = mask.castTo(dataType);   //No-op if correct type

    //General case: must be equal or 1 on each dimension
    int[] dimensions = new int[4];
    int count = 0;
    for(int i=0; i<4; i++ ){
        if(input.size(i) == mask.size(i)){
            dimensions[count++] = i;
        }
    }
    if(count < 4){
        dimensions = Arrays.copyOfRange(dimensions, 0, count);
    }

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask;
            if(mask.dataType() == DataType.BOOL){
                negInfMask = Transforms.not(mask).castTo(dataType);
            } else {
                negInfMask = mask.rsub(1.0);
            }
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(dataType, input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, dimensions));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.exec(new IsMax(withInf, withInf.ulike(), 2, 3))[0];

            return Nd4j.getExecutioner().exec(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in,dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(dataType, input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, dimensions));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            //Note that with CNNs, current design is restricted to [minibatch, channels, 1, W] or [minibatch, channels, H, 1]
            INDArray nEachTimeSeries = mask.sum(1,2,3); //[minibatch, 1, h, w] mask -> [minibatch] counts
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;

        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(dataType, input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, dimensions));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2, 3), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, dimensions)); //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);

    }
}
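
The PNORM branch is the chain rule for p-norm pooling: for out = (sum_i |x_i|^p)^(1/p), the partial derivative dOut/dx_i equals x_i * |x_i|^(p-2) / out^(p-1). The numerator above is x * |x|^(p-2) (which reduces to x itself when p == 2, hence the special case), the denominator is pNorm^(p-1), and the rdivi/broadcast-multiply steps spread the upstream gradient epsilon2d (shape [minibatch, channels]) across the pooled spatial positions before the final mask zeroes out the masked entries.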
 
Example #8
Source File: MaskedReductionUtil.java    From deeplearning4j with Apache License 2.0
public static INDArray maskedPoolingConvolution(PoolingType poolingType, INDArray toReduce, INDArray mask, int pnorm, DataType dataType) {
    if(mask.rank() != 4){
        //TODO BETTER ERROR MESSAGE EXPLAINING FORMAT
        //TODO ALSO HANDLE LEGACY FORMAT WITH WARNING WHERE POSSIBLE
        throw new IllegalStateException("Expected rank 4 mask array: Got array with shape " + Arrays.toString(mask.shape()));
    }

    mask = mask.castTo(dataType);   //no-op if already correct dtype

    // [minibatch, channels, h, w] data with a mask array of shape [minibatch, 1, X, Y]
    // where X=(1 or inH) and Y=(1 or inW)

    //General case: must be equal or 1 on each dimension
    int[] dimensions = new int[4];
    int count = 0;
    for(int i=0; i<4; i++ ){
        if(toReduce.size(i) == mask.size(i)){
            dimensions[count++] = i;
        }
    }
    if(count < 4){
        dimensions = Arrays.copyOfRange(dimensions, 0, count);
    }

    switch (poolingType) {
        case MAX:
            //TODO This is ugly - replace it with something better... Need something like a Broadcast CAS op
            INDArray negInfMask;
            if(mask.dataType() == DataType.BOOL){
                negInfMask = Transforms.not(mask).castTo(dataType);
            } else {
                negInfMask = mask.rsub(1.0);
            }
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(toReduce, negInfMask, withInf, dimensions));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            return withInf.max(2, 3);
        case AVG:
        case SUM:
            INDArray masked = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked, dimensions));

            INDArray summed = masked.sum(2, 3);
            if (poolingType == PoolingType.SUM) {
                return summed;
            }
            INDArray maskCounts = mask.sum(1,2,3);
            summed.diviColumnVector(maskCounts);
            return summed;

        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked2, dimensions));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = abs.sum(2, 3);

            return Transforms.pow(pNorm, 1.0 / pnorm);
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
 
Example #9
Source File: MaskedReductionUtil.java    From deeplearning4j with Apache License 2.0
public static INDArray maskedPoolingEpsilonTimeSeries(PoolingType poolingType, INDArray input, INDArray mask,
                INDArray epsilon2d, int pnorm) {

    if (input.rank() != 3) {
        throw new IllegalArgumentException("Expect rank 3 input activation array: got " + input.rank());
    }
    if (mask.rank() != 2) {
        throw new IllegalArgumentException("Expect rank 2 array for mask: got " + mask.rank());
    }
    if (epsilon2d.rank() != 2) {
        throw new IllegalArgumentException("Expected rank 2 array for errors: got " + epsilon2d.rank());
    }

    //Mask: [minibatch, tsLength]
    //Epsilon: [minibatch, vectorSize]

    mask = mask.castTo(input.dataType());

    switch (poolingType) {
        case MAX:
            INDArray negInfMask = mask.rsub(1.0);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(input.dataType(), input.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(input, negInfMask, withInf, 0, 2));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            INDArray isMax = Nd4j.exec(new IsMax(withInf, withInf.ulike(), 2))[0];

            return Nd4j.getExecutioner().exec(new BroadcastMulOp(isMax, epsilon2d, isMax, 0, 1));
        case AVG:
        case SUM:
            //if out = sum(in,dims) then dL/dIn = dL/dOut -> duplicate to each step and mask
            //if out = avg(in,dims) then dL/dIn = 1/N * dL/dOut
            //With masking: N differs for different time series

            INDArray out = Nd4j.createUninitialized(input.dataType(), input.shape(), 'f');

            //Broadcast copy op, then divide and mask to 0 as appropriate
            Nd4j.getExecutioner().exec(new BroadcastCopyOp(out, epsilon2d, out, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(out, mask, out, 0, 2));

            if (poolingType == PoolingType.SUM) {
                return out;
            }

            INDArray nEachTimeSeries = mask.sum(1); //[minibatchSize,tsLength] -> [minibatchSize,1]
            Nd4j.getExecutioner().exec(new BroadcastDivOp(out, nEachTimeSeries, out, 0));

            return out;

        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(input.dataType(), input.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(input, mask, masked2, 0, 2));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = Transforms.pow(abs.sum(2), 1.0 / pnorm);

            INDArray numerator;
            if (pnorm == 2) {
                numerator = input.dup();
            } else {
                INDArray absp2 = Transforms.pow(Transforms.abs(input, true), pnorm - 2, false);
                numerator = input.mul(absp2);
            }

            INDArray denom = Transforms.pow(pNorm, pnorm - 1, false);
            denom.rdivi(epsilon2d);
            Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(numerator, denom, numerator, 0, 1));
            Nd4j.getExecutioner().exec(new BroadcastMulOp(numerator, mask, numerator, 0, 2)); //Apply mask

            return numerator;
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
 
Example #10
Source File: MaskedReductionUtil.java    From deeplearning4j with Apache License 2.0
public static INDArray maskedPoolingTimeSeries(PoolingType poolingType, INDArray toReduce, INDArray mask,
                int pnorm, DataType dataType) {
    if (toReduce.rank() != 3) {
        throw new IllegalArgumentException("Expect rank 3 array: got " + toReduce.rank());
    }
    if (mask.rank() != 2) {
        throw new IllegalArgumentException("Expect rank 2 array for mask: got " + mask.rank());
    }

    toReduce = toReduce.castTo(dataType);
    mask = mask.castTo(dataType);

    //Sum pooling: easy. Multiply by mask, then sum as normal
    //Average pooling: as above, but do a broadcast element-wise divi by mask.sum(1)
    //Max pooling: set to -inf if mask is 0, then do max as normal

    switch (poolingType) {
        case MAX:
            INDArray negInfMask = mask.castTo(dataType).rsub(1.0);
            BooleanIndexing.replaceWhere(negInfMask, Double.NEGATIVE_INFINITY, Conditions.equals(1.0));

            INDArray withInf = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastAddOp(toReduce, negInfMask, withInf, 0, 2));
            //At this point: all the masked out steps have value -inf, hence can't be the output of the MAX op

            return withInf.max(2);
        case AVG:
        case SUM:
            INDArray masked = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked, 0, 2));
            INDArray summed = masked.sum(2);
            if (poolingType == PoolingType.SUM) {
                return summed;
            }

            INDArray maskCounts = mask.sum(1);
            summed.diviColumnVector(maskCounts);
            return summed;
        case PNORM:
            //Similar to average and sum pooling: there's no N term here, so we can just set the masked values to 0
            INDArray masked2 = Nd4j.createUninitialized(dataType, toReduce.shape());
            Nd4j.getExecutioner().exec(new BroadcastMulOp(toReduce, mask, masked2, 0, 2));

            INDArray abs = Transforms.abs(masked2, true);
            Transforms.pow(abs, pnorm, false);
            INDArray pNorm = abs.sum(2);

            return Transforms.pow(pNorm, 1.0 / pnorm);
        default:
            throw new UnsupportedOperationException("Unknown or not supported pooling type: " + poolingType);
    }
}
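
A minimal sketch of masked pooling over padded time series data, with shapes as described in the other MaskedReductionUtil examples (activations [minibatch, vectorSize, tsLength], mask [minibatch, tsLength]); the values are illustrative only:

INDArray act = Nd4j.rand(DataType.FLOAT, 2, 5, 10);                        // [minibatch, vectorSize, tsLength]
INDArray mask = Nd4j.ones(DataType.FLOAT, 2, 10);
mask.get(NDArrayIndex.point(1), NDArrayIndex.interval(6, 10)).assign(0);   // second series has only 6 valid steps
INDArray maxPooled = MaskedReductionUtil.maskedPoolingTimeSeries(PoolingType.MAX, act, mask, 2, DataType.FLOAT);   // [2, 5]
INDArray avgPooled = MaskedReductionUtil.maskedPoolingTimeSeries(PoolingType.AVG, act, mask, 2, DataType.FLOAT);   // [2, 5], averaged over unmasked steps only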
 
Example #11
Source File: Vasttext.java    From scava with Eclipse Public License 2.0
private ComputationGraph VasttextTextualAndNumeric()
{
	Activation activation = null;
	LossFunction loss = null;
	//For multi-label classification, following the book "Deep Learning with Python", use a sigmoid output with binary cross-entropy loss
	if(multiLabel)
	{
		activation = Activation.SIGMOID;
		loss = LossFunction.XENT; //Binary Crossentropy
	}
	else
	{
		//We use softmax with multi-class cross-entropy for the binary case because there are two output neurons. With a single output neuron,
		//the activation would be sigmoid and the loss binary cross-entropy
		activation = Activation.SOFTMAX;
		loss = LossFunction.MCXENT;	//CATEGORICAL_CROSSENTROPY
	}

	System.err.println("LR:"+lr);
	
	System.err.println("Dense:"+denseDimension);

	ComputationGraphConfiguration  nnConf = new NeuralNetConfiguration.Builder()
			.updater(new Adam(lr))
			.weightInit(WeightInit.XAVIER)
			.trainingWorkspaceMode(WorkspaceMode.ENABLED)
               .inferenceWorkspaceMode(WorkspaceMode.ENABLED)
			.graphBuilder()
			.addInputs("Text", "Extra")
			//Embeddings Parts
			.addLayer("Embeddings", new EmbeddingSequenceLayer.Builder()
                       .nIn(textFeaturesSize)
                       .nOut(denseDimension)
                       .activation(Activation.IDENTITY)
                       //.activation(Activation.TANH)
                       //.dropOut(0.0)
                       .build(), "Text")
			.addLayer("GlobalPooling", new GlobalPoolingLayer.Builder()
                       .poolingType(PoolingType.AVG)
                       .poolingDimensions(2)
                       .collapseDimensions(true)
                       //.dropOut(0.0)
                       .build(), "Embeddings")
			//We're merging directly the values from the extra
			.addVertex("Merge", new MergeVertex(), "GlobalPooling","Extra")
			.addLayer("DenseAll", new DenseLayer.Builder()
					.nIn(denseDimension+numericFeaturesSize)
					.nOut(denseDimension/2)
					//.dropOut(0.5)
					//.l2(0.001)
					.build(), "Merge")
			.addLayer("Output", new OutputLayer.Builder()
					//.dropOut(0.5)
					.nIn(denseDimension/2)
                       .nOut(labelsSize)
                       .activation(activation)
                       .lossFunction(loss)
                       .build(), "DenseAll")
			.setOutputs("Output")
			.pretrain(false)
			.backprop(true)
			.build();

	return new ComputationGraph(nnConf);
}
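
A minimal inference sketch (assuming it runs inside Vasttext, where numericFeaturesSize and labelsSize are set; the zero token indices are placeholders for real vocabulary indices):

ComputationGraph graph = VasttextTextualAndNumeric();
graph.init();
INDArray text  = Nd4j.zeros(DataType.FLOAT, 4, 20);                   // integer-valued token indices, [minibatch, sequenceLength]
INDArray extra = Nd4j.rand(DataType.FLOAT, 4, numericFeaturesSize);   // numeric side features
INDArray[] out = graph.output(text, extra);                           // input order matches addInputs("Text", "Extra")
// out[0] has shape [4, labelsSize]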
 
Example #12
Source File: SubsamplingHelper.java    From deeplearning4j with Apache License 2.0
Pair<Gradient, INDArray> backpropGradient(INDArray input, INDArray epsilon, int[] kernel, int[] strides, int[] pad,
                                          PoolingType poolingType, ConvolutionMode convolutionMode, int[] dilation,
                                          CNN2DFormat format, LayerWorkspaceMgr workspaceMgr);
 
Example #13
Source File: Subsampling3DLayer.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);
    if (training && !dropoutApplied && layerConf().getIDropout() != null) {
        applyDropOutIfNecessary(true, workspaceMgr);
    }

    boolean isNCDHW = layerConf().getDataFormat() == Convolution3D.DataFormat.NCDHW;

    if (input.rank() != 5) {
        if(isNCDHW){
            throw new DL4JInvalidInputException("Got rank " + input.rank()
                    + " array as input to Subsampling3DLayer with shape " + Arrays.toString(input.shape())
                    + ". Expected rank 5 array with shape [minibatchSize, channels, "
                    + "inputDepth, inputHeight, inputWidth] when dataFormat=NCDHW. "
                    + layerId());
        } else {
            throw new DL4JInvalidInputException("Got rank " + input.rank()
                    + " array as input to Subsampling3DLayer with shape " + Arrays.toString(input.shape())
                    + ". Expected rank 5 array with shape [minibatchSize, inputDepth, inputHeight, inputWidth, channels] when dataFormat=NDHWC. "
                    + layerId());
        }
    }

    long miniBatch = input.size(0);
    long inChannels = isNCDHW ? input.size(1) : input.size(4);
    int inD = (int) (isNCDHW ? input.size(2) : input.size(1));
    int inH = (int) (isNCDHW ? input.size(3) : input.size(2));
    int inW = (int) (isNCDHW ? input.size(4) : input.size(3));

    int[] kernel = layerConf().getKernelSize();
    int[] strides = layerConf().getStride();
    int[] dilation = layerConf().getDilation();
    int[] pad;
    int[] outSize;
    if (convolutionMode == ConvolutionMode.Same) {
        int[] inShape = new int[]{inD, inH, inW};
        outSize = Convolution3DUtils.get3DOutputSize(
                input, kernel, strides, null, convolutionMode, dilation, isNCDHW);
        pad = Convolution3DUtils.get3DSameModeTopLeftPadding(outSize, inShape, kernel, strides, dilation);
    } else {
        pad = layerConf().getPadding();
        outSize = Convolution3DUtils.get3DOutputSize(
                input, kernel, strides, pad, convolutionMode, dilation, isNCDHW);
    }
    long outD = outSize[0];
    long outH = outSize[1];
    long outW = outSize[2];

    String opName = layerConf().getPoolingType() == PoolingType.MAX ? "maxpool3dnew" : "avgpool3dnew";

    INDArray output = workspaceMgr.createUninitialized(ArrayType.ACTIVATIONS, input.dataType(),
            isNCDHW ? new long[]{miniBatch, inChannels, outD, outH, outW} : new long[]{miniBatch, outD, outH, outW, inChannels}, 'c');

    int[] intArgs = new int[]{
            kernel[0], kernel[1], kernel[2],
            strides[0], strides[1], strides[2],
            pad[0], pad[1], pad[2],
            dilation[0], dilation[1], dilation[2],
            convolutionMode == ConvolutionMode.Same ? 1 : 0,
            0,  //Extra param - 0 = exclude padding for average divisor (only applicable for average pooling)
            isNCDHW ? 0 : 1
    };

    CustomOp op = DynamicCustomOp.builder(opName)
            .addInputs(input)
            .addIntegerArguments(intArgs)
            .addOutputs(output)
            .callInplace(false)
            .build();

    Nd4j.getExecutioner().exec(op);

    return output;
}
 
Example #14
Source File: Subsampling3DLayer.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);

    boolean isNCDHW = layerConf().getDataFormat() == Convolution3D.DataFormat.NCDHW;

    long miniBatch = input.size(0);
    long inChannels = isNCDHW ? input.size(1) : input.size(4);
    int inD = (int) (isNCDHW ? input.size(2) : input.size(1));
    int inH = (int) (isNCDHW ? input.size(3) : input.size(2));
    int inW = (int) (isNCDHW ? input.size(4) : input.size(3));

    int[] kernel = layerConf().getKernelSize();
    int[] strides = layerConf().getStride();
    int[] dilation = layerConf().getDilation();

    int[] pad;
    int[] outSize;
    if (convolutionMode == ConvolutionMode.Same) {
        outSize = Convolution3DUtils.get3DOutputSize(
                input, kernel, strides, null, convolutionMode, dilation, isNCDHW);
        pad = Convolution3DUtils.get3DSameModeTopLeftPadding(
                outSize, new int[]{inD, inH, inW}, kernel, strides, dilation);
    } else {
        pad = layerConf().getPadding();
    }

    INDArray outEpsilon = workspaceMgr.createUninitialized(ArrayType.ACTIVATION_GRAD, epsilon.dataType(),
            isNCDHW ? new long[]{miniBatch, inChannels, inD, inH, inW} : new long[]{miniBatch, inD, inH, inW, inChannels}, 'c');


    int[] intArgs = new int[]{
            kernel[0], kernel[1], kernel[2],
            strides[0], strides[1], strides[2],
            pad[0], pad[1], pad[2],
            dilation[0], dilation[1], dilation[2],
            convolutionMode == ConvolutionMode.Same ? 1 : 0,
            0,  //Extra param - 0 = exclude padding for average divisor
            isNCDHW ? 0 : 1
    };

    String opName = layerConf().getPoolingType() == PoolingType.MAX ? "maxpool3dnew_bp" : "avgpool3dnew_bp";

    CustomOp op = DynamicCustomOp.builder(opName)
            .addInputs(input, epsilon)
            .addIntegerArguments(intArgs)
            .addOutputs(outEpsilon)
            .callInplace(false)
            .build();

    Nd4j.getExecutioner().exec(op);

    Gradient retGradient = new DefaultGradient();
    outEpsilon = backpropDropOutIfPresent(outEpsilon);
    return new Pair<>(retGradient, outEpsilon);
}
 
Example #15
Source File: MKLDNNSubsamplingHelper.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(INDArray input, boolean training, int[] kernel, int[] strides, int[] pad, PoolingType poolingType,
                         ConvolutionMode convolutionMode, int[] dilation, CNN2DFormat format, LayerWorkspaceMgr workspaceMgr) {

    int hIdx = 2;
    int wIdx = 3;
    if(format == CNN2DFormat.NHWC){
        hIdx = 1;
        wIdx = 2;
    }

    int[] outSize;
    if (convolutionMode == ConvolutionMode.Same) {
        outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, null, convolutionMode, dilation, format); //Also performs validation
        pad = ConvolutionUtils.getSameModeTopLeftPadding(outSize, new int[] {(int)input.size(hIdx), (int)input.size(wIdx)}, kernel, strides, dilation);
    } else {
        outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, pad, convolutionMode, dilation, format); //Also performs validation
    }

    long[] outShape = format == CNN2DFormat.NCHW ? new long[]{input.size(0), input.size(1), outSize[0], outSize[1]} :
            new long[]{input.size(0), outSize[0], outSize[1], input.size(3)};
    INDArray output = workspaceMgr.createUninitialized(ArrayType.ACTIVATIONS, input.dataType(), outShape);

    if(context == null){
        context = Nd4j.getExecutioner().buildContext();
        context.setIArguments(
                kernel[0], kernel[1],
                strides[0], strides[1],
                pad[0], pad[1],
                dilation[0], dilation[1],
                ArrayUtil.fromBoolean(convolutionMode == ConvolutionMode.Same),
                0,  //Extra - not used?
                format == CNN2DFormat.NCHW ? 0 : 1); //0 = NCHW, 1=NHWC
    }

    DynamicCustomOp op;
    switch (poolingType){
        case MAX:
            op = new MaxPooling2D();
            break;
        case AVG:
            op = new AvgPooling2D();
            break;
        case SUM:
        case PNORM:
        default:
            return null;
    }

    context.purge();
    context.setInputArray(0, input);
    context.setOutputArray(0, output);

    Nd4j.exec(op, context);

    return output;
}
 
Example #16
Source File: MKLDNNSubsamplingHelper.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray input, INDArray epsilon, int[] kernel, int[] strides, int[] pad,
                                                 PoolingType poolingType, ConvolutionMode convolutionMode, int[] dilation,
                                                 CNN2DFormat format, LayerWorkspaceMgr workspaceMgr) {
    if(poolingType == PoolingType.SUM || poolingType == PoolingType.PNORM)
        return null;

    INDArray gradAtInput = workspaceMgr.createUninitialized(ArrayType.ACTIVATION_GRAD, input.dataType(), input.shape());

    int hIdx = 2;
    int wIdx = 3;
    if(format == CNN2DFormat.NHWC){
        hIdx = 1;
        wIdx = 2;
    }

    if (convolutionMode == ConvolutionMode.Same) {
        pad = ConvolutionUtils.getSameModeTopLeftPadding(new int[]{(int)epsilon.size(hIdx), (int)epsilon.size(wIdx)}, new int[] {(int)input.size(hIdx), (int)input.size(wIdx)}, kernel, strides, dilation);
    }

    Pooling2DConfig conf = Pooling2DConfig.builder()
            .isSameMode(convolutionMode == ConvolutionMode.Same)
            .kH(kernel[0]).kW(kernel[1])
            .sH(strides[0]).sW(strides[1])
            .dH(dilation[0]).dW(dilation[1])
            .pH(pad[0]).pW(pad[1])
            .isNHWC(format == CNN2DFormat.NHWC)
            .build();

    switch (poolingType){
        case MAX:
            conf.setType(Pooling2D.Pooling2DType.MAX);
            break;
        case AVG:
            conf.setType(Pooling2D.Pooling2DType.AVG);
            break;
    }

    Pooling2DDerivative d = new Pooling2DDerivative(input, epsilon, gradAtInput, conf);

    Nd4j.exec(d);
    return new Pair<Gradient,INDArray>(new DefaultGradient(), gradAtInput);
}
 
Example #17
Source File: Vasttext.java    From scava with Eclipse Public License 2.0
private MultiLayerNetwork VasttextTextual()
{
	Activation activation = null;
	LossFunction loss = null;
	//For multi-label classification, following the book "Deep Learning with Python", use a sigmoid output with binary cross-entropy loss
	if(multiLabel)
	{
		activation = Activation.SIGMOID;
		loss = LossFunction.XENT; //Binary Crossentropy
	}
	else
	{
		//We use softmax with multi-class cross-entropy for the binary case because there are two output neurons. With a single output neuron,
		//the activation would be sigmoid and the loss binary cross-entropy
		activation = Activation.SOFTMAX;
		loss = LossFunction.MCXENT;	//CATEGORICAL_CROSSENTROPY
	}

	MultiLayerConfiguration nnConf = new NeuralNetConfiguration.Builder()
               .updater(new Adam(lr))
               .weightInit(WeightInit.XAVIER)
               .trainingWorkspaceMode(WorkspaceMode.ENABLED)
               .inferenceWorkspaceMode(WorkspaceMode.ENABLED)
               .list()
               .layer(0, new EmbeddingSequenceLayer.Builder()
                       .nIn(textFeaturesSize)
                       .nOut(denseDimension)
                       .activation(Activation.IDENTITY)
                       .build())
               .layer(1, new GlobalPoolingLayer.Builder()
                       .poolingType(PoolingType.AVG)
                       .poolingDimensions(2)
                       .collapseDimensions(true)
                       .build())
               .layer(2, new OutputLayer.Builder()
                       .nIn(denseDimension)
                       .nOut(labelsSize)
                       .activation(activation)
                       .lossFunction(loss)
                       .build())
               .pretrain(false).backprop(true).build();

       return new MultiLayerNetwork(nnConf);
}
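
A matching sketch for the text-only variant (again assuming it runs inside Vasttext): the GlobalPoolingLayer with PoolingType.AVG collapses the embedding output from [minibatch, denseDimension, sequenceLength] to [minibatch, denseDimension] before the output layer.

MultiLayerNetwork net = VasttextTextual();
net.init();
INDArray tokens = Nd4j.zeros(DataType.FLOAT, 4, 30);   // integer-valued token indices, [minibatch, sequenceLength]
INDArray probs = net.output(tokens);                   // [4, labelsSize]; dimension 2 was averaged away by the pooling layer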