Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#subi()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#subi(), ND4J's in-place element-wise subtraction. Each example comes from an open-source project; the source file and license are listed above the code.
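
As a quick orientation before the project examples, here is a minimal sketch contrasting sub(), which returns a new array, with subi(), which performs the subtraction in place on the receiver. The class name and values are purely illustrative.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class SubiBasics {
    public static void main(String[] args) {
        INDArray a = Nd4j.create(new float[]{3f, 4f, 5f});
        INDArray b = Nd4j.create(new float[]{1f, 1f, 1f});

        // sub() allocates and returns a new array; 'a' is left unchanged
        INDArray c = a.sub(b);          // c is [2, 3, 4]

        // subi() subtracts element-wise in place and returns the receiver itself
        INDArray d = a.subi(b);         // a is now [2, 3, 4]; d refers to the same array as 'a'

        // the scalar overload subtracts a constant from every element, also in place
        a.subi(0.5f);                   // a is now [1.5, 2.5, 3.5]

        System.out.println(c);
        System.out.println(a);
    }
}
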
Example 1
Source File: MinMaxStrategy.java    From nd4j with Apache License 2.0
/**
 * Denormalize a data array
 *
 * @param array the data to denormalize
 * @param maskArray mask for the data (may be null); masked values are set to zero after denormalization
 * @param stats statistics of the data population
 */
@Override
public void revert(INDArray array, INDArray maskArray, MinMaxStats stats) {
    // Subtract target range minimum value
    array.subi(minRange);
    // Scale by target range
    array.divi(maxRange - minRange);

    if (array.rank() <= 2) {
        array.muliRowVector(stats.getRange());
        array.addiRowVector(stats.getLower());
    } else {
        Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(array, stats.getRange(), array, 1));
        Nd4j.getExecutioner().execAndReturn(new BroadcastAddOp(array, stats.getLower(), array, 1));
    }

    if (maskArray != null) {
        DataSetUtil.setMaskedValuesToZero(array, maskArray);
    }
}
 
Example 2
Source File: TimeSeriesUtil.java    From AILibs with GNU Affero General Public License v3.0
/**
 * Normalizes an INDArray vector object.
 *
 * @param array INDArray row vector with single shape dimension
 * @param inplace Indication whether the normalization should be performed in
 *            place or on a new array copy
 * @return Returns the view on the transformed INDArray (if inplace) or a
 *         normalized copy of the input array (if not inplace)
 */
public static INDArray normalizeINDArray(final INDArray array, final boolean inplace) {
	if (array.shape().length > 2 && array.shape()[0] != 1) {
		throw new IllegalArgumentException(String.format("Input INDArray object must be a vector with shape size 1. Actual shape: (%s)", Arrays.toString(array.shape())));
	}

	final double mean = array.mean(1).getDouble(0);
	final double std = array.std(1).getDouble(0);

	INDArray result;
	if (inplace) {
		result = array.subi(mean);
	} else {
		result = array.sub(mean);
	}
	return result.addi(Nd4j.EPS_THRESHOLD).divi(std);
}
 
Example 3
Source File: MinMaxStrategy.java    From deeplearning4j with Apache License 2.0
/**
 * Denormalize a data array
 *
 * @param array the data to denormalize
 * @param maskArray mask for the data (may be null); masked values are set to zero after denormalization
 * @param stats statistics of the data population
 */
@Override
public void revert(INDArray array, INDArray maskArray, MinMaxStats stats) {
    // Subtract target range minimum value
    array.subi(minRange);
    // Scale by target range
    array.divi(maxRange - minRange);

    if (array.rank() <= 2) {
        array.muliRowVector(stats.getRange());
        array.addiRowVector(stats.getLower());
    } else {
        Nd4j.getExecutioner().execAndReturn(new BroadcastMulOp(array, stats.getRange().castTo(array.dataType()), array, 1));
        Nd4j.getExecutioner().execAndReturn(new BroadcastAddOp(array, stats.getLower().castTo(array.dataType()), array, 1));
    }

    if (maskArray != null) {
        DataSetUtil.setMaskedValuesToZero(array, maskArray);
    }
}
 
Example 4
Source File: Nd4jMatrix.java    From jstarcraft-ai with Apache License 2.0
@Override
public MathMatrix subtractMatrix(MathMatrix matrix, boolean transpose) {
    if (matrix instanceof Nd4jMatrix) {
        Nd4jEnvironmentThread thread = EnvironmentThread.getThread(Nd4jEnvironmentThread.class);
        try (MemoryWorkspace workspace = thread.getSpace()) {
            INDArray thisArray = this.getArray();
            INDArray thatArray = Nd4jMatrix.class.cast(matrix).getArray();
            thisArray.subi(transpose ? thatArray.transposei() : thatArray);
            return this;
        }
    } else {
        return MathMatrix.super.subtractMatrix(matrix, transpose);
    }
}
 
Example 5
Source File: SoftMaxDerivative.java    From nd4j with Apache License 2.0
@Override
public void exec() {
    INDArray softmaxed = Nd4j.getExecutioner().execAndReturn(new OldSoftMax(x));
    INDArray mulled = softmaxed.muli(y);
    INDArray summed = mulled.sum(-1);
    softmaxed.muliColumnVector(summed);
    mulled.subi(softmaxed);

}
 
Example 6
Source File: ImagePreProcessingScaler.java    From nd4j with Apache License 2.0
@Override
public void revertFeatures(INDArray features) {
    if (minRange != 0) {
        features.subi(minRange);
    }
    if (maxRange - minRange != 1.0) {
        features.divi(maxRange - minRange);
    }
    features.muli(this.maxPixelVal);
}
 
Example 7
Source File: ImageMultiPreProcessingScaler.java    From nd4j with Apache License 2.0
@Override
public void revertFeatures(INDArray[] features) {
    for( int i=0; i<featureIndices.length; i++ ){
        INDArray f = features[featureIndices[i]];
        if (minRange != 0) {
            f.subi(minRange);
        }
        if (maxRange - minRange != 1.0) {
            f.divi(maxRange - minRange);
        }
        f.muli(this.maxPixelVal);
    }
}
 
Example 8
Source File: ElementWiseMultiplicationLayer.java    From deeplearning4j with Apache License 2.0
@Override
    public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
        //If this layer is layer L, then epsilon for this layer is ((w^(L+1)*(delta^(L+1))^T))^T (or equivalent)
        INDArray z = preOutput(true, workspaceMgr); //Note: using preOutput(INDArray) can't be used as this does a setInput(input) and resets the 'appliedDropout' flag
        INDArray delta = layerConf().getActivationFn().backprop(z, epsilon).getFirst(); //TODO handle activation function params

        if (maskArray != null) {
            applyMask(delta);
        }

        INDArray input = this.input.castTo(dataType);

        Gradient ret = new DefaultGradient();

        INDArray weightGrad =  gradientViews.get(ElementWiseParamInitializer.WEIGHT_KEY);
        weightGrad.subi(weightGrad);

        weightGrad.addi(input.mul(delta).sum(0));

        INDArray biasGrad = gradientViews.get(ElementWiseParamInitializer.BIAS_KEY);
        delta.sum(biasGrad, 0); //biasGrad is initialized/zeroed first

        ret.gradientForVariable().put(ElementWiseParamInitializer.WEIGHT_KEY, weightGrad);
        ret.gradientForVariable().put(ElementWiseParamInitializer.BIAS_KEY, biasGrad);

//      epsilonNext is a 2d matrix
        INDArray epsilonNext = delta.mulRowVector(params.get(ElementWiseParamInitializer.WEIGHT_KEY));
        epsilonNext = workspaceMgr.leverageTo(ArrayType.ACTIVATION_GRAD, epsilonNext);

        epsilonNext = backpropDropOutIfPresent(epsilonNext);
        return new Pair<>(ret, epsilonNext);
    }
 
Example 9
Source File: LossL1.java    From nd4j with Apache License 2.0
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                        "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    INDArray scoreArr;
    //INDArray output = Nd4j.getExecutioner().execAndReturn(Nd4j.getOpFactory().createTransform(activationFn, preOutput.dup()));
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    scoreArr = output.subi(labels);
    Nd4j.getExecutioner().execAndReturn(Nd4j.getOpFactory().createTransform("abs", scoreArr));

    //Weighted loss function
    if (weights != null) {
        if (weights.length() != output.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                            + ") does not match output.size(1)=" + output.size(1));
        }
        scoreArr.muliRowVector(weights);
    }

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
 
Example 10
Source File: LossMixtureDensity.java    From deeplearning4j with Apache License 2.0
private INDArray labelsMinusMu(INDArray labels, INDArray mu) {
    // Now that we have the mixtures, let's compute the negative
    // log likelihood of the label against the 
    long nSamples = labels.size(0);
    long labelsPerSample = labels.size(1);

    // This worked, but was actually much
    // slower than the for loop below.
    // labels = samples, mixtures, labels
    // mu = samples, mixtures
    // INDArray labelMinusMu = labels
    //        .reshape('f', nSamples, labelsPerSample, 1)
    //        .repeat(2, mMixtures)
    //        .permute(0, 2, 1)
    //        .subi(mu);

    // The above code does the same thing as the loop below,
    // but it does it with index magic instead of a for loop.
    // It turned out to be way less efficient than the simple 'for' here.
    INDArray labelMinusMu = Nd4j.zeros(nSamples, mMixtures, labelsPerSample);
    for (int k = 0; k < mMixtures; k++) {
        labelMinusMu.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.point(k), NDArrayIndex.all()},
                        labels);
    }
    labelMinusMu.subi(mu);

    return labelMinusMu;
}
 
Example 11
Source File: BarnesHutTsne.java    From deeplearning4j with Apache License 2.0
@Override
public Gradient gradient() {
    /*MemoryWorkspace workspace =
            workspaceMode == WorkspaceMode.NONE ? new DummyWorkspace()
                    : Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(
                    workspaceConfigurationExternal,
                    workspaceExternal);


    try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ {


        if (yIncs == null)
            yIncs = Y.like();
        if (gains == null)
            gains = Y.ulike().assign(1.0D);

        AtomicDouble sumQ = new AtomicDouble(0);
        /* Calculate gradient based on barnes hut approximation with positive and negative forces */
        INDArray posF = Y.like();
        INDArray negF = Y.like();

        tree = new SpTree(Y);

        tree.computeEdgeForces(rows, cols, vals, N, posF);
        for (int n = 0; n < N; n++) {
            INDArray temp = negF.slice(n);
            tree.computeNonEdgeForces(n, theta, temp, sumQ);
        }
        INDArray dC = posF.subi(negF.divi(sumQ));

        Gradient ret = new DefaultGradient();
        ret.gradientForVariable().put(Y_GRAD, dC);
        return ret;
    }
}
 
Example 12
Source File: CudaScalarsTests.java    From nd4j with Apache License 2.0
@Test
public void testPinnedScalarSub() throws Exception {
    // simple way to stop test if we're not on CUDA backend here
    assertEquals("JcublasLevel1", Nd4j.getBlasWrapper().level1().getClass().getSimpleName());

    INDArray array1 = Nd4j.create(new float[]{1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f, 1.01f});
    INDArray array2 = Nd4j.create(new float[]{2.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f, 1.00f});

    array2.subi(0.5f);

    System.out.println("Subi result: " + array2.getFloat(0));
    assertEquals(1.5f, array2.getFloat(0), 0.01f);
}
 
Example 13
Source File: WeirdSparkTests.java    From nd4j with Apache License 2.0
@Test
public void testMultithreadedViews1() throws Exception {
    final INDArray array = Nd4j.ones(10,10);
    final INDArray view = array.getRow(1);

    assertEquals(1.0f, view.getFloat(0), 0.01f);

    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            assertEquals(1.0f, view.getFloat(0), 0.01f);

            view.subi(1.0f);

            try {
                Thread.sleep(100);
            } catch (Exception e) {
                //
            }

            System.out.println(view);
        }
    });

    Nd4j.getAffinityManager().attachThreadToDevice(thread, 1);
    thread.start();
    thread.join();

    //System.out.println(view);
    assertEquals(0.0f, view.getFloat(0), 0.01f);
}
 
Example 14
Source File: SimpleNormalizationTransform.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray transform(INDArray input) {
    if(offset != 0.0) {
        input.subi(offset);
    }

    input.divi(divisor);

    return input;
}
 
Example 15
Source File: Nd4jVector.java    From jstarcraft-ai with Apache License 2.0
@Override
public MathVector subtractVector(MathVector vector) {
    if (vector instanceof Nd4jVector) {
        INDArray dataArray = this.getArray();
        // TODO: the direction of this operation may need to be revised.
        INDArray vectorArray = Nd4jVector.class.cast(vector).getArray();
        dataArray.subi(vectorArray);
        return this;
    } else {
        return MathVector.super.subtractVector(vector);
    }
}
 
Example 16
Source File: NDArrayTestsFortran.java    From deeplearning4j with Apache License 2.0
@Test
public void testScalarOps() {
    INDArray n = Nd4j.create(Nd4j.ones(27).data(), new long[] {3, 3, 3});
    assertEquals(27d, n.length(), 1e-1);
    n.addi(Nd4j.scalar(1d));
    n.subi(Nd4j.scalar(1.0d));
    n.muli(Nd4j.scalar(1.0d));
    n.divi(Nd4j.scalar(1.0d));

    n = Nd4j.create(Nd4j.ones(27).data(), new long[] {3, 3, 3});
    assertEquals(27, n.sumNumber().doubleValue(), 1e-1);
    INDArray a = n.slice(2);
    assertEquals(true, Arrays.equals(new long[] {3, 3}, a.shape()));

}
 
Example 17
Source File: LossMCXENT.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if(!labels.equalShapes(preOutput)){
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    INDArray grad;
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype

    if (activationFn instanceof ActivationSoftmax) {

        if (mask != null && LossUtil.isPerOutputMasking(output, mask)) {
            throw new UnsupportedOperationException("Per output masking for MCXENT + softmax: not supported");
        }

        //Weighted loss function
        if (weights != null) {
            if (weights.length() != output.size(1)) {
                throw new IllegalStateException("Weights vector (length " + weights.length()
                                + ") does not match output.size(1)=" + output.size(1));
            }
            INDArray temp = labels.mulRowVector(weights.castTo(labels.dataType()));
            INDArray col = temp.sum(true,1);
            grad = output.mulColumnVector(col).sub(temp);
        } else {
            grad = output.subi(labels);
        }
    } else {
        INDArray dLda = output.rdivi(labels).negi();

        grad = activationFn.backprop(preOutput, dLda).getFirst(); //TODO activation function with weights

        //Weighted loss function
        if (weights != null) {
            if (weights.length() != output.size(1)) {
                throw new IllegalStateException("Weights vector (length " + weights.length()
                                + ") does not match output.size(1)=" + output.size(1));
            }
            grad.muliRowVector(weights.castTo(grad.dataType()));
        }
    }

    //Loss function with masking
    if (mask != null) {
        LossUtil.applyMask(grad, mask);
    }

    return grad;
}
 
Example 18
Source File: NDArrayScalarOpTransform.java    From DataVec with Apache License 2.0
@Override
public NDArrayWritable map(Writable w) {
    if (!(w instanceof NDArrayWritable)) {
        throw new IllegalArgumentException("Input writable is not an NDArrayWritable: is " + w.getClass());
    }

    //Make a copy - can't always assume that the original INDArray won't be used again in the future
    NDArrayWritable n = ((NDArrayWritable) w);
    INDArray a = n.get().dup();
    switch (mathOp) {
        case Add:
            a.addi(scalar);
            break;
        case Subtract:
            a.subi(scalar);
            break;
        case Multiply:
            a.muli(scalar);
            break;
        case Divide:
            a.divi(scalar);
            break;
        case Modulus:
            throw new UnsupportedOperationException(mathOp + " is not supported for NDArrayWritable");
        case ReverseSubtract:
            a.rsubi(scalar);
            break;
        case ReverseDivide:
            a.rdivi(scalar);
            break;
        case ScalarMin:
            Transforms.min(a, scalar, false);
            break;
        case ScalarMax:
            Transforms.max(a, scalar, false);
            break;
        default:
            throw new UnsupportedOperationException("Unknown or not supported op: " + mathOp);
    }

    //To avoid threading issues...
    Nd4j.getExecutioner().commit();

    return new NDArrayWritable(a);
}
 
Example 19
Source File: PLNetInputOptimizer.java    From AILibs with GNU Affero General Public License v3.0
/**
 * Optimizes the given loss function with respect to a given PLNet's inputs using gradient descent. Ensures the outcome will be within the range of 0 and 1.
 * Performs gradient descent for a given number of steps starting at a given input, using a linearly decaying learning rate.
 * The inputs that should be optimized can be specified using a 0,1-vector.
 * @param plNet					PLNet whose inputs to optimize.
 * @param input					Initial inputs to start the gradient descent procedure from.
 * @param loss					The loss to be minimized.
 * @param initialLearningRate	The initial learning rate.
 * @param finalLearningRate		The value the learning rate should decay to.
 * @param numSteps				The number of steps to perform gradient descent for.
 * @param inputMask				0,1 vector specifying the inputs to optimize, i.e. should have a 1 at the index of any input that should be optimized and a 0 elsewhere.
 * @return						The input optimized with respect to the given loss.
 */
public INDArray optimizeInput(PLNetDyadRanker plNet, INDArray input, InputOptimizerLoss loss, double initialLearningRate, double finalLearningRate, int numSteps,
		INDArray inputMask) {
	INDArray inp = input.dup();
	INDArray alphas = Nd4j.zeros(inp.shape());
	INDArray betas = Nd4j.zeros(inp.shape());
	INDArray ones = Nd4j.ones(inp.shape());
	double output = plNet.getPlNet().output(inp).getDouble(0);
	double incumbentOutput = output;
	INDArray incumbent = inp.dup();
	for (int i = 0; i < numSteps; i++) {
		double lrDecayTerm = (double) i / (double) numSteps;
		double learningRate = (1 - lrDecayTerm) * initialLearningRate + lrDecayTerm * finalLearningRate;
		// Gradient of PLNet
		INDArray grad = computeInputDerivative(plNet, inp, loss);
		// Gradient of KKT term
		grad.subi(alphas);
		grad.addi(betas);
		// Apply gradient to alphas and betas
		alphas.subi(inp);
		betas.addi(inp.sub(ones));
		BooleanIndexing.replaceWhere(alphas, 0.0d, Conditions.lessThan(0.0d));
		BooleanIndexing.replaceWhere(betas, 0.0d, Conditions.lessThan(0.0d));
		grad.muli(inputMask);
		grad.muli(learningRate);
		inp.subi(grad);

		output = plNet.getPlNet().output(inp).getDouble(0);
		if (listener != null) {
			listener.reportOptimizationStep(inp, output);
		}

		INDArray incCheck = inp.dup().muli(inputMask);
		if (output > incumbentOutput && BooleanIndexing.and(incCheck, Conditions.greaterThanOrEqual(0.0d)) && BooleanIndexing.and(incCheck, Conditions.lessThanOrEqual(1.0d))) {
			incumbent = inp.dup();
			incumbentOutput = output;
		}
	}

	return incumbent;
}
 
Example 20
Source File: BaseComplexNDArray.java    From nd4j with Apache License 2.0
/**
 * Reverse subtraction (in-place): computes other - this
 *
 * @param other  the ndarray to subtract this array from
 * @param result the ndarray the result is written to
 * @return the result ndarray with the operation applied
 */
@Override
public IComplexNDArray rsubi(INDArray other, INDArray result) {
    return (IComplexNDArray) other.subi(this, result);
}