Java Code Examples for org.nd4j.linalg.api.ndarray.INDArray#muli()

The following examples show how to use org.nd4j.linalg.api.ndarray.INDArray#muli(). They are drawn from open-source projects; the source file and project are noted above each example.
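Before diving into the examples: muli() is the in-place counterpart of mul(). Both multiply element-wise by a scalar or another array, but mul() allocates and returns a new array while muli() writes the result back into the receiver. A minimal sketch of the difference (assuming an ND4J backend such as nd4j-native is on the classpath):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class MuliVsMul {
    public static void main(String[] args) {
        INDArray x = Nd4j.create(new double[]{1, 2, 3, 4});

        INDArray y = x.mul(2.0);   // copy: x is unchanged, y = [2, 4, 6, 8]
        x.muli(2.0);               // in-place: x is now [2, 4, 6, 8]

        System.out.println(y);
        System.out.println(x);
    }
}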
Example 1
Source File: NormalizerStandardizeLabelsTest.java    From deeplearning4j with Apache License 2.0
public genRandomDataSet(int nSamples, int nFeatures, int a, int b, long randSeed) {
    /* Constructor of the test helper class genRandomDataSet.
       If a == 1 and b == 0 the features are standard normal;
       otherwise each feature gets a random scale (from a) and a random offset (from b).
     */
    int i = 0;
    // Randomly generate scaling constants and add offsets
    // to get aA and bB
    INDArray aA = a == 1 ? Nd4j.ones(1, nFeatures) : Nd4j.rand(new int[]{1, nFeatures}, randSeed).mul(a); //if a == 1, don't scale
    INDArray bB = Nd4j.rand(new int[]{1, nFeatures}, randSeed).mul(b); //if b == 0, this zeros the offsets out
    // transform ndarray as X = aA * X + bB
    INDArray randomFeatures = Nd4j.zeros(nSamples, nFeatures);
    while (i < nFeatures) {
        INDArray randomSlice = Nd4j.randn(randSeed, new long[]{nSamples, 1});
        randomSlice.muli(aA.getScalar(0, i));
        randomSlice.addi(bB.getScalar(0, i));
        randomFeatures.putColumn(i, randomSlice);
        i++;
    }
    INDArray randomLabels = randomFeatures.dup();
    this.sampleDataSet = new DataSet(randomFeatures, randomLabels);
    this.theoreticalMean = bB.dup();
    this.theoreticalStd = aA.dup();
    this.theoreticalSEM = this.theoreticalStd.div(Math.sqrt(nSamples));
}
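The per-column loop above can also be expressed with ND4J's row-vector broadcasting, which scales and offsets all columns at once. A hedged sketch of the same transformation (note: the randomness differs, since the loop above reuses randSeed for every column):

INDArray features = Nd4j.randn(randSeed, new long[]{nSamples, nFeatures});
features.muliRowVector(aA);   // in-place: column i scaled by aA[0, i]
features.addiRowVector(bB);   // in-place: column i offset by bB[0, i]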
 
Example 2
Source File: MinMaxStrategy.java    From nd4j with Apache License 2.0
/**
 * Normalize a data array
 *
 * @param array     the data to normalize
 * @param maskArray mask array for the data, or null if no mask is used
 * @param stats     statistics of the data population
 */
@Override
public void preProcess(INDArray array, INDArray maskArray, MinMaxStats stats) {
    if (array.rank() <= 2) {
        array.subiRowVector(stats.getLower());
        array.diviRowVector(stats.getRange());
    }
    // if feature rank is 3 (time series): samples x features x timesteps
    // if feature rank is 4 (images): samples x channels x rows x cols
    // in both cases the operations should be carried out along dimension 1
    else {
        Nd4j.getExecutioner().execAndReturn(new BroadcastSubOp(array, stats.getLower(), array, 1));
        Nd4j.getExecutioner().execAndReturn(new BroadcastDivOp(array, stats.getRange(), array, 1));
    }

    // Scale by target range
    array.muli(maxRange - minRange);
    // Add target range minimum values
    array.addi(minRange);

    if (maskArray != null) {
        DataSetUtil.setMaskedValuesToZero(array, maskArray);
    }
}
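The transform implemented above is the standard min-max rescaling,

    x' = (x - min) / (max - min) * (maxRange - minRange) + minRange

A minimal standalone sketch on a plain 2d array (the lower/range vectors here are hypothetical stand-ins for MinMaxStats):

INDArray array = Nd4j.create(new double[][]{{0, 5}, {10, 15}});
INDArray lower = Nd4j.create(new double[]{0, 5});    // per-feature minimum
INDArray range = Nd4j.create(new double[]{10, 10});  // per-feature (max - min)

array.subiRowVector(lower);   // x - min
array.diviRowVector(range);   // scaled to 0..1
array.muli(2.0).subi(1.0);    // scaled to a target range of [-1, 1]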
 
Example 3
Source File: LossKLD.java    From deeplearning4j with Apache License 2.0
private INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if(!labels.equalShapes(preOutput)){
        Preconditions.throwEx("Labels and preOutput must have equal shapes: got shapes %s vs %s", labels.shape(), preOutput.shape());
    }
    labels = labels.castTo(preOutput.dataType());   //No-op if already correct dtype
    INDArray output = activationFn.getActivation(preOutput.dup(), true);

    // Clip output and labels to [Nd4j.EPS_THRESHOLD, 1], i.e. valid non-zero probabilities
    output = Transforms.min(Transforms.max(output, Nd4j.EPS_THRESHOLD, false), 1, false);
    labels = Transforms.min(Transforms.max(labels, Nd4j.EPS_THRESHOLD, true), 1, false);

    INDArray logRatio = Transforms.log(output.rdivi(labels), false);

    INDArray scoreArr = logRatio.muli(labels);
    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
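The last three in-place calls assemble the per-element Kullback-Leibler summand: output.rdivi(labels) overwrites output with labels / output, Transforms.log takes the logarithm in place, and logRatio.muli(labels) yields

    labels * log(labels / output)

i.e. the element-wise term of KL(labels || output) = sum_i p_i * log(p_i / q_i), which the loss then sums over.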
 
Example 4
Source File: LossSquaredHinge.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray computeScoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);
    BooleanIndexing.replaceWhere(scoreArr, 0.0, Conditions.lessThan(0.0));//max(0,1-y*yhat)
    scoreArr.muli(scoreArr);
    return scoreArr.sum(true,1);
}
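Spelled out, this computes the squared hinge loss per example,

    L = sum_j max(0, 1 - y_j * yhat_j)^2

where scoreArray produces 1 - y*yhat, replaceWhere clamps negative entries to zero, and scoreArr.muli(scoreArr) squares the array in place before summing over dimension 1.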
 
Example 5
Source File: AdaGrad.java    From deeplearning4j with Apache License 2.0
public INDArray getGradient(INDArray gradient, int slice, long[] shape) {
    boolean historicalInitialized = false;
    INDArray sqrtHistory;

    if (this.historicalGradient == null) {
        this.historicalGradient = Nd4j.zeros(shape).add(epsilon);
        historicalInitialized = true;
    } else if (!this.historicalGradient.isVector()
                    && this.historicalGradient.slice(slice).length() != gradient.length())
        throw new IllegalArgumentException("Illegal gradient");

    if (historicalGradient.isVector())
        sqrtHistory = sqrt(historicalGradient);
    else
        sqrtHistory = !historicalInitialized ? sqrt(historicalGradient.slice(slice)) : historicalGradient;
    INDArray learningRates;
    try {
        learningRates = sqrtHistory.rdivi(learningRate);
    } catch (ArithmeticException ae) {
        learningRates = sqrtHistory.rdivi(learningRate + epsilon);
    }
    if (gradient.length() != learningRates.length())
        gradient.muli(learningRates.slice(slice));
    else
        gradient.muli(learningRates);

    this.historicalGradient.slice(slice).addi(gradient.mul(gradient));
    numIterations++;

    //ensure no zeros
    return gradient;
}
 
Example 6
Source File: SporadicTests.java    From nd4j with Apache License 2.0
@Test
public void testInf() {
    INDArray x = Nd4j.create(10).assign(0.0);

    x.muli(0.0);

    log.error("X: {}", x);
}
 
Example 7
Source File: OperationProfilerTests.java    From deeplearning4j with Apache License 2.0
@Test(expected = ND4JIllegalStateException.class)
public void testNaNPanic1() {
    Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.NAN_PANIC);

    INDArray a = Nd4j.create(new float[] {1f, 2f, 3f, Float.NaN});

    a.muli(3f);
}
 
Example 8
Source File: LossSquaredHinge.java    From nd4j with Apache License 2.0
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                        "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    INDArray scoreArr = scoreArray(labels, preOutput, activationFn, mask);

    INDArray bitMaskRowCol = scoreArr.dup();
    /*
        bit mask is 0 where the score 1 - y*yhat is negative, and 1 where it is positive
     */
    BooleanIndexing.replaceWhere(bitMaskRowCol, 0.0, Conditions.lessThan(0.0));
    BooleanIndexing.replaceWhere(bitMaskRowCol, 1.0, Conditions.greaterThan(0.0));

    INDArray dLda = scoreArr.muli(2).muli(labels.neg());
    dLda.muli(bitMaskRowCol);

    if (mask != null && LossUtil.isPerOutputMasking(dLda, mask)) {
        //For *most* activation functions: we don't actually need to mask dL/da in addition to masking dL/dz later
        //but: some, like softmax, require both (due to dL/dz_i being a function of dL/da_j, for i != j)
        //We could add a special case for softmax (activationFn instanceof ActivationSoftmax) but that would be
        // error prone - though buy us a tiny bit of performance
        LossUtil.applyMask(dLda, mask);
    }

    INDArray gradients = activationFn.backprop(preOutput, dLda).getFirst(); //TODO activation functions with params

    if (mask != null) {
        LossUtil.applyMask(gradients, mask);
    }

    return gradients;
}
 
Example 9
Source File: ImageMultiPreProcessingScaler.java    From deeplearning4j with Apache License 2.0
@Override
public void preProcess(MultiDataSet multiDataSet) {
    for( int i=0; i<featureIndices.length; i++ ){
        INDArray f = multiDataSet.getFeatures(featureIndices[i]);
        f.divi(this.maxPixelVal); //Scaled to 0->1
        if (this.maxRange - this.minRange != 1)
            f.muli(this.maxRange - this.minRange); //Scaled to minRange -> maxRange
        if (this.minRange != 0)
            f.addi(this.minRange); //Offset by minRange
    }
}
 
Example 10
Source File: LossMAPE.java    From nd4j with Apache License 2.0
public INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                        "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");

    }
    INDArray scoreArr;
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    scoreArr = output.rsubi(labels).divi(labels);
    Nd4j.getExecutioner().execAndReturn(Nd4j.getOpFactory().createTransform("abs", scoreArr));
    scoreArr.muli(100.0 / labels.size(1));

    //Weighted loss function
    if (weights != null) {
        if (weights.length() != output.size(1)) {
            throw new IllegalStateException("Weights vector (length " + weights.length()
                            + ") does not match output.size(1)=" + output.size(1));
        }
        scoreArr.muliRowVector(weights);
    }

    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
 
Example 11
Source File: GaussianReconstructionDistribution.java    From deeplearning4j with Apache License 2.0
private INDArray[] calcLogProbArrayExConstants(INDArray x, INDArray preOutDistributionParams) {
    INDArray output = preOutDistributionParams.dup();
    activationFn.getActivation(output, false);

    val size = output.size(1) / 2;
    INDArray mean = output.get(NDArrayIndex.all(), NDArrayIndex.interval(0, size));
    INDArray logStdevSquared = output.get(NDArrayIndex.all(), NDArrayIndex.interval(size, 2 * size));

    INDArray sigmaSquared = Transforms.exp(logStdevSquared, true);
    INDArray lastTerm = x.sub(mean.castTo(x.dataType()));
    lastTerm.muli(lastTerm);
    lastTerm.divi(sigmaSquared.castTo(lastTerm.dataType())).divi(2);

    return new INDArray[] {logStdevSquared, lastTerm};
}
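The two returned arrays correspond to the variable terms of the Gaussian log-density,

    log N(x; mu, sigma^2) = -1/2 * log(2*pi*sigma^2) - (x - mu)^2 / (2*sigma^2)

logStdevSquared feeds the log-variance term, while lastTerm is (x - mu)^2 / (2*sigma^2): lastTerm.muli(lastTerm) squares x - mu in place, and the divi calls divide by sigma^2 and then by 2.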
 
Example 12
Source File: NativeOpExecutionerTest.java    From nd4j with Apache License 2.0
@Test
public void testMul_Scalar1() throws Exception {
    DataTypeUtil.setDTypeForContext(DataBuffer.Type.DOUBLE);
    INDArray x = Nd4j.create(new double[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
    INDArray y = Nd4j.create(10).assign(0.000003);

    x.muli(y);
    x.divi(0.0000022);

    System.out.println("Data: " + Arrays.toString(x.data().asDouble()));
}
 
Example 13
Source File: IntegrationTestRunner.java    From deeplearning4j with Apache License 2.0
private static INDArray relativeError(@NonNull INDArray a1, @NonNull INDArray a2, double minAbsError) {
        long numNaN1 = Nd4j.getExecutioner().exec(new MatchCondition(a1, Conditions.isNan(), Integer.MAX_VALUE)).getInt(0);
        long numNaN2 = Nd4j.getExecutioner().exec(new MatchCondition(a2, Conditions.isNan(), Integer.MAX_VALUE)).getInt(0);
        Preconditions.checkState(numNaN1 == 0, "Array 1 has NaNs");
        Preconditions.checkState(numNaN2 == 0, "Array 2 has NaNs");


//        INDArray isZero1 = a1.eq(0.0);
//        INDArray isZero2 = a2.eq(0.0);
//        INDArray bothZero = isZero1.muli(isZero2);

        INDArray abs1 = Transforms.abs(a1, true);
        INDArray abs2 = Transforms.abs(a2, true);
        INDArray absDiff = Transforms.abs(a1.sub(a2), false);

        //mask: abs(a1-a2) >= minAbsError ? 1 : 0, to ignore tiny absolute differences
        INDArray greaterThanMinAbs = Transforms.abs(a1.sub(a2), false);
        BooleanIndexing.replaceWhere(greaterThanMinAbs, 0.0, Conditions.lessThan(minAbsError));
        BooleanIndexing.replaceWhere(greaterThanMinAbs, 1.0, Conditions.greaterThan(0.0));

        INDArray result = absDiff.divi(abs1.add(abs2));
        //Only way to have NaNs given there weren't any in original : both 0s
        BooleanIndexing.replaceWhere(result, 0.0, Conditions.isNan());
        //Finally, set to 0 if less than min abs error, or unchanged otherwise
        result.muli(greaterThanMinAbs);

//        double maxRE = result.maxNumber().doubleValue();
//        if(maxRE > MAX_REL_ERROR){
//            System.out.println();
//        }
        return result;
    }
 
Example 14
Source File: AdaGrad.java    From deeplearning4j with Apache License 2.0
/**
 * Gets feature-specific learning rates.
 * Adagrad keeps a history of gradients being passed in.
 * Note that each gradient passed in becomes adapted over time, hence
 * the name adagrad.
 *
 * @param gradient  the gradient to get learning rates for
 * @param iteration the current iteration
 * @return the feature-specific learning rates
 */
public INDArray getGradient(INDArray gradient, int iteration) {
    if (historicalGradient == null)
        throw new IllegalStateException("Updater has not been initialized with view state");

    historicalGradient.addi(gradient.mul(gradient));

    INDArray sqrtHistory = sqrt(historicalGradient.dup(gradientReshapeOrder), false).addi(epsilon);
    // lr * gradient / (sqrt(sumSquaredGradients) + epsilon)
    INDArray ret = gradient.muli(sqrtHistory.rdivi(learningRate));
    numIterations++;
    return ret;
}
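Written out, this is the AdaGrad per-parameter learning rate,

    adjusted_gradient = g * lr / (sqrt(sum of squared historical gradients) + epsilon)

sqrtHistory.rdivi(learningRate) turns sqrt(history) + epsilon into lr / (sqrt(history) + epsilon) in place, and gradient.muli(...) applies those rates element-wise.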
 
Example 15
Source File: RnnOutputLayer.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    INDArray input = this.input;
    if (input.rank() != 3)
        throw new UnsupportedOperationException(
                        "Input must be rank 3. Got input with rank " + input.rank() + " " + layerId());
    INDArray b = getParamWithNoise(DefaultParamInitializer.BIAS_KEY, training, workspaceMgr);
    INDArray W = getParamWithNoise(DefaultParamInitializer.WEIGHT_KEY, training, workspaceMgr);

    applyDropOutIfNecessary(training, workspaceMgr);
    if (layerConf().getRnnDataFormat() == RNNFormat.NWC){
        input = input.permute(0, 2, 1);
    }
    INDArray input2d = TimeSeriesUtils.reshape3dTo2d(input.castTo(W.dataType()), workspaceMgr, ArrayType.FF_WORKING_MEM);

    INDArray act2d = layerConf().getActivationFn().getActivation(input2d.mmul(W).addiRowVector(b), training);
    if (maskArray != null) {
        if(!maskArray.isColumnVectorOrScalar() || Arrays.equals(maskArray.shape(), act2d.shape())){
            //Per output masking
            act2d.muli(maskArray.castTo(act2d.dataType()));
        } else {
            //Per time step masking
            act2d.muliColumnVector(maskArray.castTo(act2d.dataType()));
        }
    }

    INDArray ret = TimeSeriesUtils.reshape2dTo3d(act2d, input.size(0), workspaceMgr, ArrayType.ACTIVATIONS);
    if (layerConf().getRnnDataFormat() == RNNFormat.NWC){
        ret = ret.permute(0, 2, 1);
    }
    return ret;
}
 
Example 16
Source File: CustomImagePreProcessingScaler.java    From konduit-serving with Apache License 2.0
@Override
public void revertFeatures(INDArray features) {
    if (minRange != 0) {
        features.subi(minRange);
    }
    if (maxRange - minRange != 1.0) {
        features.divi(maxRange - minRange);
    }
    features.muli(this.maxPixelVal);
}
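revertFeatures is the exact inverse of the scaler's forward pass. A minimal round-trip sketch of the two in-place pipelines on a raw array (standalone arithmetic, not the scaler class itself):

INDArray features = Nd4j.create(new double[]{0, 127.5, 255});
double maxPixelVal = 255.0, minRange = -1.0, maxRange = 1.0;

// forward, preProcess-style: pixel values to [minRange, maxRange]
features.divi(maxPixelVal).muli(maxRange - minRange).addi(minRange);
// reverse, revertFeatures-style: back to [0, maxPixelVal]
features.subi(minRange).divi(maxRange - minRange).muli(maxPixelVal);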
 
Example 17
Source File: TFGraphTestAllHelper.java    From deeplearning4j with Apache License 2.0
public static void checkIntermediate(Map<String, INDArray> inputs, String modelName, String baseDir, String modelFileName,
                                         ExecuteWith execType, BiFunction<File,String,SameDiff> loader,
                                         Double maxRelErrorOverride, Double minAbsErrorOverride, File localTestDir, boolean printArraysDebugging) throws IOException {
        Preconditions.checkArgument((maxRelErrorOverride == null) == (minAbsErrorOverride == null), "Both maxRelErrorOverride and minAbsErrorOverride" +
                " must be null or both must be provided");
        Nd4j.EPS_THRESHOLD = 1e-3;
        OpExecOrderListener listener = new OpExecOrderListener();       //Used to collect exec order
        Pair<SameDiff, Map<String,INDArray>> p = getGraphAfterExec(baseDir, modelFileName, modelName, inputs, execType, loader, Collections.singletonList(listener), null, printArraysDebugging);
        SameDiff graph = p.getFirst();
        Map<String,INDArray> sdPredictions = p.getSecond();

        //Collect coverage info about ops
        OpValidation.collectTensorflowImportCoverage(graph);

        if (!execType.equals(ExecuteWith.JUST_PRINT)) {
            int count = 0;
            //Evaluate the nodes in their execution order - this is useful for debugging (as we want the *first* failure
            // to be detected before later failures)
            List<String> varNames = new ArrayList<>();
            Map<String,SameDiffOp> fns = graph.getOps();
            List<String> execOrder = listener.getOpNamesList();
            for(String opName : execOrder){
                String[] outputs = graph.getOutputsForOp(fns.get(opName).getOp());
                Collections.addAll(varNames, outputs);
            }

            for (String varName : varNames) {
                if (!inputs.containsKey(varName)) { //avoiding placeholders
                    INDArray tfValue = intermediateVars(modelName, baseDir, varName, localTestDir);
                    if (tfValue == null) {
                        continue;
                    }
                    log.info("Starting check: variable {}", varName);
                    if (skipNode(modelName, varName)) {
                        log.info("\n\tFORCING no check on " + varName);
                    } else {
                        assertArrayEquals("Shape not equal on node " + varName, tfValue.shape(), graph.getVariable(varName).getShape());
                        INDArray sdVal = sdPredictions.get(varName);
                        if(maxRelErrorOverride != null){
                            INDArray diff = Transforms.abs(tfValue.sub(sdVal), false);
                            INDArray absErrorMask = diff.gte(minAbsErrorOverride);   //value 1 if x[i] > minAbsError; value 0 otherwise. Used to get rid of 1e-30 vs. 1e-29 type failures
                            INDArray sumAbs = Transforms.abs(tfValue, true).addi(Transforms.abs(sdVal, true));
                            BooleanIndexing.replaceWhere(sumAbs, 1.0, Conditions.equals(0.0));  //Can only get 0.0 if both are zeros - need to avoid 0/0=NaN
                            INDArray relError = diff.divi(sumAbs);
                            relError.muli(absErrorMask);

                            int countExceeds = Nd4j.getExecutioner().exec(new MatchCondition(relError, Conditions.greaterThan(maxRelErrorOverride))).getInt(0);

                            double maxRE = -1;
                            //Mainly used for analysis in debugger:
                            DifferentialFunction op = null;
                            String[] opInputs = null;
                            if(countExceeds > 0){
                                maxRE = relError.maxNumber().doubleValue();
                                //Find the op that this variable is produced by
                                op = graph.getVariableOutputOp(varName);
                                opInputs = graph.getInputsForOp(op);
                            }


                            assertEquals( varName + ": " + countExceeds + " values exceed maxRelError=" + maxRelErrorOverride
                                    + " with minAbsError=" + minAbsErrorOverride + "; largest observed relError=" + maxRE, 0, countExceeds);
                        } else {
//                            assertEquals("Value not equal on node " + varName, tfValue, sdVal);
                            if(tfValue.equals(sdVal)){
                                System.out.println("Pass: " + varName);
                            } else {
                                System.out.println("FAIL: " + varName);
                                System.out.println("TF:\n" + tfValue);
                                System.out.println("SD:\n" + sdVal);
                            }

                        }
                        log.info("Values and shapes equal for {}", varName);
                        count++;
                    }

                }
            }

            assertTrue("No intermediate variables were checked", count > 0);
        }

        Nd4j.EPS_THRESHOLD = 1e-5;
    }
 
Example 18
Source File: ActivationSwish.java    From nd4j with Apache License 2.0
@Override
public Pair<INDArray, INDArray> backprop(INDArray in, INDArray epsilon) {
    INDArray dLdz = Nd4j.getExecutioner().execAndReturn(new SwishDerivative(in));
    dLdz.muli(epsilon);
    return new Pair<>(dLdz, null);
}
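This is the element-wise chain rule: SwishDerivative fills dLdz with f'(z) for the swish activation f(z) = z * sigmoid(z), i.e.

    f'(z) = sigmoid(z) + z * sigmoid(z) * (1 - sigmoid(z))

and dLdz.muli(epsilon) multiplies by the incoming gradient dL/da in place, giving dL/dz.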
 
Example 19
Source File: MiscOpValidation.java    From deeplearning4j with Apache License 2.0
@Test
public void testScatterOpGradients() {
    List<String> failed = new ArrayList<>();

    for (int i = 0; i < 7; i++) {
        Nd4j.getRandom().setSeed(12345);

        SameDiff sd = SameDiff.create();

        SDVariable in = sd.var("in", DataType.DOUBLE, 20, 10);
        SDVariable indices = sd.var("indices", DataType.INT, new long[]{5});
        SDVariable updates = sd.var("updates", DataType.DOUBLE, 5, 10);


        in.setArray(Nd4j.rand(DataType.DOUBLE, 20, 10));
        indices.setArray(Nd4j.create(new double[]{3, 4, 5, 10, 18}).castTo(DataType.INT));
        updates.setArray(Nd4j.rand(DataType.DOUBLE, 5, 10).muli(2).subi(1));

        SDVariable scatter;
        String name;
        switch (i) {
            case 0:
                scatter = sd.scatterAdd("s", in, indices, updates);
                name = "scatterAdd";
                break;
            case 1:
                scatter = sd.scatterSub("s", in, indices, updates);
                name = "scatterSub";
                break;
            case 2:
                scatter = sd.scatterMul("s", in, indices, updates);
                name = "scatterMul";
                break;
            case 3:
                scatter = sd.scatterDiv("s", in, indices, updates);
                name = "scatterDiv";
                break;
            case 4:
                scatter = sd.scatterUpdate("s", in, indices, updates);
                name = "scatterUpdate";
                break;
            case 5:
                scatter = sd.scatterMax("s", in, indices, updates);
                name = "scatterMax";
                break;
            case 6:
                scatter = sd.scatterMin("s", in, indices, updates);
                name = "scatterMin";
                break;
            default:
                throw new RuntimeException();
        }

        INDArray exp = in.getArr().dup();
        int[] indicesInt = indices.getArr().dup().data().asInt();
        for( int j=0; j<indicesInt.length; j++ ){
            INDArray updateRow = updates.getArr().getRow(j);
            INDArray destinationRow = exp.getRow(indicesInt[j]);
            switch (i){
                case 0:
                    destinationRow.addi(updateRow);
                    break;
                case 1:
                    destinationRow.subi(updateRow);
                    break;
                case 2:
                    destinationRow.muli(updateRow);
                    break;
                case 3:
                    destinationRow.divi(updateRow);
                    break;
                case 4:
                    destinationRow.assign(updateRow);
                    break;
                case 5:
                    destinationRow.assign(Transforms.max(destinationRow, updateRow, true));
                    break;
                case 6:
                    destinationRow.assign(Transforms.min(destinationRow, updateRow, true));
                    break;
                default:
                    throw new RuntimeException();
            }
        }

        SDVariable loss = sd.sum(scatter);  //.standardDeviation(scatter, true);  //.sum(scatter);  //TODO stdev might be better here as gradients are non-symmetrical...


        TestCase tc = new TestCase(sd)
                .expected(scatter, exp)
                .gradCheckSkipVariables(indices.name());

        String error = OpValidation.validate(tc);
        if(error != null){
            failed.add(name);
        }
    }

    assertEquals(failed.toString(), 0, failed.size());
}
 
Example 20
Source File: TestOptimizers.java    From deeplearning4j with Apache License 2.0
@Override
public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) {
    //Gradient decomposes due to sum, so:
    //d(x^2 - 10*cos(2*Pi*x))/dx
    // = 2x + 20*pi*sin(2*Pi*x)
    INDArray gradient = parameters.mul(2 * Math.PI);
    Nd4j.getExecutioner().exec(new Sin(gradient));
    gradient.muli(20 * Math.PI);
    gradient.addi(parameters.mul(2));

    Gradient g = new DefaultGradient(this.gradientView);
    g.gradientForVariable().put("W", this.gradientView);
    this.gradient = g;
    //If any parameters are outside range [-5.12,5.12]: score = infinity
    INDArray paramExceeds512 = parameters.cond(new Condition() {
        @Override
        public int condtionNum() {
            return 0;
        }

        @Override
        public double getValue() {
            return 0;
        }

        @Override
        public double epsThreshold() {
            return 0;
        }

        @Override
        public Boolean apply(Number input) {
            return Math.abs(input.doubleValue()) > 5.12;
        }
    });

    int nExceeds512 = paramExceeds512.castTo(DataType.DOUBLE).sum(Integer.MAX_VALUE).getInt(0);
    if (nExceeds512 > 0) {
        this.score = Double.POSITIVE_INFINITY;
    } else {
        //Otherwise: score is the Rastrigin cost
        double costFn = 10 * parameters.length();
        costFn += Nd4j.getBlasWrapper().dot(parameters, parameters); //xi*xi
        INDArray temp = parameters.mul(2.0 * Math.PI);
        Nd4j.getExecutioner().exec(new Cos(temp));
        temp.muli(-10.0); //After this: each element is -10*cos(2*Pi*xi)
        costFn += temp.sum(Integer.MAX_VALUE).getDouble(0);

        this.score = costFn;
    }
    this.gradientView.assign(gradient);
}
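For reference, the cost assembled here is the Rastrigin function, a standard multimodal optimization benchmark:

    f(x) = 10*n + sum_i (x_i^2 - 10*cos(2*pi*x_i)),   usually constrained to x_i in [-5.12, 5.12]

which is why parameters outside that interval are scored as infinitely bad.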