org.deeplearning4j.nn.conf.GradientNormalization Java Examples

The following examples show how to use org.deeplearning4j.nn.conf.GradientNormalization. You can go to the original project or source file by following the links above each example.
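Before the individual examples, here is a minimal, self-contained sketch of the typical usage pattern: an enum value is passed to gradientNormalization(...) on a NeuralNetConfiguration.Builder (or on an individual layer builder), and gradientNormalizationThreshold(...) supplies the clipping value used by the clipping strategies. The layer sizes, updater, seed, and class name below are illustrative assumptions, not taken from any of the projects listed here.

import org.deeplearning4j.nn.conf.GradientNormalization;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.learning.config.Adam;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class GradientNormalizationSketch {

    public static void main(String[] args) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(42)
                .updater(new Adam(1e-3))
                .weightInit(WeightInit.XAVIER)
                // Clip each gradient element to [-threshold, threshold]; other enum values include
                // RenormalizeL2PerLayer, RenormalizeL2PerParamType, ClipL2PerLayer, ClipL2PerParamType and None
                .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
                .gradientNormalizationThreshold(1.0)
                .list()
                .layer(0, new DenseLayer.Builder().nIn(10).nOut(20).activation(Activation.RELU).build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(20).nOut(3).activation(Activation.SOFTMAX).build())
                .build();

        // The setting is serialized along with the rest of the configuration
        System.out.println(conf.toJson());
    }
}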
Example #1
Source File: NeuralNetworks.java    From Machine-Learning-in-Java with MIT License
private static MultiLayerNetwork softMaxRegression(int seed,
		int iterations, int numRows, int numColumns, int outputNum) {
	MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
			.seed(seed)
			.gradientNormalization(
					GradientNormalization.ClipElementWiseAbsoluteValue)
			.gradientNormalizationThreshold(1.0)
			.iterations(iterations)
			.momentum(0.5)
			.momentumAfter(Collections.singletonMap(3, 0.9))
			.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
			.list(1)
			.layer(0,
					new OutputLayer.Builder(
							LossFunction.NEGATIVELOGLIKELIHOOD)
							.activation("softmax")
							.nIn(numColumns * numRows).nOut(outputNum)
							.build()).pretrain(true).backprop(false)
			.build();

	MultiLayerNetwork model = new MultiLayerNetwork(conf);

	return model;
}
 
Example #2
Source File: LocalResponseTest.java    From deeplearning4j with Apache License 2.0
@Before
public void doBefore() {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123)
                    .layer(new LocalResponseNormalization.Builder().k(2).n(5).alpha(1e-4).beta(0.75).build())
                    .build();

    layer = new LocalResponseNormalization().instantiate(conf, null, 0, null, false, Nd4j.defaultFloatingPointType());
    activationsActual = layer.activate(x, false, LayerWorkspaceMgr.noWorkspaces());
}
 
Example #3
Source File: RegressionTest080.java    From deeplearning4j with Apache License 2.0
@Test
public void regressionTestCGLSTM1() throws Exception {

    File f = Resources.asFile("regression_testing/080/080_ModelSerializer_Regression_CG_LSTM_1.zip");

    ComputationGraph net = ModelSerializer.restoreComputationGraph(f, true);

    ComputationGraphConfiguration conf = net.getConfiguration();
    assertEquals(3, conf.getVertices().size());

    GravesLSTM l0 = (GravesLSTM) ((LayerVertex) conf.getVertices().get("0")).getLayerConf().getLayer();
    assertTrue(l0.getActivationFn() instanceof ActivationTanH);
    assertEquals(3, l0.getNIn());
    assertEquals(4, l0.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l0.getGradientNormalization());
    assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5);

    GravesBidirectionalLSTM l1 =
                    (GravesBidirectionalLSTM) ((LayerVertex) conf.getVertices().get("1")).getLayerConf().getLayer();
    assertTrue(l1.getActivationFn() instanceof ActivationSoftSign);
    assertEquals(4, l1.getNIn());
    assertEquals(4, l1.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization());
    assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5);

    RnnOutputLayer l2 = (RnnOutputLayer) ((LayerVertex) conf.getVertices().get("2")).getLayerConf().getLayer();
    assertEquals(4, l2.getNIn());
    assertEquals(5, l2.getNOut());
    assertTrue(l2.getActivationFn() instanceof ActivationSoftmax);
    assertTrue(l2.getLossFn() instanceof LossMCXENT);
}
 
Example #4
Source File: RegressionTest080.java    From deeplearning4j with Apache License 2.0
@Test
public void regressionTestLSTM1() throws Exception {

    File f = Resources.asFile("regression_testing/080/080_ModelSerializer_Regression_LSTM_1.zip");

    MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true);

    MultiLayerConfiguration conf = net.getLayerWiseConfigurations();
    assertEquals(3, conf.getConfs().size());

    GravesLSTM l0 = (GravesLSTM) conf.getConf(0).getLayer();
    assertTrue(l0.getActivationFn() instanceof ActivationTanH);
    assertEquals(3, l0.getNIn());
    assertEquals(4, l0.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l0.getGradientNormalization());
    assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5);

    GravesBidirectionalLSTM l1 = (GravesBidirectionalLSTM) conf.getConf(1).getLayer();
    assertTrue(l1.getActivationFn() instanceof ActivationSoftSign);
    assertEquals(4, l1.getNIn());
    assertEquals(4, l1.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization());
    assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5);

    RnnOutputLayer l2 = (RnnOutputLayer) conf.getConf(2).getLayer();
    assertEquals(4, l2.getNIn());
    assertEquals(5, l2.getNOut());
    assertTrue(l2.getActivationFn() instanceof ActivationSoftmax);
    assertTrue(l2.getLossFn() instanceof LossMCXENT);
}
 
Example #5
Source File: RegressionTest071.java    From deeplearning4j with Apache License 2.0
@Test
public void regressionTestLSTM1() throws Exception {

    File f = Resources.asFile("regression_testing/071/071_ModelSerializer_Regression_LSTM_1.zip");

    MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true);

    MultiLayerConfiguration conf = net.getLayerWiseConfigurations();
    assertEquals(3, conf.getConfs().size());

    GravesLSTM l0 = (GravesLSTM) conf.getConf(0).getLayer();
    assertEquals("tanh", l0.getActivationFn().toString());
    assertEquals(3, l0.getNIn());
    assertEquals(4, l0.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l0.getGradientNormalization());
    assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5);

    GravesBidirectionalLSTM l1 = (GravesBidirectionalLSTM) conf.getConf(1).getLayer();
    assertEquals("softsign", l1.getActivationFn().toString());
    assertEquals(4, l1.getNIn());
    assertEquals(4, l1.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization());
    assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5);

    RnnOutputLayer l2 = (RnnOutputLayer) conf.getConf(2).getLayer();
    assertEquals(4, l2.getNIn());
    assertEquals(5, l2.getNOut());
    assertEquals("softmax", l2.getActivationFn().toString());
    assertTrue(l2.getLossFn() instanceof LossMCXENT);
}
 
Example #6
Source File: BaseLayer.java    From deeplearning4j with Apache License 2.0
/**
 * Reset the learning-related configuration of the layer to its defaults. When the layer is instantiated with a
 * global neural network configuration, the values specified in that configuration will be used instead. Intended
 * for internal use by the transfer learning API; users should not need to call this method directly.
 */
public void resetLayerDefaultConfig() {
    //clear the learning related params for all layers in the origConf and set to defaults
    this.setIUpdater(null);
    this.setWeightInitFn(null);
    this.setBiasInit(Double.NaN);
    this.setGainInit(Double.NaN);
    this.regularization = null;
    this.regularizationBias = null;
    this.setGradientNormalization(GradientNormalization.None);
    this.setGradientNormalizationThreshold(1.0);
    this.iUpdater = null;
    this.biasUpdater = null;
}
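Because resetLayerDefaultConfig() is invoked internally by the transfer learning API, the place this usually surfaces for users is a FineTuneConfiguration applied through TransferLearning. The sketch below is only an illustration of that pattern; the helper method name, updater, and threshold are assumptions, and it presumes FineTuneConfiguration.Builder exposes the same gradientNormalization(...) and gradientNormalizationThreshold(...) setters used elsewhere on this page.

import org.deeplearning4j.nn.conf.GradientNormalization;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.transferlearning.FineTuneConfiguration;
import org.deeplearning4j.nn.transferlearning.TransferLearning;
import org.nd4j.linalg.learning.config.Adam;

public class FineTuneSketch {

    // Hypothetical helper: re-applies learning-related settings to all layers of a pretrained network
    public static MultiLayerNetwork applyFineTune(MultiLayerNetwork pretrained) {
        FineTuneConfiguration fineTune = new FineTuneConfiguration.Builder()
                .updater(new Adam(1e-4))
                // Layer-level defaults (including gradient normalization) are overridden by these values;
                // internally the transfer learning API resets each layer's config before applying them
                .gradientNormalization(GradientNormalization.ClipL2PerLayer)
                .gradientNormalizationThreshold(1.0)
                .build();

        return new TransferLearning.Builder(pretrained)
                .fineTuneConfiguration(fineTune)
                .build();
    }
}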
 
Example #7
Source File: RegressionTest071.java    From deeplearning4j with Apache License 2.0
@Test
public void regressionTestCGLSTM1() throws Exception {
    File f = Resources.asFile("regression_testing/071/071_ModelSerializer_Regression_CG_LSTM_1.zip");

    ComputationGraph net = ModelSerializer.restoreComputationGraph(f, true);

    ComputationGraphConfiguration conf = net.getConfiguration();
    assertEquals(3, conf.getVertices().size());

    GravesLSTM l0 = (GravesLSTM) ((LayerVertex) conf.getVertices().get("0")).getLayerConf().getLayer();
    assertEquals("tanh", l0.getActivationFn().toString());
    assertEquals(3, l0.getNIn());
    assertEquals(4, l0.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l0.getGradientNormalization());
    assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5);

    GravesBidirectionalLSTM l1 =
                    (GravesBidirectionalLSTM) ((LayerVertex) conf.getVertices().get("1")).getLayerConf().getLayer();
    assertEquals("softsign", l1.getActivationFn().toString());
    assertEquals(4, l1.getNIn());
    assertEquals(4, l1.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization());
    assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5);

    RnnOutputLayer l2 = (RnnOutputLayer) ((LayerVertex) conf.getVertices().get("2")).getLayerConf().getLayer();
    assertEquals(4, l2.getNIn());
    assertEquals(5, l2.getNOut());
    assertEquals("softmax", l2.getActivationFn().toString());
    assertTrue(l2.getLossFn() instanceof LossMCXENT);
}
 
Example #8
Source File: RegressionTest060.java    From deeplearning4j with Apache License 2.0
@Test
public void regressionTestLSTM1() throws Exception {

    File f = Resources.asFile("regression_testing/060/060_ModelSerializer_Regression_LSTM_1.zip");

    MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true);

    MultiLayerConfiguration conf = net.getLayerWiseConfigurations();
    assertEquals(3, conf.getConfs().size());

    GravesLSTM l0 = (GravesLSTM) conf.getConf(0).getLayer();
    assertEquals("tanh", l0.getActivationFn().toString());
    assertEquals(3, l0.getNIn());
    assertEquals(4, l0.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l0.getGradientNormalization());
    assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5);

    GravesBidirectionalLSTM l1 = (GravesBidirectionalLSTM) conf.getConf(1).getLayer();
    assertEquals("softsign", l1.getActivationFn().toString());
    assertEquals(4, l1.getNIn());
    assertEquals(4, l1.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization());
    assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5);

    RnnOutputLayer l2 = (RnnOutputLayer) conf.getConf(2).getLayer();
    assertEquals(4, l2.getNIn());
    assertEquals(5, l2.getNOut());
    assertEquals("softmax", l2.getActivationFn().toString());
    assertTrue(l2.getLossFn() instanceof LossMCXENT);
}
 
Example #9
Source File: RegressionTest060.java    From deeplearning4j with Apache License 2.0
@Test
public void regressionTestCGLSTM1() throws Exception {

    File f = Resources.asFile("regression_testing/060/060_ModelSerializer_Regression_CG_LSTM_1.zip");

    ComputationGraph net = ModelSerializer.restoreComputationGraph(f, true);

    ComputationGraphConfiguration conf = net.getConfiguration();
    assertEquals(3, conf.getVertices().size());

    GravesLSTM l0 = (GravesLSTM) ((LayerVertex) conf.getVertices().get("0")).getLayerConf().getLayer();
    assertEquals("tanh", l0.getActivationFn().toString());
    assertEquals(3, l0.getNIn());
    assertEquals(4, l0.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l0.getGradientNormalization());
    assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5);

    GravesBidirectionalLSTM l1 =
                    (GravesBidirectionalLSTM) ((LayerVertex) conf.getVertices().get("1")).getLayerConf().getLayer();
    assertEquals("softsign", l1.getActivationFn().toString());
    assertEquals(4, l1.getNIn());
    assertEquals(4, l1.getNOut());
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization());
    assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5);

    RnnOutputLayer l2 = (RnnOutputLayer) ((LayerVertex) conf.getVertices().get("2")).getLayerConf().getLayer();
    assertEquals(4, l2.getNIn());
    assertEquals(5, l2.getNOut());
    assertEquals("softmax", l2.getActivationFn().toString());
    assertTrue(l2.getLossFn() instanceof LossMCXENT);
}
 
Example #10
Source File: LocalResponseTest.java    From deeplearning4j with Apache License 2.0
@Test
public void testRegularization() {
    // Confirm that a configuration with regularization enabled does not throw an error

    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).l1(0.2)
                    .l2(0.1).seed(123)
                    .layer(new LocalResponseNormalization.Builder().k(2).n(5).alpha(1e-4).beta(0.75).build())
                    .build();
}
 
Example #11
Source File: Upsampling1DTest.java    From deeplearning4j with Apache License 2.0
private Layer getUpsampling1DLayer() {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123)
                    .layer(new Upsampling1D.Builder(size).build()).build();
    return conf.getLayer().instantiate(conf, null, 0,
            null, true, Nd4j.defaultFloatingPointType());
}
 
Example #12
Source File: Convolution3DTest.java    From deeplearning4j with Apache License 2.0
private Layer getConvolution3DLayer(ConvolutionMode mode) {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123)
            .layer(new Convolution3D.Builder().kernelSize(kernelSize).nIn(nChannelsIn).nOut(nChannelsOut)
                    .dataFormat(Convolution3D.DataFormat.NCDHW).convolutionMode(mode).hasBias(false)
                    .build())
            .build();
    long numParams = conf.getLayer().initializer().numParams(conf);
    INDArray params = Nd4j.ones(1, numParams);
    return conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType());
}
 
Example #13
Source File: SubsamplingLayerTest.java    From deeplearning4j with Apache License 2.0
private Layer getSubsamplingLayer(SubsamplingLayer.PoolingType pooling) {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123)
                    .layer(new SubsamplingLayer.Builder(pooling, new int[] {2, 2}).build()).build();

    return conf.getLayer().instantiate(conf, null, 0, null, true, Nd4j.defaultFloatingPointType());
}
 
Example #14
Source File: DataSetIteratorTest.java    From deeplearning4j with Apache License 2.0
@Test
    public void testLfwModel() throws Exception {
        final int numRows = 28;
        final int numColumns = 28;
        int numChannels = 3;
        int outputNum = LFWLoader.NUM_LABELS;
        int numSamples = LFWLoader.NUM_IMAGES;
        int batchSize = 2;
        int seed = 123;
        int listenerFreq = 1;

        LFWDataSetIterator lfw = new LFWDataSetIterator(batchSize, numSamples,
                        new int[] {numRows, numColumns, numChannels}, outputNum, false, true, 1.0, new Random(seed));

        MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed)
                        .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
                        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
                        .layer(0, new ConvolutionLayer.Builder(5, 5).nIn(numChannels).nOut(6)
                                        .weightInit(WeightInit.XAVIER).activation(Activation.RELU).build())
                        .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
                                        .stride(1, 1).build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                                        .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                                        .build())
                        .setInputType(InputType.convolutionalFlat(numRows, numColumns, numChannels))
                        ;

        MultiLayerNetwork model = new MultiLayerNetwork(builder.build());
        model.init();

        model.setListeners(new ScoreIterationListener(listenerFreq));

        model.fit(lfw.next());

        DataSet dataTest = lfw.next();
        INDArray output = model.output(dataTest.getFeatures());
        Evaluation eval = new Evaluation(outputNum);
        eval.eval(dataTest.getLabels(), output);
//        System.out.println(eval.stats());
    }
 
Example #15
Source File: SpaceToDepthTest.java    From deeplearning4j with Apache License 2.0
private Layer getSpaceToDepthLayer() {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123)
            .layer(new SpaceToDepthLayer.Builder(blockSize, dataFormat).build()).build();
    return conf.getLayer().instantiate(conf, null, 0, null, true, Nd4j.defaultFloatingPointType());
}
 
Example #16
Source File: NoParamLayer.java    From deeplearning4j with Apache License 2.0
@Override
public GradientNormalization getGradientNormalization() {
    return GradientNormalization.None;
}
 
Example #17
Source File: TestEarlyStopping.java    From deeplearning4j with Apache License 2.0
@Test
public void testEarlyStoppingMaximizeScore() throws Exception {
    Nd4j.getRandom().setSeed(12345);

    int outputs = 2;

    DataSet ds = new DataSet(
            Nd4j.rand(new int[]{3, 10, 50}),
            TestUtils.randomOneHotTimeSeries(3, outputs, 50, 12345));
    DataSetIterator train = new ExistingDataSetIterator(
            Arrays.asList(ds, ds, ds, ds, ds, ds, ds, ds, ds, ds));
    DataSetIterator test = new SingletonDataSetIterator(ds);


    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(123)
            .weightInit(WeightInit.XAVIER)
            .updater(new Adam(0.1))
            .activation(Activation.ELU)
            .l2(1e-5)
            .gradientNormalization(GradientNormalization
                    .ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)
            .list()
            .layer(0, new LSTM.Builder()
                    .nIn(10)
                    .nOut(10)
                    .activation(Activation.TANH)
                    .gateActivationFunction(Activation.SIGMOID)
                    .dropOut(0.5)
                    .build())
            .layer(1, new RnnOutputLayer.Builder()
                    .nIn(10)
                    .nOut(outputs)
                    .activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT)
                    .build())
            .build();

    File f = testDir.newFolder();
    EarlyStoppingModelSaver<MultiLayerNetwork> saver = new LocalFileModelSaver(f.getAbsolutePath());
    EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
            new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                    .epochTerminationConditions(
                            new MaxEpochsTerminationCondition(10),
                            new ScoreImprovementEpochTerminationCondition(1))
                    .iterationTerminationConditions(
                            new MaxTimeIterationTerminationCondition(10, TimeUnit.MINUTES))
                    .scoreCalculator(new ClassificationScoreCalculator(Evaluation.Metric.F1, test))
                    .modelSaver(saver)
                    .saveLastModel(true)
                    .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    EarlyStoppingTrainer t = new EarlyStoppingTrainer(esConf, net, train);
    EarlyStoppingResult<MultiLayerNetwork> result = t.fit();

    Map<Integer,Double> map = result.getScoreVsEpoch();
    for( int i=1; i<map.size(); i++ ){
        if(i == map.size() - 1){
            assertTrue(map.get(i) <= map.get(i-1));
        } else {
            assertTrue(map.get(i) > map.get(i-1));
        }
    }
}
 
Example #18
Source File: RegressionTest060.java    From deeplearning4j with Apache License 2.0
@Test
public void regressionTestMLP2() throws Exception {

    File f = Resources.asFile("regression_testing/060/060_ModelSerializer_Regression_MLP_2.zip");

    MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true);

    MultiLayerConfiguration conf = net.getLayerWiseConfigurations();
    assertEquals(2, conf.getConfs().size());

    DenseLayer l0 = (DenseLayer) conf.getConf(0).getLayer();
    assertTrue(l0.getActivationFn() instanceof ActivationLReLU);
    assertEquals(3, l0.getNIn());
    assertEquals(4, l0.getNOut());
    assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInitFn());
    assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater());
    assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6);
    assertEquals(new Dropout(0.6), l0.getIDropout());
    assertEquals(0.1, TestUtils.getL1(l0), 1e-6);
    assertEquals(new WeightDecay(0.2, false), TestUtils.getWeightDecayReg(l0));
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l0.getGradientNormalization());
    assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5);

    OutputLayer l1 = (OutputLayer) conf.getConf(1).getLayer();
    assertEquals("identity", l1.getActivationFn().toString());
    assertTrue(l1.getLossFn() instanceof LossMSE);
    assertEquals(4, l1.getNIn());
    assertEquals(5, l1.getNOut());
    assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l1.getWeightInitFn());
    assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l1.getIUpdater());
    assertEquals(0.15, ((RmsProp)l1.getIUpdater()).getLearningRate(), 1e-6);
    assertEquals(new Dropout(0.6), l1.getIDropout());
    assertEquals(0.1, TestUtils.getL1(l1), 1e-6);
    assertEquals(new WeightDecay(0.2,false), TestUtils.getWeightDecayReg(l1));
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization());
    assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5);

    int numParams = (int)net.numParams();
    assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.params());
    int updaterSize = (int) new RmsProp().stateSize(numParams);
    assertEquals(Nd4j.linspace(1, updaterSize, updaterSize, Nd4j.dataType()).reshape(1,numParams), net.getUpdater().getStateViewArray());
}
 
Example #19
Source File: Upsampling2DTest.java    From deeplearning4j with Apache License 2.0
private Layer getUpsamplingLayer() {
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
                    .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123)
                    .layer(new Upsampling2D.Builder(size).build()).build();
    return conf.getLayer().instantiate(conf, null, 0, null, true, Nd4j.defaultFloatingPointType());
}
 
Example #20
Source File: RegressionTest071.java    From deeplearning4j with Apache License 2.0
@Test
public void regressionTestMLP2() throws Exception {

    File f = Resources.asFile("regression_testing/071/071_ModelSerializer_Regression_MLP_2.zip");

    MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true);

    MultiLayerConfiguration conf = net.getLayerWiseConfigurations();
    assertEquals(2, conf.getConfs().size());

    DenseLayer l0 = (DenseLayer) conf.getConf(0).getLayer();
    assertTrue(l0.getActivationFn() instanceof ActivationLReLU);
    assertEquals(3, l0.getNIn());
    assertEquals(4, l0.getNOut());
    assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInitFn());
    assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater());
    assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6);
    assertEquals(new Dropout(0.6), l0.getIDropout());
    assertEquals(0.1, TestUtils.getL1(l0), 1e-6);
    assertEquals(new WeightDecay(0.2,false), TestUtils.getWeightDecayReg(l0));
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l0.getGradientNormalization());
    assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5);

    OutputLayer l1 = (OutputLayer) conf.getConf(1).getLayer();
    assertTrue(l1.getActivationFn() instanceof ActivationIdentity);
    assertTrue(l1.getLossFn() instanceof LossMSE);
    assertEquals(4, l1.getNIn());
    assertEquals(5, l1.getNOut());
    assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l1.getWeightInitFn());
    assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l1.getIUpdater());
    assertEquals(0.15, ((RmsProp)l1.getIUpdater()).getLearningRate(), 1e-6);
    assertEquals(new Dropout(0.6), l1.getIDropout());
    assertEquals(0.1, TestUtils.getL1(l1), 1e-6);
    assertEquals(new WeightDecay(0.2,false), TestUtils.getWeightDecayReg(l1));
    assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization());
    assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5);

    long numParams = net.numParams();
    assertEquals(Nd4j.linspace(1, numParams, numParams).reshape(1,numParams), net.params());
    int updaterSize = (int) new RmsProp().stateSize(numParams);
    assertEquals(Nd4j.linspace(1, updaterSize, updaterSize).reshape(1,numParams), net.getUpdater().getStateViewArray());
}
 
Example #21
Source File: DataSetIteratorTest.java    From deeplearning4j with Apache License 2.0
public void runCifar(boolean preProcessCifar) throws Exception {
        final int height = 32;
        final int width = 32;
        int channels = 3;
        int outputNum = CifarLoader.NUM_LABELS;
        int batchSize = 5;
        int seed = 123;
        int listenerFreq = 1;

        Cifar10DataSetIterator cifar = new Cifar10DataSetIterator(batchSize);

        MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed)
                        .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
                        .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list()
                        .layer(0, new ConvolutionLayer.Builder(5, 5).nIn(channels).nOut(6).weightInit(WeightInit.XAVIER)
                                        .activation(Activation.RELU).build())
                        .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
                                        .build())
                        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                                        .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX)
                                        .build())

                        .setInputType(InputType.convolutionalFlat(height, width, channels));

        MultiLayerNetwork model = new MultiLayerNetwork(builder.build());
        model.init();

        //model.setListeners(Arrays.asList((TrainingListener) new ScoreIterationListener(listenerFreq)));

        CollectScoresIterationListener listener = new CollectScoresIterationListener(listenerFreq);
        model.setListeners(listener);

        model.fit(cifar);

        cifar = new Cifar10DataSetIterator(batchSize);
        Evaluation eval = new Evaluation(cifar.getLabels());
        while (cifar.hasNext()) {
            DataSet testDS = cifar.next(batchSize);
            INDArray output = model.output(testDS.getFeatures());
            eval.eval(testDS.getLabels(), output);
        }
//        System.out.println(eval.stats(true));
        listener.exportScores(System.out);
    }
 
Example #22
Source File: TestConvolution.java    From deeplearning4j with Apache License 2.0
@Test
    public void testGradientNorm() throws Exception {

        int height = 100;
        int width = 100;
        int channels = 1;
        int numLabels = 10;

        for( int batchSize : new int[]{1, 32}) {

            long seed = 12345;
            double nonZeroBias = 1;

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                    .seed(seed)
                    .dataType(DataType.DOUBLE)
                    .dist(new NormalDistribution(0.0, 0.01))
                    .activation(Activation.RELU)
                    .updater(new Adam(5e-3))
                    //.biasUpdater(new Nesterovs(new StepSchedule(ScheduleType.ITERATION, 2e-2, 0.1, 20000), 0.9))
                    .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
                    .l2(5 * 1e-4)
                    .list()
                    .layer(convInit("cnn1", channels, 96, new int[]{11, 11}, new int[]{4, 4},
                            new int[]{3, 3}, 0))
                    .layer(maxPool("maxpool1", new int[]{3, 3}))
                    .layer(conv5x5("cnn2", 256, new int[]{1, 1}, new int[]{2, 2}, nonZeroBias))
                    .layer(maxPool("maxpool2", new int[]{3, 3}))
                    .layer(conv3x3("cnn3", 384, 0))
                    .layer(conv3x3("cnn4", 384, nonZeroBias))
                    .layer(conv3x3("cnn5", 256, nonZeroBias))
                    .layer(maxPool("maxpool3", new int[]{3, 3}))
                    .layer(fullyConnected("ffn1", 4096, nonZeroBias, new GaussianDistribution(0, 0.005)))
                    .layer(fullyConnected("ffn2", 4096, nonZeroBias, new GaussianDistribution(0, 0.005)))
                    .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                            .name("output")
                            .nOut(numLabels)
                            .activation(Activation.SOFTMAX)
                            .build())
                    .setInputType(InputType.convolutional(height, width, channels))
                    .build();


            MultiLayerNetwork netNoCudnn = new MultiLayerNetwork(conf.clone());
            netNoCudnn.init();
            MultiLayerNetwork netWithCudnn = new MultiLayerNetwork(conf.clone());
            netWithCudnn.init();

            CuDNNTestUtils.removeHelpers(netNoCudnn.getLayers());



            Nd4j.getRandom().setSeed(12345);
            for( int j=0; j<3; j++ ) {
//                System.out.println("j=" + j);
                INDArray f = Nd4j.rand(new int[]{batchSize, channels, height, width});
                INDArray l = TestUtils.randomOneHot(batchSize, numLabels);

                netNoCudnn.fit(f, l);
                netWithCudnn.fit(f, l);

                assertEquals(netNoCudnn.score(), netWithCudnn.score(), 1e-5);

                for (Map.Entry<String, INDArray> e : netNoCudnn.paramTable().entrySet()) {
                    boolean pEq = e.getValue().equalsWithEps(netWithCudnn.paramTable().get(e.getKey()), 1e-4);
//                    int idx = e.getKey().indexOf("_");
//                    int layerNum = Integer.parseInt(e.getKey().substring(0, idx));
                    //System.out.println(e.getKey() + " - " + pEq + " - " + netNoCudnn.getLayer(layerNum).getClass().getSimpleName());
                    assertTrue(pEq);
                }

                boolean eq = netNoCudnn.params().equalsWithEps(netWithCudnn.params(), 1e-4);
                assertTrue(eq);
            }
        }
    }
 
Example #23
Source File: DummyConfig.java    From deeplearning4j with Apache License 2.0
@Override
public GradientNormalization getGradientNormalization() {
    return GradientNormalization.None;
}
 
Example #24
Source File: DL4JSequenceRecommenderTraits.java    From inception with Apache License 2.0
public GradientNormalization getGradientNormalization()
{
    return gradientNormalization;
}
 
Example #25
Source File: DL4JSequenceRecommenderTraits.java    From inception with Apache License 2.0
public void setGradientNormalization(GradientNormalization gradientNormalization)
{
    this.gradientNormalization = gradientNormalization;
}
 
Example #26
Source File: TrainCifar10Model.java    From Java-Machine-Learning-for-Computer-Vision with MIT License
private void train() throws IOException {

        ZooModel zooModel = VGG16.builder().build();
        ComputationGraph vgg16 = (ComputationGraph) zooModel.initPretrained(PretrainedType.CIFAR10);
        log.info(vgg16.summary());

        IUpdater iUpdaterWithDefaultConfig = Updater.ADAM.getIUpdaterWithDefaultConfig();
        iUpdaterWithDefaultConfig.setLrAndSchedule(0.1, null);
        FineTuneConfiguration fineTuneConf = new FineTuneConfiguration.Builder()
                .seed(1234)
//                .weightInit(WeightInit.XAVIER)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .activation(Activation.RELU)
                .updater(iUpdaterWithDefaultConfig)
                .cudnnAlgoMode(ConvolutionLayer.AlgoMode.NO_WORKSPACE)
                .miniBatch(true)
                .inferenceWorkspaceMode(WorkspaceMode.ENABLED)
                .trainingWorkspaceMode(WorkspaceMode.ENABLED)
                .pretrain(true)
                .backprop(true)
                .build();

        ComputationGraph cifar10 = new TransferLearning.GraphBuilder(vgg16)
                .setWorkspaceMode(WorkspaceMode.ENABLED)
                .fineTuneConfiguration(fineTuneConf)
                .setInputTypes(InputType.convolutionalFlat(ImageUtils.HEIGHT,
                        ImageUtils.WIDTH, 3))
                .removeVertexAndConnections("dense_2_loss")
                .removeVertexAndConnections("dense_2")
                .removeVertexAndConnections("dense_1")
                .removeVertexAndConnections("dropout_1")
                .removeVertexAndConnections("embeddings")
                .removeVertexAndConnections("flatten_1")
                .addLayer("dense_1", new DenseLayer.Builder()
                        .nIn(4096)
                        .nOut(EMBEDDINGS)
                        .activation(Activation.RELU).build(), "block3_pool")
                .addVertex("embeddings", new L2NormalizeVertex(new int[]{}, 1e-12), "dense_1")
                .addLayer("lossLayer", new CenterLossOutputLayer.Builder()
                                .lossFunction(LossFunctions.LossFunction.SQUARED_LOSS)
                                .activation(Activation.SOFTMAX).nIn(EMBEDDINGS).nOut(NUM_POSSIBLE_LABELS)
                                .lambda(LAMBDA).alpha(0.9)
                                .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).build(),
                        "embeddings")
                .setOutputs("lossLayer")
                .build();

        log.info(cifar10.summary());
        File rootDir = new File("CarTracking/train_from_video_" + NUM_POSSIBLE_LABELS);
        DataSetIterator dataSetIterator = ImageUtils.createDataSetIterator(rootDir, NUM_POSSIBLE_LABELS, BATCH_SIZE);
        DataSetIterator testIterator = ImageUtils.createDataSetIterator(rootDir, NUM_POSSIBLE_LABELS, BATCH_SIZE);
        cifar10.setListeners(new ScoreIterationListener(2));
        int iEpoch = I_EPOCH;
        while (iEpoch < EPOCH_TRAINING) {
            while (dataSetIterator.hasNext()) {
                DataSet trainMiniBatchData = null;
                try {
                    trainMiniBatchData = dataSetIterator.next();
                } catch (Exception e) {
                    e.printStackTrace();
                }
                cifar10.fit(trainMiniBatchData);
            }
            iEpoch++;

            String modelName = PREFIX + NUM_POSSIBLE_LABELS + "_epoch_data_e" + EMBEDDINGS + "_b" + BATCH_SIZE + "_" + iEpoch + ".zip";
            saveProgress(cifar10, iEpoch, modelName);
            testResults(cifar10, testIterator, iEpoch, modelName);
            dataSetIterator.reset();
            log.info("iEpoch = " + iEpoch);
        }
    }
 
Example #27
Source File: DL4JSentimentAnalysisExample.java    From Java-for-Data-Science with MIT License
public static void main(String[] args) throws Exception {

        getModelData();
        
        System.out.println("Total memory = " + Runtime.getRuntime().totalMemory());

        int batchSize = 50;
        int vectorSize = 300;
        int nEpochs = 5;
        int truncateReviewsToLength = 300;

        MultiLayerConfiguration sentimentNN = new NeuralNetConfiguration.Builder()
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).iterations(1)
                .updater(Updater.RMSPROP)
                .regularization(true).l2(1e-5)
                .weightInit(WeightInit.XAVIER)
                .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue).gradientNormalizationThreshold(1.0)
                .learningRate(0.0018)
                .list()
                .layer(0, new GravesLSTM.Builder().nIn(vectorSize).nOut(200)
                        .activation("softsign").build())
                .layer(1, new RnnOutputLayer.Builder().activation("softmax")
                        .lossFunction(LossFunctions.LossFunction.MCXENT).nIn(200).nOut(2).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork net = new MultiLayerNetwork(sentimentNN);
        net.init();
        net.setListeners(new ScoreIterationListener(1));

        WordVectors wordVectors = WordVectorSerializer.loadGoogleModel(new File(GNEWS_VECTORS_PATH), true, false);
        DataSetIterator trainData = new AsyncDataSetIterator(new SentimentExampleIterator(EXTRACT_DATA_PATH, wordVectors, batchSize, truncateReviewsToLength, true), 1);
        DataSetIterator testData = new AsyncDataSetIterator(new SentimentExampleIterator(EXTRACT_DATA_PATH, wordVectors, 100, truncateReviewsToLength, false), 1);

        for (int i = 0; i < nEpochs; i++) {
            net.fit(trainData);
            trainData.reset();

            Evaluation evaluation = new Evaluation();
            while (testData.hasNext()) {
                DataSet t = testData.next();
                INDArray dataFeatures = t.getFeatureMatrix();
                INDArray dataLabels = t.getLabels();
                INDArray inMask = t.getFeaturesMaskArray();
                INDArray outMask = t.getLabelsMaskArray();
                INDArray predicted = net.output(dataFeatures, false, inMask, outMask);

                evaluation.evalTimeSeries(dataLabels, predicted, outMask);
            }
            testData.reset();

            System.out.println(evaluation.stats());
        }
    }
 
Example #28
Source File: AlexNetTrain.java    From dl4j-tutorials with MIT License
public static  MultiLayerNetwork alexnetModel() {
    /**
     * AlexNet model interpretation based on the original paper, "ImageNet Classification with Deep Convolutional
     * Neural Networks", and the referenced imagenetExample code.
     * http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf
     **/

    double nonZeroBias = 1;
    double dropOut = 0.8;

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .weightInit(WeightInit.DISTRIBUTION)
            .dist(new NormalDistribution(0.0, 0.01))
            .activation(Activation.RELU)
            .updater(new Nesterovs(new StepSchedule(ScheduleType.ITERATION, 0.1, 0.1, 100000), 0.9))
            .biasUpdater(new Nesterovs(new StepSchedule(ScheduleType.ITERATION, 0.2, 0.1, 100000), 0.9))
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) // normalize to prevent vanishing or exploding gradients
            //.l2(5 * 1e-4)
            .list()
            .layer(0, convInit("cnn1", channels, 96, new int[]{11, 11}, new int[]{4, 4}, new int[]{3, 3}, 0))
            .layer(1, new LocalResponseNormalization.Builder().name("lrn1").build())
            .layer(2, maxPool("maxpool1", new int[]{3,3}))
            .layer(3, conv5x5("cnn2", 256, new int[] {1,1}, new int[] {2,2}, nonZeroBias))
            .layer(4, new LocalResponseNormalization.Builder().name("lrn2").build())
            .layer(5, maxPool("maxpool2", new int[]{3,3}))
            .layer(6,conv3x3("cnn3", 384, 0))
            .layer(7,conv3x3("cnn4", 384, nonZeroBias))
            .layer(8,conv3x3("cnn5", 256, nonZeroBias))
            .layer(9, maxPool("maxpool3", new int[]{3,3}))
            .layer(10, fullyConnected("ffn1", 4096, nonZeroBias, dropOut, new GaussianDistribution(0, 0.005)))
            .layer(11, fullyConnected("ffn2", 4096, nonZeroBias, dropOut, new GaussianDistribution(0, 0.005)))
            .layer(12, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .name("output")
                    .nOut(numLabels)
                    .activation(Activation.SOFTMAX)
                    .build())
            .backprop(true)
            .pretrain(false)
            .setInputType(InputType.convolutional(height, width, channels))
            .build();

    return new MultiLayerNetwork(conf);

}
 
Example #29
Source File: NeuralNetworks.java    From Machine-Learning-in-Java with MIT License 4 votes vote down vote up
private static MultiLayerNetwork deepBeliefNetwork(int seed,
		int iterations, int numRows, int numColumns, int outputNum) {
	MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
			.seed(seed)
			.gradientNormalization(
					GradientNormalization.ClipElementWiseAbsoluteValue)
			.gradientNormalizationThreshold(1.0)
			.iterations(iterations)
			.momentum(0.5)
			.momentumAfter(Collections.singletonMap(3, 0.9))
			.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
			.list(4)
			.layer(0,
					new RBM.Builder().nIn(numRows * numColumns).nOut(500)
							.weightInit(WeightInit.XAVIER)
							.lossFunction(LossFunction.RMSE_XENT)
							.visibleUnit(RBM.VisibleUnit.BINARY)
							.hiddenUnit(RBM.HiddenUnit.BINARY).build())
			.layer(1,
					new RBM.Builder().nIn(500).nOut(250)
							.weightInit(WeightInit.XAVIER)
							.lossFunction(LossFunction.RMSE_XENT)
							.visibleUnit(RBM.VisibleUnit.BINARY)
							.hiddenUnit(RBM.HiddenUnit.BINARY).build())
			.layer(2,
					new RBM.Builder().nIn(250).nOut(200)
							.weightInit(WeightInit.XAVIER)
							.lossFunction(LossFunction.RMSE_XENT)
							.visibleUnit(RBM.VisibleUnit.BINARY)
							.hiddenUnit(RBM.HiddenUnit.BINARY).build())
			.layer(3,
					new OutputLayer.Builder(
							LossFunction.NEGATIVELOGLIKELIHOOD)
							.activation("softmax").nIn(200).nOut(outputNum)
							.build()).pretrain(true).backprop(false)
			.build();

	MultiLayerNetwork model = new MultiLayerNetwork(conf);

	return model;
}
 
Example #30
Source File: TFOpLayer.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
@Override
public GradientNormalization getGradientNormalization(){return null;}