Java Code Examples for weka.classifiers.Evaluation#pctCorrect()

The following examples show how to use weka.classifiers.Evaluation#pctCorrect(). You can vote up the examples you find useful or vote down those you don't, and you can open the original project or source file by following the links above each example. Related API usage is listed in the sidebar.
Example 1
Source File: StabilityTest.java    From wekaDeeplearning4j with GNU General Public License v3.0 6 votes vote down vote up
/**
 * Trains the given classifier on a train/test split of {@code data} and asserts that both
 * the train and the test accuracy (percent correct) strictly exceed {@code minPerfomance}.
 *
 * @param clf            the DL4J MLP classifier under test
 * @param data           full dataset; split into train/test via {@link TestUtil#splitTrainTest}
 * @param minPerfomance  minimum percent-correct threshold (exclusive) for both splits
 * @throws Exception if building or evaluating the classifier fails
 */
public static void evaluate(Dl4jMlpClassifier clf, Instances data, double minPerfomance)
    throws Exception {
  Instances[] split = TestUtil.splitTrainTest(data);

  Instances train = split[0];
  Instances test = split[1];

  clf.buildClassifier(train);
  Evaluation trainEval = new Evaluation(train);
  trainEval.evaluateModel(clf, train);

  // Priors are taken from the training set, which is the conventional Weka setup.
  Evaluation testEval = new Evaluation(train);
  testEval.evaluateModel(clf, test);

  final double testPctCorrect = testEval.pctCorrect();
  final double trainPctCorrect = trainEval.pctCorrect();

  log.info("Train: {}, Test: {}", trainPctCorrect, testPctCorrect);
  boolean success = testPctCorrect > minPerfomance && trainPctCorrect > minPerfomance;
  // Use parameterized logging instead of string concatenation.
  log.info("Success: {}", success);

  log.info(clf.getModel().conf().toYaml());
  // Failure means at least one accuracy was <= the threshold (success requires strictly >).
  Assert.assertTrue("Performance was <= " + minPerfomance + ". TestPctCorrect: " + testPctCorrect
      + ", TrainPctCorrect: " + trainPctCorrect, success);
}
 
Example 2
Source File: StabilityTest.java    From wekaDeeplearning4j with GNU General Public License v3.0 6 votes vote down vote up
/**
 * Builds the classifier on a train partition and checks that percent-correct on both the
 * train and test partitions lies strictly above {@code minPerfomance}.
 *
 * @param clf            classifier to train and evaluate
 * @param data           dataset to be partitioned by {@link TestUtil#splitTrainTest}
 * @param minPerfomance  exclusive lower bound on acceptable percent correct
 * @throws Exception on any training or evaluation failure
 */
public static void evaluate(Dl4jMlpClassifier clf, Instances data, double minPerfomance)
    throws Exception {
  Instances[] partitions = TestUtil.splitTrainTest(data);
  Instances trainSet = partitions[0];
  Instances testSet = partitions[1];

  clf.buildClassifier(trainSet);

  Evaluation evalOnTrain = new Evaluation(trainSet);
  evalOnTrain.evaluateModel(clf, trainSet);

  Evaluation evalOnTest = new Evaluation(trainSet);
  evalOnTest.evaluateModel(clf, testSet);

  final double trainPctCorrect = evalOnTrain.pctCorrect();
  final double testPctCorrect = evalOnTest.pctCorrect();

  log.info("Train: {}, Test: {}", trainPctCorrect, testPctCorrect);

  boolean success = trainPctCorrect > minPerfomance && testPctCorrect > minPerfomance;
  log.info("Success: " + success);
  log.info(clf.getModel().conf().toYaml());

  Assert.assertTrue("Performance was < " + minPerfomance + ". TestPctCorrect: " + testPctCorrect
      + ", TrainPctCorrect: " + trainPctCorrect, success);
}
 
Example 3
Source File: EvaluationUtils.java    From AILibs with GNU Affero General Public License v3.0 5 votes vote down vote up
/**
 * Computes a heuristic "difficulty" score for a dataset as one minus a weighted mix of
 * three signals: mean ReliefF attribute merit, kNN accuracy on a stratified split, and
 * mean variance of the numeric attributes.
 *
 * @param instances the dataset to score (only a 5% stratified subsample is used)
 * @return 1 - (0.33 * reliefMean + 0.33 * knnAccuracy + 0.33 * varianceMean)
 * @throws Exception if any Weka component fails
 */
public static double performEnsemble(Instances instances) throws Exception {
	// Work on a small stratified subsample to keep the computation cheap.
	List<Instances> subsample = WekaUtil.getStratifiedSplit(instances, 42, .05f);
	instances = subsample.get(0);

	/* Signal 1: mean ReliefF merit over the non-class attributes. */
	ReliefFAttributeEval relief = new ReliefFAttributeEval();
	relief.buildEvaluator(instances);
	double attEvalSum = 0;
	int lastAttribute = instances.numAttributes() - 1; // class attribute excluded
	for (int att = 0; att < lastAttribute; att++) {
		attEvalSum += relief.evaluateAttribute(att);
	}
	// NOTE(review): divides by numAttributes() although only numAttributes()-1 values were
	// summed; likewise the weights below sum to 0.99, not 1. Preserved as-is — confirm intent.
	attEvalSum /= instances.numAttributes();

	/* Signal 2: mean variance across the numeric attributes. */
	double varianceMean = 0;
	int totalNumericCount = 0;
	for (int att = 0; att < lastAttribute; att++) {
		if (instances.attribute(att).isNumeric()) {
			instances.attributeStats(att).numericStats.calculateDerived();
			double stdDev = instances.attributeStats(att).numericStats.stdDev;
			varianceMean += Math.pow(stdDev, 2);
			totalNumericCount++;
		}
	}
	varianceMean /= (totalNumericCount != 0 ? totalNumericCount : 1);

	/* Signal 3: accuracy of a 10-NN classifier on a 70/30 stratified split. */
	List<Instances> split = WekaUtil.getStratifiedSplit(instances, 42, .7f);
	IBk knn = new IBk(10);
	knn.buildClassifier(split.get(0));
	Evaluation eval = new Evaluation(split.get(0));
	eval.evaluateModel(knn, split.get(1));
	double knnResult = eval.pctCorrect() / 100d;

	return 1 - (0.33 * attEvalSum + 0.33 * knnResult + 0.33 * varianceMean);
}
 
Example 4
Source File: EvaluationUtils.java    From AILibs with GNU Affero General Public License v3.0 5 votes vote down vote up
/**
 * Runs ML-Plan on the training data with the given timeout/seed/core budget and returns the
 * percent-correct of the selected model on the test data.
 *
 * @param timeout  search timeout in seconds
 * @param training training instances handed to ML-Plan
 * @param test     held-out instances used for the final evaluation
 * @param seed     random seed for the ML-Plan search
 * @param logger   logger for progress and diagnostics
 * @param numCores number of CPU cores ML-Plan may use
 * @return percent correct on {@code test}, or -1 if ML-Plan found no model
 * @throws Exception if the search or the final evaluation fails
 */
public static double evaluateMLPlan(final int timeout, final Instances training, final Instances test,
		final int seed, final Logger logger, final int numCores)
				throws Exception {

	logger.debug("Starting ML-Plan execution. Training on {} instances with "
			+ "{} attributes.", training.numInstances(), training.numAttributes());

	/* Initialize MLPlan using WEKA components */
	MLPlanWekaBuilder builder = AbstractMLPlanBuilder.forWeka();
	builder.withTimeOut(new Timeout(timeout, TimeUnit.SECONDS));
	builder.withNumCpus(numCores);
	builder.withDataset(training);
	MLPlan mlplan = builder.build();
	mlplan.setRandomSeed(seed);
	Classifier clf = mlplan.call();

	// Hoist the selected classifier into a local to avoid repeated calls and casts.
	MLPipeline selected = (MLPipeline) mlplan.getSelectedClassifier();
	if (selected == null || selected.getBaseClassifier() == null) {
		logger.warn("Could not find a model using ML-Plan. Returning -1...");
		return -1;
	}

	String solutionString = selected.getBaseClassifier().getClass().getName()
			+ " | " + selected.getPreprocessors();
	logger.debug("Selected classifier: {}", solutionString);

	/* evaluate solution produced by mlplan */
	Evaluation eval = new Evaluation(training);
	eval.evaluateModel(clf, test);

	return eval.pctCorrect();
}
 
Example 5
Source File: Util.java    From AILibs with GNU Affero General Public License v3.0 5 votes vote down vote up
/**
 * Conducts ten repetitions of a single one-step-reduction experiment: each repetition builds
 * an RPND class split, trains an {@link MCTreeNodeReD} on a 70/30 stratified split, and
 * records the 0-1 loss and training time.
 *
 * @param experiment experiment configuration (dataset path, seed, classifier names)
 * @return one result map per repetition with keys {@code "errorRate"} and {@code LABEL_TRAIN_TIME}
 * @throws Exception if the dataset cannot be read or any classifier fails
 */
public static List<Map<String, Object>> conductSingleOneStepReductionExperiment(final ReductionExperiment experiment) throws Exception {
	/* load data — try-with-resources closes the reader (it previously leaked) */
	Instances data;
	try (BufferedReader reader = new BufferedReader(new FileReader(experiment.getDataset()))) {
		data = new Instances(reader);
	}
	data.setClassIndex(data.numAttributes() - 1);

	/* prepare basis for experiments */
	int seed = experiment.getSeed();
	Classifier classifierForRPNDSplit = AbstractClassifier.forName(experiment.getNameOfInnerClassifier(), null);
	Classifier leftClassifier = AbstractClassifier.forName(experiment.getNameOfLeftClassifier(), null);
	Classifier innerClassifier = AbstractClassifier.forName(experiment.getNameOfInnerClassifier(), null);
	Classifier rightClassifier = AbstractClassifier.forName(experiment.getNameOfRightClassifier(), null);

	RPNDSplitter splitter = new RPNDSplitter(new Random(seed), classifierForRPNDSplit);

	/* conduct experiments */
	List<Map<String, Object>> results = new ArrayList<>();
	for (int k = 0; k < 10; k++) {
		List<Collection<String>> classSplit;
		try {
			classSplit = new ArrayList<>(splitter.split(data));
		} catch (Exception e) {
			throw new RuntimeException("Could not create RPND split.", e);
		}
		MCTreeNodeReD classifier = new MCTreeNodeReD(innerClassifier, classSplit.get(0), leftClassifier, classSplit.get(1), rightClassifier);
		long start = System.currentTimeMillis();
		Map<String, Object> result = new HashMap<>();
		// Vary the split seed per repetition so each run sees a different 70/30 partition.
		List<Instances> dataSplit = WekaUtil.getStratifiedSplit(data, (seed + k), .7);
		classifier.buildClassifier(dataSplit.get(0));
		long time = System.currentTimeMillis() - start;
		Evaluation eval = new Evaluation(dataSplit.get(0));
		eval.evaluateModel(classifier, dataSplit.get(1));
		// Divide by 100d (not 100f) to keep the full double precision of pctCorrect().
		double loss = (100 - eval.pctCorrect()) / 100d;
		logger.info("Conducted experiment {} with split {}/{}. Loss: {}. Time: {}ms.", k, classSplit.get(0), classSplit.get(1), loss, time);
		result.put("errorRate", loss);
		result.put(LABEL_TRAIN_TIME, time);
		results.add(result);
	}
	return results;
}
 
Example 6
Source File: Util.java    From AILibs with GNU Affero General Public License v3.0 4 votes vote down vote up
/**
 * Conducts ten repetitions of an ensemble-of-one-step-reductions experiment: each repetition
 * builds a majority-vote ensemble of {@code getNumberOfStumps()} MCTreeNodeReD stumps (each
 * on a fresh RPND class split), then records the ensemble's 0-1 loss and training time.
 *
 * @param experiment experiment configuration (dataset path, seed, classifier name, stump count)
 * @return one result map per repetition with keys {@code "errorRate"} and {@code LABEL_TRAIN_TIME}
 * @throws Exception if the dataset cannot be read or any classifier fails
 */
public static List<Map<String, Object>> conductEnsembleOfOneStepReductionsExperiment(final EnsembleOfSimpleOneStepReductionsExperiment experiment) throws Exception {

	/* load data — try-with-resources closes the reader (it previously leaked) */
	Instances data;
	try (BufferedReader reader = new BufferedReader(new FileReader(experiment.getDataset()))) {
		data = new Instances(reader);
	}
	data.setClassIndex(data.numAttributes() - 1);

	/* prepare basis for experiments */
	int seed = experiment.getSeed();
	String classifier = experiment.getNameOfClassifier();
	RPNDSplitter splitter = new RPNDSplitter(new Random(seed), AbstractClassifier.forName(classifier, null));

	/* conduct experiments */
	List<Map<String, Object>> results = new ArrayList<>();
	for (int k = 0; k < 10; k++) {

		Vote ensemble = new Vote();
		ensemble.setOptions(new String[] { "-R", "MAJ" }); // majority voting
		long start = System.currentTimeMillis();
		List<Instances> dataSplit = WekaUtil.getStratifiedSplit(data, (seed + k), .7);
		for (int i = 0; i < experiment.getNumberOfStumps(); i++) {

			List<Collection<String>> classSplit;
			classSplit = new ArrayList<>(splitter.split(data));
			MCTreeNodeReD tree = new MCTreeNodeReD(classifier, classSplit.get(0), classifier, classSplit.get(1), classifier);
			tree.buildClassifier(dataSplit.get(0));
			ensemble.addPreBuiltClassifier(tree);
		}
		Map<String, Object> result = new HashMap<>();
		result.put(LABEL_TRAIN_TIME, System.currentTimeMillis() - start);

		/* now evaluate the ensemble */
		// NOTE(review): buildClassifier is called on the FULL dataset although the stumps were
		// trained on dataSplit.get(0) and evaluation uses dataSplit.get(1); if Vote (re)trains
		// anything here this leaks test instances into training — confirm intent.
		ensemble.buildClassifier(data);
		Evaluation eval = new Evaluation(dataSplit.get(0));
		eval.evaluateModel(ensemble, dataSplit.get(1));
		// Divide by 100d (not 100f) to keep the full double precision of pctCorrect().
		double loss = (100 - eval.pctCorrect()) / 100d;
		logger.info("Conducted experiment {}. Loss: {}. Time: {}ms.", k, loss, result.get(LABEL_TRAIN_TIME));
		result.put("errorRate", loss);
		results.add(result);
	}
	return results;
}