weka.classifiers.Evaluation Java Examples
The following examples show how to use weka.classifiers.Evaluation. Each example is taken from an open-source project; the source file, project, and license are noted above each listing.
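Before the project examples, here is the typical usage pattern in one place: build a classifier on training data, construct an Evaluation primed with the training set (class structure and priors are taken from it), then score held-out data or cross-validate. A minimal, self-contained sketch, assuming a hypothetical ARFF file at data/iris.arff and WEKA's stock J48 learner:

import java.util.Random;

import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class EvaluationSketch {
    public static void main(String[] args) throws Exception {
        // Load data and declare the last attribute as the class.
        Instances data = new DataSource("data/iris.arff").getDataSet();
        data.setClassIndex(data.numAttributes() - 1);

        // Shuffle, then hold out the last third for testing.
        data.randomize(new Random(1));
        int trainSize = (int) (data.numInstances() * 2 / 3.0);
        Instances train = new Instances(data, 0, trainSize);
        Instances test = new Instances(data, trainSize, data.numInstances() - trainSize);

        J48 tree = new J48();
        tree.buildClassifier(train);

        // Prime the Evaluation with the training set, evaluate on the test set.
        Evaluation eval = new Evaluation(train);
        eval.evaluateModel(tree, test);
        System.out.println(eval.toSummaryString());
        System.out.println("Accuracy: " + eval.pctCorrect() + "%");

        // Alternatively, 10-fold cross-validation on the full set,
        // with a fixed seed for reproducible fold assignments.
        Evaluation cv = new Evaluation(data);
        cv.crossValidateModel(new J48(), data, 10, new Random(1));
        System.out.println(cv.toSummaryString());
    }
}

Priming the Evaluation with the training set rather than the test set matters for the statistics that depend on class priors; most of the examples below follow exactly this pattern.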
Example #1
Source File: TestWekaBayes.java From Java-Data-Analysis with MIT License
public static void main(String[] args) throws Exception {
    // ConverterUtils.DataSource source = new ConverterUtils.DataSource("data/AnonFruit.arff");
    DataSource source = new DataSource("data/AnonFruit.arff");
    Instances train = source.getDataSet();
    train.setClassIndex(3);  // target attribute: (Sweet)

    // build model
    NaiveBayes model = new NaiveBayes();
    model.buildClassifier(train);

    // use
    Instances test = train;
    Evaluation eval = new Evaluation(test);
    eval.evaluateModel(model, test);
    List<Prediction> predictions = eval.predictions();

    int k = 0;
    for (Instance instance : test) {
        double actual = instance.classValue();
        double prediction = eval.evaluateModelOnce(model, instance);
        System.out.printf("%2d.%4.0f%4.0f", ++k, actual, prediction);
        System.out.println(prediction != actual ? " *" : "");
    }
}
Example #2
Source File: SimpleCart.java From tsml with GNU General Public License v3.0
/**
 * Updates the numIncorrectModel field for all nodes when the subtree (to be
 * pruned) is rooted. This is needed for calculating the alpha-values.
 *
 * @throws Exception if something goes wrong
 */
public void modelErrors() throws Exception {
    Evaluation eval = new Evaluation(m_train);

    if (!m_isLeaf) {
        m_isLeaf = true;  // temporarily make this node a leaf

        // calculate distribution for evaluation
        eval.evaluateModel(this, m_train);
        m_numIncorrectModel = eval.incorrect();

        m_isLeaf = false;
        for (int i = 0; i < m_Successors.length; i++) {
            m_Successors[i].modelErrors();
        }
    } else {
        eval.evaluateModel(this, m_train);
        m_numIncorrectModel = eval.incorrect();
    }
}
Example #3
Source File: ReductionOptimizer.java From AILibs with GNU Affero General Public License v3.0
private int getLossForClassifier(final MCTreeNode tree, final Instances data) {
    this.completeTree(tree);

    synchronized (this) {
        /* now eval the tree */
        try {
            DescriptiveStatistics stats = new DescriptiveStatistics();
            for (int i = 0; i < 2; i++) {
                List<IWekaInstances> split = WekaUtil.getStratifiedSplit(new WekaInstances(data), this.seed + i, .6f);
                tree.buildClassifier(split.get(0).getList());
                Evaluation eval = new Evaluation(data);
                eval.evaluateModel(tree, split.get(1).getList());
                stats.addValue(eval.pctIncorrect());
            }
            return (int) Math.round(stats.getMean() * 100);
        } catch (Exception e) {
            this.logger.error(LoggerUtil.getExceptionInfo(e));
            return Integer.MAX_VALUE;
        }
    }
}
Example #4
Source File: AllPairsTable.java From AILibs with GNU Affero General Public License v3.0
public AllPairsTable(final Instances training, final Instances validation, final Classifier c) throws Exception {
    Collection<String> classes = WekaUtil.getClassesActuallyContainedInDataset(training);
    for (Collection<String> set : SetUtil.getAllPossibleSubsetsWithSize(classes, 2)) {
        List<String> pair = set.stream().sorted().collect(Collectors.toList());
        String a = pair.get(0);
        String b = pair.get(1);

        Instances trainingData = WekaUtil.getInstancesOfClass(training, a);
        trainingData.addAll(WekaUtil.getInstancesOfClass(training, b));
        c.buildClassifier(trainingData);

        Instances validationData = WekaUtil.getInstancesOfClass(validation, a);
        validationData.addAll(WekaUtil.getInstancesOfClass(validation, b));

        Evaluation eval = new Evaluation(trainingData);
        eval.evaluateModel(c, validationData);

        if (!this.separabilities.containsKey(a)) {
            this.separabilities.put(a, new HashMap<>());
        }
        this.separabilities.get(a).put(b, eval.pctCorrect() / 100);
    }
    this.classCount = WekaUtil.getNumberOfInstancesPerClass(training);
    this.sum = training.size();
}
Example #5
Source File: TransformEnsembles.java From tsml with GNU General Public License v3.0
public void findCVWeights() throws Exception {
    cvWeights = new double[nosTransforms];
    int folds = numInstances;
    if (folds > THRESHOLD1) {
        folds = 10;
    }
    System.out.print("\n Finding CV Accuracy: ");
    for (int i = 0; i < nosTransforms; i++) {
        Evaluation evaluation = new Evaluation(train.get(i));
        if (i == 0) {
            evaluation.crossValidateModel(AbstractClassifier.makeCopy(baseTime), train.get(i), folds, new Random());
        } else {
            evaluation.crossValidateModel(AbstractClassifier.makeCopy(base), train.get(i), folds, new Random());
        }
        cvWeights[i] = 1 - evaluation.errorRate();
        System.out.print("," + cvWeights[i]);
    }
    System.out.print("\n");
}
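One caveat worth noting: each call above hands crossValidateModel a fresh, unseeded Random, so the fold assignments, and with them the resulting CV weights, can differ from run to run. Passing new Random(someFixedSeed) (a hypothetical fixed seed) would make the weights reproducible.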
Example #6
Source File: StabilityTest.java From wekaDeeplearning4j with GNU General Public License v3.0
public static void evaluate(Dl4jMlpClassifier clf, Instances data, double minPerfomance) throws Exception {
    Instances[] split = TestUtil.splitTrainTest(data);
    Instances train = split[0];
    Instances test = split[1];

    clf.buildClassifier(train);

    Evaluation trainEval = new Evaluation(train);
    trainEval.evaluateModel(clf, train);

    Evaluation testEval = new Evaluation(train);
    testEval.evaluateModel(clf, test);

    final double testPctCorrect = testEval.pctCorrect();
    final double trainPctCorrect = trainEval.pctCorrect();

    log.info("Train: {}, Test: {}", trainPctCorrect, testPctCorrect);

    boolean success = testPctCorrect > minPerfomance && trainPctCorrect > minPerfomance;
    log.info("Success: " + success);
    log.info(clf.getModel().conf().toYaml());

    Assert.assertTrue("Performance was < " + minPerfomance
            + ". TestPctCorrect: " + testPctCorrect
            + ", TrainPctCorrect: " + trainPctCorrect, success);
}
Example #7
Source File: LMTNode.java From tsml with GNU General Public License v3.0
/**
 * Updates the numIncorrectModel field for all nodes. This is needed for
 * calculating the alpha-values.
 */
public void modelErrors() throws Exception {
    Evaluation eval = new Evaluation(m_train);

    if (!m_isLeaf) {
        m_isLeaf = true;  // temporarily make this node a leaf
        eval.evaluateModel(this, m_train);
        m_isLeaf = false;
        m_numIncorrectModel = eval.incorrect();
        for (int i = 0; i < m_sons.length; i++) {
            m_sons[i].modelErrors();
        }
    } else {
        eval.evaluateModel(this, m_train);
        m_numIncorrectModel = eval.incorrect();
    }
}
Example #8
Source File: DatasetTest.java From wekaDeeplearning4j with GNU General Public License v3.0
private static Evaluation eval(Instances metaData) throws Exception {
    String imagesPath = "src/test/resources/nominal/mnist-minimal";
    Dl4jMlpClassifier clf = new Dl4jMlpClassifier();
    ImageInstanceIterator iii = new ImageInstanceIterator();
    iii.setImagesLocation(new File(imagesPath));
    iii.setTrainBatchSize(2);
    clf.setInstanceIterator(iii);
    clf.setNumEpochs(5);

    // Build clf
    clf.buildClassifier(metaData);

    // Evaluate clf
    Evaluation trainEval = new Evaluation(metaData);
    trainEval.evaluateModel(clf, metaData);
    return trainEval;
}
Example #9
Source File: NBTreeNoSplit.java From tsml with GNU General Public License v3.0
/**
 * Utility method for fast 5-fold cross validation of a naive bayes model.
 *
 * @param fullModel a <code>NaiveBayesUpdateable</code> value
 * @param trainingSet an <code>Instances</code> value
 * @param r a <code>Random</code> value
 * @return a <code>double</code> value
 * @exception Exception if an error occurs
 */
public static double crossValidate(NaiveBayesUpdateable fullModel,
                                   Instances trainingSet,
                                   Random r) throws Exception {
    // make some copies for fast evaluation of 5-fold xval
    Classifier[] copies = AbstractClassifier.makeCopies(fullModel, 5);
    Evaluation eval = new Evaluation(trainingSet);

    // make some splits
    for (int j = 0; j < 5; j++) {
        Instances test = trainingSet.testCV(5, j);
        // unlearn these test instances
        for (int k = 0; k < test.numInstances(); k++) {
            test.instance(k).setWeight(-test.instance(k).weight());
            ((NaiveBayesUpdateable) copies[j]).updateClassifier(test.instance(k));
            // reset the weight back to its original value
            test.instance(k).setWeight(-test.instance(k).weight());
        }
        eval.evaluateModel(copies[j], test);
    }
    return eval.incorrect();
}
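The weight negation in the inner loop is what makes this cross-validation "fast": each copy starts out trained on the full training set, and updating an updateable naive Bayes model with a negatively weighted instance subtracts that instance's counts, effectively unlearning the test fold instead of retraining from scratch. The weight is flipped back afterwards so the evaluation itself sees the unmodified instances.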
Example #10
Source File: DatasetTest.java From wekaDeeplearning4j with GNU General Public License v3.0
/**
 * Test datasets with class meta data that is not in lexicographic order.
 *
 * @throws Exception Something went wrong.
 */
@Test
public void testMixedClassOrder() throws Exception {
    String prefix = "src/test/resources/nominal/";

    // Get data
    Instances testProb = DatasetLoader.loadArff(prefix + "mnist.meta.minimal.arff");
    Instances testProbInverse = DatasetLoader
            .loadArff(prefix + "mnist.meta.minimal.mixed-class-meta-data.arff");
    Evaluation evalNormal = eval(testProb);
    Evaluation evalMixed = eval(testProbInverse);

    // Compare accuracy
    Assert.assertEquals(evalNormal.pctCorrect(), evalMixed.pctCorrect(), 1e-7);
    Assert.assertEquals(evalNormal.pctIncorrect(), evalMixed.pctIncorrect(), 1e-7);
}
Example #11
Source File: TestUtil.java From wekaDeeplearning4j with GNU General Public License v3.0
/**
 * Perform simple holdout with a given percentage.
 *
 * @param clf Classifier
 * @param data Full dataset
 * @param p Split percentage
 */
public static void holdout(Classifier clf, Instances data, double p) throws Exception {
    Instances[] split = splitTrainTest(data, p);
    Instances train = split[0];
    Instances test = split[1];

    logger.info("Classifier: \n{}", clf.toString());
    clf.buildClassifier(train);

    Evaluation trainEval = new Evaluation(train);
    trainEval.evaluateModel(clf, train);
    logger.info("Weka Train Evaluation:");
    logger.info(trainEval.toSummaryString());
    if (!data.classAttribute().isNumeric()) {
        logger.info(trainEval.toMatrixString());
    }

    Evaluation testEval = new Evaluation(train);
    logger.info("Weka Test Evaluation:");
    testEval.evaluateModel(clf, test);
    logger.info(testEval.toSummaryString());
    if (!data.classAttribute().isNumeric()) {
        logger.info(testEval.toMatrixString());
    }
}
Example #12
Source File: HARAMNetwork.java From meka with GNU General Public License v3.0
public static void main(String[] argv) {
    try {
        Evaluation.runExperiment((MultiLabelClassifier) new HARAMNetwork(), argv);
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println(e.getMessage());
    }
}
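A caveat: HARAMNetwork is a MEKA multi-label classifier, and weka.classifiers.Evaluation has no runExperiment method, so the Evaluation referenced here is in all likelihood MEKA's own meka.classifiers.multilabel.Evaluation rather than the WEKA class this page documents. The same applies to the ARAMNetworkfast example (#24) below.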
Example #13
Source File: DataGenerator.java From collective-classification-weka-package with GNU General Public License v3.0
/**
 * Initializes the generator.
 *
 * @param eval the Evaluation object to use
 */
public DataGenerator(Evaluation eval) {
    super();
    m_Evaluation = eval;
    m_Processed = false;
}
Example #14
Source File: Util.java From AILibs with GNU Affero General Public License v3.0
public static List<Map<String, Object>> conductSingleOneStepReductionExperiment(final ReductionExperiment experiment) throws Exception {
    /* load data */
    Instances data = new Instances(new BufferedReader(new FileReader(experiment.getDataset())));
    data.setClassIndex(data.numAttributes() - 1);

    /* prepare basis for experiments */
    int seed = experiment.getSeed();
    Classifier classifierForRPNDSplit = AbstractClassifier.forName(experiment.getNameOfInnerClassifier(), null);
    Classifier leftClassifier = AbstractClassifier.forName(experiment.getNameOfLeftClassifier(), null);
    Classifier innerClassifier = AbstractClassifier.forName(experiment.getNameOfInnerClassifier(), null);
    Classifier rightClassifier = AbstractClassifier.forName(experiment.getNameOfRightClassifier(), null);

    RPNDSplitter splitter = new RPNDSplitter(new Random(seed), classifierForRPNDSplit);

    /* conduct experiments */
    List<Map<String, Object>> results = new ArrayList<>();
    for (int k = 0; k < 10; k++) {
        List<Collection<String>> classSplit;
        try {
            classSplit = new ArrayList<>(splitter.split(data));
        } catch (Exception e) {
            throw new RuntimeException("Could not create RPND split.", e);
        }
        MCTreeNodeReD classifier = new MCTreeNodeReD(innerClassifier, classSplit.get(0), leftClassifier, classSplit.get(1), rightClassifier);
        long start = System.currentTimeMillis();
        Map<String, Object> result = new HashMap<>();
        List<Instances> dataSplit = WekaUtil.getStratifiedSplit(data, (seed + k), .7);
        classifier.buildClassifier(dataSplit.get(0));
        long time = System.currentTimeMillis() - start;

        Evaluation eval = new Evaluation(dataSplit.get(0));
        eval.evaluateModel(classifier, dataSplit.get(1));
        double loss = (100 - eval.pctCorrect()) / 100f;

        logger.info("Conducted experiment {} with split {}/{}. Loss: {}. Time: {}ms.", k, classSplit.get(0), classSplit.get(1), loss, time);
        result.put("errorRate", loss);
        result.put(LABEL_TRAIN_TIME, time);
        results.add(result);
    }
    return results;
}
Example #15
Source File: NavieBayesClassifier.java From MonitorClient with Apache License 2.0
public String training(String path) {
    if (instances == null) {
        return "training data not found";
    }
    try {
        classifier = new NaiveBayesUpdateable();
        classifier.buildClassifier(instances);
        loadArff(path);

        Evaluation eTest = new Evaluation(instances);
        eTest.evaluateModel(classifier, instances);
        String strSummary = eTest.toSummaryString();
        System.out.println(strSummary);

        SerializationHelper.write("config/bayes.model", classifier);
    } catch (Exception e) {
        e.printStackTrace();
    }
    return "";
}
Example #16
Source File: WekaDeeplearning4jExamples.java From wekaDeeplearning4j with GNU General Public License v3.0
private static void dl4jResnet50() throws Exception {
    String folderPath = "src/test/resources/nominal/plant-seedlings-small";
    ImageDirectoryLoader loader = new ImageDirectoryLoader();
    loader.setInputDirectory(new File(folderPath));
    Instances inst = loader.getDataSet();
    inst.setClassIndex(1);

    Dl4jMlpClassifier classifier = new Dl4jMlpClassifier();
    classifier.setNumEpochs(3);

    KerasEfficientNet kerasEfficientNet = new KerasEfficientNet();
    kerasEfficientNet.setVariation(EfficientNet.VARIATION.EFFICIENTNET_B1);
    classifier.setZooModel(kerasEfficientNet);

    ImageInstanceIterator iterator = new ImageInstanceIterator();
    iterator.setImagesLocation(new File(folderPath));
    classifier.setInstanceIterator(iterator);

    // Stratify and split the data
    Random rand = new Random(0);
    inst.randomize(rand);
    inst.stratify(5);
    Instances train = inst.trainCV(5, 0);
    Instances test = inst.testCV(5, 0);

    // Build the classifier on the training data
    classifier.buildClassifier(train);

    // Evaluate the model on test data
    Evaluation eval = new Evaluation(test);
    eval.evaluateModel(classifier, test);

    // Output some summary statistics
    System.out.println(eval.toSummaryString());
    System.out.println(eval.toMatrixString());
}
Example #17
Source File: TestUtil.java From wekaDeeplearning4j with GNU General Public License v3.0
/**
 * Perform simple holdout with a given percentage.
 *
 * @param clf Classifier
 * @param data Full dataset
 * @param p Split percentage
 */
public static void holdout(Dl4jMlpClassifier clf, Instances data, double p,
                           AbstractInstanceIterator aii) throws Exception {
    holdout(clf, data, p);

    Instances[] split = splitTrainTest(data, p);
    Instances test = split[1];
    final DataSetIterator testIter = aii.getDataSetIterator(test, 42);
    final ComputationGraph model = clf.getModel();

    logger.info("DL4J Evaluation: ");
    org.deeplearning4j.eval.Evaluation evaluation = model.evaluate(testIter);
    logger.info(evaluation.stats());
}
Example #18
Source File: KddCup.java From Machine-Learning-in-Java with MIT License
public static double[] evaluate(Classifier model) throws Exception {
    double[] results = new double[4];
    String[] labelFiles = new String[] { "churn", "appetency", "upselling" };
    double overallScore = 0.0;

    for (int i = 0; i < labelFiles.length; i++) {
        // Load data
        Instances train_data = loadData(
                "data/orange_small_train.data",
                "data/orange_small_train_" + labelFiles[i] + ".labels.txt");
        train_data = preProcessData(train_data);

        // cross-validate the data
        Evaluation eval = new Evaluation(train_data);
        eval.crossValidateModel(model, train_data, 5, new Random(1), new Object[] {});

        // Save results
        results[i] = eval.areaUnderROC(train_data.classAttribute().indexOfValue("1"));
        overallScore += results[i];
        System.out.println(labelFiles[i] + "\t-->\t" + results[i]);
    }

    // Get average results over all three problems
    results[3] = overallScore / 3;
    return results;
}
Example #19
Source File: LDAEvaluationTest.java From AILibs with GNU Affero General Public License v3.0
@Test
public void evaluateTest() throws Exception {
    logger.info("Starting LDA evaluation test...");

    /* load dataset and create a train-test-split */
    OpenmlConnector connector = new OpenmlConnector();
    DataSetDescription ds = connector.dataGet(DataSetUtils.SEGMENT_ID);
    File file = ds.getDataset(DataSetUtils.API_KEY);
    Instances data = new Instances(new BufferedReader(new FileReader(file)));
    data.setClassIndex(data.numAttributes() - 1);
    List<Instances> dataSplit = WekaUtil.getStratifiedSplit(data, 42, .05f);
    Instances insts = dataSplit.get(0);
    List<Instances> split = WekaUtil.getStratifiedSplit(insts, 42, .7f);

    long timeStart = System.currentTimeMillis();

    LDA lda = new LDA();
    lda.buildClassifier(split.get(0));

    long timeStartEval = System.currentTimeMillis();

    Evaluation eval = new Evaluation(split.get(0));
    eval.evaluateModel(lda, split.get(1));
    logger.debug("LDA pct correct: " + eval.pctCorrect());
    Assert.assertTrue(eval.pctCorrect() > 0);

    long timeTaken = System.currentTimeMillis() - timeStart;
    long timeTakenEval = System.currentTimeMillis() - timeStartEval;
    logger.debug("LDA took " + (timeTaken / 1000) + " s.");
    logger.debug("LDA eval took " + (timeTakenEval / 1000) + " s.");
}
Example #20
Source File: EnsembleEvaluatorTest.java From AILibs with GNU Affero General Public License v3.0
@Test
public void knnEvaluatorTest() throws Exception {
    logger.info("Starting knn evaluation test...");

    /* load dataset and create a train-test-split */
    OpenmlConnector connector = new OpenmlConnector();
    DataSetDescription ds = connector.dataGet(DataSetUtils.SEGMENT_ID);
    File file = ds.getDataset(DataSetUtils.API_KEY);
    Instances data = new Instances(new BufferedReader(new FileReader(file)));
    data.setClassIndex(data.numAttributes() - 1);
    List<Instances> split = WekaUtil.getStratifiedSplit(data, 42, .05f);
    Instances insts = split.get(0);
    List<Instances> split2 = WekaUtil.getStratifiedSplit(insts, 42, .7f);

    long timeStart = System.currentTimeMillis();

    IBk knn = new IBk(10);
    knn.buildClassifier(split2.get(0));

    long timeStartEval = System.currentTimeMillis();

    Evaluation eval = new Evaluation(split2.get(0));
    eval.evaluateModel(knn, split2.get(1));
    logger.debug("Pct correct: " + eval.pctCorrect());
    Assert.assertTrue(eval.pctCorrect() > 0);

    long timeTaken = System.currentTimeMillis() - timeStart;
    long timeTakenEval = System.currentTimeMillis() - timeStartEval;
    logger.debug("KNN took " + (timeTaken / 1000) + " s.");
    logger.debug("KNN eval took " + (timeTakenEval / 1000) + " s.");
}
Example #21
Source File: EvaluationUtils.java From AILibs with GNU Affero General Public License v3.0
public static double evaluateMLPlan(final int timeout, final Instances training, final Instances test,
        final int seed, final Logger logger, final int numCores) throws Exception {

    logger.debug("Starting ML-Plan execution. Training on {} instances with {} attributes.",
            training.numInstances(), training.numAttributes());

    /* Initialize MLPlan using WEKA components */
    MLPlanWekaBuilder builder = AbstractMLPlanBuilder.forWeka();
    builder.withTimeOut(new Timeout(timeout, TimeUnit.SECONDS));
    builder.withNumCpus(numCores);
    builder.withDataset(training);
    MLPlan mlplan = builder.build();
    mlplan.setRandomSeed(seed);

    Classifier clf = mlplan.call();
    if (mlplan.getSelectedClassifier() == null
            || ((MLPipeline) mlplan.getSelectedClassifier()).getBaseClassifier() == null) {
        logger.warn("Could not find a model using ML-Plan. Returning -1...");
        return -1;
    }

    String solutionString = ((MLPipeline) mlplan.getSelectedClassifier()).getBaseClassifier().getClass().getName()
            + " | " + ((MLPipeline) mlplan.getSelectedClassifier()).getPreprocessors();
    logger.debug("Selected classifier: {}", solutionString);

    /* evaluate solution produced by mlplan */
    Evaluation eval = new Evaluation(training);
    eval.evaluateModel(clf, test);
    return eval.pctCorrect();
}
Example #22
Source File: EvaluationUtils.java From AILibs with GNU Affero General Public License v3.0
public static double performEnsemble(Instances instances) throws Exception {
    List<Instances> subsample = WekaUtil.getStratifiedSplit(instances, 42, .05f);
    instances = subsample.get(0);

    /* Relief */
    ReliefFAttributeEval relief = new ReliefFAttributeEval();
    relief.buildEvaluator(instances);
    double attEvalSum = 0;
    for (int i = 0; i < instances.numAttributes() - 1; i++) {
        attEvalSum += relief.evaluateAttribute(i);
    }
    attEvalSum /= instances.numAttributes();

    /* Variance */
    double varianceMean = 0;
    int totalNumericCount = 0;
    for (int i = 0; i < instances.numAttributes() - 1; i++) {
        if (instances.attribute(i).isNumeric()) {
            instances.attributeStats(i).numericStats.calculateDerived();
            varianceMean += Math.pow(instances.attributeStats(i).numericStats.stdDev, 2);
            totalNumericCount++;
        }
    }
    varianceMean /= (totalNumericCount != 0 ? totalNumericCount : 1);

    /* KNN */
    List<Instances> split = WekaUtil.getStratifiedSplit(instances, 42, .7f);
    IBk knn = new IBk(10);
    knn.buildClassifier(split.get(0));
    Evaluation eval = new Evaluation(split.get(0));
    eval.evaluateModel(knn, split.get(1));
    double knnResult = eval.pctCorrect() / 100d;

    return 1 - (0.33 * attEvalSum + 0.33 * knnResult + 0.33 * varianceMean);
}
Example #23
Source File: Main-SVG.java From Java-for-Data-Science with MIT License
public Main() {
    try {
        BufferedReader datafile = readDataFile("camping.txt");
        Instances data = new Instances(datafile);
        data.setClassIndex(data.numAttributes() - 1);

        Instances trainingData = new Instances(data, 0, 14);
        Instances testingData = new Instances(data, 14, 5);

        Evaluation evaluation = new Evaluation(trainingData);
        SMO smo = new SMO();
        smo.buildClassifier(data);
        evaluation.evaluateModel(smo, testingData);
        System.out.println(evaluation.toSummaryString());

        // Test instance
        Instance instance = new DenseInstance(3);
        instance.setValue(data.attribute("age"), 78);
        instance.setValue(data.attribute("income"), 125700);
        instance.setValue(data.attribute("camps"), 1);
        instance.setDataset(data);
        System.out.println("The instance: " + instance);
        System.out.println(smo.classifyInstance(instance));
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
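Note that this example trains the SMO on the full dataset (smo.buildClassifier(data)) and then evaluates on testingData, which consists of rows 14 through 18 of that same dataset, so the summary statistics reflect training-set performance rather than generalization; building on trainingData instead would give an honest holdout estimate.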
Example #24
Source File: ARAMNetworkfast.java From meka with GNU General Public License v3.0
/**
 * Main method for testing this class.
 *
 * @param argv the options
 */
public static void main(String[] argv) {
    try {
        Evaluation.runExperiment((MultiLabelClassifier) new WvARAM(), argv);
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println(e.getMessage());
    }
}