package jcolibri.test.test14;

import jcolibri.evaluation.Evaluator;
import jcolibri.extensions.maintenance_evaluation.DetailedEvaluationReport;
import jcolibri.extensions.maintenance_evaluation.evaluators.MaintenanceHoldOutEvaluator;

/**
 * This example shows how to evaluate two datasets in the same run
 * and report the accuracy obtained on each of them.
 * It uses a CBR application (a StandardCBRApplication implementation)
 * that must store its results in the DetailedEvaluationReport.
 *
 * @author Lisa Cummins
 * @version 1.0
 */
public class Test14
{
    /**
     * Runs the example, computing the accuracy obtained on the two datasets.
     * @param args unused
     */
    public static void main(String[] args)
    {
        // SwingProgressBar shows the progress of the evaluation
        jcolibri.util.ProgressController.clear();
        jcolibri.util.ProgressController.register(new jcolibri.test.main.SwingProgressBar(), MaintenanceHoldOutEvaluator.class);

        // Hold-Out evaluation of the Iris application
        MaintenanceHoldOutEvaluator eval = new MaintenanceHoldOutEvaluator();
        eval.init(new IrisEvaluableApp());
        eval.HoldOut(20, 3);
        // The report is a DetailedEvaluationReport, so the average cost of the
        // application's data series is available; accuracy = (1 - average cost) as a percentage.
        Double avgCost = ((DetailedEvaluationReport) Evaluator.getEvaluationReport())
                .getAverageOfQueryDataSeries(IrisEvaluableApp.DATA_SERIES_NAME);
        Double percentAccuracy = (1.0 - avgCost) * 100;
        Evaluator.getEvaluationReport().putOtherData(IrisEvaluableApp.DATA_SERIES_NAME
                + " Accuracy", "" + percentAccuracy);

        // Hold-Out evaluation of the Glass application
        eval.init(new GlassEvaluableApp());
        eval.HoldOut(20, 3);
        avgCost = ((DetailedEvaluationReport) Evaluator.getEvaluationReport())
                .getAverageOfDataSeries(GlassEvaluableApp.DATA_SERIES_NAME);
        percentAccuracy = (1.0 - avgCost) * 100;
        Evaluator.getEvaluationReport().putOtherData(GlassEvaluableApp.DATA_SERIES_NAME
                + " Accuracy", "" + percentAccuracy);

        // Print the report and show it in the evaluation GUI
        System.out.println(Evaluator.getEvaluationReport());
        jcolibri.evaluation.tools.EvaluationResultGUI.show(Evaluator.getEvaluationReport(), "Test14 - Evaluation", false);
    }
}
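
/*
 * For orientation only: IrisEvaluableApp and GlassEvaluableApp are defined in separate files
 * and are not shown here. The class below is a hypothetical, non-functional sketch of the
 * general shape such an evaluable application takes, assuming only the StandardCBRApplication
 * life cycle (configure / preCycle / cycle / postCycle). It is not wired to any connector or
 * case base, the throws clauses are omitted, and the exact DetailedEvaluationReport call used
 * to record the per-query cost is deliberately left as a comment rather than reproduced.
 */
class SketchEvaluableApp implements jcolibri.cbraplications.StandardCBRApplication
{
    /** Hypothetical name of the data series this sketch would write its costs to. */
    public static final String DATA_SERIES_NAME = "Sketch cost";

    public void configure()
    {
        // A real application would create its connector and case base here.
    }

    public jcolibri.cbrcore.CBRCaseBase preCycle()
    {
        // A real application would load the cases and return the populated case base.
        return null;
    }

    public void cycle(jcolibri.cbrcore.CBRQuery query)
    {
        // A real application would retrieve similar cases, classify the query, compute a
        // cost in [0,1] (0 = correct, 1 = incorrect) and record it for this query in the
        // DetailedEvaluationReport under DATA_SERIES_NAME, so that the averages computed
        // in Test14.main() are meaningful.
    }

    public void postCycle()
    {
        // A real application would release its connector here.
    }
}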