weka.filters.Filter.useFilter()方法的使用及代码示例

x33g5p2x  于2022-01-19 转载在 其他  
字(12.4k)|赞(0)|评价(0)|浏览(151)

本文整理了Java中weka.filters.Filter.useFilter()方法的一些代码示例,展示了Filter.useFilter()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Filter.useFilter()方法的具体详情如下:
包路径:weka.filters.Filter
类名称:Filter
方法名:useFilter

Filter.useFilter介绍

[英]Filters an entire set of instances through a filter and returns the new set.
[中]将整个实例集通过指定的过滤器进行过滤,并返回过滤后得到的新实例集。

代码示例

代码示例来源:origin: nz.ac.waikato.cms.weka/weka-stable

/**
 * Reduces the dimensionality of a set of instances so that it contains
 * only the attributes chosen by the last run of attribute selection.
 *
 * @param in the instances to be reduced
 * @return a dimensionality reduced set of instances
 * @exception Exception if the instances can't be reduced
 */
public Instances reduceDimensionality(Instances in) throws Exception {
 if (m_attributeFilter == null) {
  throw new Exception("No feature selection has been performed yet!");
 }
 // No transformer configured: just apply the attribute filter directly.
 if (m_transformer == null) {
  return Filter.useFilter(in, m_attributeFilter);
 }
 // Otherwise project every instance through the transformer first.
 int count = in.numInstances();
 Instances projected =
  new Instances(m_transformer.transformedHeader(), count);
 for (int idx = 0; idx < count; idx++) {
  projected.add(m_transformer.convertInstance(in.instance(idx)));
 }
 return Filter.useFilter(projected, m_attributeFilter);
}

代码示例来源:origin: Waikato/weka-trunk

insts = Filter.useFilter(insts, m_ReorderOriginal);
Instances filteredInsts = Filter.useFilter(insts, m_Filter);
if (filteredInsts.numInstances() != insts.numInstances()) {
 throw new WekaException(
  "FilteredClassifier: filter has returned more/less instances than required.");
 filteredInsts = Filter.useFilter(filteredInsts, m_ReorderFiltered);
double[][] result = new double[insts.numInstances()][insts.numClasses()];
for (int i = 0; i < insts.numInstances(); i++) {
 result[i] = distributionForInstance(insts.instance(i));

代码示例来源:origin: nz.ac.waikato.cms.weka/weka-stable

/**
 * Returns a copy of the supplied instances extended with one numeric
 * attribute per class value, named "&lt;classifier&gt;_prob_&lt;value&gt;",
 * intended to hold per-class probability estimates.
 *
 * @param insts the instances to extend
 * @param format the header supplying the class attribute
 * @param classifier the classifier whose simple class name labels the new attributes
 * @param relationNameModifier suffix appended to the relation name
 * @return the extended instances
 * @throws Exception if a filter cannot be applied
 */
private Instances makeDataSetProbabilities(Instances insts, Instances format,
 weka.classifiers.Classifier classifier, String relationNameModifier)
 throws Exception {
 // adjust structure for InputMappedClassifier (if necessary)
 if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) {
  format =
   ((weka.classifiers.misc.InputMappedClassifier) classifier)
    .getModelHeader(new Instances(format, 0));
 }
 // Use only the simple (unqualified) class name in attribute names.
 String fullName = classifier.getClass().getName();
 String shortName = fullName.substring(fullName.lastIndexOf('.') + 1);
 Instances result = new Instances(insts);
 for (int v = 0; v < format.classAttribute().numValues(); v++) {
  weka.filters.unsupervised.attribute.Add appender =
   new weka.filters.unsupervised.attribute.Add();
  appender.setAttributeIndex("last");
  appender.setAttributeName(shortName + "_prob_"
   + format.classAttribute().value(v));
  appender.setInputFormat(result);
  result = weka.filters.Filter.useFilter(result, appender);
 }
 result.setRelationName(insts.relationName() + relationNameModifier);
 return result;
}

代码示例来源:origin: Waikato/weka-trunk

/**
 * Reduces the dimensionality of a set of instances so that it contains
 * only the attributes chosen by the last run of attribute selection.
 *
 * @param in the instances to be reduced
 * @return a dimensionality reduced set of instances
 * @exception Exception if the instances can't be reduced
 */
public Instances reduceDimensionality(Instances in) throws Exception {
 if (m_attributeFilter == null) {
  throw new Exception("No feature selection has been performed yet!");
 }
 // Without a transformer the attribute filter is applied as-is.
 if (m_transformer == null) {
  return Filter.useFilter(in, m_attributeFilter);
 }
 // Run each instance through the transformer before filtering.
 int total = in.numInstances();
 Instances mapped =
  new Instances(m_transformer.transformedHeader(), total);
 for (int pos = 0; pos < total; pos++) {
  mapped.add(m_transformer.convertInstance(in.instance(pos)));
 }
 return Filter.useFilter(mapped, m_attributeFilter);
}

代码示例来源:origin: nz.ac.waikato.cms.weka/weka-stable

structure =
  ((weka.classifiers.misc.InputMappedClassifier) classifier)
   .getModelHeader(new Instances(structure, 0));
Instances newInstances = weka.filters.Filter.useFilter(insts, addF);
newInstances.setRelationName(insts.relationName() + relationNameModifier);
return newInstances;

代码示例来源:origin: nz.ac.waikato.cms.weka/weka-stable

/**
 * Builds the no-split node: discretizes the training data and fits an
 * updateable naive Bayes model on the result.
 *
 * @param instances an <code>Instances</code> value
 * @exception Exception if an error occurs
 */
public final void buildClassifier(Instances instances) throws Exception {
 m_nb = new NaiveBayesUpdateable();
 m_disc = new Discretize();
 m_disc.setInputFormat(instances);
 Instances discretized = Filter.useFilter(instances, m_disc);
 m_nb.buildClassifier(discretized);
 // Estimate errors by cross-validation only when enough data is present.
 if (discretized.numInstances() >= 5) {
  m_errors = crossValidate(m_nb, discretized, new Random(1));
 }
 m_numSubsets = 1;
}

代码示例来源:origin: nz.ac.waikato.cms.weka/multiInstanceLearning

Instances insts = new Instances(exmp.dataset(), 0);
insts.add(exmp);
insts = Filter.useFilter(insts, m_ConvertToSI);
insts.deleteAttributeAt(0); // remove the bagIndex attribute
double n = insts.numInstances();
 insts = Filter.useFilter(insts, m_Filter);

代码示例来源:origin: Waikato/weka-trunk

/**
 * Returns a copy of the supplied instances extended with one numeric
 * attribute per class value, named "&lt;classifier&gt;_prob_&lt;value&gt;",
 * intended to hold per-class probability estimates.
 *
 * @param insts the instances to extend
 * @param format the header supplying the class attribute
 * @param classifier the classifier whose simple class name labels the new attributes
 * @param relationNameModifier suffix appended to the relation name
 * @return the extended instances
 * @throws Exception if a filter cannot be applied
 */
private Instances makeDataSetProbabilities(Instances insts, Instances format,
 weka.classifiers.Classifier classifier, String relationNameModifier)
 throws Exception {
 // adjust structure for InputMappedClassifier (if necessary)
 if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) {
  format =
   ((weka.classifiers.misc.InputMappedClassifier) classifier)
    .getModelHeader(new Instances(format, 0));
 }
 // Strip the package prefix from the classifier's class name.
 String qualified = classifier.getClass().getName();
 String simple = qualified.substring(qualified.lastIndexOf('.') + 1);
 Instances extended = new Instances(insts);
 for (int k = 0; k < format.classAttribute().numValues(); k++) {
  weka.filters.unsupervised.attribute.Add adder =
   new weka.filters.unsupervised.attribute.Add();
  adder.setAttributeIndex("last");
  adder.setAttributeName(simple + "_prob_"
   + format.classAttribute().value(k));
  adder.setInputFormat(extended);
  extended = weka.filters.Filter.useFilter(extended, adder);
 }
 extended.setRelationName(insts.relationName() + relationNameModifier);
 return extended;
}

代码示例来源:origin: Waikato/weka-trunk

/**
 * Builds the no-split node: discretizes the training data and fits an
 * updateable naive Bayes model on the result.
 *
 * @param instances an <code>Instances</code> value
 * @exception Exception if an error occurs
 */
public final void buildClassifier(Instances instances) throws Exception {
 m_nb = new NaiveBayesUpdateable();
 m_disc = new Discretize();
 m_disc.setInputFormat(instances);
 Instances binned = Filter.useFilter(instances, m_disc);
 m_nb.buildClassifier(binned);
 // Cross-validated error estimate requires at least five instances.
 if (binned.numInstances() >= 5) {
  m_errors = crossValidate(m_nb, binned, new Random(1));
 }
 m_numSubsets = 1;
}

代码示例来源:origin: nz.ac.waikato.cms.weka/weka-stable

m_estimator = new NaiveBayes();
 Instances trainingData = new Instances(instances);
 if (m_remove != null) {
  trainingData = Filter.useFilter(instances, m_remove);
 new Instances(getOutputFormat(), instances.numInstances());
for (int i = 0; i < instances.numInstances(); i++) {

代码示例来源:origin: Waikato/weka-trunk

structure =
  ((weka.classifiers.misc.InputMappedClassifier) classifier)
   .getModelHeader(new Instances(structure, 0));
Instances newInstances = weka.filters.Filter.useFilter(insts, addF);
newInstances.setRelationName(insts.relationName() + relationNameModifier);
return newInstances;

代码示例来源:origin: Waikato/weka-trunk

/**
 * Preprocesses the training data (center-only or full standardization)
 * and fills the upper triangle of the covariance matrix over all
 * attribute pairs.
 *
 * @throws Exception if a filter cannot be applied
 */
protected void fillCovariance() throws Exception {
 // just center the data or standardize it?
 if (m_center) {
  m_centerFilter = new Center();
  m_centerFilter.setInputFormat(m_TrainInstances);
  m_TrainInstances = Filter.useFilter(m_TrainInstances, m_centerFilter);
 } else {
  m_standardizeFilter = new Standardize();
  m_standardizeFilter.setInputFormat(m_TrainInstances);
  m_TrainInstances = Filter.useFilter(m_TrainInstances, m_standardizeFilter);
 }
 // now compute the covariance matrix (upper triangle only; the matrix
 // type mirrors entries symmetrically)
 m_Correlation = new UpperSymmDenseMatrix(m_NumAttribs);
 final int numInsts = m_TrainInstances.numInstances();
 for (int row = 0; row < m_NumAttribs; row++) {
  for (int col = row; col < m_NumAttribs; col++) {
   double sum = 0;
   for (Instance inst : m_TrainInstances) {
    sum += inst.value(row) * inst.value(col);
   }
   // sample covariance: divide by n - 1
   m_Correlation.set(row, col, sum / (numInsts - 1));
  }
 }
}

代码示例来源:origin: Waikato/weka-trunk

/**
 * Runs a simple sanity test: the filter must accept the input format,
 * return a non-null result, and preserve the number of instances.
 */
public void testTypical() {
 Instances input = new Instances(m_Instances);
 Instances output = null;
 try {
  m_Filter.setInputFormat(input);
 } catch (Exception ex) {
  ex.printStackTrace();
  fail("Exception thrown on setInputFormat(): \n" + ex.getMessage());
 }
 try {
  output = Filter.useFilter(input, m_Filter);
  assertNotNull(output);
 } catch (Exception ex) {
  ex.printStackTrace();
  fail("Exception thrown on useFilter(): \n" + ex.getMessage());
 }
 assertEquals(input.numInstances(), output.numInstances());
}

代码示例来源:origin: nz.ac.waikato.cms.weka/weka-stable

/**
 * Fixture setup: loads the shared filter test dataset, removes a fixed
 * subset of its attributes, and initialises the helper testers.
 *
 * @throws Exception if the test data cannot be loaded or filtered
 */
protected void setUp() throws Exception {
 m_Filter             = getFilter();
 // Load the ARFF data bundled with the Weka filter test resources.
 m_Instances          = new Instances(new BufferedReader(new InputStreamReader(ClassLoader.getSystemResourceAsStream("weka/filters/data/FilterTest.arff"))));
 // Drop attributes 1, 2, 4 and 5 so the tests run on the remaining subset.
 Remove r = new Remove();
 r.setAttributeIndices("1, 2, 4, 5");
 r.setInputFormat(m_Instances);
 m_Instances = Filter.useFilter(m_Instances, r);
 m_OptionTester       = getOptionTester();
 m_GOETester          = getGOETester();
 m_FilteredClassifier = null;
}

代码示例来源:origin: nz.ac.waikato.cms.weka/weka-stable

/**
 * Preprocesses the training data (center-only or full standardization)
 * and fills the upper triangle of the covariance matrix over all
 * attribute pairs.
 *
 * @throws Exception if a filter cannot be applied
 */
protected void fillCovariance() throws Exception {
 // just center the data or standardize it?
 if (m_center) {
  m_centerFilter = new Center();
  m_centerFilter.setInputFormat(m_TrainInstances);
  m_TrainInstances = Filter.useFilter(m_TrainInstances, m_centerFilter);
 } else {
  m_standardizeFilter = new Standardize();
  m_standardizeFilter.setInputFormat(m_TrainInstances);
  m_TrainInstances = Filter.useFilter(m_TrainInstances, m_standardizeFilter);
 }
 // now compute the covariance matrix (upper triangle; the matrix type
 // mirrors entries symmetrically)
 m_Correlation = new UpperSymmDenseMatrix(m_NumAttribs);
 final int count = m_TrainInstances.numInstances();
 for (int a = 0; a < m_NumAttribs; a++) {
  for (int b = a; b < m_NumAttribs; b++) {
   double acc = 0;
   for (Instance inst : m_TrainInstances) {
    acc += inst.value(a) * inst.value(b);
   }
   // sample covariance: divide by n - 1
   m_Correlation.set(a, b, acc / (count - 1));
  }
 }
}

代码示例来源:origin: nz.ac.waikato.cms.weka/weka-stable

/**
 * Runs a simple sanity test: the filter must accept the input format,
 * return a non-null result, and preserve the number of instances.
 */
public void testTypical() {
 Instances data = new Instances(m_Instances);
 Instances filtered = null;
 try {
  m_Filter.setInputFormat(data);
 } catch (Exception ex) {
  ex.printStackTrace();
  fail("Exception thrown on setInputFormat(): \n" + ex.getMessage());
 }
 try {
  filtered = Filter.useFilter(data, m_Filter);
  assertNotNull(filtered);
 } catch (Exception ex) {
  ex.printStackTrace();
  fail("Exception thrown on useFilter(): \n" + ex.getMessage());
 }
 assertEquals(data.numInstances(), filtered.numInstances());
}

代码示例来源:origin: Waikato/weka-trunk

/**
 * Fixture setup: loads the shared filter test dataset, removes a fixed
 * subset of its attributes, and initialises the helper testers.
 *
 * @throws Exception if the test data cannot be loaded or filtered
 */
protected void setUp() throws Exception {
 m_Filter             = getFilter();
 // Load the ARFF data bundled with the Weka filter test resources.
 m_Instances          = new Instances(new BufferedReader(new InputStreamReader(ClassLoader.getSystemResourceAsStream("weka/filters/data/FilterTest.arff"))));
 // Drop attributes 1, 2, 4 and 5 so the tests run on the remaining subset.
 Remove r = new Remove();
 r.setAttributeIndices("1, 2, 4, 5");
 r.setInputFormat(m_Instances);
 m_Instances = Filter.useFilter(m_Instances, r);
 m_OptionTester       = getOptionTester();
 m_GOETester          = getGOETester();
 m_FilteredClassifier = null;
}

代码示例来源:origin: dkpro/dkpro-tc

/**
 * Extends the test data with one nominal 0/1 prediction attribute per
 * label and fills those attributes with the classifier's thresholded
 * per-label outputs.
 *
 * @param testData the multi-label test instances (class index = number of labels)
 * @param cl the trained classifier producing per-label distributions
 * @param thresholdArray per-label decision thresholds
 * @return the test data augmented with the prediction attributes
 * @throws Exception if prediction or filtering fails
 */
private Instances getPredictionInstancesMultiLabel(Instances testData, Classifier cl,
    double[] thresholdArray)
  throws Exception
{
  int numLabels = testData.classIndex();
  // collect the raw distribution for every instance up front
  List<double[]> distributions = new ArrayList<double[]>();
  for (int idx = 0; idx < testData.numInstances(); idx++) {
    distributions.add(cl.distributionForInstance(testData.instance(idx)));
  }
  // append one 0/1 nominal attribute per label to hold the predictions
  Add adder = new Add();
  for (int label = 0; label < numLabels; label++) {
    adder.setAttributeIndex(Integer.toString(numLabels + label + 1));
    adder.setNominalLabels("0,1");
    adder.setAttributeName(
        testData.attribute(label).name() + "_" + WekaTestTask.PREDICTION_CLASS_LABEL_NAME);
    adder.setInputFormat(testData);
    testData = Filter.useFilter(testData, adder);
  }
  // write the thresholded predictions into the freshly added attributes
  for (int idx = 0; idx < distributions.size(); idx++) {
    double[] dist = distributions.get(idx);
    for (int label = 0; label < dist.length; label++) {
      double predicted = dist[label] >= thresholdArray[label] ? 1. : 0.;
      testData.instance(idx).setValue(label + numLabels, predicted);
    }
  }
  return testData;
}

代码示例来源:origin: nz.ac.waikato.cms.weka/weka-stable

/**
 * Performs the actual test: the filter must accept the input format,
 * return a non-null result, and preserve the number of instances.
 */
protected void performTest() {
 Instances working = new Instances(m_Instances);
 Instances produced = null;
 try {
  m_Filter.setInputFormat(working);
 } catch (Exception ex) {
  ex.printStackTrace();
  fail("Exception thrown on setInputFormat(): \n" + ex.getMessage());
 }
 try {
  produced = Filter.useFilter(working, m_Filter);
  assertNotNull(produced);
 } catch (Exception ex) {
  ex.printStackTrace();
  fail("Exception thrown on useFilter(): \n" + ex.getMessage());
 }
 assertEquals(working.numInstances(), produced.numInstances());
}

代码示例来源:origin: nz.ac.waikato.cms.weka/weka-stable

/**
 * Returns a copy of the supplied instances extended with one numeric
 * attribute per cluster, named "prob_cluster&lt;i&gt;", intended to hold
 * cluster-membership probabilities.
 *
 * @param format the instances to extend
 * @param clusterer the clusterer supplying the number of clusters
 * @param relationNameModifier suffix appended to the relation name
 * @return the extended instances
 * @throws Exception if a filter cannot be applied
 */
private Instances makeClusterDataSetProbabilities(Instances format,
 weka.clusterers.Clusterer clusterer, String relationNameModifier)
 throws Exception {
 Instances augmented = new Instances(format);
 for (int c = 0; c < clusterer.numberOfClusters(); c++) {
  weka.filters.unsupervised.attribute.Add appender =
   new weka.filters.unsupervised.attribute.Add();
  appender.setAttributeIndex("last");
  appender.setAttributeName("prob_cluster" + c);
  appender.setInputFormat(augmented);
  augmented = weka.filters.Filter.useFilter(augmented, appender);
 }
 augmented.setRelationName(format.relationName() + relationNameModifier);
 return augmented;
}

相关文章