org.apache.uima.cas.text.AnnotationFS.setDoubleValue()方法的使用及代码示例

x33g5p2x  于2022-01-15 转载在 其他  
字(11.4k)|赞(0)|评价(0)|浏览(65)

本文整理了Java中org.apache.uima.cas.text.AnnotationFS.setDoubleValue()方法的一些代码示例,展示了AnnotationFS.setDoubleValue()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。AnnotationFS.setDoubleValue()方法的具体详情如下:
包路径:org.apache.uima.cas.text.AnnotationFS
类名称:AnnotationFS
方法名:setDoubleValue

AnnotationFS.setDoubleValue介绍

暂无

代码示例

代码示例来源:origin: apache/opennlp

/**
 * Attaches the tokenizer's per-token probability to each token annotation.
 * Skips the work entirely when no probability feature is configured.
 */
@Override
protected void postProcessAnnotations(Span[] tokens,
                   AnnotationFS[] tokenAnnotations) {
  if (probabilityFeature == null) {
    return;
  }
  double[] probabilities = tokenizer.getTokenProbabilities();
  for (int idx = 0; idx < tokenAnnotations.length; idx++) {
    tokenAnnotations[idx].setDoubleValue(probabilityFeature, probabilities[idx]);
  }
}

代码示例来源:origin: apache/opennlp

/**
 * Records the sentence detector's confidence on each sentence annotation,
 * provided a probability feature was configured.
 */
@Override
protected void postProcessAnnotations(AnnotationFS[] sentences) {
  if (probabilityFeature == null) {
    return;
  }
  double[] probabilities = sentenceDetector.getSentenceProbabilities();
  for (int idx = 0; idx < sentences.length; idx++) {
    sentences[idx].setDoubleValue(probabilityFeature, probabilities[idx]);
  }
}

代码示例来源:origin: apache/opennlp

/**
 * Stores the name finder's confidence for each detected name on the
 * corresponding annotation, when a probability feature is configured.
 */
protected void postProcessAnnotations(Span[] detectedNames,
                   AnnotationFS[] nameAnnotations) {
  if (probabilityFeature == null) {
    return;
  }
  double[] probabilities = mNameFinder.probs(detectedNames);
  for (int idx = 0; idx < nameAnnotations.length; idx++) {
    nameAnnotations[idx].setDoubleValue(probabilityFeature, probabilities[idx]);
  }
}

代码示例来源:origin: apache/opennlp

tokenAnnotation.setDoubleValue(this.probabilityFeature, posProbabilities[index]);

代码示例来源:origin: apache/opennlp

.getName())) {
nameAnnotation
  .setDoubleValue(mStructureFeature, number.doubleValue());

代码示例来源:origin: apache/opennlp

/**
 * Recursively converts an OpenNLP {@code Parse} node and all of its children
 * into CAS annotations. The children are linked to the parent through the
 * children feature, and the parent annotation is added to the index repository.
 */
protected AnnotationFS createAnnotation(CAS cas, int offset, Parse parse) {
  // Convert the subtree bottom-up: children first.
  Parse[] children = parse.getChildren();
  AnnotationFS[] childAnnotations = new AnnotationFS[children.length];
  for (int idx = 0; idx < children.length; idx++) {
    childAnnotations[idx] = createAnnotation(cas, offset, children[idx]);
  }

  int begin = offset + parse.getSpan().getStart();
  int end = offset + parse.getSpan().getEnd();
  AnnotationFS annotation = cas.createAnnotation(mParseType, begin, end);
  annotation.setStringValue(mTypeFeature, parse.getType());
  if (probabilityFeature != null) {
    annotation.setDoubleValue(probabilityFeature, parse.getProb());
  }

  // Wire the already-created child annotations into the parent.
  ArrayFS childArray = cas.createArrayFS(childAnnotations.length);
  childArray.copyFromArray(childAnnotations, 0, 0, childAnnotations.length);
  annotation.setFeatureValue(childrenFeature, childArray);

  cas.getIndexRepository().addFS(annotation);
  return annotation;
}

代码示例来源:origin: jpatanooga/Canova

/**
 * Attaches the tokenizer's per-token probability to each token annotation
 * when a probability feature is configured; otherwise does nothing.
 */
@Override
protected void postProcessAnnotations(Span[] tokens,
  AnnotationFS[] tokenAnnotations) {
 if (probabilityFeature != null) {
  // Fixed: Java-style array declaration instead of C-style "double x[]",
  // and corrected the misspelled local name "tokenProbabilties".
  double[] tokenProbabilities = tokenizer.getTokenProbabilities();
  for (int i = 0; i < tokenAnnotations.length; i++) {
   tokenAnnotations[i].setDoubleValue(probabilityFeature, tokenProbabilities[i]);
  }
 }
}

代码示例来源:origin: org.nd4j/canova-data-nlp

/**
 * Attaches the tokenizer's per-token probability to each token annotation
 * when a probability feature is configured; otherwise does nothing.
 */
@Override
protected void postProcessAnnotations(Span[] tokens,
  AnnotationFS[] tokenAnnotations) {
 if (probabilityFeature != null) {
  // Fixed: Java-style array declaration instead of C-style "double x[]",
  // and corrected the misspelled local name "tokenProbabilties".
  double[] tokenProbabilities = tokenizer.getTokenProbabilities();
  for (int i = 0; i < tokenAnnotations.length; i++) {
   tokenAnnotations[i].setDoubleValue(probabilityFeature, tokenProbabilities[i]);
  }
 }
}

代码示例来源:origin: org.apache.opennlp/opennlp-uima

/**
 * Writes the sentence detector's per-sentence probability onto each sentence
 * annotation; a null probability feature disables this step.
 */
@Override
protected void postProcessAnnotations(AnnotationFS[] sentences) {
  if (probabilityFeature != null) {
    double[] scores = sentenceDetector.getSentenceProbabilities();
    int pos = 0;
    for (AnnotationFS sentence : sentences) {
      sentence.setDoubleValue(probabilityFeature, scores[pos++]);
    }
  }
}

代码示例来源:origin: org.apache.opennlp/opennlp-uima

/**
 * Writes the tokenizer's per-token probability onto each token annotation;
 * a null probability feature disables this step.
 */
@Override
protected void postProcessAnnotations(Span[] tokens,
                   AnnotationFS[] tokenAnnotations) {
  if (probabilityFeature != null) {
    double[] scores = tokenizer.getTokenProbabilities();
    int pos = 0;
    for (AnnotationFS tokenAnnotation : tokenAnnotations) {
      tokenAnnotation.setDoubleValue(probabilityFeature, scores[pos++]);
    }
  }
}

代码示例来源:origin: org.datavec/datavec-data-nlp

/**
 * Attaches the tokenizer's per-token probability to each token annotation
 * when a probability feature is configured; otherwise does nothing.
 */
@Override
protected void postProcessAnnotations(Span[] tokens, AnnotationFS[] tokenAnnotations) {
    if (probabilityFeature != null) {
        // Fixed: Java-style array declaration instead of C-style "double x[]",
        // and corrected the misspelled local name "tokenProbabilties".
        double[] tokenProbabilities = tokenizer.getTokenProbabilities();
        for (int i = 0; i < tokenAnnotations.length; i++) {
            tokenAnnotations[i].setDoubleValue(probabilityFeature, tokenProbabilities[i]);
        }
    }
}

代码示例来源:origin: org.apache.opennlp/opennlp-uima

/**
 * Writes the name finder's confidence for each detected name onto the
 * matching annotation; a null probability feature disables this step.
 */
protected void postProcessAnnotations(Span[] detectedNames,
                   AnnotationFS[] nameAnnotations) {
  if (probabilityFeature != null) {
    double[] scores = mNameFinder.probs(detectedNames);
    int pos = 0;
    for (AnnotationFS nameAnnotation : nameAnnotations) {
      nameAnnotation.setDoubleValue(probabilityFeature, scores[pos++]);
    }
  }
}

代码示例来源:origin: org.apache.uima/ruta-core

/**
 * Assigns {@code o} to the given feature of {@code annotationFS}, converting the
 * value to match the feature's declared range type. Unsupported range/value
 * combinations are silently ignored (matching the original behavior).
 *
 * @throws IllegalArgumentException if {@code feature} or {@code o} is null
 */
private void setFeatureValue(AnnotationFS annotationFS, Feature feature, Object o) {
 if (feature == null || o == null) {
  throw new IllegalArgumentException("Not able to assign feature value: " + o + " -> "
      + feature);
 }
 Type range = feature.getRange();
 String rangeName = range.getName();
 if (rangeName.equals(CAS.TYPE_NAME_STRING) && o instanceof String) {
  annotationFS.setStringValue(feature, (String) o);
 } else if (rangeName.equals(CAS.TYPE_NAME_INTEGER) && o instanceof Number) {
  annotationFS.setIntValue(feature, ((Number) o).intValue());
 } else if (rangeName.equals(CAS.TYPE_NAME_DOUBLE) && o instanceof Number) {
  annotationFS.setDoubleValue(feature, ((Number) o).doubleValue());
 } else if (rangeName.equals(CAS.TYPE_NAME_FLOAT) && o instanceof Number) {
  annotationFS.setFloatValue(feature, ((Number) o).floatValue());
 } else if (rangeName.equals(CAS.TYPE_NAME_BYTE) && o instanceof Number) {
  annotationFS.setByteValue(feature, ((Number) o).byteValue());
 } else if (rangeName.equals(CAS.TYPE_NAME_SHORT) && o instanceof Number) {
  annotationFS.setShortValue(feature, ((Number) o).shortValue());
 } else if (rangeName.equals(CAS.TYPE_NAME_LONG) && o instanceof Number) {
  annotationFS.setLongValue(feature, ((Number) o).longValue());
 } else if (rangeName.equals(CAS.TYPE_NAME_BOOLEAN) && o instanceof Boolean) {
  annotationFS.setBooleanValue(feature, (Boolean) o);
 } else if (rangeName.equals(CAS.TYPE_NAME_STRING) && o instanceof Type) {
  // Fixed: was the bitwise "&" operator; logical "&&" short-circuits like
  // every other branch in this chain.
  annotationFS.setStringValue(feature, ((Type) o).getName());
 }
}

代码示例来源:origin: inception-project/inception

annotation.setDoubleValue(confidenceFeature, confidence);
annotation.setStringValue(labelFeature, label);
aCas.addFsToIndexes(annotation);

代码示例来源:origin: inception-project/inception

int end = tokenAnnotations.get(prediction.getEnd() - 1).getEnd();
AnnotationFS annotation = aCas.createAnnotation(predictionType, begin, end);
annotation.setDoubleValue(confidenceFeature, prediction.getProb());
annotation.setStringValue(labelFeature, label);
aCas.addFsToIndexes(annotation);

代码示例来源:origin: inception-project/inception

/**
 * Categorizes every sentence in the CAS with the trained doccat model and
 * records the winning label together with its confidence as a PredictedSpan
 * annotation covering the sentence.
 */
@Override
public void predict(RecommenderContext aContext, CAS aCas) throws RecommendationException
{
    DoccatModel model = aContext.get(KEY_MODEL).orElseThrow(() ->
            new RecommendationException("Key [" + KEY_MODEL + "] not found in context"));

    DocumentCategorizerME categorizer = new DocumentCategorizerME(model);
    Type sentenceType = getType(aCas, Sentence.class);
    Type predictionType = getAnnotationType(aCas, PredictedSpan.class);
    Type tokenType = getType(aCas, Token.class);
    Feature scoreFeature = predictionType.getFeatureByBaseName("score");
    Feature labelFeature = predictionType.getFeatureByBaseName("label");

    for (AnnotationFS sentence : select(aCas, sentenceType)) {
        // Feed the covered token texts to the categorizer.
        String[] tokens = selectCovered(tokenType, sentence).stream()
                .map(AnnotationFS::getCoveredText)
                .toArray(String[]::new);
        double[] outcome = categorizer.categorize(tokens);

        AnnotationFS prediction = aCas.createAnnotation(predictionType,
                sentence.getBegin(), sentence.getEnd());
        prediction.setDoubleValue(scoreFeature, NumberUtils.max(outcome));
        prediction.setStringValue(labelFeature, categorizer.getBestCategory(outcome));
        aCas.addFsToIndexes(prediction);
    }
}

代码示例来源:origin: inception-project/inception

/**
 * Predicts a label for every token in the CAS using the majority model and
 * materializes each prediction as a PredictedSpan annotation carrying its
 * score and label.
 */
@Override
    public void predict(RecommenderContext aContext, CAS aCas) throws RecommendationException
    {
        DataMajorityModel model = aContext.get(KEY_MODEL).orElseThrow(() ->
            new RecommendationException("Key [" + KEY_MODEL + "] not found in context"));

        // Run the model over all token annotations.
        Type tokenType = getAnnotationType(aCas, Token.class);
        List<Annotation> predicted = predict(CasUtil.select(aCas, tokenType), model);

        // Convert the raw predictions into indexed PredictedSpan annotations.
        Type predictionType = getAnnotationType(aCas, PredictedSpan.class);
        Feature scoreFeature = predictionType.getFeatureByBaseName("score");
        Feature labelFeature = predictionType.getFeatureByBaseName("label");

        for (Annotation ann : predicted) {
            AnnotationFS fs = aCas.createAnnotation(predictionType, ann.begin, ann.end);
            fs.setDoubleValue(scoreFeature, ann.score);
            fs.setStringValue(labelFeature, ann.label);
            aCas.addFsToIndexes(fs);
        }
    }
// end::predict1[]

代码示例来源:origin: inception-project/inception

/**
 * Looks up dictionary matches in the CAS via the trie model and annotates each
 * matched span as a PredictedSpan carrying its score and label.
 */
@Override
public void predict(RecommenderContext aContext, CAS aCas) throws RecommendationException
{
    Trie<DictEntry> dict = aContext.get(KEY_MODEL).orElseThrow(() ->
        new RecommendationException("Key [" + KEY_MODEL + "] not found in context"));

    Type predictionType = getAnnotationType(aCas, PredictedSpan.class);
    Feature scoreFeature = predictionType.getFeatureByBaseName("score");
    Feature labelFeature = predictionType.getFeatureByBaseName("label");

    for (Sample sample : predict(0, aCas, dict)) {
        for (Span span : sample.getSpans()) {
            AnnotationFS fs = aCas.createAnnotation(predictionType,
                span.getBegin(), span.getEnd());
            fs.setDoubleValue(scoreFeature, span.getScore());
            fs.setStringValue(labelFeature, span.getLabel());
            aCas.addFsToIndexes(fs);
        }
    }
}

代码示例来源:origin: de.tudarmstadt.ukp.inception.app/inception-imls-stringmatch

/**
 * Runs the dictionary trie over the CAS text and creates one PredictedSpan
 * annotation (with score and label) for every span the dictionary matched.
 */
@Override
public void predict(RecommenderContext aContext, CAS aCas) throws RecommendationException
{
    Trie<DictEntry> dict = aContext.get(KEY_MODEL).orElseThrow(() ->
        new RecommendationException("Key [" + KEY_MODEL + "] not found in context"));

    Type predictionType = getAnnotationType(aCas, PredictedSpan.class);
    Feature scoreFeature = predictionType.getFeatureByBaseName("score");
    Feature labelFeature = predictionType.getFeatureByBaseName("label");

    List<Sample> samples = predict(0, aCas, dict);
    for (Sample sample : samples) {
        for (Span span : sample.getSpans()) {
            AnnotationFS fs =
                aCas.createAnnotation(predictionType, span.getBegin(), span.getEnd());
            fs.setDoubleValue(scoreFeature, span.getScore());
            fs.setStringValue(labelFeature, span.getLabel());
            aCas.addFsToIndexes(fs);
        }
    }
}

代码示例来源:origin: org.apache.opennlp/opennlp-uima

/**
 * Recursively maps an OpenNLP {@code Parse} node and its children onto CAS
 * annotations, attaches the child annotations via the children feature, and
 * adds the resulting parent annotation to the CAS index repository.
 */
protected AnnotationFS createAnnotation(CAS cas, int offset, Parse parse) {
 // Depth-first: build annotations for all children before the parent.
 Parse[] children = parse.getChildren();
 AnnotationFS[] childAnnotations = new AnnotationFS[children.length];
 for (int idx = 0; idx < children.length; idx++) {
  childAnnotations[idx] = createAnnotation(cas, offset, children[idx]);
 }

 AnnotationFS annotation = cas.createAnnotation(mParseType,
   offset + parse.getSpan().getStart(), offset + parse.getSpan().getEnd());
 annotation.setStringValue(mTypeFeature, parse.getType());
 if (probabilityFeature != null) {
  annotation.setDoubleValue(probabilityFeature, parse.getProb());
 }

 ArrayFS childArray = cas.createArrayFS(childAnnotations.length);
 childArray.copyFromArray(childAnnotations, 0, 0, childAnnotations.length);
 annotation.setFeatureValue(childrenFeature, childArray);

 cas.getIndexRepository().addFS(annotation);
 return annotation;
}

相关文章