Usage of the org.apache.lucene.analysis.Token.endOffset() Method, with Code Examples


This article collects Java code examples for the org.apache.lucene.analysis.Token.endOffset() method and shows how it is used in practice. The examples are drawn from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as useful references. Details of Token.endOffset() are as follows:
Package path: org.apache.lucene.analysis.Token
Class name: Token
Method name: endOffset

Token.endOffset overview

End in source text: endOffset() returns the offset one past the position of the token's last character in the original input.
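
Before the harvested examples, here is a minimal hedged sketch using the Lucene 2.x/3.x-era Token API that the excerpts below rely on (the sample text and offsets are made up for illustration):

import org.apache.lucene.analysis.Token;

public class EndOffsetDemo {
  public static void main(String[] args) {
    // "quick" occupies characters 4..8 of "the quick fox",
    // so startOffset() is 4 and endOffset() is 9 (one past the last character).
    Token token = new Token("quick", 4, 9);
    System.out.println(token.startOffset()); // 4
    System.out.println(token.endOffset());   // 9
  }
}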

Code Examples

Code example source: org.dspace.dependencies.solr/dspace-solr-core

public int compare(Token o1, Token o2) {
  return o1.endOffset() - o2.endOffset();
}
});
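
The fragment above is the body of an anonymous Comparator; the trailing "});" closes the enclosing call that the excerpt truncates. A hedged reconstruction of how such a comparator is typically applied (the token values are made up):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import org.apache.lucene.analysis.Token;

public class SortByEndOffset {
  public static void main(String[] args) {
    List<Token> tokens = new ArrayList<Token>();
    tokens.add(new Token("world", 6, 11));
    tokens.add(new Token("hello", 0, 5));
    // Order tokens by where they end in the source text.
    Collections.sort(tokens, new Comparator<Token>() {
      public int compare(Token o1, Token o2) {
        return o1.endOffset() - o2.endOffset();
      }
    });
    // tokens is now ordered "hello" (ends at 5), then "world" (ends at 11)
  }
}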

Code example source: org.compass-project/compass

public int getEndOffset() {
  return token.endOffset();
}

Code example source: org.infinispan/infinispan-embedded-query

/**
 * The default implementation adds last prefix token end offset to the suffix token start and end offsets.
 *
 * @param suffixToken a token from the suffix stream
 * @param lastPrefixToken the last token from the prefix stream
 * @return consumer token
 */
public Token updateSuffixToken(Token suffixToken, Token lastPrefixToken) {
 suffixToken.setOffset(lastPrefixToken.endOffset() + suffixToken.startOffset(),
            lastPrefixToken.endOffset() + suffixToken.endOffset());
 return suffixToken;
}
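
To make the arithmetic concrete, a hedged example with made-up offsets: if the prefix stream's last token ends at offset 5, a suffix token spanning [0, 5) in its own stream is shifted to [5, 10) in the concatenated text:

Token lastPrefixToken = new Token("hello", 0, 5); // prefix text ends at offset 5
Token suffixToken = new Token("world", 0, 5);     // offsets relative to the suffix stream
suffixToken.setOffset(lastPrefixToken.endOffset() + suffixToken.startOffset(),
                      lastPrefixToken.endOffset() + suffixToken.endOffset());
// suffixToken.startOffset() == 5, suffixToken.endOffset() == 10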

Code example source: org.infinispan/infinispan-embedded-query

public Token updateInputToken(Token inputToken, Token lastPrefixToken) {
 inputToken.setOffset(lastPrefixToken.endOffset() + inputToken.startOffset(), 
            lastPrefixToken.endOffset() + inputToken.endOffset());
 return inputToken;
}

Code example source: org.infinispan/infinispan-embedded-query

public Token updateSuffixToken(Token suffixToken, Token lastInputToken) {
 suffixToken.setOffset(lastInputToken.endOffset() + suffixToken.startOffset(),
            lastInputToken.endOffset() + suffixToken.endOffset());
 return suffixToken;
}

Code example source: org.apache.lucene/lucene-analyzers

public Token updateInputToken(Token inputToken, Token lastPrefixToken) {
 inputToken.setStartOffset(lastPrefixToken.endOffset() + inputToken.startOffset());
 inputToken.setEndOffset(lastPrefixToken.endOffset() + inputToken.endOffset());
 return inputToken;
}

Code example source: org.apache.lucene/lucene-analyzers

/**
 * The default implementation adds last prefix token end offset to the suffix token start and end offsets.
 *
 * @param suffixToken a token from the suffix stream
 * @param lastPrefixToken the last token from the prefix stream
 * @return consumer token
 */
public Token updateSuffixToken(Token suffixToken, Token lastPrefixToken) {
 suffixToken.setStartOffset(lastPrefixToken.endOffset() + suffixToken.startOffset());
 suffixToken.setEndOffset(lastPrefixToken.endOffset() + suffixToken.endOffset());
 return suffixToken;
}

Code example source: org.apache.lucene/lucene-analyzers

public Token updateSuffixToken(Token suffixToken, Token lastInputToken) {
 suffixToken.setStartOffset(lastInputToken.endOffset() + suffixToken.startOffset());
 suffixToken.setEndOffset(lastInputToken.endOffset() + suffixToken.endOffset());
 return suffixToken;
}

Code example source: org.dspace.dependencies.solr/dspace-solr-core

/** Construct a compound token. */
private Token gramToken(Token first, Token second) {
 buffer.setLength(0);
 buffer.append(first.termText());
 buffer.append(SEPARATOR);
 buffer.append(second.termText());
 Token result = new Token(buffer.toString(), first.startOffset(), second.endOffset(), "gram");
 result.setPositionIncrement(0);
 return result;
}
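
A hedged usage sketch with made-up tokens: given adjacent tokens "new" at [0, 3) and "york" at [4, 8), gramToken produces a compound token that spans both source ranges and, thanks to setPositionIncrement(0), is stacked on the same position as the second token rather than consuming a position of its own:

Token first = new Token("new", 0, 3);
Token second = new Token("york", 4, 8);
Token gram = gramToken(first, second);
// gram: term "new<SEPARATOR>york" (whatever separator the class defines),
// startOffset 0, endOffset 8, type "gram", positionIncrement 0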

Code example source: DiceTechJobs/SolrPlugins

private Token newToken(Token existing, String newText){
  return new Token(newText, existing.startOffset(), existing.endOffset());
}

Code example source: org.dspace.dependencies.solr/dspace-solr-core

public final Token next(Token in) throws IOException {
  for (Token token=input.next(in); token!=null; token=input.next(in)) {
   final int len = token.endOffset() - token.startOffset();
   if (len<min || len>max) continue;
   return token;
  }
  return null;
 }
}
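
Note that this filter measures token length by source span (endOffset() - startOffset()) rather than by term text length; the two can differ, for example for stacked synonyms. A hedged illustration with made-up values:

Token token = new Token("hello", 10, 15);
int len = token.endOffset() - token.startOffset(); // 5 source characters
// with min = 3 and max = 10, this token would pass the filter and be returned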

Code example source: org.compass-project/compass

private void addAliasesToStack(Token token) {
    String[] synonyms = synonymLookupProvider.lookupSynonyms(token.termText());

    if (synonyms == null) {
      return;
    }

    for (int i = 0; i < synonyms.length; i++) {
      Token synToken = new Token(synonyms[i], token.startOffset(), token.endOffset(), TOKEN_TYPE_SYNONYM);
      synToken.setPositionIncrement(0);
      synonymStack.addFirst(synToken);
    }
  }
}
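
A hedged sketch of the effect ("synonym" below stands in for whatever TOKEN_TYPE_SYNONYM is defined as): the synonym token reuses the original token's offsets and gets positionIncrement 0, so it occupies the same position and highlights the same span of source text as the original:

Token original = new Token("fast", 4, 8);
Token synToken = new Token("quick", original.startOffset(), original.endOffset(), "synonym");
synToken.setPositionIncrement(0); // stacked on the same position as "fast"
// both tokens report startOffset() == 4 and endOffset() == 8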

Code example source: org.dspace.dependencies.solr/dspace-solr-core

private Token newTok(Token orig, int start, int end) {
 int startOff = orig.startOffset();
 int endOff = orig.endOffset();
 // if length by start + end offsets doesn't match the term text then assume
 // this is a synonym and don't adjust the offsets.
 if (orig.termLength() == endOff-startOff) {
  endOff = startOff + end;
  startOff += start;     
 }
 return (Token)orig.clone(orig.termBuffer(), start, (end - start), startOff, endOff);
}
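
A hedged walk-through with made-up values: when the term text matches its source span, the sub-range offsets are shifted into source coordinates; for a synonym (term length differing from the span), the offsets are left alone:

// orig: term "foo-bar", startOffset 10, endOffset 17 (termLength 7 == 17 - 10)
// newTok(orig, 4, 7) -> sub-token "bar" with startOff 10 + 4 = 14, endOff 10 + 7 = 17
// if orig were a stacked synonym with termLength != 7, offsets would stay [10, 17)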

Code example source: org.apache.lucene/lucene-core-jfrog

void newTerm(Token t, RawPostingList p0) {
 assert docState.testPoint("TermVectorsTermsWriterPerField.newTerm start");
 TermVectorsTermsWriter.PostingList p = (TermVectorsTermsWriter.PostingList) p0;
 p.freq = 1;
 if (doVectorOffsets) {
  final int startOffset = fieldState.offset + t.startOffset();
  final int endOffset = fieldState.offset + t.endOffset();
  termsHashPerField.writeVInt(1, startOffset);
  termsHashPerField.writeVInt(1, endOffset - startOffset);
  p.lastOffset = endOffset;
 }
 if (doVectorPositions) {
  termsHashPerField.writeVInt(0, fieldState.position);
  p.lastPosition = fieldState.position;
 }
}

Code example source: org.apache.lucene/lucene-core-jfrog

void addTerm(Token t, RawPostingList p0) {
 assert docState.testPoint("TermVectorsTermsWriterPerField.addTerm start");
 TermVectorsTermsWriter.PostingList p = (TermVectorsTermsWriter.PostingList) p0;
 p.freq++;
 if (doVectorOffsets) {
  final int startOffset = fieldState.offset + t.startOffset();
  final int endOffset = fieldState.offset + t.endOffset();
  termsHashPerField.writeVInt(1, startOffset - p.lastOffset);
  termsHashPerField.writeVInt(1, endOffset - startOffset);
  p.lastOffset = endOffset;
 }
 if (doVectorPositions) {
  termsHashPerField.writeVInt(0, fieldState.position - p.lastPosition);
  p.lastPosition = fieldState.position;
 }
}
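
Both methods delta-encode offsets to keep term vectors compact: newTerm writes the first occurrence's absolute startOffset, while addTerm writes only the gap since the previous occurrence's end (startOffset - p.lastOffset), plus the token length (endOffset - startOffset). A simplified, hedged sketch of the encoding idea, independent of Lucene internals:

// Hypothetical [start, end) offsets of three occurrences of one term:
int[][] occurrences = { {4, 9}, {19, 24}, {42, 47} };
int lastOffset = 0;
for (int[] occ : occurrences) {
  int startGap = occ[0] - lastOffset; // 4, then 10, then 18: small deltas
  int length = occ[1] - occ[0];       // always endOffset - startOffset
  // small non-negative ints like these encode compactly as VInts
  lastOffset = occ[1];
}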

Code example source: ajermakovics/eclipse-instasearch

private void applyToken(Token token)
{
  termAtt.setTermBuffer(token.termBuffer(), 0, token.termLength());
  posAtt.setPositionIncrement(token.getPositionIncrement());
  offsetAtt.setOffset(token.startOffset(), token.endOffset());
}

Code example source: org.apache.lucene/lucene-analyzers

private void setCurrentToken(Token token) {
 if (token == null) return;
 clearAttributes();
 termAtt.copyBuffer(token.buffer(), 0, token.length());
 posIncrAtt.setPositionIncrement(token.getPositionIncrement());
 flagsAtt.setFlags(token.getFlags());
 offsetAtt.setOffset(token.startOffset(), token.endOffset());
 typeAtt.setType(token.type());
 payloadAtt.setPayload(token.getPayload());
}
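
These last two examples bridge the legacy Token API and the attribute-based TokenStream API: every piece of Token state, offsets included, is copied into the corresponding attribute. A hedged sketch (assuming a Lucene 3.x-era API) of a hypothetical stream that replays pre-built Tokens this way:

import java.io.IOException;
import java.util.Iterator;
import java.util.List;

import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

// Hypothetical stream that replays pre-built Tokens via the attribute API.
class TokenReplayStream extends TokenStream {
  private final Iterator<Token> tokens;
  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);

  TokenReplayStream(List<Token> tokens) {
    this.tokens = tokens.iterator();
  }

  @Override
  public boolean incrementToken() throws IOException {
    if (!tokens.hasNext()) return false;
    Token token = tokens.next();
    clearAttributes();
    termAtt.copyBuffer(token.buffer(), 0, token.length());
    offsetAtt.setOffset(token.startOffset(), token.endOffset());
    return true;
  }
}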
