本文整理了Java中org.apache.lucene.util.BytesRefHash.add()
方法的一些代码示例,展示了BytesRefHash.add()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。BytesRefHash.add()
方法的具体详情如下:
包路径:org.apache.lucene.util.BytesRefHash
类名称:BytesRefHash
方法名:add
[英]Adds a new BytesRef
[中]添加新的BytesRef
代码示例来源:origin: org.apache.lucene/lucene-core
/**
 * Adds a single value to the writer, interning it in the shared hash and
 * recording its term id in the pending list.
 *
 * @param value the term bytes to add
 */
private void addOneValue(BytesRef value) {
  int termID = hash.add(value);
  // hash.add returns -(existingId + 1) for a duplicate, or the new id otherwise.
  if (termID >= 0) {
    // First occurrence: reserve headroom for a future rehash plus the
    // per-unique-value slot needed in the ordMap at flush time.
    iwBytesUsed.addAndGet(2 * Integer.BYTES);
  } else {
    termID = -termID - 1;
  }
  pending.add(termID);
  updateBytesUsed();
}
代码示例来源:origin: org.apache.lucene/lucene-core
/**
 * Adds one value for the current document: interns it in the shared hash and
 * appends its term id to the per-document buffer, growing the buffer as needed.
 *
 * @param value the term bytes to add
 */
private void addOneValue(BytesRef value) {
  int termID = hash.add(value);
  if (termID < 0) {
    // Already interned: hash.add encodes the existing id as -(id + 1).
    termID = -termID - 1;
  } else {
    // New unique value: account for rehash headroom plus its ordMap slot.
    iwBytesUsed.addAndGet(2 * Integer.BYTES);
  }
  // Grow the per-document id buffer when full, tracking the extra bytes.
  if (currentUpto == currentValues.length) {
    final int oldLength = currentValues.length;
    currentValues = ArrayUtil.grow(currentValues, oldLength + 1);
    iwBytesUsed.addAndGet((currentValues.length - oldLength) * Integer.BYTES);
  }
  currentValues[currentUpto++] = termID;
}
代码示例来源:origin: org.apache.lucene/lucene-core
int termID = bytesHash.add(termAtt.getBytesRef());
代码示例来源:origin: org.apache.lucene/lucene-analyzers-common
int ord = words.add(utf8Scratch.get());
if (ord < 0) {
代码示例来源:origin: org.apache.lucene/lucene-analyzers-common
/**
* Adds an input string and its stemmer override output to this builder.
*
* @param input the input char sequence
* @param output the stemmer override output char sequence
* @return <code>false</code> iff the input has already been added to this builder otherwise <code>true</code>.
*/
public boolean add(CharSequence input, CharSequence output) {
final int length = input.length();
if (ignoreCase) {
// convert on the fly to lowercase
charsSpare.grow(length);
final char[] buffer = charsSpare.chars();
for (int i = 0; i < length; ) {
i += Character.toChars(
Character.toLowerCase(
Character.codePointAt(input, i)), buffer, i);
}
spare.copyChars(buffer, 0, length);
} else {
spare.copyChars(input, 0, length);
}
if (hash.add(spare.get()) >= 0) {
outputValues.add(output);
return true;
}
return false;
}
代码示例来源:origin: org.apache.lucene/lucene-analyzers-common
this.needsInputCleaning = ignoreCase;
this.needsOutputCleaning = false; // set if we have an OCONV
flagLookup.add(new BytesRef()); // no flags -> ord 0
代码示例来源:origin: org.apache.lucene/lucene-analyzers-common
int appendFlagsOrd = flagLookup.add(scratch.get());
if (appendFlagsOrd < 0) {
代码示例来源:origin: org.apache.lucene/lucene-analyzers-common
} else {
encodeFlags(flagsScratch, wordForm);
int ord = flagLookup.add(flagsScratch.get());
if (ord < 0) {
代码示例来源:origin: pearson-enabling-technologies/elasticsearch-approx-plugin
// Forwards each consumed term into the target hash; duplicates are
// deduplicated by BytesRefHash.add itself.
@Override
public void consume(final BytesRef ref) throws Exception {
_target.add(ref);
}
代码示例来源:origin: sirensolutions/siren-join
// Adds a term to the underlying BytesRefHash; the hash copies the bytes,
// so the caller may reuse the BytesRef afterwards.
public void add(BytesRef term) {
this.set.add(term);
}
代码示例来源:origin: harbby/presto-connectors
/** Collects the sorted doc-values term of {@code doc} into the shared term set. */
@Override
public void collect(int doc) throws IOException {
  collectorTerms.add(docValues.get(doc));
}
}
代码示例来源:origin: org.apache.lucene/lucene-grouping
/**
 * Prepares the second pass: resets the collected values and registers the
 * group values to match, flagging the empty (null) group separately.
 */
@Override
public void setGroups(Collection<SearchGroup<BytesRef>> searchGroups) {
  this.values.clear();
  this.values.reinit();
  for (SearchGroup<BytesRef> group : searchGroups) {
    if (group.groupValue == null) {
      // A null group value cannot be stored in the hash; track it via a flag.
      includeEmpty = true;
    } else {
      this.values.add(group.groupValue);
    }
  }
  this.secondPass = true;
}
}
代码示例来源:origin: sirensolutions/siren-join
/**
 * Copies every term from another {@link BytesRefTermsSet} into this set.
 *
 * @throws UnsupportedOperationException if {@code terms} is not a BytesRefTermsSet
 */
@Override
protected void addAll(TermsSet terms) {
  if (!(terms instanceof BytesRefTermsSet)) {
    throw new UnsupportedOperationException("Invalid type: BytesRefTermsSet expected.");
  }
  final BytesRefHash source = ((BytesRefTermsSet) terms).set;
  final BytesRef scratch = new BytesRef();
  final int count = source.size();
  for (int ord = 0; ord < count; ord++) {
    // get() fills the scratch ref in place; set.add copies the bytes.
    source.get(ord, scratch);
    set.add(scratch);
  }
}
代码示例来源:origin: harbby/presto-connectors
/** Collects every sorted-set doc-values term of {@code doc} into the shared term set. */
@Override
public void collect(int doc) throws IOException {
  docValues.setDocument(doc);
  // Iterate all ordinals for this document until the sentinel is returned.
  for (long ord = docValues.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = docValues.nextOrd()) {
    collectorTerms.add(docValues.lookupOrd(ord));
  }
}
}
代码示例来源:origin: org.apache.servicemix.bundles/org.apache.servicemix.bundles.lucene
/**
 * Interns {@code value} in the shared hash and appends its term id to the
 * pending list, updating the tracked memory usage.
 *
 * @param value the term bytes to add
 */
private void addOneValue(BytesRef value) {
  int termID = hash.add(value);
  final boolean firstOccurrence = termID >= 0;
  if (firstOccurrence) {
    // Reserve space for rehash growth and the flush-time ordMap slot.
    iwBytesUsed.addAndGet(2 * Integer.BYTES);
  } else {
    // Duplicate: decode the existing id from the -(id + 1) encoding.
    termID = -termID - 1;
  }
  pending.add(termID);
  updateBytesUsed();
}
代码示例来源:origin: org.infinispan/infinispan-embedded-query
/**
 * Adds one value: interns it in the shared hash, records its term id in the
 * pending list, and updates the tracked memory usage.
 *
 * @param value the term bytes to add
 */
private void addOneValue(BytesRef value) {
  int termID = hash.add(value);
  if (termID < 0) {
    // Duplicate value; hash.add returned -(existingId + 1).
    termID = -termID - 1;
  } else {
    // New unique value: reserve rehash headroom plus one ordMap slot.
    iwBytesUsed.addAndGet(2 * RamUsageEstimator.NUM_BYTES_INT);
  }
  pending.add(termID);
  updateBytesUsed();
}
代码示例来源:origin: harbby/presto-connectors
/**
 * Interns {@code value} and appends the resulting term id to the pending list.
 *
 * @param value the term bytes to add
 */
private void addOneValue(BytesRef value) {
  final int added = hash.add(value);
  // Non-negative: a brand new value; negative: -(existingId + 1).
  final int termID;
  if (added >= 0) {
    termID = added;
    // Reserve space for rehash growth and the per-value ordMap slot at flush.
    iwBytesUsed.addAndGet(2 * RamUsageEstimator.NUM_BYTES_INT);
  } else {
    termID = -added - 1;
  }
  pending.add(termID);
  updateBytesUsed();
}
代码示例来源:origin: pearson-enabling-technologies/elasticsearch-approx-plugin
/**
 * Collects the values of {@code docId} into the entry set, honoring the
 * per-shard cap and (when not exhaustive) the configured sample rate.
 */
@Override
public void collect(final int docId) throws IOException {
  // Skip when over the cap, or (in sampling mode) when the doc loses the
  // coin flip; the || short-circuit keeps the RNG call order identical.
  if (_entries.size() > _maxPerShard
      || (!_exhaustive && _random.nextFloat() > _sampleRate)) {
    return;
  }
  final Iter iter = _values.getIter(docId);
  while (iter.hasNext() && _entries.size() < _maxPerShard) {
    _entries.add(iter.next(), iter.hash());
  }
}
代码示例来源:origin: sirensolutions/siren-join
/**
 * Deserializes this set from a stream: pruned flag, term count, then the terms
 * themselves, rebuilding the backing pool and hash from scratch.
 *
 * @param in the stream to read from
 * @throws IOException if the underlying stream read fails
 */
@Override
public void readFrom(StreamInput in) throws IOException {
  this.setIsPruned(in.readBoolean());
  final int size = in.readInt();
  // Fresh backing structures with memory accounted via the shared counter.
  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);
  for (int i = 0; i < size; i++) {
    set.add(in.readBytesRef());
  }
}
代码示例来源:origin: sirensolutions/siren-join
/**
 * Decodes this set from a raw byte layout: one pruned-flag byte, a 4-byte term
 * count, then the encoded terms. Rebuilds the backing pool and hash.
 *
 * @param bytes the encoded set; its offset is advanced as data is consumed
 */
private void readFromBytes(BytesRef bytes) {
  // First byte encodes the pruned flag.
  this.setIsPruned(bytes.bytes[bytes.offset++] == 1);
  // Next comes the number of terms in the set.
  final int size = Bytes.readInt(bytes);
  // Recreate the backing structures, then decode each term into a reusable ref.
  bytesUsed = Counter.newCounter();
  pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
  set = new BytesRefHash(pool);
  final BytesRef scratch = new BytesRef();
  for (int i = 0; i < size; i++) {
    Bytes.readBytesRef(bytes, scratch);
    set.add(scratch);
  }
}
内容来源于网络,如有侵权,请联系作者删除!