本文整理了Java中org.elasticsearch.common.text.Text
类的一些代码示例,展示了Text
类的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Text
类的具体详情如下:
包路径:org.elasticsearch.common.text.Text
类名称:Text
[英]Both String and BytesReference representation of the text. Starts with one of those, and if the other is requested, caches the other one in a local reference so no additional conversion will be needed.
[中]文本同时具有 String 和 BytesReference 两种表示形式。以其中一种形式创建,当请求另一种形式时,会将其缓存在本地引用中,因此不需要额外的转换。
代码示例来源:origin: org.elasticsearch/elasticsearch
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    // Prefer the cached String form when it already exists; otherwise emit the raw UTF-8 bytes
    // directly so no String conversion is forced.
    if (hasString() == false) {
        // TODO: TextBytesOptimization we can use a buffer here to convert it? maybe add a
        // request to jackson to support InputStream as well?
        BytesRef ref = bytes().toBytesRef();
        return builder.utf8Value(ref.bytes, ref.offset, ref.length);
    }
    return builder.value(string());
}
}
代码示例来源:origin: NLPchina/elasticsearch-sql
private void fillInternalSearchHits(List<SearchHit> unionHits, SearchHit[] hits, Map<String, String> fieldNameToAlias) {
    // Copy each incoming hit into a freshly numbered SearchHit, optionally rewriting
    // source field names to their aliases, and append it to the union result.
    for (SearchHit original : hits) {
        SearchHit copy = new SearchHit(currentId, original.getId(), new Text(original.getType()), original.getFields());
        copy.sourceRef(original.getSourceRef());
        copy.getSourceAsMap().clear();
        Map<String, Object> source = original.getSourceAsMap();
        if (fieldNameToAlias.isEmpty() == false) {
            // Rename keys in-place according to the alias mapping before copying them over.
            updateFieldNamesToAlias(source, fieldNameToAlias);
        }
        copy.getSourceAsMap().putAll(source);
        currentId++;
        unionHits.add(copy);
    }
}
代码示例来源:origin: org.elasticsearch/elasticsearch
@Override
public boolean equals(Object o) {
    // Standard equality contract: identity fast-path, exact-class check, then
    // field-by-field comparison of length, offset and text.
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    Entry<?> other = (Entry<?>) o;
    return length == other.length
            && offset == other.offset
            && text.equals(other.text);
}
代码示例来源:origin: dadoonet/fscrawler
for (SearchHit hit : response.getHits()) {
ESSearchHit esSearchHit = new ESSearchHit();
if (!hit.getFields().isEmpty()) {
Map<String, ESDocumentField> esFields = new HashMap<>();
for (Map.Entry<String, DocumentField> entry : hit.getFields().entrySet()) {
esFields.put(entry.getKey(), new ESDocumentField(entry.getKey(), entry.getValue().getValues()));
esSearchHit.setIndex(hit.getIndex());
esSearchHit.setId(hit.getId());
esSearchHit.setSourceAsMap(hit.getSourceAsMap());
for (int i = 0; i < value.fragments().length; i++) {
Text fragment = value.fragments()[i];
texts[i] = fragment.string();
代码示例来源:origin: org.elasticsearch/elasticsearch
ArrayList<TextFragment> fragsList = new ArrayList<>();
List<Object> textsToHighlight;
Analyzer analyzer = HighlightUtils.getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType);
final int maxAnalyzedOffset = context.indexShard().indexSettings().getHighlightMaxAnalyzedOffset();
deprecationLogger.deprecated(
"The length [" + text.length()+ "] of [" + highlighterContext.fieldName + "] field of [" +
hitContext.hit().getId() + "] doc of [" + context.indexShard().shardId().getIndexName() + "] index has " +
"exceeded the allowed maximum of ["+ maxAnalyzedOffset7 + "] set for the next major Elastic version. " +
"This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() +
hitContext.hit().getId() + "] doc of [" + context.indexShard().shardId().getIndexName() + "] index " +
"has exceeded [" + maxAnalyzedOffset + "] - maximum allowed to be analyzed for highlighting. " +
"This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() +
return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) });
代码示例来源:origin: org.elasticsearch/elasticsearch
try {
final Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType);
List<Object> fieldValues = loadFieldValues(fieldType, field, context, hitContext);
if (fieldValues.size() == 0) {
deprecationLogger.deprecated(
"The length [" + fieldValue.length() + "] of [" + highlighterContext.fieldName + "] field of [" +
hitContext.hit().getId() + "] doc of [" + context.indexShard().shardId().getIndexName() + "] index has " +
"exceeded the allowed maximum of [" + maxAnalyzedOffset7 + "] set for the next major Elastic version. " +
"This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() +
throw new IllegalArgumentException(
"The length [" + fieldValue.length() + "] of [" + highlighterContext.fieldName + "] field of [" +
hitContext.hit().getId() + "] doc of [" + context.indexShard().shardId().getIndexName() + "] index " +
"has exceeded [" + maxAnalyzedOffset + "] - maximum allowed to be analyzed for highlighting. " +
"This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() +
return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
代码示例来源:origin: NLPchina/elasticsearch-sql
private void fillMinusHitsFromOneField(String fieldName, Set<Object> fieldValues, SearchHit someHit) {
    // Build one synthetic SearchHit per surviving field value (ids numbered from 1),
    // borrowing type and source ref from a representative hit, then publish them as minusHits.
    List<SearchHit> hitList = new ArrayList<>();
    int currentId = 1;
    for (Object value : fieldValues) {
        ArrayList<Object> singleValue = new ArrayList<Object>();
        singleValue.add(value);
        Map<String, DocumentField> fields = new HashMap<>();
        fields.put(fieldName, new DocumentField(fieldName, singleValue));
        SearchHit hit = new SearchHit(currentId, currentId + "", new Text(someHit.getType()), fields);
        hit.sourceRef(someHit.getSourceRef());
        // Replace the borrowed source map content with just this single field/value pair.
        hit.getSourceAsMap().clear();
        Map<String, Object> source = new HashMap<>();
        source.put(fieldName, value);
        hit.getSourceAsMap().putAll(source);
        currentId++;
        hitList.add(hit);
    }
    int totalSize = currentId - 1;
    SearchHit[] hitArray = hitList.toArray(new SearchHit[totalSize]);
    this.minusHits = new SearchHits(hitArray, totalSize, 1.0f);
}
代码示例来源:origin: harbby/presto-connectors
public XContentBuilder field(XContentBuilderString name, Text value) throws IOException {
    field(name);
    // Fast path 1: bytes are already materialized and array-backed — write UTF-8 directly.
    if (value.hasBytes() && value.bytes().hasArray()) {
        generator.writeUTF8String(value.bytes().array(), value.bytes().arrayOffset(), value.bytes().length());
        return this;
    }
    // Fast path 2: a cached String representation exists.
    if (value.hasString()) {
        generator.writeString(value.string());
        return this;
    }
    // Slow path: force a byte-array conversion and write it as UTF-8.
    // TODO: TextBytesOptimization we can use a buffer here to convert it? maybe add a request to jackson to support InputStream as well?
    BytesArray converted = value.bytes().toBytesArray();
    generator.writeUTF8String(converted.array(), converted.arrayOffset(), converted.length());
    return this;
}
代码示例来源:origin: org.elasticsearch/elasticsearch
public void writeText(Text text) throws IOException {
    // Serialize as a length-prefixed UTF-8 payload. Reuse the byte form when it is
    // already cached; otherwise encode the String through the reusable 'spare' buffer.
    if (text.hasBytes()) {
        BytesReference bytes = text.bytes();
        writeInt(bytes.length());
        bytes.writeTo(this);
    } else {
        spare.copyChars(text.string());
        writeInt(spare.length());
        write(spare.bytes(), 0, spare.length());
    }
}
代码示例来源:origin: org.elasticsearch/elasticsearch
/**
 * Returns the type of the document as a String, or {@code null} when no type is set.
 */
public String getType() {
    if (type == null) {
        return null;
    }
    return type.string();
}
代码示例来源:origin: org.elasticsearch/elasticsearch
public static Option fromXContent(XContentParser parser) {
    // Parse the generic field map first, then pull the option-specific values out of it.
    Map<String, Object> parsed = PARSER.apply(parser, null);
    Text optionText = new Text((String) parsed.get(Suggestion.Entry.Option.TEXT.getPreferredName()));
    Float optionScore = (Float) parsed.get(Suggestion.Entry.Option.SCORE.getPreferredName());
    @SuppressWarnings("unchecked")
    Map<String, Set<String>> contexts = (Map<String, Set<String>>) parsed
            .get(CompletionSuggestion.Entry.Option.CONTEXTS.getPreferredName());
    if (contexts == null) {
        contexts = Collections.emptyMap();
    }
    // The option either prints SCORE or inlines the search hit; a missing score means
    // the hit was inlined, so recover the score from the parsed hit.
    SearchHit hit = null;
    if (optionScore == null) {
        hit = SearchHit.createFromMap(parsed);
        optionScore = hit.getScore();
    }
    CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(-1, optionText, optionScore, contexts);
    option.setHit(hit);
    return option;
}
代码示例来源:origin: com.strapdata.elasticsearch.test/framework
/**
 * Asserts that {@code hit} has highlight results for {@code field}, that the number of
 * highlight fragments satisfies {@code fragmentsMatcher}, and that the string content of
 * the fragment at index {@code fragment} satisfies {@code matcher}.
 */
private static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {
// The field must be present among the hit's highlight fields before indexing into it.
assertThat(hit.getHighlightFields(), hasKey(field));
assertThat(hit.getHighlightFields().get(field).fragments().length, fragmentsMatcher);
assertThat(hit.getHighlightFields().get(field).fragments()[fragment].string(), matcher);
}
代码示例来源:origin: org.elasticsearch/elasticsearch
/**
 * Registers on the given {@link ObjectParser} the handlers for every field an (inner) search
 * hit can contain, so that parsing XContent fills a {@code Map<String, Object>} keyed by the
 * corresponding {@code Fields} constants.
 */
public static void declareInnerHitsParseFields(ObjectParser<Map<String, Object>, Void> parser) {
// Shared metadata fields (registered by a sibling helper).
declareMetaDataFields(parser);
// _type is stored wrapped in a Text; the other identity fields stay plain strings.
parser.declareString((map, value) -> map.put(Fields._TYPE, new Text(value)), new ParseField(Fields._TYPE));
parser.declareString((map, value) -> map.put(Fields._INDEX, value), new ParseField(Fields._INDEX));
parser.declareString((map, value) -> map.put(Fields._ID, value), new ParseField(Fields._ID));
parser.declareString((map, value) -> map.put(Fields._NODE, value), new ParseField(Fields._NODE));
// _score may legitimately be null, hence FLOAT_OR_NULL with a dedicated parse helper.
parser.declareField((map, value) -> map.put(Fields._SCORE, value), SearchHit::parseScore, new ParseField(Fields._SCORE),
ValueType.FLOAT_OR_NULL);
parser.declareLong((map, value) -> map.put(Fields._VERSION, value), new ParseField(Fields._VERSION));
// _shard arrives as a string and is decoded into a ShardId.
parser.declareField((map, value) -> map.put(Fields._SHARD, value), (p, c) -> ShardId.fromString(p.text()),
new ParseField(Fields._SHARD), ValueType.STRING);
// _source is kept as raw bytes rather than a parsed map.
parser.declareObject((map, value) -> map.put(SourceFieldMapper.NAME, value), (p, c) -> parseSourceBytes(p),
new ParseField(SourceFieldMapper.NAME));
parser.declareObject((map, value) -> map.put(Fields.HIGHLIGHT, value), (p, c) -> parseHighlightFields(p),
new ParseField(Fields.HIGHLIGHT));
// "fields" may appear more than once; merge into any map already present instead of replacing it.
parser.declareObject((map, value) -> {
Map<String, DocumentField> fieldMap = get(Fields.FIELDS, map, new HashMap<String, DocumentField>());
fieldMap.putAll(value);
map.put(Fields.FIELDS, fieldMap);
}, (p, c) -> parseFields(p), new ParseField(Fields.FIELDS));
parser.declareObject((map, value) -> map.put(Fields._EXPLANATION, value), (p, c) -> parseExplanation(p),
new ParseField(Fields._EXPLANATION));
parser.declareObject((map, value) -> map.put(NestedIdentity._NESTED, value), NestedIdentity::fromXContent,
new ParseField(NestedIdentity._NESTED));
parser.declareObject((map, value) -> map.put(Fields.INNER_HITS, value), (p,c) -> parseInnerHits(p),
new ParseField(Fields.INNER_HITS));
parser.declareStringArray((map, list) -> map.put(Fields.MATCHED_QUERIES, list), new ParseField(Fields.MATCHED_QUERIES));
// sort values are a heterogeneous array, parsed by SearchSortValues.
parser.declareField((map, list) -> map.put(Fields.SORT, list), SearchSortValues::fromXContent, new ParseField(Fields.SORT),
ValueType.OBJECT_ARRAY);
}
代码示例来源:origin: org.elasticsearch/elasticsearch
/**
 * Wraps every element of {@code strings} in a {@link Text}. Returns the shared
 * {@code EMPTY_ARRAY} constant for an empty input so no allocation happens.
 */
public static Text[] convertFromStringArray(String[] strings) {
    if (strings.length == 0) {
        return EMPTY_ARRAY;
    }
    Text[] result = new Text[strings.length];
    int i = 0;
    for (String s : strings) {
        result[i++] = new Text(s);
    }
    return result;
}
代码示例来源:origin: com.strapdata.elasticsearch/elasticsearch
ArrayList<TextFragment> fragsList = new ArrayList<>();
List<Object> textsToHighlight;
Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer();
return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) });
代码示例来源:origin: org.elasticsearch/elasticsearch
private Map<String, Object> getNestedSource(Map<String, Object> sourceAsMap, HitContext hitContext) {
    // Walk the hit's nested-identity chain, descending one level of the source map per link.
    SearchHit.NestedIdentity identity = hitContext.hit().getNestedIdentity();
    while (identity != null) {
        sourceAsMap = (Map<String, Object>) sourceAsMap.get(identity.getField().string());
        if (sourceAsMap == null) {
            // The nested path is absent from the source — nothing to return.
            return null;
        }
        identity = identity.getChild();
    }
    return sourceAsMap;
}
}
代码示例来源:origin: com.strapdata.elasticsearch/elasticsearch
try {
Analyzer analyzer =
context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer();
List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext);
fieldValues = fieldValues.stream().map(obj -> {
return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
代码示例来源:origin: org.elasticsearch/elasticsearch
Map<String, Object> current = nestedSourceAsMap;
for (SearchHit.NestedIdentity nested = nestedIdentity; nested != null; nested = nested.getChild()) {
String nestedPath = nested.getField().string();
current.put(nestedPath, new HashMap<>());
Object extractedValue = XContentMapValues.extractValue(nestedPath, sourceAsMap);
context.lookup().source().setSourceContentType(contentType);
return new SearchHit(nestedTopDocId, uid.id(), documentMapper.typeText(), nestedIdentity, searchFields);
代码示例来源:origin: org.elasticsearch/elasticsearch
private SearchHit createSearchHit(SearchContext context,
                                  FieldsVisitor fieldsVisitor,
                                  int docId,
                                  int subDocId,
                                  Map<String, Set<String>> storedToRequestedFields,
                                  LeafReaderContext subReaderContext) {
    // Without a fields visitor there is nothing to load — return a bare hit with just the doc id.
    if (fieldsVisitor == null) {
        return new SearchHit(docId);
    }
    Map<String, DocumentField> searchFields = getSearchFields(context, fieldsVisitor, subDocId,
        storedToRequestedFields, subReaderContext);
    // Prefer the mapper's cached type text; fall back to wrapping the raw type string
    // when no document mapper exists for this type.
    DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type());
    Text typeText = documentMapper == null
        ? new Text(fieldsVisitor.uid().type())
        : documentMapper.typeText();
    SearchHit hit = new SearchHit(docId, fieldsVisitor.uid().id(), typeText, searchFields);
    // Point the source lookup at this segment/document so _source is resolvable if requested.
    SourceLookup sourceLookup = context.lookup().source();
    sourceLookup.setSegmentAndDocument(subReaderContext, subDocId);
    if (fieldsVisitor.source() != null) {
        sourceLookup.setSource(fieldsVisitor.source());
    }
    return hit;
}
代码示例来源:origin: harbby/presto-connectors
return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) });
内容来源于网络,如有侵权,请联系作者删除!