本文整理了Java中org.elasticsearch.action.bulk.BulkRequest类的一些代码示例,展示了BulkRequest类的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。BulkRequest类的具体详情如下:
包路径:org.elasticsearch.action.bulk.BulkRequest
类名称:BulkRequest
[英]A bulk request holds an ordered IndexRequests, DeleteRequests and UpdateRequests and allows to executes it in a single batch. Note that we only support refresh on the bulk request not per item.
[中]批量请求包含一个有序的IndexRequests、DeleteRequests和UpdateRequests,并允许在单个批处理中执行它。请注意,我们只支持批量请求的刷新,而不支持每项刷新。
代码示例来源:origin: spring-projects/spring-data-elasticsearch
@Override
public void bulkIndex(List<IndexQuery> queries) {
	// Collect every index operation into one batch request.
	BulkRequest bulkRequest = new BulkRequest();
	queries.forEach(query -> bulkRequest.add(prepareIndex(query)));
	try {
		// Execute the batch and surface any per-item failures as an exception.
		checkForBulkUpdateFailure(client.bulk(bulkRequest));
	} catch (IOException ex) {
		throw new ElasticsearchException("Error while bulk for request: " + bulkRequest.toString(), ex);
	}
}
代码示例来源:origin: org.elasticsearch/elasticsearch
private boolean isOverTheLimit() {
	// A limit of -1 means "unbounded" for that dimension. The bulk is over
	// the limit as soon as either the configured action count or the
	// configured byte size has been reached. Short-circuits so the size
	// estimate is only computed when the action check did not already pass.
	return (bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions)
			|| (bulkSize != -1 && bulkRequest.estimatedSizeInBytes() >= bulkSize);
}
代码示例来源:origin: apache/flink
@Override
// Mock implementation of BulkProcessor#flush(): drains the accumulated mock
// requests and drives the listener callbacks exactly like a real flush would.
// Gated on flushLatch so the test controls when flushing may proceed.
public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
// Loop because the failure handler may re-add requests during afterBulk,
// which land in the freshly swapped-in nextBulkRequest.
while (nextBulkRequest.numberOfActions() > 0) {
// wait until we are allowed to continue with the flushing
flushLatch.await();
// create a copy of the accumulated mock requests, so that
// re-added requests from the failure handler are included in the next bulk
BulkRequest currentBulkRequest = nextBulkRequest;
nextBulkRequest = new BulkRequest();
listener.beforeBulk(123L, currentBulkRequest);
if (nextBulkFailure == null) {
// No whole-bulk failure configured: fabricate one item response per
// request, success or failure according to mockItemFailuresList.
BulkItemResponse[] mockResponses = new BulkItemResponse[currentBulkRequest.requests().size()];
for (int i = 0; i < currentBulkRequest.requests().size(); i++) {
Throwable mockItemFailure = mockItemFailuresList.get(i);
if (mockItemFailure == null) {
// the mock response for the item is success
mockResponses[i] = new BulkItemResponse(i, "opType", mock(ActionResponse.class));
} else {
// the mock response for the item is failure
mockResponses[i] = new BulkItemResponse(i, "opType", new BulkItemResponse.Failure("index", "type", "id", mockItemFailure));
}
}
listener.afterBulk(123L, currentBulkRequest, new BulkResponse(mockResponses, 1000L));
} else {
// Whole-bulk failure configured: report it for the entire batch.
listener.afterBulk(123L, currentBulkRequest, nextBulkFailure);
}
}
return null;
}
}).when(mockBulkProcessor).flush();
代码示例来源:origin: richardwilly98/elasticsearch-river-mongodb
@Override
public void beforeBulk(long executionId, BulkRequest request) {
	checkBulkProcessorAvailability();
	logger.trace("beforeBulk - new bulk [{}] of items [{}]", executionId, request.numberOfActions());
	// Nothing special to do unless a drop-collection flush was requested.
	if (!flushBulkProcessor.get()) {
		return;
	}
	logger.trace("About to flush bulk request index[{}] - type[{}]", index, type);
	// Discard every queued request up to and including the last
	// drop-collection marker; only requests after it are still relevant.
	int lastDropCollectionIndex = findLastDropCollection(request.requests());
	request.requests().subList(0, lastDropCollectionIndex + 1).clear();
	try {
		dropRecreateMapping();
		deletedDocuments.set(0);
		updatedDocuments.set(0);
		insertedDocuments.set(0);
		flushBulkProcessor.set(false);
	} catch (Throwable t) {
		// The river cannot recover from a failed drop/recreate: mark it
		// failed, discard the remaining requests and shut everything down.
		logger.error("Drop collection operation failed", t);
		MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
		request.requests().clear();
		bulkProcessor.close();
		river.close();
	}
}
代码示例来源:origin: apache/nifi
@Override
public IndexOperationResponse add(List<IndexOperationRequest> operations) throws IOException {
	// Translate each operation into an IndexRequest and batch them together.
	BulkRequest bulkRequest = new BulkRequest();
	for (IndexOperationRequest operation : operations) {
		bulkRequest.add(new IndexRequest(operation.getIndex(), operation.getType(), operation.getId())
				.source(operation.getFields()));
	}
	// Make the indexed documents searchable as soon as the bulk returns.
	bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
	BulkResponse response = highLevelClient.bulk(bulkRequest);
	return new IndexOperationResponse(response.getTookInMillis(), response.getIngestTookInMillis());
}
代码示例来源:origin: org.elasticsearch/elasticsearch
// Wraps a single document write in a one-action bulk request so it can be
// executed through the bulk path, carrying over timeout, refresh policy and
// active-shard settings from the original request.
public static BulkRequest toSingleItemBulkRequest(ReplicatedWriteRequest request) {
	BulkRequest singleItemBulk = new BulkRequest();
	singleItemBulk.add((DocWriteRequest) request);
	singleItemBulk.setRefreshPolicy(request.getRefreshPolicy());
	singleItemBulk.timeout(request.timeout());
	singleItemBulk.waitForActiveShards(request.waitForActiveShards());
	// The bulk request now owns the refresh; clear it on the original so the
	// refresh is not performed twice.
	request.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE);
	return singleItemBulk;
}
}
代码示例来源:origin: tomoya92/pybbs
/**
 * Deletes the documents with the given ids from this index, flushing to
 * Elasticsearch in batches of 1000 delete actions.
 *
 * @param type the mapping type of the documents to delete
 * @param ids  the document ids to delete
 */
public void bulkDeleteDocument(String type, List<Integer> ids) {
	try {
		if (this.instance() == null) return;
		BulkRequest requests = new BulkRequest();
		int count = 0;
		for (Integer id : ids) {
			count++;
			requests.add(new DeleteRequest(name, type, String.valueOf(id)));
			if (count % 1000 == 0) {
				client.bulk(requests, RequestOptions.DEFAULT);
				// Start a fresh BulkRequest: clearing requests() alone would
				// leave the request's internal size accounting stale.
				requests = new BulkRequest();
				count = 0;
			}
		}
		// Flush the final partial batch, if any.
		if (requests.numberOfActions() > 0) client.bulk(requests, RequestOptions.DEFAULT);
	} catch (IOException e) {
		// Pass the exception itself so the stack trace is preserved
		// (getMessage() alone may be null and loses the cause chain).
		log.error(e.getMessage(), e);
	}
}
代码示例来源:origin: org.elasticsearch/elasticsearch
// Builds a new bulk request containing only the items that failed in the
// previous attempt, preserving their original relative order.
private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) {
	BulkRequest retryRequest = new BulkRequest();
	BulkItemResponse[] items = bulkItemResponses.getItems();
	for (int slot = 0; slot < items.length; slot++) {
		if (items[slot].isFailed()) {
			// Item responses are positionally aligned with the requests of
			// the bulk that produced them.
			retryRequest.add(currentBulkRequest.requests().get(slot));
		}
	}
	return retryRequest;
}
代码示例来源:origin: apache/flink
@Override
public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
	// Capture the intercepted IndexRequest into the accumulating mock bulk.
	IndexRequest interceptedRequest = (IndexRequest) invocationOnMock.getArgument(0);
	nextBulkRequest.add(interceptedRequest);
	return null;
}
});
代码示例来源:origin: org.elasticsearch/elasticsearch
// Returns the bulk request to execute. If no items have been answered
// inline (itemResponses is empty) the original request is used as-is;
// otherwise a new bulk is built containing only the not-yet-failed slots,
// and originalSlots records the mapping from new positions back to the
// original ones so responses can be re-aligned later.
BulkRequest getBulkRequest() {
if (itemResponses.isEmpty()) {
return bulkRequest;
} else {
BulkRequest modifiedBulkRequest = new BulkRequest();
// Carry over the execution settings from the original bulk.
modifiedBulkRequest.setRefreshPolicy(bulkRequest.getRefreshPolicy());
modifiedBulkRequest.waitForActiveShards(bulkRequest.waitForActiveShards());
modifiedBulkRequest.timeout(bulkRequest.timeout());
int slot = 0;
List<DocWriteRequest<?>> requests = bulkRequest.requests();
originalSlots = new int[requests.size()]; // oversize, but that's ok
for (int i = 0; i < requests.size(); i++) {
DocWriteRequest request = requests.get(i);
// Only re-issue requests whose slot has not already failed.
if (failedSlots.get(i) == false) {
modifiedBulkRequest.add(request);
originalSlots[slot++] = i;
}
}
return modifiedBulkRequest;
}
}
代码示例来源:origin: brianway/webporter
@Override
public void beforeBulk(long l, BulkRequest bulkRequest) {
	// Parameterized logging: the argument is only formatted when INFO is
	// enabled, avoiding eager string concatenation on every call.
	logger.info("bulk request numberOfActions:{}", bulkRequest.numberOfActions());
}
代码示例来源:origin: spring-projects/spring-data-elasticsearch
BulkRequest request = new BulkRequest();
List<String> ids = new ArrayList<String>();
request.add(new DeleteRequest(indexName, typeName, id));
if (request.numberOfActions() > 0) {
BulkResponse response;
try {
代码示例来源:origin: apache/flink
nextBulkRequest = new BulkRequest();
代码示例来源:origin: spring-projects/spring-data-elasticsearch
parameters.withTimeout(bulkRequest.timeout());
parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy());
for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
DocWriteRequest<?> action = bulkRequest.requests().get(i);
for (DocWriteRequest<?> action : bulkRequest.requests()) {
DocWriteRequest.OpType opType = action.opType();
代码示例来源:origin: SonarSource/sonarqube
message.append("Bulk[");
Multiset<BulkRequestKey> groupedRequests = LinkedHashMultiset.create();
for (int i = 0; i < bulkRequest.requests().size(); i++) {
DocWriteRequest item = bulkRequest.requests().get(i);
String requestType;
if (item instanceof IndexRequest) {
代码示例来源:origin: org.elasticsearch/elasticsearch
// Supplies fresh BulkRequest instances pre-configured with the
// processor-wide index/type, pipeline and routing defaults.
private Supplier<BulkRequest> createBulkRequestWithGlobalDefaults() {
	return () -> {
		BulkRequest requestWithDefaults = new BulkRequest(globalIndex, globalType);
		requestWithDefaults.pipeline(globalPipeline);
		requestWithDefaults.routing(globalRouting);
		return requestWithDefaults;
	};
}
}
代码示例来源:origin: thinkaurelius/titan
log.error("Failed to execute ES query {}", brb.request().timeout(), e);
throw convert(e);
代码示例来源:origin: org.elasticsearch/elasticsearch
final ShardId shardId = entry.getKey();
final List<BulkItemRequest> requests = entry.getValue();
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, bulkRequest.getRefreshPolicy(),
requests.toArray(new BulkItemRequest[requests.size()]));
bulkShardRequest.waitForActiveShards(bulkRequest.waitForActiveShards());
bulkShardRequest.timeout(bulkRequest.timeout());
if (task != null) {
bulkShardRequest.setParentTask(nodeId, task.getId());
代码示例来源:origin: org.elasticsearch/elasticsearch
@Override
// Parses an incoming REST _bulk request: reads the per-request defaults from
// URL parameters, applies them to a new BulkRequest, adds the raw body, and
// returns a consumer that executes the bulk and streams the response back.
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
BulkRequest bulkRequest = Requests.bulkRequest();
// Defaults applied to every action in the body that does not override them.
String defaultIndex = request.param("index");
String defaultType = request.param("type");
String defaultRouting = request.param("routing");
FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
String fieldsParam = request.param("fields");
if (fieldsParam != null) {
// [fields] is deprecated in favor of [_source]; warn but still honor it.
DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
}
String[] defaultFields = fieldsParam != null ? Strings.commaDelimitedListToStringArray(fieldsParam) : null;
String defaultPipeline = request.param("pipeline");
String waitForActiveShards = request.param("wait_for_active_shards");
if (waitForActiveShards != null) {
bulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
}
bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
bulkRequest.setRefreshPolicy(request.param("refresh"));
// Parse the NDJSON body into individual index/delete/update actions.
bulkRequest.add(request.requiredContent(), defaultIndex, defaultType, defaultRouting, defaultFields,
defaultFetchSourceContext, defaultPipeline, null, allowExplicitIndex, request.getXContentType());
return channel -> client.bulk(bulkRequest, new RestStatusToXContentListener<>(channel));
}
代码示例来源:origin: tomoya92/pybbs
/**
 * Indexes the given documents into this index, flushing to Elasticsearch in
 * batches of 1000 index actions.
 *
 * @param type    the mapping type to index the documents under
 * @param sources map of document id to document source fields
 */
public void bulkDocument(String type, Map<String, Map<String, Object>> sources) {
	try {
		if (this.instance() == null) return;
		BulkRequest requests = new BulkRequest();
		int count = 0;
		// Iterate over entries directly instead of keySet()+get() to avoid a
		// second map lookup per document.
		for (Map.Entry<String, Map<String, Object>> entry : sources.entrySet()) {
			count++;
			IndexRequest request = new IndexRequest(name, type, entry.getKey());
			request.source(entry.getValue());
			requests.add(request);
			if (count % 1000 == 0) {
				client.bulk(requests, RequestOptions.DEFAULT);
				// Start a fresh BulkRequest: clearing requests() alone would
				// leave the request's internal size accounting stale.
				requests = new BulkRequest();
				count = 0;
			}
		}
		// Flush the final partial batch, if any.
		if (requests.numberOfActions() > 0) client.bulk(requests, RequestOptions.DEFAULT);
	} catch (IOException e) {
		// Pass the exception itself so the stack trace is preserved.
		log.error(e.getMessage(), e);
	}
}
内容来源于网络,如有侵权,请联系作者删除!