@@ -8,9 +8,9 @@ package org.elasticsearch.xpack.ml.utils.persistence;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.bulk.BulkAction;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
@@ -18,11 +18,11 @@ import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.action.support.RetryableAction;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.client.OriginSettingClient;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.CheckedConsumer;
-import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
@@ -30,6 +30,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.core.ClientHelper;

 import java.io.IOException;
@@ -38,14 +39,14 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
+import java.util.function.BiConsumer;
 import java.util.function.Consumer;
-import java.util.function.Function;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;

 import static org.elasticsearch.ExceptionsHelper.status;
+import static org.elasticsearch.xpack.ml.MachineLearning.UTILITY_THREAD_POOL_NAME;

 public class ResultsPersisterService {
     /**
@@ -79,20 +80,16 @@ public class ResultsPersisterService {
     // Having an exponent higher than this causes integer overflow
     private static final int MAX_RETRY_EXPONENT = 24;

-    private final CheckedConsumer<Integer, InterruptedException> sleeper;
+    private final ThreadPool threadPool;
     private final OriginSettingClient client;
     private volatile int maxFailureRetries;

-    public ResultsPersisterService(OriginSettingClient client, ClusterService clusterService, Settings settings) {
-        this(Thread::sleep, client, clusterService, settings);
-    }
-
     // Visible for testing
-    ResultsPersisterService(CheckedConsumer<Integer, InterruptedException> sleeper,
-                            OriginSettingClient client,
-                            ClusterService clusterService,
-                            Settings settings) {
-        this.sleeper = sleeper;
+    public ResultsPersisterService(ThreadPool threadPool,
+                                   OriginSettingClient client,
+                                   ClusterService clusterService,
+                                   Settings settings) {
+        this.threadPool = threadPool;
         this.client = client;
         this.maxFailureRetries = PERSIST_RESULTS_MAX_RETRIES.get(settings);
         clusterService.getClusterSettings()
@@ -123,8 +120,11 @@ public class ResultsPersisterService {
                                            String jobId,
                                            Supplier<Boolean> shouldRetry,
                                            Consumer<String> retryMsgHandler) {
-        return bulkIndexWithRetry(bulkRequest, jobId, shouldRetry, retryMsgHandler,
-            providedBulkRequest -> client.bulk(providedBulkRequest).actionGet());
+        return bulkIndexWithRetry(bulkRequest,
+            jobId,
+            shouldRetry,
+            retryMsgHandler,
+            client::bulk);
     }

     public BulkResponse bulkIndexWithHeadersWithRetry(Map<String, String> headers,
@@ -132,73 +132,58 @@ public class ResultsPersisterService {
                                                       String jobId,
                                                       Supplier<Boolean> shouldRetry,
                                                       Consumer<String> retryMsgHandler) {
-        return bulkIndexWithRetry(bulkRequest, jobId, shouldRetry, retryMsgHandler,
-            providedBulkRequest -> ClientHelper.executeWithHeaders(
-                headers, ClientHelper.ML_ORIGIN, client, () -> client.execute(BulkAction.INSTANCE, bulkRequest).actionGet()));
+        return bulkIndexWithRetry(bulkRequest,
+            jobId,
+            shouldRetry,
+            retryMsgHandler,
+            (providedBulkRequest, listener) -> ClientHelper.executeWithHeadersAsync(
+                headers,
+                ClientHelper.ML_ORIGIN,
+                client,
+                BulkAction.INSTANCE,
+                providedBulkRequest,
+                listener));
     }

     private BulkResponse bulkIndexWithRetry(BulkRequest bulkRequest,
                                             String jobId,
                                             Supplier<Boolean> shouldRetry,
                                             Consumer<String> retryMsgHandler,
-                                            Function<BulkRequest, BulkResponse> actionExecutor) {
-        RetryContext retryContext = new RetryContext(jobId, shouldRetry, retryMsgHandler);
-        while (true) {
-            BulkResponse bulkResponse = actionExecutor.apply(bulkRequest);
-            if (bulkResponse.hasFailures() == false) {
-                return bulkResponse;
-            }
-            for (BulkItemResponse itemResponse : bulkResponse.getItems()) {
-                if (itemResponse.isFailed() && isIrrecoverable(itemResponse.getFailure().getCause())) {
-                    Throwable unwrappedParticular = ExceptionsHelper.unwrapCause(itemResponse.getFailure().getCause());
-                    LOGGER.warn(new ParameterizedMessage(
-                        "[{}] experienced failure that cannot be automatically retried. Bulk failure message [{}]",
-                        jobId,
-                        bulkResponse.buildFailureMessage()),
-                        unwrappedParticular);
-                    throw new ElasticsearchStatusException(
-                        "{} experienced failure that cannot be automatically retried. See logs for bulk failures",
-                        status(unwrappedParticular),
-                        unwrappedParticular,
-                        jobId);
-                }
-            }
-            retryContext.nextIteration("index", bulkResponse.buildFailureMessage());
-            // We should only retry the docs that failed.
-            bulkRequest = buildNewRequestFromFailures(bulkRequest, bulkResponse);
-        }
+                                            BiConsumer<BulkRequest, ActionListener<BulkResponse>> actionExecutor) {
+        PlainActionFuture<BulkResponse> getResponse = PlainActionFuture.newFuture();
+        BulkRetryableAction bulkRetryableAction = new BulkRetryableAction(
+            jobId,
+            new BulkRequestRewriter(bulkRequest),
+            shouldRetry,
+            retryMsgHandler,
+            actionExecutor,
+            getResponse
+        );
+        bulkRetryableAction.run();
+        return getResponse.actionGet();
     }

     public SearchResponse searchWithRetry(SearchRequest searchRequest,
                                           String jobId,
                                           Supplier<Boolean> shouldRetry,
                                           Consumer<String> retryMsgHandler) {
-        RetryContext retryContext = new RetryContext(jobId, shouldRetry, retryMsgHandler);
-        while (true) {
-            String failureMessage;
-            try {
-                SearchResponse searchResponse = client.search(searchRequest).actionGet();
-                if (RestStatus.OK.equals(searchResponse.status())) {
-                    return searchResponse;
-                }
-                failureMessage = searchResponse.status().toString();
-            } catch (ElasticsearchException e) {
-                LOGGER.warn("[" + jobId + "] Exception while executing search action", e);
-                failureMessage = e.getDetailedMessage();
-                if (isIrrecoverable(e)) {
-                    LOGGER.warn(new ParameterizedMessage("[{}] experienced failure that cannot be automatically retried", jobId), e);
-                    throw new ElasticsearchStatusException(
-                        "{} experienced failure that cannot be automatically retried",
-                        status(e),
-                        e,
-                        jobId);
-                }
-            }
+        PlainActionFuture<SearchResponse> getResponse = PlainActionFuture.newFuture();
+        SearchRetryableAction mlRetryableAction = new SearchRetryableAction(
+            jobId,
+            searchRequest,
+            shouldRetry,
+            retryMsgHandler,
+            getResponse);
+        mlRetryableAction.run();
+        return getResponse.actionGet();
+    }

-            retryContext.nextIteration("search", failureMessage);
+    static class RecoverableException extends Exception { }
+    static class IrrecoverableException extends ElasticsearchStatusException {
+        IrrecoverableException(String msg, RestStatus status, Throwable cause, Object... args) {
+            super(msg, status, cause, args);
         }
     }
-
     /**
      * @param ex The exception to check
      * @return true when the failure will persist no matter how many times we retry.
@@ -208,48 +193,194 @@ public class ResultsPersisterService {
         return IRRECOVERABLE_REST_STATUSES.contains(status(t));
     }

-    /**
-     * {@link RetryContext} object handles logic that is executed between consecutive retries of an action.
-     *
-     * Note that it does not execute the action itself.
-     */
-    private class RetryContext {
+    @SuppressWarnings("NonAtomicOperationOnVolatileField")
+    private static class BulkRequestRewriter {
+        private volatile BulkRequest bulkRequest;
+        BulkRequestRewriter(BulkRequest initialRequest) {
+            this.bulkRequest = initialRequest;
+        }
+
+        void rewriteRequest(BulkResponse bulkResponse) {
+            if (bulkResponse.hasFailures() == false) {
+                return;
+            }
+            bulkRequest = buildNewRequestFromFailures(bulkRequest, bulkResponse);
+        }
+
+        BulkRequest getBulkRequest() {
+            return bulkRequest;
+        }
+
+    }
+
+    private class BulkRetryableAction extends MlRetryableAction<BulkRequest, BulkResponse> {
+        private final BulkRequestRewriter bulkRequestRewriter;
+        BulkRetryableAction(String jobId,
+                            BulkRequestRewriter bulkRequestRewriter,
+                            Supplier<Boolean> shouldRetry,
+                            Consumer<String> msgHandler,
+                            BiConsumer<BulkRequest, ActionListener<BulkResponse>> actionExecutor,
+                            ActionListener<BulkResponse> listener) {
+            super(jobId,
+                shouldRetry,
+                msgHandler,
+                (request, retryableListener) -> actionExecutor.accept(request, ActionListener.wrap(
+                    bulkResponse -> {
+                        if (bulkResponse.hasFailures() == false) {
+                            retryableListener.onResponse(bulkResponse);
+                            return;
+                        }
+                        for (BulkItemResponse itemResponse : bulkResponse.getItems()) {
+                            if (itemResponse.isFailed() && isIrrecoverable(itemResponse.getFailure().getCause())) {
+                                Throwable unwrappedParticular = ExceptionsHelper.unwrapCause(itemResponse.getFailure().getCause());
+                                LOGGER.warn(new ParameterizedMessage(
+                                    "[{}] experienced failure that cannot be automatically retried. Bulk failure message [{}]",
+                                    jobId,
+                                    bulkResponse.buildFailureMessage()),
+                                    unwrappedParticular);
+                                retryableListener.onFailure(new IrrecoverableException(
+                                    "{} experienced failure that cannot be automatically retried. See logs for bulk failures",
+                                    status(unwrappedParticular),
+                                    unwrappedParticular,
+                                    jobId));
+                                return;
+                            }
+                        }
+                        bulkRequestRewriter.rewriteRequest(bulkResponse);
+                        // Let the listener attempt again with the new bulk request
+                        retryableListener.onFailure(new RecoverableException());
+                    },
+                    retryableListener::onFailure
+                )),
+                listener);
+            this.bulkRequestRewriter = bulkRequestRewriter;
+        }
+
+        @Override
+        public BulkRequest buildRequest() {
+            return bulkRequestRewriter.getBulkRequest();
+        }
+
+        @Override
+        public String getName() {
+            return "index";
+        }
+
+    }
+
+    private class SearchRetryableAction extends MlRetryableAction<SearchRequest, SearchResponse> {
+
+        private final SearchRequest searchRequest;
+        SearchRetryableAction(String jobId,
+                              SearchRequest searchRequest,
+                              Supplier<Boolean> shouldRetry,
+                              Consumer<String> msgHandler,
+                              ActionListener<SearchResponse> listener) {
+            super(jobId,
+                shouldRetry,
+                msgHandler,
+                (request, retryableListener) -> client.search(request, ActionListener.wrap(
+                    searchResponse -> {
+                        if (RestStatus.OK.equals(searchResponse.status())) {
+                            retryableListener.onResponse(searchResponse);
+                            return;
+                        }
+                        retryableListener.onFailure(
+                            new ElasticsearchStatusException(
+                                "search failed with status {}",
+                                searchResponse.status(),
+                                searchResponse.status())
+                        );
+                    },
+                    retryableListener::onFailure
+                )),
+                listener);
+            this.searchRequest = searchRequest;
+        }
+
+        @Override
+        public SearchRequest buildRequest() {
+            return searchRequest;
+        }
+
+        @Override
+        public String getName() {
+            return "search";
+        }
+    }
+
+    // This encapsulates a retryable action that implements our custom backoff retry logic
+    private abstract class MlRetryableAction<Request, Response> extends RetryableAction<Response> {

         final String jobId;
         final Supplier<Boolean> shouldRetry;
         final Consumer<String> msgHandler;
-        final Random random = Randomness.get();
-
-        int currentAttempt = 0;
-        int currentMin = MIN_RETRY_SLEEP_MILLIS;
-        int currentMax = MIN_RETRY_SLEEP_MILLIS;
+        final BiConsumer<Request, ActionListener<Response>> action;
+        volatile int currentAttempt = 0;
+        volatile long currentMin = MIN_RETRY_SLEEP_MILLIS;
+        volatile long currentMax = MIN_RETRY_SLEEP_MILLIS;

-        RetryContext(String jobId, Supplier<Boolean> shouldRetry, Consumer<String> msgHandler) {
+        MlRetryableAction(String jobId,
+                          Supplier<Boolean> shouldRetry,
+                          Consumer<String> msgHandler,
+                          BiConsumer<Request, ActionListener<Response>> action,
+                          ActionListener<Response> listener) {
+            super(
+                LOGGER,
+                threadPool,
+                TimeValue.timeValueMillis(MIN_RETRY_SLEEP_MILLIS),
+                TimeValue.MAX_VALUE,
+                listener,
+                UTILITY_THREAD_POOL_NAME);
             this.jobId = jobId;
             this.shouldRetry = shouldRetry;
             this.msgHandler = msgHandler;
+            this.action = action;
         }

-        void nextIteration(String actionName, String failureMessage) {
+        public abstract Request buildRequest();
+
+        public abstract String getName();
+
+        @Override
+        public void tryAction(ActionListener<Response> listener) {
             currentAttempt++;
+            action.accept(buildRequest(), listener);
+        }
+
+        @Override
+        public boolean shouldRetry(Exception e) {
+            if (isIrrecoverable(e)) {
+                LOGGER.warn(new ParameterizedMessage("[{}] experienced failure that cannot be automatically retried", jobId), e);
+                return false;
+            }

             // If the outside conditions have changed and retries are no longer needed, do not retry.
             if (shouldRetry.get() == false) {
-                String msg = new ParameterizedMessage(
-                    "[{}] should not retry {} after [{}] attempts. {}", jobId, actionName, currentAttempt, failureMessage)
-                    .getFormattedMessage();
-                LOGGER.info(msg);
-                throw new ElasticsearchException(msg);
+                LOGGER.info(() -> new ParameterizedMessage(
+                    "[{}] should not retry {} after [{}] attempts",
+                    jobId,
+                    getName(),
+                    currentAttempt
+                ), e);
+                return false;
             }

             // If the configured maximum number of retries has been reached, do not retry.
             if (currentAttempt > maxFailureRetries) {
-                String msg = new ParameterizedMessage(
-                    "[{}] failed to {} after [{}] attempts. {}", jobId, actionName, currentAttempt, failureMessage).getFormattedMessage();
-                LOGGER.warn(msg);
-                throw new ElasticsearchException(msg);
+                LOGGER.warn(() -> new ParameterizedMessage(
+                    "[{}] failed to {} after [{}] attempts.",
+                    jobId,
+                    getName(),
+                    currentAttempt
+                ), e);
+                return false;
             }
+            return true;
+        }

+        @Override
+        protected long calculateDelay(long previousDelay) {
             // Since we exponentially increase, we don't want force randomness to have an excessively long sleep
             if (currentMax < MAX_RETRY_SLEEP_MILLIS) {
                 currentMin = currentMax;
@@ -259,33 +390,24 @@ public class ResultsPersisterService {
             currentMax = Math.min(uncappedBackoff, MAX_RETRY_SLEEP_MILLIS);
             // Its good to have a random window along the exponentially increasing curve
             // so that not all bulk requests rest for the same amount of time
-            int randBound = 1 + (currentMax - currentMin);
-            int randSleep = currentMin + random.nextInt(randBound);
-            {
-                String msg = new ParameterizedMessage(
-                    "failed to {} after [{}] attempts. Will attempt again in [{}].",
-                    actionName,
-                    currentAttempt,
-                    TimeValue.timeValueMillis(randSleep).getStringRep())
-                    .getFormattedMessage();
-                LOGGER.warn(() -> new ParameterizedMessage("[{}] {}", jobId, msg));
-                msgHandler.accept(msg);
-            }
-            try {
-                sleeper.accept(randSleep);
-            } catch (InterruptedException interruptedException) {
-                LOGGER.warn(
-                    new ParameterizedMessage("[{}] failed to {} after [{}] attempts due to interruption",
-                        jobId,
-                        actionName,
-                        currentAttempt),
-                    interruptedException);
-                Thread.currentThread().interrupt();
-            }
+            int randBound = (int)(1 + (currentMax - currentMin));
+            String msg = new ParameterizedMessage(
+                "failed to {} after [{}] attempts. Will attempt again.",
+                getName(),
+                currentAttempt)
+                .getFormattedMessage();
+            LOGGER.warn(() -> new ParameterizedMessage("[{}] {}", jobId, msg));
+            msgHandler.accept(msg);
+            return randBound;
+        }
+
+        @Override
+        protected long minimumDelayMillis() {
+            return currentMin;
         }
     }

-    private BulkRequest buildNewRequestFromFailures(BulkRequest bulkRequest, BulkResponse bulkResponse) {
+    private static BulkRequest buildNewRequestFromFailures(BulkRequest bulkRequest, BulkResponse bulkResponse) {
         // If we failed, lets set the bulkRequest to be a collection of the failed requests
         BulkRequest bulkRequestOfFailures = new BulkRequest();
         Set<String> failedDocIds = Arrays.stream(bulkResponse.getItems())