@@ -23,14 +23,22 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;

 import java.io.IOException;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;

+import static java.lang.Math.round;
 import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;

 /**
@@ -50,15 +58,42 @@ public class BulkByScrollTask extends CancellableTask {
     private final AtomicLong versionConflicts = new AtomicLong(0);
     private final AtomicLong retries = new AtomicLong(0);
     private final AtomicLong throttledNanos = new AtomicLong();
+    /**
+     * The number of requests per second to which to throttle the request that this task represents. The other variables are all AtomicXXX
+     * style variables but there isn't an AtomicFloat so we just use a volatile.
+     */
+    private volatile float requestsPerSecond;
+    /**
+     * Reference to the last delayed prepareBulkRequest call. Used during rethrottling and canceling to reschedule the request.
+     */
+    private final AtomicReference<DelayedPrepareBulkRequest> delayedPrepareBulkRequestReference = new AtomicReference<>();

-    public BulkByScrollTask(long id, String type, String action, String description) {
+    public BulkByScrollTask(long id, String type, String action, String description, float requestsPerSecond) {
         super(id, type, action, description);
+        setRequestsPerSecond(requestsPerSecond);
+    }
+
+    @Override
+    protected void onCancelled() {
+        // Drop the throttle to 0, immediately rescheduling all outstanding tasks so the task will wake up and cancel itself.
+        rethrottle(0);
     }

     @Override
     public Status getStatus() {
         return new Status(total.get(), updated.get(), created.get(), deleted.get(), batch.get(), versionConflicts.get(), noops.get(),
-                retries.get(), timeValueNanos(throttledNanos.get()), getReasonCancelled());
+                retries.get(), timeValueNanos(throttledNanos.get()), getRequestsPerSecond(), getReasonCancelled(), throttledUntil());
+    }
+
+    private TimeValue throttledUntil() {
+        DelayedPrepareBulkRequest delayed = delayedPrepareBulkRequestReference.get();
+        if (delayed == null) {
+            return timeValueNanos(0);
+        }
+        if (delayed.future == null) {
+            return timeValueNanos(0);
+        }
+        return timeValueNanos(delayed.future.getDelay(TimeUnit.NANOSECONDS));
     }

     /**
@@ -70,6 +105,7 @@ public class BulkByScrollTask extends CancellableTask {

     public static class Status implements Task.Status {
         public static final String NAME = "bulk-by-scroll";
+
         private final long total;
         private final long updated;
         private final long created;
@@ -79,10 +115,12 @@ public class BulkByScrollTask extends CancellableTask {
         private final long noops;
         private final long retries;
         private final TimeValue throttled;
+        private final float requestsPerSecond;
         private final String reasonCancelled;
+        private final TimeValue throttledUntil;

         public Status(long total, long updated, long created, long deleted, int batches, long versionConflicts, long noops, long retries,
-                TimeValue throttled, @Nullable String reasonCancelled) {
+                TimeValue throttled, float requestsPerSecond, @Nullable String reasonCancelled, TimeValue throttledUntil) {
             this.total = checkPositive(total, "total");
             this.updated = checkPositive(updated, "updated");
             this.created = checkPositive(created, "created");
@@ -92,7 +130,9 @@ public class BulkByScrollTask extends CancellableTask {
             this.noops = checkPositive(noops, "noops");
             this.retries = checkPositive(retries, "retries");
             this.throttled = throttled;
+            this.requestsPerSecond = requestsPerSecond;
             this.reasonCancelled = reasonCancelled;
+            this.throttledUntil = throttledUntil;
         }

         public Status(StreamInput in) throws IOException {
@@ -105,7 +145,9 @@ public class BulkByScrollTask extends CancellableTask {
             noops = in.readVLong();
             retries = in.readVLong();
             throttled = TimeValue.readTimeValue(in);
+            requestsPerSecond = in.readFloat();
             reasonCancelled = in.readOptionalString();
+            throttledUntil = TimeValue.readTimeValue(in);
         }

         @Override
@@ -119,7 +161,9 @@ public class BulkByScrollTask extends CancellableTask {
             out.writeVLong(noops);
             out.writeVLong(retries);
             throttled.writeTo(out);
+            out.writeFloat(requestsPerSecond);
             out.writeOptionalString(reasonCancelled);
+            throttledUntil.writeTo(out);
         }

         @Override
@@ -144,9 +188,11 @@ public class BulkByScrollTask extends CancellableTask {
             builder.field("noops", noops);
             builder.field("retries", retries);
             builder.timeValueField("throttled_millis", "throttled", throttled);
+            builder.field("requests_per_second", requestsPerSecond == 0 ? "unlimited" : requestsPerSecond);
             if (reasonCancelled != null) {
                 builder.field("canceled", reasonCancelled);
             }
+            builder.timeValueField("throttled_until_millis", "throttled_until", throttledUntil);
             return builder;
         }

@@ -173,6 +219,7 @@ public class BulkByScrollTask extends CancellableTask {
             if (reasonCancelled != null) {
                 builder.append(",canceled=").append(reasonCancelled);
             }
+            builder.append(",throttledUntil=").append(throttledUntil);
         }

         @Override
@@ -238,12 +285,19 @@ public class BulkByScrollTask extends CancellableTask {
         }

         /**
-         * The total time this request has throttled itself.
+         * The total time this request has throttled itself, not including the current throttle time if it is currently sleeping.
          */
         public TimeValue getThrottled() {
             return throttled;
         }

+        /**
+         * The number of requests per second to which to throttle the request. 0 means unlimited.
+         */
+        public float getRequestsPerSecond() {
+            return requestsPerSecond;
+        }
+
         /**
          * The reason that the request was canceled or null if it hasn't been.
          */
@@ -251,6 +305,13 @@ public class BulkByScrollTask extends CancellableTask {
             return reasonCancelled;
         }

+        /**
+         * Remaining delay of any current throttle sleep or 0 if not sleeping.
+         */
+        public TimeValue getThrottledUntil() {
+            return throttledUntil;
+        }
+
         private int checkPositive(int value, String name) {
             if (value < 0) {
                 throw new IllegalArgumentException(name + " must be greater than 0 but was [" + value + "]");
@@ -298,10 +359,114 @@ public class BulkByScrollTask extends CancellableTask {
         retries.incrementAndGet();
     }

-    public void countThrottle(TimeValue delay) {
-        long nanos = delay.nanos();
-        if (nanos > 0) {
-            throttledNanos.addAndGet(nanos);
+    float getRequestsPerSecond() {
+        return requestsPerSecond;
+    }
+
+    /**
+     * Schedule prepareBulkRequestRunnable to run after some delay. This is where throttling plugs into reindexing so the request can be
+     * rescheduled over and over again.
+     */
+    void delayPrepareBulkRequest(ThreadPool threadPool, TimeValue delay, AbstractRunnable prepareBulkRequestRunnable) {
+        // Synchronize so we are less likely to schedule the same request twice.
+        synchronized (delayedPrepareBulkRequestReference) {
+            AbstractRunnable oneTime = new AbstractRunnable() {
+                private final AtomicBoolean hasRun = new AtomicBoolean(false);
+
+                @Override
+                protected void doRun() throws Exception {
+                    // Paranoia to prevent furiously rethrottling from running the command multiple times. Without this we totally can.
+                    if (hasRun.compareAndSet(false, true)) {
+                        prepareBulkRequestRunnable.run();
+                    }
+                }
+
+                @Override
+                public void onFailure(Throwable t) {
+                    prepareBulkRequestRunnable.onFailure(t);
+                }
+            };
+            delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(), delay, oneTime));
+        }
+    }
+
+    private void setRequestsPerSecond(float requestsPerSecond) {
+        if (requestsPerSecond == -1) {
+            requestsPerSecond = 0;
+        }
+        this.requestsPerSecond = requestsPerSecond;
+    }
+
+    void rethrottle(float newRequestsPerSecond) {
+        synchronized (delayedPrepareBulkRequestReference) {
+            setRequestsPerSecond(newRequestsPerSecond);
+
+            DelayedPrepareBulkRequest delayedPrepareBulkRequest = this.delayedPrepareBulkRequestReference.get();
+            if (delayedPrepareBulkRequest == null) {
+                // No request has been queued yet so nothing to reschedule.
+                return;
+            }
+
+            this.delayedPrepareBulkRequestReference.set(delayedPrepareBulkRequest.rethrottle(newRequestsPerSecond));
+        }
+    }
+
+    class DelayedPrepareBulkRequest {
+        private final ThreadPool threadPool;
+        private final AbstractRunnable command;
+        private final float requestsPerSecond;
+        private final ScheduledFuture<?> future;
+
+        DelayedPrepareBulkRequest(ThreadPool threadPool, float requestsPerSecond, TimeValue delay, AbstractRunnable command) {
+            this.threadPool = threadPool;
+            this.requestsPerSecond = requestsPerSecond;
+            this.command = command;
+            this.future = threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() {
+                @Override
+                protected void doRun() throws Exception {
+                    throttledNanos.addAndGet(delay.nanos());
+                    command.run();
+                }
+
+                @Override
+                public void onFailure(Throwable t) {
+                    command.onFailure(t);
+                }
+            });
+        }
+
+        DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) {
+            if (newRequestsPerSecond != 0 && newRequestsPerSecond < requestsPerSecond) {
+                /*
+                 * The user is attempting to slow the request down. We'll let the change in throttle take effect the next time we delay
+                 * prepareBulkRequest. We can't just reschedule the request further out in the future because the bulk context might time out.
+                 */
+                return this;
+            }
+
+            long remainingDelay = future.getDelay(TimeUnit.NANOSECONDS);
+            // Actually reschedule the task
+            if (false == FutureUtils.cancel(future)) {
+                // Couldn't cancel, probably because the task has finished or been scheduled. Either way we have nothing to do here.
+                return this;
+            }
+
+            /*
+             * Strangely enough getting here doesn't mean that you actually cancelled the request, just that you probably did. If you stress
+             * test it you'll find that requests sneak through. So each request is given a runOnce boolean to prevent that.
+             */
+            TimeValue newDelay = newDelay(remainingDelay, newRequestsPerSecond);
+            return new DelayedPrepareBulkRequest(threadPool, requestsPerSecond, newDelay, command);
+        }
+
+        /**
+         * Scale back remaining delay to fit the new delay.
+         */
+        TimeValue newDelay(long remainingDelay, float newRequestsPerSecond) {
+            if (remainingDelay < 0 || newRequestsPerSecond == 0) {
+                return timeValueNanos(0);
+            }
+            return timeValueNanos(round(remainingDelay * requestsPerSecond / newRequestsPerSecond));
         }
     }
 }
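
For reference, the arithmetic in newDelay keeps the remaining wait proportional to the throttle change: the new delay is remainingDelay * requestsPerSecond / newRequestsPerSecond, and a new throttle of 0 ("unlimited") or an already-elapsed delay yields no wait at all, which is also how onCancelled() wakes the task immediately via rethrottle(0). A minimal standalone sketch of that formula (plain JDK Java; the class and method names below are illustrative and not part of the patch, and the math is done in double so the demo values come out exact):

    import java.util.concurrent.TimeUnit;

    public class RethrottleMathSketch {
        // Mirrors the scaling in DelayedPrepareBulkRequest#newDelay: keep the same fraction of the
        // remaining wait when the throttle changes; 0 ("unlimited") or an elapsed delay means no wait.
        static long scaleDelayNanos(long remainingDelayNanos, float oldRequestsPerSecond, float newRequestsPerSecond) {
            if (remainingDelayNanos < 0 || newRequestsPerSecond == 0) {
                return 0;
            }
            return Math.round((double) remainingDelayNanos * oldRequestsPerSecond / newRequestsPerSecond);
        }

        public static void main(String[] args) {
            long tenSeconds = TimeUnit.SECONDS.toNanos(10);
            // Doubling the allowed rate halves the remaining wait: 10s becomes 5s.
            System.out.println(TimeUnit.NANOSECONDS.toSeconds(scaleDelayNanos(tenSeconds, 1.0f, 2.0f)));
            // Rethrottling to 0 clears the wait entirely, so the delayed prepareBulkRequest runs right away.
            System.out.println(TimeUnit.NANOSECONDS.toSeconds(scaleDelayNanos(tenSeconds, 1.0f, 0.0f)));
        }
    }

Slowing a request down, by contrast, is deliberately not applied to an in-flight delay (see the comment in DelayedPrepareBulkRequest#rethrottle): pushing the already-scheduled request further into the future could let the bulk context time out, so the lower rate only takes effect on the next delayPrepareBulkRequest call.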