@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.service;
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
 import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -41,22 +42,22 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.BaseFuture;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.node.Node;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.MockLogAppender;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;

 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -84,11 +85,16 @@ import static org.hamcrest.Matchers.hasKey;
 public class MasterServiceTests extends ESTestCase {

     private static ThreadPool threadPool;
-    private TimedMasterService masterService;
+    private static long relativeTimeInMillis;

     @BeforeClass
     public static void createThreadPool() {
-        threadPool = new TestThreadPool(MasterServiceTests.class.getName());
+        threadPool = new TestThreadPool(MasterServiceTests.class.getName()) {
+            @Override
+            public long relativeTimeInMillis() {
+                return relativeTimeInMillis;
+            }
+        };
     }

     @AfterClass
@@ -100,46 +106,41 @@ public class MasterServiceTests extends ESTestCase {
     }

     @Before
-    public void setUp() throws Exception {
-        super.setUp();
-        masterService = createTimedMasterService(true);
+    public void randomizeCurrentTime() {
+        relativeTimeInMillis = randomLongBetween(0L, 1L << 62);
     }

-    @After
-    public void tearDown() throws Exception {
-        masterService.close();
-        super.tearDown();
-    }
-
-    private TimedMasterService createTimedMasterService(boolean makeMaster) throws InterruptedException {
-        DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(),
+    private MasterService createMasterService(boolean makeMaster) {
+        final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(),
             emptySet(), Version.CURRENT);
-        TimedMasterService timedMasterService = new TimedMasterService(Settings.builder().put("cluster.name",
-            MasterServiceTests.class.getSimpleName()).build(), threadPool);
-        ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName()))
+        final MasterService masterService = new MasterService(Settings.builder()
+            .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName())
+            .put(Node.NODE_NAME_SETTING.getKey(), "test_node")
+            .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool);
+        final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName()))
             .nodes(DiscoveryNodes.builder()
                 .add(localNode)
                 .localNodeId(localNode.getId())
                 .masterNodeId(makeMaster ? localNode.getId() : null))
             .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build();
-        AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
-        timedMasterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
+        final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
+        masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
             clusterStateRef.set(event.state());
             publishListener.onResponse(null);
         });
-        timedMasterService.setClusterStateSupplier(clusterStateRef::get);
-        timedMasterService.start();
-        return timedMasterService;
+        masterService.setClusterStateSupplier(clusterStateRef::get);
+        masterService.start();
+        return masterService;
     }

     public void testMasterAwareExecution() throws Exception {
-        TimedMasterService nonMaster = createTimedMasterService(false);
+        final MasterService nonMaster = createMasterService(false);

         final boolean[] taskFailed = {false};
         final CountDownLatch latch1 = new CountDownLatch(1);
         nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
             @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
+            public ClusterState execute(ClusterState currentState) {
                 latch1.countDown();
                 return currentState;
             }
@@ -154,11 +155,10 @@ public class MasterServiceTests extends ESTestCase {
         latch1.await();
         assertTrue("cluster state update task was executed on a non-master", taskFailed[0]);

-        taskFailed[0] = true;
         final CountDownLatch latch2 = new CountDownLatch(1);
         nonMaster.submitStateUpdateTask("test", new LocalClusterUpdateTask() {
             @Override
-            public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) throws Exception {
+            public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) {
                 taskFailed[0] = false;
                 latch2.countDown();
                 return unchanged();
@@ -177,13 +177,13 @@ public class MasterServiceTests extends ESTestCase {
     }

     public void testThreadContext() throws InterruptedException {
-        final TimedMasterService master = createTimedMasterService(true);
+        final MasterService master = createMasterService(true);
         final CountDownLatch latch = new CountDownLatch(1);

         try (ThreadContext.StoredContext ignored = threadPool.getThreadContext().stashContext()) {
             final Map<String, String> expectedHeaders = Collections.singletonMap("test", "test");
             final Map<String, List<String>> expectedResponseHeaders = Collections.singletonMap("testResponse",
-                Arrays.asList("testResponse"));
+                Collections.singletonList("testResponse"));
             threadPool.getThreadContext().putHeader(expectedHeaders);

             final TimeValue ackTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000));
@@ -273,37 +273,39 @@ public class MasterServiceTests extends ESTestCase {
         final CountDownLatch latch = new CountDownLatch(1);
         AtomicBoolean published = new AtomicBoolean();

-        masterService.submitStateUpdateTask(
-            "testClusterStateTaskListenerThrowingExceptionIsOkay",
-            new Object(),
-            ClusterStateTaskConfig.build(Priority.NORMAL),
-            new ClusterStateTaskExecutor<Object>() {
-                @Override
-                public ClusterTasksResult<Object> execute(ClusterState currentState, List<Object> tasks) throws Exception {
-                    ClusterState newClusterState = ClusterState.builder(currentState).build();
-                    return ClusterTasksResult.builder().successes(tasks).build(newClusterState);
-                }
+        try (MasterService masterService = createMasterService(true)) {
+            masterService.submitStateUpdateTask(
+                "testClusterStateTaskListenerThrowingExceptionIsOkay",
+                new Object(),
+                ClusterStateTaskConfig.build(Priority.NORMAL),
+                new ClusterStateTaskExecutor<Object>() {
+                    @Override
+                    public ClusterTasksResult<Object> execute(ClusterState currentState, List<Object> tasks) {
+                        ClusterState newClusterState = ClusterState.builder(currentState).build();
+                        return ClusterTasksResult.builder().successes(tasks).build(newClusterState);
+                    }

-                @Override
-                public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
-                    published.set(true);
-                    latch.countDown();
-                }
-            },
-            new ClusterStateTaskListener() {
-                @Override
-                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                    throw new IllegalStateException(source);
-                }
+                    @Override
+                    public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
+                        published.set(true);
+                        latch.countDown();
+                    }
+                },
+                new ClusterStateTaskListener() {
+                    @Override
+                    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                        throw new IllegalStateException(source);
+                    }

-                @Override
-                public void onFailure(String source, Exception e) {
+                    @Override
+                    public void onFailure(String source, Exception e) {
+                    }
                 }
-            }
-        );
+            );

-        latch.await();
-        assertTrue(published.get());
+            latch.await();
+            assertTrue(published.get());
+        }
     }

     @TestLogging(value = "org.elasticsearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level")
@@ -312,37 +314,81 @@ public class MasterServiceTests extends ESTestCase {
         mockAppender.start();
         mockAppender.addExpectation(
             new MockLogAppender.SeenEventExpectation(
-                "test1",
+                "test1 start",
                 MasterService.class.getCanonicalName(),
                 Level.DEBUG,
-                "*processing [test1]: took [1s] no change in cluster state"));
+                "executing cluster state update for [test1]"));
         mockAppender.addExpectation(
             new MockLogAppender.SeenEventExpectation(
-                "test2",
+                "test1 computation",
+                MasterService.class.getCanonicalName(),
+                Level.DEBUG,
+                "took [1s] to compute cluster state update for [test1]"));
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "test1 notification",
+                MasterService.class.getCanonicalName(),
+                Level.DEBUG,
+                "took [0s] to notify listeners on unchanged cluster state for [test1]"));
+
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "test2 start",
+                MasterService.class.getCanonicalName(),
+                Level.DEBUG,
+                "executing cluster state update for [test2]"));
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "test2 failure",
                 MasterService.class.getCanonicalName(),
                 Level.TRACE,
-                "*failed to execute cluster state update in [2s]*"));
+                "failed to execute cluster state update (on version: [*], uuid: [*]) for [test2]*"));
         mockAppender.addExpectation(
             new MockLogAppender.SeenEventExpectation(
-                "test3",
+                "test2 computation",
                 MasterService.class.getCanonicalName(),
                 Level.DEBUG,
-                "*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)"));
+                "took [2s] to compute cluster state update for [test2]"));
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "test2 notification",
+                MasterService.class.getCanonicalName(),
+                Level.DEBUG,
+                "took [0s] to notify listeners on unchanged cluster state for [test2]"));
+
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "test3 start",
+                MasterService.class.getCanonicalName(),
+                Level.DEBUG,
+                "executing cluster state update for [test3]"));
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "test3 computation",
+                MasterService.class.getCanonicalName(),
+                Level.DEBUG,
+                "took [3s] to compute cluster state update for [test3]"));
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "test3 notification",
+                MasterService.class.getCanonicalName(),
+                Level.DEBUG,
+                "took [4s] to notify listeners on successful publication of cluster state (version: *, uuid: *) for [test3]"));
+
         mockAppender.addExpectation(
             new MockLogAppender.SeenEventExpectation(
                 "test4",
                 MasterService.class.getCanonicalName(),
                 Level.DEBUG,
-                "*processing [test4]: took [0s] no change in cluster state"));
+                "executing cluster state update for [test4]"));

         Logger clusterLogger = LogManager.getLogger(MasterService.class);
         Loggers.addAppender(clusterLogger, mockAppender);
-        try {
-            masterService.currentTimeOverride = System.nanoTime();
+        try (MasterService masterService = createMasterService(true)) {
             masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
                 @Override
-                public ClusterState execute(ClusterState currentState) throws Exception {
-                    masterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos();
+                public ClusterState execute(ClusterState currentState) {
+                    relativeTimeInMillis += TimeValue.timeValueSeconds(1).millis();
                     return currentState;
                 }

@@ -357,7 +403,7 @@ public class MasterServiceTests extends ESTestCase {
             masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
                 @Override
                 public ClusterState execute(ClusterState currentState) {
-                    masterService.currentTimeOverride += TimeValue.timeValueSeconds(2).nanos();
+                    relativeTimeInMillis += TimeValue.timeValueSeconds(2).millis();
                     throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
                 }

@@ -372,20 +418,20 @@ public class MasterServiceTests extends ESTestCase {
             masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
                 @Override
                 public ClusterState execute(ClusterState currentState) {
-                    masterService.currentTimeOverride += TimeValue.timeValueSeconds(3).nanos();
+                    relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis();
                     return ClusterState.builder(currentState).incrementVersion().build();
                 }

                 @Override
-                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { }
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    relativeTimeInMillis += TimeValue.timeValueSeconds(4).millis();
+                }

                 @Override
                 public void onFailure(String source, Exception e) {
                     fail();
                 }
             });
-            // Additional update task to make sure all previous logging made it to the loggerName
-            // We don't check logging for this on since there is no guarantee that it will occur before our check
             masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
                 @Override
                 public ClusterState execute(ClusterState currentState) {
@@ -533,70 +579,72 @@ public class MasterServiceTests extends ESTestCase {
             }
         };

-        final ConcurrentMap<String, AtomicInteger> submittedTasksPerThread = new ConcurrentHashMap<>();
-        CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
-        for (int i = 0; i < numberOfThreads; i++) {
-            final int index = i;
-            Thread thread = new Thread(() -> {
-                final String threadName = Thread.currentThread().getName();
-                try {
-                    barrier.await();
-                    for (int j = 0; j < taskSubmissionsPerThread; j++) {
-                        Tuple<TaskExecutor, Set<Task>> assignment = assignments.get(index * taskSubmissionsPerThread + j);
-                        final Set<Task> tasks = assignment.v2();
-                        submittedTasksPerThread.computeIfAbsent(threadName, key -> new AtomicInteger()).addAndGet(tasks.size());
-                        final TaskExecutor executor = assignment.v1();
-                        if (tasks.size() == 1) {
-                            masterService.submitStateUpdateTask(
-                                threadName,
-                                tasks.stream().findFirst().get(),
-                                ClusterStateTaskConfig.build(randomFrom(Priority.values())),
-                                executor,
-                                listener);
-                        } else {
-                            Map<Task, ClusterStateTaskListener> taskListeners = new HashMap<>();
-                            tasks.stream().forEach(t -> taskListeners.put(t, listener));
-                            masterService.submitStateUpdateTasks(
-                                threadName,
-                                taskListeners, ClusterStateTaskConfig.build(randomFrom(Priority.values())),
-                                executor
-                            );
+        try (MasterService masterService = createMasterService(true)) {
+            final ConcurrentMap<String, AtomicInteger> submittedTasksPerThread = new ConcurrentHashMap<>();
+            CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
+            for (int i = 0; i < numberOfThreads; i++) {
+                final int index = i;
+                Thread thread = new Thread(() -> {
+                    final String threadName = Thread.currentThread().getName();
+                    try {
+                        barrier.await();
+                        for (int j = 0; j < taskSubmissionsPerThread; j++) {
+                            Tuple<TaskExecutor, Set<Task>> assignment = assignments.get(index * taskSubmissionsPerThread + j);
+                            final Set<Task> tasks = assignment.v2();
+                            submittedTasksPerThread.computeIfAbsent(threadName, key -> new AtomicInteger()).addAndGet(tasks.size());
+                            final TaskExecutor executor = assignment.v1();
+                            if (tasks.size() == 1) {
+                                masterService.submitStateUpdateTask(
+                                    threadName,
+                                    tasks.stream().findFirst().get(),
+                                    ClusterStateTaskConfig.build(randomFrom(Priority.values())),
+                                    executor,
+                                    listener);
+                            } else {
+                                Map<Task, ClusterStateTaskListener> taskListeners = new HashMap<>();
+                                tasks.forEach(t -> taskListeners.put(t, listener));
+                                masterService.submitStateUpdateTasks(
+                                    threadName,
+                                    taskListeners, ClusterStateTaskConfig.build(randomFrom(Priority.values())),
+                                    executor
+                                );
+                            }
                         }
+                        barrier.await();
+                    } catch (BrokenBarrierException | InterruptedException e) {
+                        throw new AssertionError(e);
                     }
-                    barrier.await();
-                } catch (BrokenBarrierException | InterruptedException e) {
-                    throw new AssertionError(e);
-                }
-            });
-            thread.start();
-        }
+                });
+                thread.start();
+            }

-        // wait for all threads to be ready
-        barrier.await();
-        // wait for all threads to finish
-        barrier.await();
+            // wait for all threads to be ready
+            barrier.await();
+            // wait for all threads to finish
+            barrier.await();

-        // wait until all the cluster state updates have been processed
-        updateLatch.await();
-        // and until all of the publication callbacks have completed
-        semaphore.acquire(numberOfExecutors);
+            // wait until all the cluster state updates have been processed
+            updateLatch.await();
+            // and until all of the publication callbacks have completed
+            semaphore.acquire(numberOfExecutors);

-        // assert the number of executed tasks is correct
-        assertEquals(totalTaskCount, counter.get());
+            // assert the number of executed tasks is correct
+            assertEquals(totalTaskCount, counter.get());

-        // assert each executor executed the correct number of tasks
-        for (TaskExecutor executor : executors) {
-            if (counts.containsKey(executor)) {
-                assertEquals((int) counts.get(executor), executor.counter.get());
-                assertEquals(executor.batches.get(), executor.published.get());
+            // assert each executor executed the correct number of tasks
+            for (TaskExecutor executor : executors) {
+                if (counts.containsKey(executor)) {
+                    assertEquals((int) counts.get(executor), executor.counter.get());
+                    assertEquals(executor.batches.get(), executor.published.get());
+                }
             }
-        }

-        // assert the correct number of clusterStateProcessed events were triggered
-        for (Map.Entry<String, AtomicInteger> entry : processedStates.entrySet()) {
-            assertThat(submittedTasksPerThread, hasKey(entry.getKey()));
-            assertEquals("not all tasks submitted by " + entry.getKey() + " received a processed event",
-                entry.getValue().get(), submittedTasksPerThread.get(entry.getKey()).get());
+            // assert the correct number of clusterStateProcessed events were triggered
+            for (Map.Entry<String, AtomicInteger> entry : processedStates.entrySet()) {
+                assertThat(submittedTasksPerThread, hasKey(entry.getKey()));
+                assertEquals("not all tasks submitted by " + entry.getKey() + " received a processed event",
+                    entry.getValue().get(), submittedTasksPerThread.get(entry.getKey()).get());
+            }
         }
     }

@@ -605,42 +653,44 @@ public class MasterServiceTests extends ESTestCase {
         final CountDownLatch latch = new CountDownLatch(1);
         final AtomicReference<AssertionError> assertionRef = new AtomicReference<>();

-        masterService.submitStateUpdateTask(
-            "testBlockingCallInClusterStateTaskListenerFails",
-            new Object(),
-            ClusterStateTaskConfig.build(Priority.NORMAL),
-            (currentState, tasks) -> {
-                ClusterState newClusterState = ClusterState.builder(currentState).build();
-                return ClusterStateTaskExecutor.ClusterTasksResult.builder().successes(tasks).build(newClusterState);
-            },
-            new ClusterStateTaskListener() {
-                @Override
-                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                    BaseFuture<Void> future = new BaseFuture<Void>() {};
-                    try {
-                        if (randomBoolean()) {
-                            future.get(1L, TimeUnit.SECONDS);
-                        } else {
-                            future.get();
+        try (MasterService masterService = createMasterService(true)) {
+            masterService.submitStateUpdateTask(
+                "testBlockingCallInClusterStateTaskListenerFails",
+                new Object(),
+                ClusterStateTaskConfig.build(Priority.NORMAL),
+                (currentState, tasks) -> {
+                    ClusterState newClusterState = ClusterState.builder(currentState).build();
+                    return ClusterStateTaskExecutor.ClusterTasksResult.builder().successes(tasks).build(newClusterState);
+                },
+                new ClusterStateTaskListener() {
+                    @Override
+                    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                        BaseFuture<Void> future = new BaseFuture<Void>() {
+                        };
+                        try {
+                            if (randomBoolean()) {
+                                future.get(1L, TimeUnit.SECONDS);
+                            } else {
+                                future.get();
+                            }
+                        } catch (Exception e) {
+                            throw new RuntimeException(e);
+                        } catch (AssertionError e) {
+                            assertionRef.set(e);
+                            latch.countDown();
                         }
-                    } catch (Exception e) {
-                        throw new RuntimeException(e);
-                    } catch (AssertionError e) {
-                        assertionRef.set(e);
-                        latch.countDown();
                     }
-                }

-                @Override
-                public void onFailure(String source, Exception e) {
+                    @Override
+                    public void onFailure(String source, Exception e) {
+                    }
                 }
-            }
-        );
+            );

-        latch.await();
-        assertNotNull(assertionRef.get());
-        assertThat(assertionRef.get().getMessage(),
-            containsString("Reason: [Blocking operation]"));
+            latch.await();
+            assertNotNull(assertionRef.get());
+            assertThat(assertionRef.get().getMessage(), containsString("Reason: [Blocking operation]"));
+        }
     }

     @TestLogging(value = "org.elasticsearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level")
@@ -649,39 +699,77 @@ public class MasterServiceTests extends ESTestCase {
         mockAppender.start();
         mockAppender.addExpectation(
             new MockLogAppender.UnseenEventExpectation(
-                "test1 shouldn't see because setting is too low",
+                "test1 shouldn't log because it was fast enough",
                 MasterService.class.getCanonicalName(),
                 Level.WARN,
-                "*cluster state update task [test1] took [*] which is above the warn threshold of [*]"));
+                "*took*test1*"));
         mockAppender.addExpectation(
             new MockLogAppender.SeenEventExpectation(
                 "test2",
                 MasterService.class.getCanonicalName(),
                 Level.WARN,
-                "*cluster state update task [test2] took [32s] which is above the warn threshold of [*]"));
+                "*took [*], which is over [10s], to compute cluster state update for [test2]"));
         mockAppender.addExpectation(
             new MockLogAppender.SeenEventExpectation(
                 "test3",
                 MasterService.class.getCanonicalName(),
                 Level.WARN,
-                "*cluster state update task [test3] took [33s] which is above the warn threshold of [*]"));
+                "*took [*], which is over [10s], to compute cluster state update for [test3]"));
        mockAppender.addExpectation(
             new MockLogAppender.SeenEventExpectation(
                 "test4",
                 MasterService.class.getCanonicalName(),
                 Level.WARN,
-                "*cluster state update task [test4] took [34s] which is above the warn threshold of [*]"));
+                "*took [*], which is over [10s], to compute cluster state update for [test4]"));
+        mockAppender.addExpectation(
+            new MockLogAppender.UnseenEventExpectation(
+                "test5 should not log despite publishing slowly",
+                MasterService.class.getCanonicalName(),
+                Level.WARN,
+                "*took*test5*"));
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "test6 should log due to slow and failing publication",
+                MasterService.class.getCanonicalName(),
+                Level.WARN,
+                "took [*] and then failed to publish updated cluster state (version: *, uuid: *) for [test6]:*"));

         Logger clusterLogger = LogManager.getLogger(MasterService.class);
         Loggers.addAppender(clusterLogger, mockAppender);
-        try {
-            final CountDownLatch latch = new CountDownLatch(5);
+        try (MasterService masterService = new MasterService(Settings.builder()
+            .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName())
+            .put(Node.NODE_NAME_SETTING.getKey(), "test_node")
+            .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool)) {
+
+            final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(),
+                emptySet(), Version.CURRENT);
+            final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName()))
+                .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()))
+                .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build();
+            final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
+            masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
+                if (event.source().contains("test5")) {
+                    relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
+                        + randomLongBetween(1, 1000000);
+                }
+                if (event.source().contains("test6")) {
+                    relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
+                        + randomLongBetween(1, 1000000);
+                    throw new ElasticsearchException("simulated error during slow publication which should trigger logging");
+                }
+                clusterStateRef.set(event.state());
+                publishListener.onResponse(null);
+            });
+            masterService.setClusterStateSupplier(clusterStateRef::get);
+            masterService.start();
+
+            final CountDownLatch latch = new CountDownLatch(6);
             final CountDownLatch processedFirstTask = new CountDownLatch(1);
-            masterService.currentTimeOverride = System.nanoTime();
             masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
                 @Override
-                public ClusterState execute(ClusterState currentState) throws Exception {
-                    masterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos();
+                public ClusterState execute(ClusterState currentState) {
+                    relativeTimeInMillis += randomLongBetween(0L,
+                        MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis());
                     return currentState;
                 }

@@ -700,8 +788,9 @@ public class MasterServiceTests extends ESTestCase {
             processedFirstTask.await();
             masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
                 @Override
-                public ClusterState execute(ClusterState currentState) throws Exception {
-                    masterService.currentTimeOverride += TimeValue.timeValueSeconds(32).nanos();
+                public ClusterState execute(ClusterState currentState) {
+                    relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
+                        + randomLongBetween(1, 1000000);
                     throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
                 }

@@ -717,8 +806,9 @@ public class MasterServiceTests extends ESTestCase {
             });
             masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
                 @Override
-                public ClusterState execute(ClusterState currentState) throws Exception {
-                    masterService.currentTimeOverride += TimeValue.timeValueSeconds(33).nanos();
+                public ClusterState execute(ClusterState currentState) {
+                    relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
+                        + randomLongBetween(1, 1000000);
                     return ClusterState.builder(currentState).incrementVersion().build();
                 }

@@ -734,8 +824,9 @@ public class MasterServiceTests extends ESTestCase {
             });
             masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
                 @Override
-                public ClusterState execute(ClusterState currentState) throws Exception {
-                    masterService.currentTimeOverride += TimeValue.timeValueSeconds(34).nanos();
+                public ClusterState execute(ClusterState currentState) {
+                    relativeTimeInMillis += MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY).millis()
+                        + randomLongBetween(1, 1000000);
                     return currentState;
                 }

@@ -749,12 +840,10 @@ public class MasterServiceTests extends ESTestCase {
                     fail();
                 }
             });
-            // Additional update task to make sure all previous logging made it to the loggerName
-            // We don't check logging for this on since there is no guarantee that it will occur before our check
             masterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
                 @Override
                 public ClusterState execute(ClusterState currentState) {
-                    return currentState;
+                    return ClusterState.builder(currentState).incrementVersion().build();
                 }

                 @Override
@@ -767,55 +856,10 @@ public class MasterServiceTests extends ESTestCase {
                     fail();
                 }
             });
-            latch.await();
-        } finally {
-            Loggers.removeAppender(clusterLogger, mockAppender);
-            mockAppender.stop();
-        }
-        mockAppender.assertAllExpectationsMatched();
-    }
-
-    public void testAcking() throws InterruptedException {
-        final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
-        final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
-        final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
-        TimedMasterService timedMasterService = new TimedMasterService(Settings.builder().put("cluster.name",
-            MasterServiceTests.class.getSimpleName()).build(), threadPool);
-        ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName()))
-            .nodes(DiscoveryNodes.builder()
-                .add(node1)
-                .add(node2)
-                .add(node3)
-                .localNodeId(node1.getId())
-                .masterNodeId(node1.getId()))
-            .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build();
-        final AtomicReference<ClusterStatePublisher> publisherRef = new AtomicReference<>();
-        timedMasterService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al));
-        timedMasterService.setClusterStateSupplier(() -> initialClusterState);
-        timedMasterService.start();
-
-        // check that we don't time out before even committing the cluster state
-        {
-            final CountDownLatch latch = new CountDownLatch(1);
-
-            publisherRef.set((clusterChangedEvent, publishListener, ackListener) -> {
-                publishListener.onFailure(new FailedToCommitClusterStateException("mock exception"));
-            });
-
-            timedMasterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
+            masterService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() {
                 @Override
                 public ClusterState execute(ClusterState currentState) {
-                    return ClusterState.builder(currentState).build();
-                }
-
-                @Override
-                public TimeValue ackTimeout() {
-                    return TimeValue.ZERO;
-                }
-
-                @Override
-                public TimeValue timeout() {
-                    return null;
+                    return ClusterState.builder(currentState).incrementVersion().build();
                 }

                 @Override
@@ -824,97 +868,160 @@ public class MasterServiceTests extends ESTestCase {
                 }

                 @Override
-                protected Void newResponse(boolean acknowledged) {
-                    fail();
-                    return null;
+                public void onFailure(String source, Exception e) {
+                    fail(); // maybe we should notify here?
+                }
+            });
+            // Additional update task to make sure all previous logging made it to the loggerName
+            // We don't check logging for this one since there is no guarantee that it will occur before our check
+            masterService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    return currentState;
                 }

                 @Override
-                public void onFailure(String source, Exception e) {
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                     latch.countDown();
                 }

                 @Override
-                public void onAckTimeout() {
+                public void onFailure(String source, Exception e) {
                     fail();
                 }
             });
-
             latch.await();
+        } finally {
+            Loggers.removeAppender(clusterLogger, mockAppender);
+            mockAppender.stop();
         }
+        mockAppender.assertAllExpectationsMatched();
+    }

-        // check that we timeout if commit took too long
-        {
-            final CountDownLatch latch = new CountDownLatch(2);
+    public void testAcking() throws InterruptedException {
+        final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
+        final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
+        final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
+        try (MasterService masterService = new MasterService(Settings.builder()
+            .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName())
+            .put(Node.NODE_NAME_SETTING.getKey(), "test_node")
+            .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool)) {
+
+            final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName()))
+                .nodes(DiscoveryNodes.builder()
+                    .add(node1)
+                    .add(node2)
+                    .add(node3)
+                    .localNodeId(node1.getId())
+                    .masterNodeId(node1.getId()))
+                .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build();
+            final AtomicReference<ClusterStatePublisher> publisherRef = new AtomicReference<>();
+            masterService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al));
+            masterService.setClusterStateSupplier(() -> initialClusterState);
+            masterService.start();
+
+            // check that we don't time out before even committing the cluster state
+            {
+                final CountDownLatch latch = new CountDownLatch(1);
+
+                publisherRef.set((clusterChangedEvent, publishListener, ackListener) ->
+                    publishListener.onFailure(new FailedToCommitClusterStateException("mock exception")));
+
+                masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
+                    @Override
+                    public ClusterState execute(ClusterState currentState) {
+                        return ClusterState.builder(currentState).build();
+                    }

-            final TimeValue ackTimeout = TimeValue.timeValueMillis(randomInt(100));
+                    @Override
+                    public TimeValue ackTimeout() {
+                        return TimeValue.ZERO;
+                    }

-            publisherRef.set((clusterChangedEvent, publishListener, ackListener) -> {
-                publishListener.onResponse(null);
-                ackListener.onCommit(TimeValue.timeValueMillis(ackTimeout.millis() + randomInt(100)));
-                ackListener.onNodeAck(node1, null);
-                ackListener.onNodeAck(node2, null);
-                ackListener.onNodeAck(node3, null);
-            });
+                    @Override
+                    public TimeValue timeout() {
+                        return null;
+                    }

-            timedMasterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
-                @Override
-                public ClusterState execute(ClusterState currentState) {
-                    return ClusterState.builder(currentState).build();
-                }
+                    @Override
+                    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                        fail();
+                    }

-                @Override
-                public TimeValue ackTimeout() {
-                    return ackTimeout;
-                }
+                    @Override
+                    protected Void newResponse(boolean acknowledged) {
+                        fail();
+                        return null;
+                    }

-                @Override
-                public TimeValue timeout() {
-                    return null;
-                }
+                    @Override
+                    public void onFailure(String source, Exception e) {
+                        latch.countDown();
+                    }

-                @Override
-                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                    latch.countDown();
-                }
+                    @Override
+                    public void onAckTimeout() {
+                        fail();
+                    }
+                });

-                @Override
-                protected Void newResponse(boolean acknowledged) {
-                    fail();
-                    return null;
-                }
+                latch.await();
+            }

-                @Override
-                public void onFailure(String source, Exception e) {
-                    fail();
-                }
+            // check that we time out if commit took too long
+            {
+                final CountDownLatch latch = new CountDownLatch(2);

-                @Override
-                public void onAckTimeout() {
-                    latch.countDown();
-                }
-            });
+                final TimeValue ackTimeout = TimeValue.timeValueMillis(randomInt(100));

-            latch.await();
-        }
+                publisherRef.set((clusterChangedEvent, publishListener, ackListener) -> {
+                    publishListener.onResponse(null);
+                    ackListener.onCommit(TimeValue.timeValueMillis(ackTimeout.millis() + randomInt(100)));
+                    ackListener.onNodeAck(node1, null);
+                    ackListener.onNodeAck(node2, null);
+                    ackListener.onNodeAck(node3, null);
+                });

-        timedMasterService.close();
-    }
+                masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask<Void>(null, null) {
+                    @Override
+                    public ClusterState execute(ClusterState currentState) {
+                        return ClusterState.builder(currentState).build();
+                    }

-    static class TimedMasterService extends MasterService {
+                    @Override
+                    public TimeValue ackTimeout() {
+                        return ackTimeout;
+                    }

-        public volatile Long currentTimeOverride = null;
+                    @Override
+                    public TimeValue timeout() {
+                        return null;
+                    }

-        TimedMasterService(Settings settings, ThreadPool threadPool) {
-            super("test_node", settings, threadPool);
-        }
+                    @Override
+                    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                        latch.countDown();
+                    }
+
+                    @Override
+                    protected Void newResponse(boolean acknowledged) {
+                        fail();
+                        return null;
+                    }
+
+                    @Override
+                    public void onFailure(String source, Exception e) {
+                        fail();
+                    }
+
+                    @Override
+                    public void onAckTimeout() {
+                        latch.countDown();
+                    }
+                });

-        @Override
-        protected long currentTimeInNanos() {
-            if (currentTimeOverride != null) {
-                return currentTimeOverride;
+                latch.await();
             }
-            return super.currentTimeInNanos();
         }
     }