Browse Source

[TEST] Add a few additional MDP tests (#81274)

Add more MDP tests for PersistedClusterStateService:
- Add MDP delete tests
- Add MDP tests for overriding lucene version.
- Use MDP in RemoveCorruptedShardDataCommandTests
Nikola Grcevski 3 years ago
parent
commit
eb782d2c03

+ 91 - 10
server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java

@@ -10,6 +10,7 @@ package org.elasticsearch.gateway;
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
@@ -18,6 +19,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FilterDirectory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.NIOFSDirectory;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -63,6 +65,7 @@ import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
 
 import static org.apache.lucene.index.IndexWriter.WRITE_LOCK_NAME;
+import static org.elasticsearch.gateway.PersistedClusterStateService.METADATA_DIRECTORY_NAME;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.endsWith;
@@ -595,7 +598,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
             }
 
             final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
-            try (Directory directory = newFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
+            try (Directory directory = newFSDirectory(brokenPath.resolve(METADATA_DIRECTORY_NAME))) {
                 final IndexWriterConfig indexWriterConfig = new IndexWriterConfig();
                 indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
                 try (IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig)) {
@@ -633,8 +636,8 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
             final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
             final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
             try (
-                Directory directory = newFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
-                Directory dupDirectory = newFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))
+                Directory directory = newFSDirectory(brokenPath.resolve(METADATA_DIRECTORY_NAME));
+                Directory dupDirectory = newFSDirectory(dupPath.resolve(METADATA_DIRECTORY_NAME))
             ) {
                 try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
                     indexWriter.addIndexes(dupDirectory);
@@ -693,8 +696,8 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
             final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
             final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
             try (
-                Directory directory = newFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
-                Directory dupDirectory = newFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))
+                Directory directory = newFSDirectory(brokenPath.resolve(METADATA_DIRECTORY_NAME));
+                Directory dupDirectory = newFSDirectory(dupPath.resolve(METADATA_DIRECTORY_NAME))
             ) {
                 try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
                     indexWriter.deleteDocuments(new Term("type", "global")); // do not duplicate global metadata
@@ -1214,11 +1217,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
                     writer.writeIncrementalStateAndCommit(1, previousClusterState, clusterState);
 
                     for (Path dataPath : nodeEnvironment.nodeDataPaths()) {
-                        try (
-                            DirectoryStream<Path> files = Files.newDirectoryStream(
-                                dataPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)
-                            )
-                        ) {
+                        try (DirectoryStream<Path> files = Files.newDirectoryStream(dataPath.resolve(METADATA_DIRECTORY_NAME))) {
 
                             int fileCount = 0;
                             final List<String> fileNames = new ArrayList<>();
@@ -1251,6 +1250,88 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
         }
     }
 
+    // Verifies that PersistedClusterStateService.overrideVersion rewrites the persisted
+    // node version across every data path (multi-data-path / MDP setup via createDataPaths()).
+    public void testOverrideLuceneVersion() throws IOException {
+        try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
+            final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
+            final String clusterUUID = UUIDs.randomBase64UUID(random());
+            final long version = randomLongBetween(1L, Long.MAX_VALUE);
+
+            // Persist a full cluster state so that node metadata exists on disk before overriding.
+            ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
+            try (Writer writer = persistedClusterStateService.createWriter()) {
+                writer.writeFullStateAndCommit(
+                    0L,
+                    ClusterState.builder(clusterState)
+                        .metadata(
+                            Metadata.builder(clusterState.metadata()).clusterUUID(clusterUUID).clusterUUIDCommitted(true).version(version)
+                        )
+                        .incrementVersion()
+                        .build()
+                );
+                // Sanity-check the write round-trips before touching the version metadata.
+                clusterState = loadPersistedClusterState(persistedClusterStateService);
+                assertThat(clusterState.metadata().clusterUUID(), equalTo(clusterUUID));
+                assertTrue(clusterState.metadata().clusterUUIDCommitted());
+                assertThat(clusterState.metadata().version(), equalTo(version));
+
+            }
+            // Freshly written state records the current node version.
+            NodeMetadata prevMetadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths());
+            assertEquals(Version.CURRENT, prevMetadata.nodeVersion());
+            PersistedClusterStateService.overrideVersion(Version.V_8_0_0, persistedClusterStateService.getDataPaths());
+            // The override must be visible both when reading all paths together...
+            NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths());
+            assertEquals(Version.V_8_0_0, metadata.nodeVersion());
+            // ...and when reading each data path individually.
+            for (Path p : persistedClusterStateService.getDataPaths()) {
+                NodeMetadata individualMetadata = PersistedClusterStateService.nodeMetadata(p);
+                assertEquals(Version.V_8_0_0, individualMetadata.nodeVersion());
+            }
+        }
+    }
+
+    // Verifies that PersistedClusterStateService.deleteAll removes the persisted Lucene
+    // index (segments files) from every data path in a multi-data-path configuration.
+    public void testDeleteAllPaths() throws IOException {
+        try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
+            final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
+            final String clusterUUID = UUIDs.randomBase64UUID(random());
+            final long version = randomLongBetween(1L, Long.MAX_VALUE);
+
+            // Persist a full cluster state so that segments files exist on every data path.
+            ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
+            try (Writer writer = persistedClusterStateService.createWriter()) {
+                writer.writeFullStateAndCommit(
+                    0L,
+                    ClusterState.builder(clusterState)
+                        .metadata(
+                            Metadata.builder(clusterState.metadata()).clusterUUID(clusterUUID).clusterUUIDCommitted(true).version(version)
+                        )
+                        .incrementVersion()
+                        .build()
+                );
+                // Sanity-check the state round-trips before deleting it.
+                clusterState = loadPersistedClusterState(persistedClusterStateService);
+                assertThat(clusterState.metadata().clusterUUID(), equalTo(clusterUUID));
+                assertTrue(clusterState.metadata().clusterUUIDCommitted());
+                assertThat(clusterState.metadata().version(), equalTo(version));
+            }
+
+            // Every data path should now hold a Lucene commit (a segments_N file).
+            for (Path dataPath : persistedClusterStateService.getDataPaths()) {
+                assertTrue(findSegmentInDirectory(dataPath));
+            }
+
+            PersistedClusterStateService.deleteAll(persistedClusterStateService.getDataPaths());
+
+            // After deleteAll, no data path may retain a segments file.
+            for (Path dataPath : persistedClusterStateService.getDataPaths()) {
+                assertFalse(findSegmentInDirectory(dataPath));
+            }
+        }
+    }
+
+    /**
+     * Returns true if the metadata directory under {@code dataPath} contains a Lucene
+     * commit point (a file whose name starts with {@link IndexFileNames#SEGMENTS}).
+     *
+     * Fix: the original opened the NIOFSDirectory but never closed it, leaking the
+     * directory's file handles (and doing so even when listAll() threw). The directory
+     * is now managed with try-with-resources.
+     */
+    private boolean findSegmentInDirectory(Path dataPath) throws IOException {
+        try (Directory d = new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME))) {
+            for (final String file : d.listAll()) {
+                if (file.startsWith(IndexFileNames.SEGMENTS)) {
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
     private void assertExpectedLogs(
         long currentTerm,
         ClusterState previousState,

+ 14 - 8
server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java

@@ -11,6 +11,7 @@ import joptsimple.OptionParser;
 import joptsimple.OptionSet;
 
 import org.apache.lucene.store.BaseDirectoryWrapper;
+import org.apache.lucene.util.TestUtil;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.rollover.Condition;
@@ -101,18 +102,23 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase {
             RecoverySource.EmptyStoreRecoverySource.INSTANCE
         );
 
-        final Path dataDir = createTempDir();
+        dataPaths = new Path[] { createTempDir(), createTempDir(), createTempDir() };
+        final String[] tmpPaths = Arrays.stream(dataPaths).map(s -> s.toAbsolutePath().toString()).toArray(String[]::new);
+        int randomPath = TestUtil.nextInt(random(), 0, dataPaths.length - 1);
+        final Path tempDir = dataPaths[randomPath];
 
         environment = TestEnvironment.newEnvironment(
             Settings.builder()
-                .put(Environment.PATH_HOME_SETTING.getKey(), dataDir)
-                .putList(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath().toString())
+                .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
+                .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths)
                 .build()
         );
 
         // create same directory structure as prod does
-        Files.createDirectories(dataDir);
-        dataPaths = new Path[] { dataDir };
+        for (Path dataPath : dataPaths) {
+            Files.createDirectories(dataPath);
+        }
+
         final Settings settings = Settings.builder()
             .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
             .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
@@ -121,7 +127,7 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase {
             .put(IndexMetadata.SETTING_INDEX_UUID, shardId.getIndex().getUUID())
             .build();
 
-        final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(dataDir);
+        final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(tempDir);
         shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
 
         // Adding rollover info to IndexMetadata to check that NamedXContentRegistry is properly configured
@@ -142,10 +148,10 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase {
         clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(Metadata.builder().put(indexMetadata, false).build()).build();
 
         try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(logger, environment, Files::exists)) {
-            final Path[] dataPaths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
+            final Path[] paths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
             try (
                 PersistedClusterStateService.Writer writer = new PersistedClusterStateService(
-                    dataPaths,
+                    paths,
                     nodeId,
                     xContentRegistry(),
                     BigArrays.NON_RECYCLING_INSTANCE,