Browse Source

Rename NodeEnvironment.NodePath to DataPath (#86938)

The NodePath inner class of NodeEnvironment represents path.data entries
of the node. The name, however, is sometimes confusing since these are the
data paths, and there is normally no singular concept of "node path" (an
installation has several different paths). This commit renames NodePath
to DataPath, as well as all methods and variables referring to it, so
that the more general term "node paths" can be utilized for something
node-wide: the paths in the environment (in a future PR).
Ryan Ernst 3 years ago
parent
commit
f25c1a21a5
29 changed files with 289 additions and 289 deletions
  1. 8 8
      benchmarks/src/main/java/org/elasticsearch/benchmark/fs/AvailableIndexFoldersBenchmark.java
  2. 3 3
      server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java
  3. 1 1
      server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java
  4. 9 9
      server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java
  5. 1 1
      server/src/main/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommand.java
  6. 1 1
      server/src/main/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommand.java
  7. 1 1
      server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java
  8. 96 96
      server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
  9. 6 6
      server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java
  10. 4 4
      server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java
  11. 10 10
      server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java
  12. 16 16
      server/src/main/java/org/elasticsearch/index/shard/ShardPath.java
  13. 9 9
      server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
  14. 15 15
      server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
  15. 4 4
      server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java
  16. 20 20
      server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java
  17. 2 2
      server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
  18. 15 15
      server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java
  19. 3 3
      server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java
  20. 3 3
      server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java
  21. 4 4
      server/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java
  22. 2 2
      test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
  23. 12 12
      test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
  24. 1 1
      x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityActionIT.java
  25. 3 3
      x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/test/IdentityProviderIntegTestCase.java
  26. 32 32
      x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/PersistentCache.java
  27. 4 4
      x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/PersistentCacheTests.java
  28. 1 1
      x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java
  29. 3 3
      x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java

+ 8 - 8
benchmarks/src/main/java/org/elasticsearch/benchmark/fs/AvailableIndexFoldersBenchmark.java

@@ -37,7 +37,7 @@ import java.util.concurrent.TimeUnit;
 @State(Scope.Benchmark)
 public class AvailableIndexFoldersBenchmark {
 
-    private NodeEnvironment.NodePath nodePath;
+    private NodeEnvironment.DataPath dataPath;
     private NodeEnvironment nodeEnv;
     private Set<String> excludedDirs;
 
@@ -45,7 +45,7 @@ public class AvailableIndexFoldersBenchmark {
     public void setup() throws IOException {
         Path path = Files.createTempDirectory("test");
         String[] paths = new String[] { path.toString() };
-        nodePath = new NodeEnvironment.NodePath(path);
+        dataPath = new NodeEnvironment.DataPath(path);
 
         LogConfigurator.setNodeName("test");
         Settings settings = Settings.builder()
@@ -54,30 +54,30 @@ public class AvailableIndexFoldersBenchmark {
             .build();
         nodeEnv = new NodeEnvironment(settings, new Environment(settings, null));
 
-        Files.createDirectories(nodePath.indicesPath);
+        Files.createDirectories(dataPath.indicesPath);
         excludedDirs = new HashSet<>();
         int numIndices = 5000;
         for (int i = 0; i < numIndices; i++) {
             String dirName = "dir" + i;
-            Files.createDirectory(nodePath.indicesPath.resolve(dirName));
+            Files.createDirectory(dataPath.indicesPath.resolve(dirName));
             excludedDirs.add(dirName);
         }
-        if (nodeEnv.availableIndexFoldersForPath(nodePath).size() != numIndices) {
+        if (nodeEnv.availableIndexFoldersForPath(dataPath).size() != numIndices) {
             throw new IllegalStateException("bad size");
         }
-        if (nodeEnv.availableIndexFoldersForPath(nodePath, excludedDirs::contains).size() != 0) {
+        if (nodeEnv.availableIndexFoldersForPath(dataPath, excludedDirs::contains).size() != 0) {
             throw new IllegalStateException("bad size");
         }
     }
 
     @Benchmark
     public Set<String> availableIndexFolderNaive() throws IOException {
-        return nodeEnv.availableIndexFoldersForPath(nodePath);
+        return nodeEnv.availableIndexFoldersForPath(dataPath);
     }
 
     @Benchmark
     public Set<String> availableIndexFolderOptimized() throws IOException {
-        return nodeEnv.availableIndexFoldersForPath(nodePath, excludedDirs::contains);
+        return nodeEnv.availableIndexFoldersForPath(dataPath, excludedDirs::contains);
     }
 
 }

+ 3 - 3
server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java

@@ -139,10 +139,10 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
         final OptionParser parser = command.getParser();
         final ProcessInfo processInfo = new ProcessInfo(Map.of(), Map.of(), createTempDir());
 
-        final Settings nodePathSettings = internalCluster().dataPathSettings(node);
+        final Settings dataPathSettings = internalCluster().dataPathSettings(node);
 
         final Environment environment = TestEnvironment.newEnvironment(
-            Settings.builder().put(internalCluster().getDefaultSettings()).put(nodePathSettings).build()
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build()
         );
         final OptionSet options = parser.parse("-index", indexName, "-shard-id", "0");
 
@@ -667,7 +667,7 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
         final NodesStatsResponse nodeStatsResponse = client().admin().cluster().prepareNodesStats(nodeId).setFs(true).get();
         final Set<Path> paths = StreamSupport.stream(nodeStatsResponse.getNodes().get(0).getFs().spliterator(), false)
             .map(
-                nodePath -> PathUtils.get(nodePath.getPath())
+                dataPath -> PathUtils.get(dataPath.getPath())
                     .resolve(NodeEnvironment.INDICES_FOLDER)
                     .resolve(shardId.getIndex().getUUID())
                     .resolve(Integer.toString(shardId.getId()))

+ 1 - 1
server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java

@@ -37,7 +37,7 @@ public class DetachClusterCommand extends ElasticsearchNodeCommand {
     }
 
     @Override
-    protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException {
+    protected void processDataPaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException {
         final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths);
 
         terminal.println(Terminal.Verbosity.VERBOSE, "Loading cluster state");

+ 9 - 9
server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java

@@ -132,14 +132,14 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
         return Tuple.tuple(bestOnDiskState.currentTerm, clusterState(env, bestOnDiskState));
     }
 
-    protected void processNodePaths(Terminal terminal, OptionSet options, Environment env) throws IOException, UserException {
+    protected void processDataPaths(Terminal terminal, OptionSet options, Environment env) throws IOException, UserException {
         terminal.println(Terminal.Verbosity.VERBOSE, "Obtaining lock for node");
         try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(logger, env, Files::exists)) {
-            final Path[] dataPaths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
+            final Path[] dataPaths = Arrays.stream(lock.getDataPaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
             if (dataPaths.length == 0) {
                 throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG);
             }
-            processNodePaths(terminal, dataPaths, options, env);
+            processDataPaths(terminal, dataPaths, options, env);
         } catch (LockObtainFailedException e) {
             throw new ElasticsearchException(FAILED_TO_OBTAIN_NODE_LOCK_MSG, e);
         }
@@ -157,7 +157,7 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
     public final void execute(Terminal terminal, OptionSet options, Environment env, ProcessInfo processInfo) throws Exception {
         terminal.println(STOP_WARNING_MSG);
         if (validateBeforeLock(terminal, env)) {
-            processNodePaths(terminal, options, env);
+            processDataPaths(terminal, options, env);
         }
     }
 
@@ -178,16 +178,16 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
      * @param options the command line options
      * @param env the env of the node to process
      */
-    protected abstract void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException,
+    protected abstract void processDataPaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException,
         UserException;
 
-    protected static NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) {
-        return Arrays.stream(dataPaths).map(ElasticsearchNodeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new);
+    protected static NodeEnvironment.DataPath[] toDataPaths(Path[] paths) {
+        return Arrays.stream(paths).map(ElasticsearchNodeCommand::createDataPath).toArray(NodeEnvironment.DataPath[]::new);
     }
 
-    private static NodeEnvironment.NodePath createNodePath(Path path) {
+    private static NodeEnvironment.DataPath createDataPath(Path path) {
         try {
-            return new NodeEnvironment.NodePath(path);
+            return new NodeEnvironment.DataPath(path);
         } catch (IOException e) {
             throw new ElasticsearchException("Unable to investigate path [" + path + "]", e);
         }

+ 1 - 1
server/src/main/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommand.java

@@ -43,7 +43,7 @@ public class RemoveCustomsCommand extends ElasticsearchNodeCommand {
     }
 
     @Override
-    protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException,
+    protected void processDataPaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException,
         UserException {
         final List<String> customsToRemove = arguments.values(options);
         if (customsToRemove.isEmpty()) {

+ 1 - 1
server/src/main/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommand.java

@@ -44,7 +44,7 @@ public class RemoveSettingsCommand extends ElasticsearchNodeCommand {
     }
 
     @Override
-    protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException,
+    protected void processDataPaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException,
         UserException {
         final List<String> settingsToRemove = arguments.values(options);
         if (settingsToRemove.isEmpty()) {

+ 1 - 1
server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java

@@ -68,7 +68,7 @@ public class UnsafeBootstrapMasterCommand extends ElasticsearchNodeCommand {
         return true;
     }
 
-    protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException {
+    protected void processDataPaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException {
         final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths);
 
         final Tuple<Long, ClusterState> state = loadTermAndClusterState(persistedClusterStateService, env);

+ 96 - 96
server/src/main/java/org/elasticsearch/env/NodeEnvironment.java

@@ -87,7 +87,7 @@ import java.util.stream.Stream;
  * A component that holds all data paths for a single node.
  */
 public final class NodeEnvironment implements Closeable {
-    public static class NodePath {
+    public static class DataPath {
         /* ${data.paths} */
         public final Path path;
         /* ${data.paths}/indices */
@@ -98,7 +98,7 @@ public final class NodeEnvironment implements Closeable {
         public final int majorDeviceNumber;
         public final int minorDeviceNumber;
 
-        public NodePath(Path path) throws IOException {
+        public DataPath(Path path) throws IOException {
             this.path = path;
             this.indicesPath = path.resolve(INDICES_FOLDER);
             this.fileStore = Environment.getFileStore(path);
@@ -112,7 +112,7 @@ public final class NodeEnvironment implements Closeable {
         }
 
         /**
-         * Resolves the given shards directory against this NodePath
+         * Resolves the given shards directory against this DataPath
          * ${data.paths}/indices/{index.uuid}/{shard.id}
          */
         public Path resolve(ShardId shardId) {
@@ -120,7 +120,7 @@ public final class NodeEnvironment implements Closeable {
         }
 
         /**
-         * Resolves index directory against this NodePath
+         * Resolves index directory against this DataPath
          * ${data.paths}/indices/{index.uuid}
          */
         public Path resolve(Index index) {
@@ -133,7 +133,7 @@ public final class NodeEnvironment implements Closeable {
 
         @Override
         public String toString() {
-            return "NodePath{"
+            return "DataPath{"
                 + "path="
                 + path
                 + ", indicesPath="
@@ -150,7 +150,7 @@ public final class NodeEnvironment implements Closeable {
     }
 
     private final Logger logger = LogManager.getLogger(NodeEnvironment.class);
-    private final NodePath[] nodePaths;
+    private final DataPath[] dataPaths;
     private final Path sharedDataPath;
     private final Lock[] locks;
 
@@ -190,7 +190,7 @@ public final class NodeEnvironment implements Closeable {
     public static class NodeLock implements Releasable {
 
         private final Lock[] locks;
-        private final NodePath[] nodePaths;
+        private final DataPath[] dataPaths;
 
         public NodeLock(final Logger logger, final Environment environment, final CheckedFunction<Path, Boolean, IOException> pathFunction)
             throws IOException {
@@ -207,8 +207,8 @@ public final class NodeEnvironment implements Closeable {
             final CheckedFunction<Path, Boolean, IOException> pathFunction,
             final Function<Path, Path> subPathMapping
         ) throws IOException {
-            nodePaths = new NodePath[environment.dataFiles().length];
-            locks = new Lock[nodePaths.length];
+            dataPaths = new DataPath[environment.dataFiles().length];
+            locks = new Lock[dataPaths.length];
             try {
                 final Path[] dataPaths = environment.dataFiles();
                 for (int dirIndex = 0; dirIndex < dataPaths.length; dirIndex++) {
@@ -220,7 +220,7 @@ public final class NodeEnvironment implements Closeable {
                     try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
                         logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
                         locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
-                        nodePaths[dirIndex] = new NodePath(dir);
+                        this.dataPaths[dirIndex] = new DataPath(dir);
                     } catch (IOException e) {
                         logger.trace(() -> new ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), e);
                         // release all the ones that were obtained up until now
@@ -235,8 +235,8 @@ public final class NodeEnvironment implements Closeable {
             }
         }
 
-        public NodePath[] getNodePaths() {
-            return nodePaths;
+        public DataPath[] getDataPaths() {
+            return dataPaths;
         }
 
         @Override
@@ -285,9 +285,9 @@ public final class NodeEnvironment implements Closeable {
             }
 
             this.locks = nodeLock.locks;
-            this.nodePaths = nodeLock.nodePaths;
+            this.dataPaths = nodeLock.dataPaths;
 
-            logger.debug("using node location {}", Arrays.toString(nodePaths));
+            logger.debug("using node location {}", Arrays.toString(dataPaths));
 
             maybeLogPathDetails();
             maybeLogHeapDetails();
@@ -295,7 +295,7 @@ public final class NodeEnvironment implements Closeable {
             applySegmentInfosTrace(settings);
             assertCanWrite();
 
-            ensureAtomicMoveSupported(nodePaths);
+            ensureAtomicMoveSupported(dataPaths);
 
             if (upgradeLegacyNodeFolders(logger, settings, environment, nodeLock)) {
                 assertCanWrite();
@@ -316,13 +316,13 @@ public final class NodeEnvironment implements Closeable {
 
             if (DiscoveryNode.canContainData(settings) == false) {
                 if (DiscoveryNode.isMasterNode(settings) == false) {
-                    ensureNoIndexMetadata(nodePaths);
+                    ensureNoIndexMetadata(dataPaths);
                 }
 
-                ensureNoShardData(nodePaths);
+                ensureNoShardData(dataPaths);
             }
 
-            this.nodeMetadata = loadNodeMetadata(settings, logger, nodePaths);
+            this.nodeMetadata = loadNodeMetadata(settings, logger, dataPaths);
 
             success = true;
         } finally {
@@ -401,15 +401,15 @@ public final class NodeEnvironment implements Closeable {
         }
 
         // move contents from legacy path to new path
-        assert nodeLock.getNodePaths().length == legacyNodeLock.getNodePaths().length;
+        assert nodeLock.getDataPaths().length == legacyNodeLock.getDataPaths().length;
         try {
             // first check if we are upgrading from an index compatible version
-            checkForIndexCompatibility(logger, legacyNodeLock.getNodePaths());
+            checkForIndexCompatibility(logger, legacyNodeLock.getDataPaths());
 
             final List<CheckedRunnable<IOException>> upgradeActions = new ArrayList<>();
-            for (int i = 0; i < legacyNodeLock.getNodePaths().length; i++) {
-                final NodePath legacyNodePath = legacyNodeLock.getNodePaths()[i];
-                final NodePath nodePath = nodeLock.getNodePaths()[i];
+            for (int i = 0; i < legacyNodeLock.getDataPaths().length; i++) {
+                final DataPath legacyDataPath = legacyNodeLock.getDataPaths()[i];
+                final DataPath dataPath = nodeLock.getDataPaths()[i];
 
                 // determine folders to move and check that there are no extra files/folders
                 final Set<String> folderNames = new HashSet<>();
@@ -437,7 +437,7 @@ public final class NodeEnvironment implements Closeable {
                     )
                 );
 
-                try (DirectoryStream<Path> stream = Files.newDirectoryStream(legacyNodePath.path)) {
+                try (DirectoryStream<Path> stream = Files.newDirectoryStream(legacyDataPath.path)) {
                     for (Path subFolderPath : stream) {
                         final String fileName = subFolderPath.getFileName().toString();
                         if (FileSystemUtils.isDesktopServicesStore(subFolderPath)) {
@@ -448,7 +448,7 @@ public final class NodeEnvironment implements Closeable {
                                     "unexpected folder encountered during data folder upgrade: " + subFolderPath
                                 );
                             }
-                            final Path targetSubFolderPath = nodePath.path.resolve(fileName);
+                            final Path targetSubFolderPath = dataPath.path.resolve(fileName);
                             if (Files.exists(targetSubFolderPath)) {
                                 throw new IllegalStateException(
                                     "target folder already exists during data folder upgrade: " + targetSubFolderPath
@@ -468,12 +468,12 @@ public final class NodeEnvironment implements Closeable {
 
                 upgradeActions.add(() -> {
                     for (String folderName : folderNames) {
-                        final Path sourceSubFolderPath = legacyNodePath.path.resolve(folderName);
-                        final Path targetSubFolderPath = nodePath.path.resolve(folderName);
+                        final Path sourceSubFolderPath = legacyDataPath.path.resolve(folderName);
+                        final Path targetSubFolderPath = dataPath.path.resolve(folderName);
                         Files.move(sourceSubFolderPath, targetSubFolderPath, StandardCopyOption.ATOMIC_MOVE);
                         logger.info("data folder upgrade: moved from [{}] to [{}]", sourceSubFolderPath, targetSubFolderPath);
                     }
-                    IOUtils.fsync(nodePath.path, true);
+                    IOUtils.fsync(dataPath.path, true);
                 });
             }
             // now do the actual upgrade
@@ -495,11 +495,11 @@ public final class NodeEnvironment implements Closeable {
      * Checks to see if we can upgrade to this version based on the existing index state. Upgrading
      * from older versions can cause irreversible changes if allowed.
      * @param logger
-     * @param nodePaths
+     * @param dataPaths
      * @throws IOException
      */
-    static void checkForIndexCompatibility(Logger logger, NodePath... nodePaths) throws IOException {
-        final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new);
+    static void checkForIndexCompatibility(Logger logger, DataPath... dataPaths) throws IOException {
+        final Path[] paths = Arrays.stream(dataPaths).map(np -> np.path).toArray(Path[]::new);
         NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(paths);
 
         // We are upgrading the cluster, but we didn't find any previous metadata. Corrupted state or incompatible version.
@@ -537,10 +537,10 @@ public final class NodeEnvironment implements Closeable {
         if (logger.isDebugEnabled()) {
             // Log one line per path.data:
             StringBuilder sb = new StringBuilder();
-            for (NodePath nodePath : nodePaths) {
-                sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath());
+            for (DataPath dataPath : dataPaths) {
+                sb.append('\n').append(" -> ").append(dataPath.path.toAbsolutePath());
 
-                FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
+                FsInfo.Path fsPath = FsProbe.getFSInfo(dataPath);
                 sb.append(", free_space [")
                     .append(fsPath.getFree())
                     .append("], usable_space [")
@@ -558,8 +558,8 @@ public final class NodeEnvironment implements Closeable {
             FsInfo.Path totFSPath = new FsInfo.Path();
             Set<String> allTypes = new HashSet<>();
             Set<String> allMounts = new HashSet<>();
-            for (NodePath nodePath : nodePaths) {
-                FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
+            for (DataPath dataPath : dataPaths) {
+                FsInfo.Path fsPath = FsProbe.getFSInfo(dataPath);
                 String mount = fsPath.getMount();
                 if (allMounts.contains(mount) == false) {
                     allMounts.add(mount);
@@ -574,7 +574,7 @@ public final class NodeEnvironment implements Closeable {
             // Just log a 1-line summary:
             logger.info(
                 "using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], types [{}]",
-                nodePaths.length,
+                dataPaths.length,
                 allMounts,
                 totFSPath.getAvailable(),
                 totFSPath.getTotal(),
@@ -593,8 +593,8 @@ public final class NodeEnvironment implements Closeable {
     /**
      * scans the node paths and loads existing metadata file. If not found a new meta data will be generated
      */
-    private static NodeMetadata loadNodeMetadata(Settings settings, Logger logger, NodePath... nodePaths) throws IOException {
-        final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new);
+    private static NodeMetadata loadNodeMetadata(Settings settings, Logger logger, DataPath... dataPaths) throws IOException {
+        final Path[] paths = Arrays.stream(dataPaths).map(np -> np.path).toArray(Path[]::new);
         NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(paths);
         if (metadata == null) {
             // load legacy metadata
@@ -997,7 +997,7 @@ public final class NodeEnvironment implements Closeable {
     }
 
     public boolean hasNodeFile() {
-        return nodePaths != null && locks != null;
+        return dataPaths != null && locks != null;
     }
 
     /**
@@ -1006,9 +1006,9 @@ public final class NodeEnvironment implements Closeable {
      */
     public Path[] nodeDataPaths() {
         assertEnvIsLocked();
-        Path[] paths = new Path[nodePaths.length];
+        Path[] paths = new Path[dataPaths.length];
         for (int i = 0; i < paths.length; i++) {
-            paths[i] = nodePaths[i].path;
+            paths[i] = dataPaths[i].path;
         }
         return paths;
     }
@@ -1039,14 +1039,14 @@ public final class NodeEnvironment implements Closeable {
     }
 
     /**
-     * Returns an array of all of the {@link NodePath}s.
+     * Returns an array of all of the {@link DataPath}s.
      */
-    public NodePath[] nodePaths() {
+    public DataPath[] dataPaths() {
         assertEnvIsLocked();
-        if (nodePaths == null || locks == null) {
+        if (dataPaths == null || locks == null) {
             throw new IllegalStateException("node is not configured to store local location");
         }
-        return nodePaths;
+        return dataPaths;
     }
 
     /**
@@ -1054,9 +1054,9 @@ public final class NodeEnvironment implements Closeable {
      */
     public Path[] indexPaths(Index index) {
         assertEnvIsLocked();
-        Path[] indexPaths = new Path[nodePaths.length];
-        for (int i = 0; i < nodePaths.length; i++) {
-            indexPaths[i] = nodePaths[i].resolve(index);
+        Path[] indexPaths = new Path[dataPaths.length];
+        for (int i = 0; i < dataPaths.length; i++) {
+            indexPaths[i] = dataPaths[i].resolve(index);
         }
         return indexPaths;
     }
@@ -1071,10 +1071,10 @@ public final class NodeEnvironment implements Closeable {
      */
     public Path[] availableShardPaths(ShardId shardId) {
         assertEnvIsLocked();
-        final NodePath[] nodePaths = nodePaths();
-        final Path[] shardLocations = new Path[nodePaths.length];
-        for (int i = 0; i < nodePaths.length; i++) {
-            shardLocations[i] = nodePaths[i].resolve(shardId);
+        final DataPath[] dataPaths = dataPaths();
+        final Path[] shardLocations = new Path[dataPaths.length];
+        for (int i = 0; i < dataPaths.length; i++) {
+            shardLocations[i] = dataPaths[i].resolve(shardId);
         }
         return shardLocations;
     }
@@ -1091,13 +1091,13 @@ public final class NodeEnvironment implements Closeable {
      * @param excludeIndexPathIdsPredicate folder names to exclude
      */
     public Set<String> availableIndexFolders(Predicate<String> excludeIndexPathIdsPredicate) throws IOException {
-        if (nodePaths == null || locks == null) {
+        if (dataPaths == null || locks == null) {
             throw new IllegalStateException("node is not configured to store local location");
         }
         assertEnvIsLocked();
         Set<String> indexFolders = new HashSet<>();
-        for (NodePath nodePath : nodePaths) {
-            indexFolders.addAll(availableIndexFoldersForPath(nodePath, excludeIndexPathIdsPredicate));
+        for (DataPath dataPath : dataPaths) {
+            indexFolders.addAll(availableIndexFoldersForPath(dataPath, excludeIndexPathIdsPredicate));
         }
         return indexFolders;
 
@@ -1106,30 +1106,30 @@ public final class NodeEnvironment implements Closeable {
     /**
      * Return all directory names in the indices directory for the given node path.
      *
-     * @param nodePath the path
+     * @param dataPath the path
      * @return all directories that could be indices for the given node path.
      * @throws IOException if an I/O exception occurs traversing the filesystem
      */
-    public Set<String> availableIndexFoldersForPath(final NodePath nodePath) throws IOException {
-        return availableIndexFoldersForPath(nodePath, p -> false);
+    public Set<String> availableIndexFoldersForPath(final DataPath dataPath) throws IOException {
+        return availableIndexFoldersForPath(dataPath, p -> false);
     }
 
     /**
      * Return directory names in the indices directory for the given node path that don't match the given predicate.
      *
-     * @param nodePath the path
+     * @param dataPath the path
      * @param excludeIndexPathIdsPredicate folder names to exclude
      * @return all directories that could be indices for the given node path.
      * @throws IOException if an I/O exception occurs traversing the filesystem
      */
-    public Set<String> availableIndexFoldersForPath(final NodePath nodePath, Predicate<String> excludeIndexPathIdsPredicate)
+    public Set<String> availableIndexFoldersForPath(final DataPath dataPath, Predicate<String> excludeIndexPathIdsPredicate)
         throws IOException {
-        if (nodePaths == null || locks == null) {
+        if (dataPaths == null || locks == null) {
             throw new IllegalStateException("node is not configured to store local location");
         }
         assertEnvIsLocked();
         final Set<String> indexFolders = new HashSet<>();
-        Path indicesLocation = nodePath.indicesPath;
+        Path indicesLocation = dataPath.indicesPath;
         if (Files.isDirectory(indicesLocation)) {
             try (DirectoryStream<Path> stream = Files.newDirectoryStream(indicesLocation)) {
                 for (Path index : stream) {
@@ -1147,13 +1147,13 @@ public final class NodeEnvironment implements Closeable {
      * Resolves all existing paths to <code>indexFolderName</code> in ${data.paths}/indices
      */
     public Path[] resolveIndexFolder(String indexFolderName) {
-        if (nodePaths == null || locks == null) {
+        if (dataPaths == null || locks == null) {
             throw new IllegalStateException("node is not configured to store local location");
         }
         assertEnvIsLocked();
-        List<Path> paths = new ArrayList<>(nodePaths.length);
-        for (NodePath nodePath : nodePaths) {
-            Path indexFolder = nodePath.indicesPath.resolve(indexFolderName);
+        List<Path> paths = new ArrayList<>(dataPaths.length);
+        for (DataPath dataPath : dataPaths) {
+            Path indexFolder = dataPath.indicesPath.resolve(indexFolderName);
             if (Files.exists(indexFolder)) {
                 paths.add(indexFolder);
             }
@@ -1171,36 +1171,36 @@ public final class NodeEnvironment implements Closeable {
      */
     public Set<ShardId> findAllShardIds(final Index index) throws IOException {
         assert index != null;
-        if (nodePaths == null || locks == null) {
+        if (dataPaths == null || locks == null) {
             throw new IllegalStateException("node is not configured to store local location");
         }
         assertEnvIsLocked();
         final Set<ShardId> shardIds = new HashSet<>();
         final String indexUniquePathId = index.getUUID();
-        for (final NodePath nodePath : nodePaths) {
-            shardIds.addAll(findAllShardsForIndex(nodePath.indicesPath.resolve(indexUniquePathId), index));
+        for (final DataPath dataPath : dataPaths) {
+            shardIds.addAll(findAllShardsForIndex(dataPath.indicesPath.resolve(indexUniquePathId), index));
         }
         return shardIds;
     }
 
     /**
-     * Find all the shards for this index, returning a map of the {@code NodePath} to the number of shards on that path
+     * Find all the shards for this index, returning a map of the {@code DataPath} to the number of shards on that path
      * @param index the index by which to filter shards
-     * @return a map of NodePath to count of the shards for the index on that path
+     * @return a map of DataPath to count of the shards for the index on that path
      * @throws IOException if an IOException occurs
      */
-    public Map<NodePath, Long> shardCountPerPath(final Index index) throws IOException {
+    public Map<DataPath, Long> shardCountPerPath(final Index index) throws IOException {
         assert index != null;
-        if (nodePaths == null || locks == null) {
+        if (dataPaths == null || locks == null) {
             throw new IllegalStateException("node is not configured to store local location");
         }
         assertEnvIsLocked();
-        final Map<NodePath, Long> shardCountPerPath = new HashMap<>();
+        final Map<DataPath, Long> shardCountPerPath = new HashMap<>();
         final String indexUniquePathId = index.getUUID();
-        for (final NodePath nodePath : nodePaths) {
-            Path indexLocation = nodePath.indicesPath.resolve(indexUniquePathId);
+        for (final DataPath dataPath : dataPaths) {
+            Path indexLocation = dataPath.indicesPath.resolve(indexUniquePathId);
             if (Files.isDirectory(indexLocation)) {
-                shardCountPerPath.put(nodePath, (long) findAllShardsForIndex(indexLocation, index).size());
+                shardCountPerPath.put(dataPath, (long) findAllShardsForIndex(indexLocation, index).size());
             }
         }
         return shardCountPerPath;
@@ -1257,11 +1257,11 @@ public final class NodeEnvironment implements Closeable {
      * not supported by the filesystem. This test is executed on each of the data directories.
      * This method cleans up all files even in the case of an error.
      */
-    private static void ensureAtomicMoveSupported(final NodePath[] nodePaths) throws IOException {
-        for (NodePath nodePath : nodePaths) {
-            assert Files.isDirectory(nodePath.path) : nodePath.path + " is not a directory";
-            final Path src = nodePath.path.resolve(TEMP_FILE_NAME + ".tmp");
-            final Path target = nodePath.path.resolve(TEMP_FILE_NAME + ".final");
+    private static void ensureAtomicMoveSupported(final DataPath[] dataPaths) throws IOException {
+        for (DataPath dataPath : dataPaths) {
+            assert Files.isDirectory(dataPath.path) : dataPath.path + " is not a directory";
+            final Path src = dataPath.path.resolve(TEMP_FILE_NAME + ".tmp");
+            final Path target = dataPath.path.resolve(TEMP_FILE_NAME + ".final");
             try {
                 Files.deleteIfExists(src);
                 Files.createFile(src);
@@ -1269,7 +1269,7 @@ public final class NodeEnvironment implements Closeable {
             } catch (AtomicMoveNotSupportedException ex) {
                 throw new IllegalStateException(
                     "atomic_move is not supported by the filesystem on path ["
-                        + nodePath.path
+                        + dataPath.path
                         + "] atomic_move is required for elasticsearch to work correctly.",
                     ex
                 );
@@ -1283,8 +1283,8 @@ public final class NodeEnvironment implements Closeable {
         }
     }
 
-    private static void ensureNoShardData(final NodePath[] nodePaths) throws IOException {
-        List<Path> shardDataPaths = collectShardDataPaths(nodePaths);
+    private static void ensureNoShardData(final DataPath[] dataPaths) throws IOException {
+        List<Path> shardDataPaths = collectShardDataPaths(dataPaths);
         if (shardDataPaths.isEmpty() == false) {
             final String message = String.format(
                 Locale.ROOT,
@@ -1296,8 +1296,8 @@ public final class NodeEnvironment implements Closeable {
         }
     }
 
-    private static void ensureNoIndexMetadata(final NodePath[] nodePaths) throws IOException {
-        List<Path> indexMetadataPaths = collectIndexMetadataPaths(nodePaths);
+    private static void ensureNoIndexMetadata(final DataPath[] dataPaths) throws IOException {
+        List<Path> indexMetadataPaths = collectIndexMetadataPaths(dataPaths);
         if (indexMetadataPaths.isEmpty() == false) {
             final String message = String.format(
                 Locale.ROOT,
@@ -1313,22 +1313,22 @@ public final class NodeEnvironment implements Closeable {
     /**
      * Collect the paths containing shard data in the indicated node paths. The returned paths will point to the shard data folder.
      */
-    static List<Path> collectShardDataPaths(NodePath[] nodePaths) throws IOException {
-        return collectIndexSubPaths(nodePaths, NodeEnvironment::isShardPath);
+    static List<Path> collectShardDataPaths(DataPath[] dataPaths) throws IOException {
+        return collectIndexSubPaths(dataPaths, NodeEnvironment::isShardPath);
     }
 
     /**
      * Collect the paths containing index meta data in the indicated node paths. The returned paths will point to the
      * {@link MetadataStateFormat#STATE_DIR_NAME} folder
      */
-    static List<Path> collectIndexMetadataPaths(NodePath[] nodePaths) throws IOException {
-        return collectIndexSubPaths(nodePaths, NodeEnvironment::isIndexMetadataPath);
+    static List<Path> collectIndexMetadataPaths(DataPath[] dataPaths) throws IOException {
+        return collectIndexSubPaths(dataPaths, NodeEnvironment::isIndexMetadataPath);
     }
 
-    private static List<Path> collectIndexSubPaths(NodePath[] nodePaths, Predicate<Path> subPathPredicate) throws IOException {
+    private static List<Path> collectIndexSubPaths(DataPath[] dataPaths, Predicate<Path> subPathPredicate) throws IOException {
         List<Path> indexSubPaths = new ArrayList<>();
-        for (NodePath nodePath : nodePaths) {
-            Path indicesPath = nodePath.indicesPath;
+        for (DataPath dataPath : dataPaths) {
+            Path indicesPath = dataPath.indicesPath;
             if (Files.isDirectory(indicesPath)) {
                 try (DirectoryStream<Path> indexStream = Files.newDirectoryStream(indicesPath)) {
                     for (Path indexPath : indexStream) {
@@ -1400,7 +1400,7 @@ public final class NodeEnvironment implements Closeable {
     }
 
     /**
-     * Returns the {@code NodePath.path} for this shard.
+     * Returns the {@code DataPath.path} for this shard.
      */
     public static Path shardStatePathToDataPath(Path shardPath) {
         int count = shardPath.getNameCount();

+ 6 - 6
server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java

@@ -63,7 +63,7 @@ public class NodeRepurposeCommand extends ElasticsearchNodeCommand {
     }
 
     @Override
-    protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException {
+    protected void processDataPaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException {
         assert DiscoveryNode.canContainData(env.settings()) == false;
 
         if (DiscoveryNode.isMasterNode(env.settings()) == false) {
@@ -74,13 +74,13 @@ public class NodeRepurposeCommand extends ElasticsearchNodeCommand {
     }
 
     private static void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException {
-        NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths);
+        NodeEnvironment.DataPath[] nodeDataPaths = toDataPaths(dataPaths);
 
         terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths");
-        List<Path> shardDataPaths = NodeEnvironment.collectShardDataPaths(nodePaths);
+        List<Path> shardDataPaths = NodeEnvironment.collectShardDataPaths(nodeDataPaths);
 
         terminal.println(Terminal.Verbosity.VERBOSE, "Collecting index metadata paths");
-        List<Path> indexMetadataPaths = NodeEnvironment.collectIndexMetadataPaths(nodePaths);
+        List<Path> indexMetadataPaths = NodeEnvironment.collectIndexMetadataPaths(nodeDataPaths);
 
         Set<Path> indexPaths = uniqueParentPaths(shardDataPaths, indexMetadataPaths);
 
@@ -114,10 +114,10 @@ public class NodeRepurposeCommand extends ElasticsearchNodeCommand {
     }
 
     private static void processMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException {
-        NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths);
+        NodeEnvironment.DataPath[] nodeDataPaths = toDataPaths(dataPaths);
 
         terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths");
-        List<Path> shardDataPaths = NodeEnvironment.collectShardDataPaths(nodePaths);
+        List<Path> shardDataPaths = NodeEnvironment.collectShardDataPaths(nodeDataPaths);
         if (shardDataPaths.isEmpty()) {
             terminal.println(NO_SHARD_DATA_TO_CLEAN_UP_FOUND);
             return;

+ 4 - 4
server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java

@@ -60,9 +60,9 @@ public class OverrideNodeVersionCommand extends ElasticsearchNodeCommand {
     }
 
     @Override
-    protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException {
-        final Path[] nodePaths = Arrays.stream(toNodePaths(dataPaths)).map(p -> p.path).toArray(Path[]::new);
-        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(nodePaths);
+    protected void processDataPaths(Terminal terminal, Path[] paths, OptionSet options, Environment env) throws IOException {
+        final Path[] dataPaths = Arrays.stream(toDataPaths(paths)).map(p -> p.path).toArray(Path[]::new);
+        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths);
         if (nodeMetadata == null) {
             throw new ElasticsearchException(NO_METADATA_MESSAGE);
         }
@@ -88,7 +88,7 @@ public class OverrideNodeVersionCommand extends ElasticsearchNodeCommand {
             ).replace("V_NEW", nodeMetadata.nodeVersion().toString()).replace("V_CUR", Version.CURRENT.toString())
         );
 
-        PersistedClusterStateService.overrideVersion(Version.CURRENT, dataPaths);
+        PersistedClusterStateService.overrideVersion(Version.CURRENT, paths);
 
         terminal.println(SUCCESS_MESSAGE);
     }

+ 10 - 10
server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java

@@ -234,7 +234,7 @@ public class RemoveCorruptedShardDataCommand extends ElasticsearchNodeCommand {
 
     // Visible for testing
     @Override
-    public void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment environment) throws IOException {
+    public void processDataPaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment environment) throws IOException {
         warnAboutIndexBackup(terminal);
 
         final ClusterState clusterState = loadTermAndClusterState(
@@ -449,11 +449,11 @@ public class RemoveCorruptedShardDataCommand extends ElasticsearchNodeCommand {
     }
 
     private static void printRerouteCommand(ShardPath shardPath, Terminal terminal, boolean allocateStale) throws IOException {
-        final Path nodePath = getNodePath(shardPath);
-        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(nodePath);
+        final Path dataPath = getDataPath(shardPath);
+        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPath);
 
         if (nodeMetadata == null) {
-            throw new ElasticsearchException("No node meta data at " + nodePath);
+            throw new ElasticsearchException("No node meta data at " + dataPath);
         }
 
         final String nodeId = nodeMetadata.nodeId();
@@ -472,13 +472,13 @@ public class RemoveCorruptedShardDataCommand extends ElasticsearchNodeCommand {
         terminal.println("");
     }
 
-    private static Path getNodePath(ShardPath shardPath) {
-        final Path nodePath = shardPath.getDataPath().getParent().getParent().getParent();
-        if (Files.exists(nodePath) == false
-            || Files.exists(nodePath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)) == false) {
-            throw new ElasticsearchException("Unable to resolve node path for " + shardPath);
+    private static Path getDataPath(ShardPath shardPath) {
+        final Path dataPath = shardPath.getDataPath().getParent().getParent().getParent();
+        if (Files.exists(dataPath) == false
+            || Files.exists(dataPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)) == false) {
+            throw new ElasticsearchException("Unable to resolve data path for " + shardPath);
         }
-        return nodePath;
+        return dataPath;
     }
 
     public enum CleanStatus {

+ 16 - 16
server/src/main/java/org/elasticsearch/index/shard/ShardPath.java

@@ -214,11 +214,11 @@ public final class ShardPath {
 
         if (indexSettings.hasCustomDataPath()) {
             dataPath = env.resolveCustomLocation(indexSettings.customDataPath(), shardId);
-            statePath = env.nodePaths()[0].resolve(shardId);
+            statePath = env.dataPaths()[0].resolve(shardId);
         } else {
             BigInteger totFreeSpace = BigInteger.ZERO;
-            for (NodeEnvironment.NodePath nodePath : env.nodePaths()) {
-                totFreeSpace = totFreeSpace.add(BigInteger.valueOf(nodePath.fileStore.getUsableSpace()));
+            for (NodeEnvironment.DataPath nodeDataPath : env.dataPaths()) {
+                totFreeSpace = totFreeSpace.add(BigInteger.valueOf(nodeDataPath.fileStore.getUsableSpace()));
             }
 
             // TODO: this is a hack!! We should instead keep track of incoming (relocated) shards since we know
@@ -229,20 +229,20 @@ public final class ShardPath {
             BigInteger estShardSizeInBytes = BigInteger.valueOf(avgShardSizeInBytes).max(totFreeSpace.divide(BigInteger.valueOf(20)));
 
             // TODO - do we need something more extensible? Yet, this does the job for now...
-            final NodeEnvironment.NodePath[] paths = env.nodePaths();
+            final NodeEnvironment.DataPath[] paths = env.dataPaths();
 
             // If no better path is chosen, use the one with the most space by default
-            NodeEnvironment.NodePath bestPath = getPathWithMostFreeSpace(env);
+            NodeEnvironment.DataPath bestPath = getPathWithMostFreeSpace(env);
 
             if (paths.length != 1) {
-                Map<NodeEnvironment.NodePath, Long> pathToShardCount = env.shardCountPerPath(shardId.getIndex());
+                Map<NodeEnvironment.DataPath, Long> pathToShardCount = env.shardCountPerPath(shardId.getIndex());
 
                 // Compute how much space there is on each path
-                final Map<NodeEnvironment.NodePath, BigInteger> pathsToSpace = Maps.newMapWithExpectedSize(paths.length);
-                for (NodeEnvironment.NodePath nodePath : paths) {
-                    FileStore fileStore = nodePath.fileStore;
+                final Map<NodeEnvironment.DataPath, BigInteger> pathsToSpace = Maps.newMapWithExpectedSize(paths.length);
+                for (NodeEnvironment.DataPath nodeDataPath : paths) {
+                    FileStore fileStore = nodeDataPath.fileStore;
                     BigInteger usableBytes = BigInteger.valueOf(fileStore.getUsableSpace());
-                    pathsToSpace.put(nodePath, usableBytes);
+                    pathsToSpace.put(nodeDataPath, usableBytes);
                 }
 
                 bestPath = Arrays.stream(paths)
@@ -276,19 +276,19 @@ public final class ShardPath {
         return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId);
     }
 
-    static NodeEnvironment.NodePath getPathWithMostFreeSpace(NodeEnvironment env) throws IOException {
-        final NodeEnvironment.NodePath[] paths = env.nodePaths();
-        NodeEnvironment.NodePath bestPath = null;
+    static NodeEnvironment.DataPath getPathWithMostFreeSpace(NodeEnvironment env) throws IOException {
+        final NodeEnvironment.DataPath[] paths = env.dataPaths();
+        NodeEnvironment.DataPath bestPath = null;
         long maxUsableBytes = Long.MIN_VALUE;
-        for (NodeEnvironment.NodePath nodePath : paths) {
-            FileStore fileStore = nodePath.fileStore;
+        for (NodeEnvironment.DataPath dataPath : paths) {
+            FileStore fileStore = dataPath.fileStore;
             long usableBytes = fileStore.getUsableSpace(); // NB usable bytes doesn't account for reserved space (e.g. incoming recoveries)
             assert usableBytes >= 0 : "usable bytes must be >= 0, got: " + usableBytes;
 
             if (bestPath == null || usableBytes > maxUsableBytes) {
                 // This path has been determined to be "better" based on the usable bytes
                 maxUsableBytes = usableBytes;
-                bestPath = nodePath;
+                bestPath = dataPath;
             }
         }
         return bestPath;

+ 9 - 9
server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java

@@ -16,7 +16,7 @@ import org.elasticsearch.core.PathUtils;
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.env.NodeEnvironment.NodePath;
+import org.elasticsearch.env.NodeEnvironment.DataPath;
 
 import java.io.IOException;
 import java.nio.file.FileStore;
@@ -42,7 +42,7 @@ public class FsProbe {
         if (nodeEnv.hasNodeFile() == false) {
             return new FsInfo(System.currentTimeMillis(), null, new FsInfo.Path[0]);
         }
-        NodePath[] dataLocations = nodeEnv.nodePaths();
+        DataPath[] dataLocations = nodeEnv.dataPaths();
         FsInfo.Path[] paths = new FsInfo.Path[dataLocations.length];
         for (int i = 0; i < dataLocations.length; i++) {
             paths[i] = getFSInfo(dataLocations[i]);
@@ -131,18 +131,18 @@ public class FsProbe {
         return bytes;
     }
 
-    public static FsInfo.Path getFSInfo(NodePath nodePath) throws IOException {
+    public static FsInfo.Path getFSInfo(DataPath dataPath) throws IOException {
         FsInfo.Path fsPath = new FsInfo.Path();
-        fsPath.path = nodePath.path.toString();
+        fsPath.path = dataPath.path.toString();
 
         // NOTE: we use already cached (on node startup) FileStore and spins
         // since recomputing these once per second (default) could be costly,
         // and they should not change:
-        fsPath.total = getTotal(nodePath.fileStore);
-        fsPath.free = adjustForHugeFilesystems(nodePath.fileStore.getUnallocatedSpace());
-        fsPath.available = adjustForHugeFilesystems(nodePath.fileStore.getUsableSpace());
-        fsPath.type = nodePath.fileStore.type();
-        fsPath.mount = nodePath.fileStore.toString();
+        fsPath.total = getTotal(dataPath.fileStore);
+        fsPath.free = adjustForHugeFilesystems(dataPath.fileStore.getUnallocatedSpace());
+        fsPath.available = adjustForHugeFilesystems(dataPath.fileStore.getUsableSpace());
+        fsPath.type = dataPath.fileStore.type();
+        fsPath.mount = dataPath.fileStore.toString();
         return fsPath;
     }
 

+ 15 - 15
server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java

@@ -248,8 +248,8 @@ public class NodeEnvironmentTests extends ESTestCase {
             SetOnce<Path[]> listener = new SetOnce<>();
             env.deleteShardDirectorySafe(new ShardId(index, 1), idxSettings, listener::set);
             Path[] deletedPaths = listener.get();
-            for (int i = 0; i < env.nodePaths().length; i++) {
-                assertThat(deletedPaths[i], equalTo(env.nodePaths()[i].resolve(index).resolve("1")));
+            for (int i = 0; i < env.dataPaths().length; i++) {
+                assertThat(deletedPaths[i], equalTo(env.dataPaths()[i].resolve(index).resolve("1")));
             }
         }
 
@@ -434,14 +434,14 @@ public class NodeEnvironmentTests extends ESTestCase {
         String[] paths = tmpPaths();
         // simulate some previous left over temp files
         for (String path : randomSubsetOf(randomIntBetween(1, paths.length), paths)) {
-            final Path nodePath = PathUtils.get(path);
-            Files.createDirectories(nodePath);
-            Files.createFile(nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME));
+            final Path dataPath = PathUtils.get(path);
+            Files.createDirectories(dataPath);
+            Files.createFile(dataPath.resolve(NodeEnvironment.TEMP_FILE_NAME));
             if (randomBoolean()) {
-                Files.createFile(nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".tmp"));
+                Files.createFile(dataPath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".tmp"));
             }
             if (randomBoolean()) {
-                Files.createFile(nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".final"));
+                Files.createFile(dataPath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".final"));
             }
         }
         NodeEnvironment env = newNodeEnvironment(paths, Settings.EMPTY);
@@ -449,12 +449,12 @@ public class NodeEnvironmentTests extends ESTestCase {
 
         // check we clean up
         for (String path : paths) {
-            final Path nodePath = PathUtils.get(path);
-            final Path tempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME);
+            final Path dataPath = PathUtils.get(path);
+            final Path tempFile = dataPath.resolve(NodeEnvironment.TEMP_FILE_NAME);
             assertFalse(tempFile + " should have been cleaned", Files.exists(tempFile));
-            final Path srcTempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".src");
+            final Path srcTempFile = dataPath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".src");
             assertFalse(srcTempFile + " should have been cleaned", Files.exists(srcTempFile));
-            final Path targetTempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".target");
+            final Path targetTempFile = dataPath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".target");
             assertFalse(targetTempFile + " should have been cleaned", Files.exists(targetTempFile));
         }
     }
@@ -569,7 +569,7 @@ public class NodeEnvironmentTests extends ESTestCase {
             IllegalStateException ex = expectThrows(
                 IllegalStateException.class,
                 "Must fail the check on index that's too old",
-                () -> checkForIndexCompatibility(logger, env.nodePaths())
+                () -> checkForIndexCompatibility(logger, env.dataPaths())
             );
 
             assertThat(ex.getMessage(), containsString("[" + oldIndexVersion + "] exist"));
@@ -577,11 +577,11 @@ public class NodeEnvironmentTests extends ESTestCase {
 
             // This should work
             overrideOldestIndexVersion(Version.CURRENT.minimumIndexCompatibilityVersion(), env.nodeDataPaths());
-            checkForIndexCompatibility(logger, env.nodePaths());
+            checkForIndexCompatibility(logger, env.dataPaths());
 
             // Trying to boot with newer version should pass this check
             overrideOldestIndexVersion(NodeMetadataTests.tooNewVersion(), env.nodeDataPaths());
-            checkForIndexCompatibility(logger, env.nodePaths());
+            checkForIndexCompatibility(logger, env.dataPaths());
 
             // Simulate empty old index version, attempting to upgrade before 7.17
             removeOldestIndexVersion(oldIndexVersion, env.nodeDataPaths());
@@ -589,7 +589,7 @@ public class NodeEnvironmentTests extends ESTestCase {
             ex = expectThrows(
                 IllegalStateException.class,
                 "Must fail the check on index that's too old",
-                () -> checkForIndexCompatibility(logger, env.nodePaths())
+                () -> checkForIndexCompatibility(logger, env.dataPaths())
             );
 
             assertThat(ex.getMessage(), startsWith("cannot upgrade a node from version [" + oldIndexVersion + "] directly"));

+ 4 - 4
server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java

@@ -52,21 +52,21 @@ public class NodeRepurposeCommandTests extends ESTestCase {
     private static final Index INDEX = new Index("testIndex", "testUUID");
     private Settings dataMasterSettings;
     private Environment environment;
-    private Path[] nodePaths;
+    private Path[] dataPaths;
     private Settings dataNoMasterSettings;
     private Settings noDataNoMasterSettings;
     private Settings noDataMasterSettings;
 
     @Before
-    public void createNodePaths() throws IOException {
+    public void createDataPaths() throws IOException {
         dataMasterSettings = buildEnvSettings(Settings.EMPTY);
         environment = TestEnvironment.newEnvironment(dataMasterSettings);
         try (NodeEnvironment nodeEnvironment = new NodeEnvironment(dataMasterSettings, environment)) {
-            nodePaths = nodeEnvironment.nodeDataPaths();
+            dataPaths = nodeEnvironment.nodeDataPaths();
             final String nodeId = randomAlphaOfLength(10);
             try (
                 PersistedClusterStateService.Writer writer = new PersistedClusterStateService(
-                    nodePaths,
+                    dataPaths,
                     nodeId,
                     xContentRegistry(),
                     new ClusterSettings(dataMasterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),

+ 20 - 20
server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java

@@ -33,21 +33,21 @@ import static org.hamcrest.Matchers.equalTo;
 public class OverrideNodeVersionCommandTests extends ESTestCase {
 
     private Environment environment;
-    private Path[] nodePaths;
+    private Path[] dataPaths;
     private String nodeId;
     private final OptionSet noOptions = new OptionParser().parse();
 
     @Before
-    public void createNodePaths() throws IOException {
+    public void createDataPaths() throws IOException {
         final Settings settings = buildEnvSettings(Settings.EMPTY);
         environment = TestEnvironment.newEnvironment(settings);
         try (NodeEnvironment nodeEnvironment = new NodeEnvironment(settings, environment)) {
-            nodePaths = nodeEnvironment.nodeDataPaths();
+            dataPaths = nodeEnvironment.nodeDataPaths();
             nodeId = nodeEnvironment.nodeId();
 
             try (
                 PersistedClusterStateService.Writer writer = new PersistedClusterStateService(
-                    nodePaths,
+                    dataPaths,
                     nodeId,
                     xContentRegistry(),
                     new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
@@ -73,7 +73,7 @@ public class OverrideNodeVersionCommandTests extends ESTestCase {
         assertTrue(
             Metadata.SETTING_READ_ONLY_SETTING.get(
                 new PersistedClusterStateService(
-                    nodePaths,
+                    dataPaths,
                     nodeId,
                     xContentRegistry(),
                     new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
@@ -88,7 +88,7 @@ public class OverrideNodeVersionCommandTests extends ESTestCase {
         final MockTerminal mockTerminal = MockTerminal.create();
         final ElasticsearchException elasticsearchException = expectThrows(
             ElasticsearchException.class,
-            () -> new OverrideNodeVersionCommand().processNodePaths(mockTerminal, new Path[] { emptyPath }, noOptions, environment)
+            () -> new OverrideNodeVersionCommand().processDataPaths(mockTerminal, new Path[] { emptyPath }, noOptions, environment)
         );
         assertThat(elasticsearchException.getMessage(), equalTo(OverrideNodeVersionCommand.NO_METADATA_MESSAGE));
         expectThrows(IllegalStateException.class, () -> mockTerminal.readText(""));
@@ -96,11 +96,11 @@ public class OverrideNodeVersionCommandTests extends ESTestCase {
 
     public void testFailsIfUnnecessary() throws IOException {
         final Version nodeVersion = Version.fromId(between(Version.CURRENT.minimumCompatibilityVersion().id, Version.CURRENT.id));
-        PersistedClusterStateService.overrideVersion(nodeVersion, nodePaths);
+        PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths);
         final MockTerminal mockTerminal = MockTerminal.create();
         final ElasticsearchException elasticsearchException = expectThrows(
             ElasticsearchException.class,
-            () -> new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, noOptions, environment)
+            () -> new OverrideNodeVersionCommand().processDataPaths(mockTerminal, dataPaths, noOptions, environment)
         );
         assertThat(
             elasticsearchException.getMessage(),
@@ -115,12 +115,12 @@ public class OverrideNodeVersionCommandTests extends ESTestCase {
 
     public void testWarnsIfTooOld() throws Exception {
         final Version nodeVersion = NodeMetadataTests.tooOldVersion();
-        PersistedClusterStateService.overrideVersion(nodeVersion, nodePaths);
+        PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths);
         final MockTerminal mockTerminal = MockTerminal.create();
         mockTerminal.addTextInput("n");
         final ElasticsearchException elasticsearchException = expectThrows(
             ElasticsearchException.class,
-            () -> new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, noOptions, environment)
+            () -> new OverrideNodeVersionCommand().processDataPaths(mockTerminal, dataPaths, noOptions, environment)
         );
         assertThat(elasticsearchException.getMessage(), equalTo("aborted by user"));
         assertThat(
@@ -135,18 +135,18 @@ public class OverrideNodeVersionCommandTests extends ESTestCase {
         );
         expectThrows(IllegalStateException.class, () -> mockTerminal.readText(""));
 
-        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(nodePaths);
+        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths);
         assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion));
     }
 
     public void testWarnsIfTooNew() throws Exception {
         final Version nodeVersion = NodeMetadataTests.tooNewVersion();
-        PersistedClusterStateService.overrideVersion(nodeVersion, nodePaths);
+        PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths);
         final MockTerminal mockTerminal = MockTerminal.create();
         mockTerminal.addTextInput(randomFrom("yy", "Yy", "n", "yes", "true", "N", "no"));
         final ElasticsearchException elasticsearchException = expectThrows(
             ElasticsearchException.class,
-            () -> new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, noOptions, environment)
+            () -> new OverrideNodeVersionCommand().processDataPaths(mockTerminal, dataPaths, noOptions, environment)
         );
         assertThat(elasticsearchException.getMessage(), equalTo("aborted by user"));
         assertThat(
@@ -160,16 +160,16 @@ public class OverrideNodeVersionCommandTests extends ESTestCase {
         );
         expectThrows(IllegalStateException.class, () -> mockTerminal.readText(""));
 
-        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(nodePaths);
+        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths);
         assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion));
     }
 
     public void testOverwritesIfTooOld() throws Exception {
         final Version nodeVersion = NodeMetadataTests.tooOldVersion();
-        PersistedClusterStateService.overrideVersion(nodeVersion, nodePaths);
+        PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths);
         final MockTerminal mockTerminal = MockTerminal.create();
         mockTerminal.addTextInput(randomFrom("y", "Y"));
-        new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, noOptions, environment);
+        new OverrideNodeVersionCommand().processDataPaths(mockTerminal, dataPaths, noOptions, environment);
         assertThat(
             mockTerminal.getOutput(),
             allOf(
@@ -183,16 +183,16 @@ public class OverrideNodeVersionCommandTests extends ESTestCase {
         );
         expectThrows(IllegalStateException.class, () -> mockTerminal.readText(""));
 
-        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(nodePaths);
+        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths);
         assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT));
     }
 
     public void testOverwritesIfTooNew() throws Exception {
         final Version nodeVersion = NodeMetadataTests.tooNewVersion();
-        PersistedClusterStateService.overrideVersion(nodeVersion, nodePaths);
+        PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths);
         final MockTerminal mockTerminal = MockTerminal.create();
         mockTerminal.addTextInput(randomFrom("y", "Y"));
-        new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, noOptions, environment);
+        new OverrideNodeVersionCommand().processDataPaths(mockTerminal, dataPaths, noOptions, environment);
         assertThat(
             mockTerminal.getOutput(),
             allOf(
@@ -205,7 +205,7 @@ public class OverrideNodeVersionCommandTests extends ESTestCase {
         );
         expectThrows(IllegalStateException.class, () -> mockTerminal.readText(""));
 
-        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(nodePaths);
+        final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths);
         assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT));
     }
 }

+ 2 - 2
server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java

@@ -1588,9 +1588,9 @@ public class IndexShardTests extends IndexShardTestCase {
             ShardRoutingState.INITIALIZING,
             RecoverySource.EmptyStoreRecoverySource.INSTANCE
         );
-        final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
+        final NodeEnvironment.DataPath dataPath = new NodeEnvironment.DataPath(createTempDir());
 
-        ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
+        ShardPath shardPath = new ShardPath(false, dataPath.resolve(shardId), dataPath.resolve(shardId), shardId);
         Settings settings = Settings.builder()
             .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
             .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)

+ 15 - 15
server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java

@@ -14,7 +14,7 @@ import org.elasticsearch.core.PathUtils;
 import org.elasticsearch.core.PathUtilsForTesting;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.env.NodeEnvironment.NodePath;
+import org.elasticsearch.env.NodeEnvironment.DataPath;
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.test.ESTestCase;
@@ -169,11 +169,11 @@ public class NewPathForShardTests extends ESTestCase {
         NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
 
         // Make sure all our mocking above actually worked:
-        NodePath[] nodePaths = nodeEnv.nodePaths();
-        assertEquals(2, nodePaths.length);
+        DataPath[] dataPaths = nodeEnv.dataPaths();
+        assertEquals(2, dataPaths.length);
 
-        assertEquals("mocka", nodePaths[0].fileStore.name());
-        assertEquals("mockb", nodePaths[1].fileStore.name());
+        assertEquals("mocka", dataPaths[0].fileStore.name());
+        assertEquals("mockb", dataPaths[1].fileStore.name());
 
         // Path a has lots of free space, but b has little, so new shard should go to a:
         aFileStore.usableSpace = 100000;
@@ -224,11 +224,11 @@ public class NewPathForShardTests extends ESTestCase {
         NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
 
         // Make sure all our mocking above actually worked:
-        NodePath[] nodePaths = nodeEnv.nodePaths();
-        assertEquals(2, nodePaths.length);
+        DataPath[] dataPaths = nodeEnv.dataPaths();
+        assertEquals(2, dataPaths.length);
 
-        assertEquals("mocka", nodePaths[0].fileStore.name());
-        assertEquals("mockb", nodePaths[1].fileStore.name());
+        assertEquals("mocka", dataPaths[0].fileStore.name());
+        assertEquals("mockb", dataPaths[1].fileStore.name());
 
         // Path a has lots of free space, but b has little, so new shard should go to a:
         aFileStore.usableSpace = 100000;
@@ -285,12 +285,12 @@ public class NewPathForShardTests extends ESTestCase {
         aFileStore.usableSpace = 100000;
         bFileStore.usableSpace = 1000;
 
-        assertThat(ShardPath.getPathWithMostFreeSpace(nodeEnv), equalTo(nodeEnv.nodePaths()[0]));
+        assertThat(ShardPath.getPathWithMostFreeSpace(nodeEnv), equalTo(nodeEnv.dataPaths()[0]));
 
         aFileStore.usableSpace = 10000;
         bFileStore.usableSpace = 20000;
 
-        assertThat(ShardPath.getPathWithMostFreeSpace(nodeEnv), equalTo(nodeEnv.nodePaths()[1]));
+        assertThat(ShardPath.getPathWithMostFreeSpace(nodeEnv), equalTo(nodeEnv.dataPaths()[1]));
 
         nodeEnv.close();
     }
@@ -308,11 +308,11 @@ public class NewPathForShardTests extends ESTestCase {
         NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
 
         // Make sure all our mocking above actually worked:
-        NodePath[] nodePaths = nodeEnv.nodePaths();
-        assertEquals(2, nodePaths.length);
+        DataPath[] dataPaths = nodeEnv.dataPaths();
+        assertEquals(2, dataPaths.length);
 
-        assertEquals("mocka", nodePaths[0].fileStore.name());
-        assertEquals("mockb", nodePaths[1].fileStore.name());
+        assertEquals("mocka", dataPaths[0].fileStore.name());
+        assertEquals("mockb", dataPaths[1].fileStore.name());
 
         // Path a has lots of free space, but b has little, so new shard should go to a:
         aFileStore.usableSpace = 100000;

+ 3 - 3
server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java

@@ -130,8 +130,8 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase {
             .put(IndexMetadata.SETTING_INDEX_UUID, shardId.getIndex().getUUID())
             .build();
 
-        final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(tempDir);
-        shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
+        final NodeEnvironment.DataPath dataPath = new NodeEnvironment.DataPath(tempDir);
+        shardPath = new ShardPath(false, dataPath.resolve(shardId), dataPath.resolve(shardId), shardId);
 
         // Adding rollover info to IndexMetadata to check that NamedXContentRegistry is properly configured
         Condition<?> rolloverCondition = randomFrom(
@@ -152,7 +152,7 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase {
         clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(Metadata.builder().put(indexMetadata, false).build()).build();
 
         try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(logger, environment, Files::exists)) {
-            final Path[] paths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
+            final Path[] paths = Arrays.stream(lock.getDataPaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
             try (
                 PersistedClusterStateService.Writer writer = new PersistedClusterStateService(
                     paths,

+ 3 - 3
server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java

@@ -186,7 +186,7 @@ public class ShardPathTests extends ESTestCase {
 
     public void testShardPathSelection() throws IOException {
         try (NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) {
-            NodeEnvironment.NodePath[] paths = env.nodePaths();
+            NodeEnvironment.DataPath[] paths = env.dataPaths();
             assertThat(List.of(paths), hasItem(ShardPath.getPathWithMostFreeSpace(env)));
             ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0);
 
@@ -197,8 +197,8 @@ public class ShardPathTests extends ESTestCase {
             assertNotNull(shardPath.getDataPath());
 
             List<Path> indexPaths = new ArrayList<>();
-            for (NodeEnvironment.NodePath nodePath : paths) {
-                indexPaths.add(nodePath.indicesPath.resolve("0xDEADBEEF").resolve("0"));
+            for (NodeEnvironment.DataPath dataPath : paths) {
+                indexPaths.add(dataPath.indicesPath.resolve("0xDEADBEEF").resolve("0"));
             }
 
             assertThat(indexPaths, hasItem(shardPath.getDataPath()));

+ 4 - 4
server/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java

@@ -11,7 +11,7 @@ package org.elasticsearch.monitor.fs;
 import org.apache.lucene.util.Constants;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.env.NodeEnvironment.NodePath;
+import org.elasticsearch.env.NodeEnvironment.DataPath;
 import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
@@ -241,16 +241,16 @@ public class FsProbeTests extends ESTestCase {
     }
 
     public void testAdjustForHugeFilesystems() throws Exception {
-        NodePath np = new FakeNodePath(createTempDir());
+        DataPath np = new FakeDataPath(createTempDir());
         assertThat(FsProbe.getFSInfo(np).total, greaterThanOrEqualTo(0L));
         assertThat(FsProbe.getFSInfo(np).free, greaterThanOrEqualTo(0L));
         assertThat(FsProbe.getFSInfo(np).available, greaterThanOrEqualTo(0L));
     }
 
-    static class FakeNodePath extends NodeEnvironment.NodePath {
+    static class FakeDataPath extends DataPath {
         public final FileStore fileStore;
 
-        FakeNodePath(Path path) throws IOException {
+        FakeDataPath(Path path) throws IOException {
             super(path);
             this.fileStore = new HugeFileStore();
         }

+ 2 - 2
test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java

@@ -374,8 +374,8 @@ public abstract class IndexShardTestCase extends ESTestCase {
     ) throws IOException {
         // add node id as name to settings for proper logging
         final ShardId shardId = routing.shardId();
-        final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
-        ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
+        final NodeEnvironment.DataPath dataPath = new NodeEnvironment.DataPath(createTempDir());
+        ShardPath shardPath = new ShardPath(false, dataPath.resolve(shardId), dataPath.resolve(shardId), shardId);
         return newShard(
             routing,
             shardPath,

+ 12 - 12
test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java

@@ -295,15 +295,15 @@ public class InternalTestClusterTests extends ESTestCase {
         try {
             cluster.beforeTest(random());
             final int originalMasterCount = cluster.numMasterNodes();
-            final Map<String, Path[]> shardNodePaths = new HashMap<>();
+            final Map<String, Path[]> shardDataPaths = new HashMap<>();
             for (String name : cluster.getNodeNames()) {
-                shardNodePaths.put(name, getNodePaths(cluster, name));
+                shardDataPaths.put(name, getDataPaths(cluster, name));
             }
             String poorNode = randomValueOtherThanMany(
                 n -> originalMasterCount == 1 && n.equals(cluster.getMasterName()),
                 () -> randomFrom(cluster.getNodeNames())
             );
-            Path dataPath = getNodePaths(cluster, poorNode)[0];
+            Path dataPath = getDataPaths(cluster, poorNode)[0];
             final Settings poorNodeDataPathSettings = cluster.dataPathSettings(poorNode);
             final Path testMarker = dataPath.resolve("testMarker");
             Files.createDirectories(testMarker);
@@ -311,40 +311,40 @@ public class InternalTestClusterTests extends ESTestCase {
             assertFileExists(testMarker); // stopping a node half way shouldn't clean data
 
             final String stableNode = randomFrom(cluster.getNodeNames());
-            final Path stableDataPath = getNodePaths(cluster, stableNode)[0];
+            final Path stableDataPath = getDataPaths(cluster, stableNode)[0];
             final Path stableTestMarker = stableDataPath.resolve("stableTestMarker");
             assertThat(stableDataPath, not(dataPath));
             Files.createDirectories(stableTestMarker);
 
             final String newNode1 = cluster.startNode();
-            assertThat(getNodePaths(cluster, newNode1)[0], not(dataPath));
+            assertThat(getDataPaths(cluster, newNode1)[0], not(dataPath));
             assertFileExists(testMarker); // starting a node should re-use data folders and not clean it
             final String newNode2 = cluster.startNode();
-            final Path newDataPath = getNodePaths(cluster, newNode2)[0];
+            final Path newDataPath = getDataPaths(cluster, newNode2)[0];
             final Path newTestMarker = newDataPath.resolve("newTestMarker");
             assertThat(newDataPath, not(dataPath));
             Files.createDirectories(newTestMarker);
             final String newNode3 = cluster.startNode(poorNodeDataPathSettings);
-            assertThat(getNodePaths(cluster, newNode3)[0], equalTo(dataPath));
+            assertThat(getDataPaths(cluster, newNode3)[0], equalTo(dataPath));
             cluster.beforeTest(random());
             assertFileNotExists(newTestMarker); // the cluster should be reset for a new test, cleaning up the extra path we made
             assertFileNotExists(testMarker); // a new unknown node used this path, it should be cleaned
             assertFileExists(stableTestMarker); // but leaving the structure of existing, reused nodes
             for (String name : cluster.getNodeNames()) {
-                assertThat("data paths for " + name + " changed", getNodePaths(cluster, name), equalTo(shardNodePaths.get(name)));
+                assertThat("data paths for " + name + " changed", getDataPaths(cluster, name), equalTo(shardDataPaths.get(name)));
             }
 
             cluster.beforeTest(random());
             assertFileExists(stableTestMarker); // but leaving the structure of existing, reused nodes
             for (String name : cluster.getNodeNames()) {
-                assertThat("data paths for " + name + " changed", getNodePaths(cluster, name), equalTo(shardNodePaths.get(name)));
+                assertThat("data paths for " + name + " changed", getDataPaths(cluster, name), equalTo(shardDataPaths.get(name)));
             }
         } finally {
             cluster.close();
         }
     }
 
-    private Path[] getNodePaths(InternalTestCluster cluster, String name) {
+    private Path[] getDataPaths(InternalTestCluster cluster, String name) {
         final NodeEnvironment nodeEnvironment = cluster.getInstance(NodeEnvironment.class, name);
         if (nodeEnvironment.hasNodeFile()) {
             return nodeEnvironment.nodeDataPaths();
@@ -416,7 +416,7 @@ public class InternalTestClusterTests extends ESTestCase {
                     throw new IllegalStateException("get your story straight");
                 }
                 Set<String> rolePaths = pathsPerRole.computeIfAbsent(role, k -> new HashSet<>());
-                for (Path path : getNodePaths(cluster, node)) {
+                for (Path path : getDataPaths(cluster, node)) {
                     assertTrue(rolePaths.add(path.toString()));
                 }
             }
@@ -426,7 +426,7 @@ public class InternalTestClusterTests extends ESTestCase {
             Map<DiscoveryNodeRole, Set<String>> result = new HashMap<>();
             for (String name : cluster.getNodeNames()) {
                 DiscoveryNode node = cluster.getInstance(ClusterService.class, name).localNode();
-                List<String> paths = Arrays.stream(getNodePaths(cluster, name)).map(Path::toString).collect(Collectors.toList());
+                List<String> paths = Arrays.stream(getDataPaths(cluster, name)).map(Path::toString).collect(Collectors.toList());
                 if (node.isMasterNode()) {
                     result.computeIfAbsent(DiscoveryNodeRole.MASTER_ROLE, k -> new HashSet<>()).addAll(paths);
                 } else if (node.canContainData()) {

+ 1 - 1
x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityActionIT.java

@@ -37,7 +37,7 @@ public class TransportGetAutoscalingCapacityActionIT extends AutoscalingIntegTes
     public void testCurrentCapacity() throws Exception {
         assertThat(capacity().results().keySet(), Matchers.empty());
         long memory = OsProbe.getInstance().getTotalPhysicalMemorySize();
-        long storage = internalCluster().getInstance(NodeEnvironment.class).nodePaths()[0].fileStore.getTotalSpace();
+        long storage = internalCluster().getInstance(NodeEnvironment.class).dataPaths()[0].fileStore.getTotalSpace();
         assertThat(memory, greaterThan(0L));
         assertThat(storage, greaterThan(0L));
         putAutoscalingPolicy("test");

+ 3 - 3
x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/test/IdentityProviderIntegTestCase.java

@@ -98,7 +98,7 @@ public abstract class IdentityProviderIntegTestCase extends ESIntegTestCase {
 
     @Override
     protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
-        final Path home = nodePath(PARENT_DIR, nodeOrdinal);
+        final Path home = dataPath(PARENT_DIR, nodeOrdinal);
         final Path xpackConf = home.resolve("config");
         try {
             Files.createDirectories(xpackConf);
@@ -163,7 +163,7 @@ public abstract class IdentityProviderIntegTestCase extends ESIntegTestCase {
 
     @Override
     protected Path nodeConfigPath(int nodeOrdinal) {
-        return nodePath(PARENT_DIR, nodeOrdinal).resolve("config");
+        return dataPath(PARENT_DIR, nodeOrdinal).resolve("config");
     }
 
     private String configRoles() {
@@ -223,7 +223,7 @@ public abstract class IdentityProviderIntegTestCase extends ESIntegTestCase {
             + "\n";
     }
 
-    Path nodePath(Path confDir, final int nodeOrdinal) {
+    Path dataPath(Path confDir, final int nodeOrdinal) {
         return confDir.resolve(getCurrentClusterScope() + "-" + nodeOrdinal);
     }
 

+ 32 - 32
x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/PersistentCache.java

@@ -125,7 +125,7 @@ public class PersistentCache implements Closeable {
         } else {
             final Path path = cacheFile.getFile().toAbsolutePath();
             return writers.stream()
-                .filter(writer -> path.startsWith(writer.nodePath().path))
+                .filter(writer -> path.startsWith(writer.dataPath().path))
                 .findFirst()
                 .orElseThrow(() -> new PersistentCacheIndexNotFoundException(nodeEnvironment, cacheFile));
         }
@@ -149,7 +149,7 @@ public class PersistentCache implements Closeable {
     long getCacheSize(ShardId shardId, SnapshotId snapshotId, Predicate<Path> predicate) {
         long aggregateSize = 0L;
         for (CacheIndexWriter writer : writers) {
-            final Path snapshotCacheDir = resolveSnapshotCache(writer.nodePath().resolve(shardId)).resolve(snapshotId.getUUID());
+            final Path snapshotCacheDir = resolveSnapshotCache(writer.dataPath().resolve(shardId)).resolve(snapshotId.getUUID());
             if (Files.exists(snapshotCacheDir) == false) {
                 continue; // searchable snapshot shard is not present on this node path, no need to run a query
             }
@@ -217,12 +217,12 @@ public class PersistentCache implements Closeable {
         if (started.compareAndSet(false, true)) {
             try {
                 for (CacheIndexWriter writer : writers) {
-                    final NodeEnvironment.NodePath nodePath = writer.nodePath();
-                    logger.debug("loading persistent cache on data path [{}]", nodePath);
+                    final NodeEnvironment.DataPath dataPath = writer.dataPath();
+                    logger.debug("loading persistent cache on data path [{}]", dataPath);
 
-                    for (String indexUUID : nodeEnvironment.availableIndexFoldersForPath(nodePath)) {
+                    for (String indexUUID : nodeEnvironment.availableIndexFoldersForPath(dataPath)) {
                         for (ShardId shardId : nodeEnvironment.findAllShardIds(new Index("_unknown_", indexUUID))) {
-                            final Path shardDataPath = writer.nodePath().resolve(shardId);
+                            final Path shardDataPath = writer.dataPath().resolve(shardId);
                             final Path shardCachePath = getShardCachePath(new ShardPath(false, shardDataPath, shardDataPath, shardId));
 
                             if (Files.isDirectory(shardCachePath)) {
@@ -337,9 +337,9 @@ public class PersistentCache implements Closeable {
         final List<CacheIndexWriter> writers = new ArrayList<>();
         boolean success = false;
         try {
-            final NodeEnvironment.NodePath[] nodePaths = nodeEnvironment.nodePaths();
-            for (NodeEnvironment.NodePath nodePath : nodePaths) {
-                writers.add(createCacheIndexWriter(nodePath));
+            final NodeEnvironment.DataPath[] dataPaths = nodeEnvironment.dataPaths();
+            for (NodeEnvironment.DataPath dataPath : dataPaths) {
+                writers.add(createCacheIndexWriter(dataPath));
             }
             success = true;
         } catch (IOException e) {
@@ -355,15 +355,15 @@ public class PersistentCache implements Closeable {
     /**
      * Creates a new {@link CacheIndexWriter} for the specified data path. There is a single instance per data path.
      *
-     * @param nodePath the data path
+     * @param dataPath the data path
      * @return a new {@link CacheIndexWriter} instance
      * @throws IOException if something went wrong
      */
-    static CacheIndexWriter createCacheIndexWriter(NodeEnvironment.NodePath nodePath) throws IOException {
+    static CacheIndexWriter createCacheIndexWriter(NodeEnvironment.DataPath dataPath) throws IOException {
         final List<Closeable> closeables = new ArrayList<>();
         boolean success = false;
         try {
-            Path directoryPath = createCacheIndexFolder(nodePath);
+            Path directoryPath = createCacheIndexFolder(dataPath);
             final Directory directory = FSDirectory.open(directoryPath);
             closeables.add(directory);
 
@@ -377,7 +377,7 @@ public class PersistentCache implements Closeable {
             final IndexWriter indexWriter = new IndexWriter(directory, config);
             closeables.add(indexWriter);
 
-            final CacheIndexWriter cacheIndexWriter = new CacheIndexWriter(nodePath, directory, indexWriter);
+            final CacheIndexWriter cacheIndexWriter = new CacheIndexWriter(dataPath, directory, indexWriter);
             success = true;
             return cacheIndexWriter;
         } finally {
@@ -396,8 +396,8 @@ public class PersistentCache implements Closeable {
     static Map<String, Document> loadDocuments(NodeEnvironment nodeEnvironment) {
         final Map<String, Document> documents = new HashMap<>();
         try {
-            for (NodeEnvironment.NodePath nodePath : nodeEnvironment.nodePaths()) {
-                final Path directoryPath = resolveCacheIndexFolder(nodePath);
+            for (NodeEnvironment.DataPath dataPath : nodeEnvironment.dataPaths()) {
+                final Path directoryPath = resolveCacheIndexFolder(dataPath);
                 if (Files.exists(directoryPath)) {
                     documents.putAll(loadDocuments(directoryPath));
                 }
@@ -450,10 +450,10 @@ public class PersistentCache implements Closeable {
             throw new IllegalStateException("Cannot clean searchable snapshot caches: node is a data node");
         }
         try {
-            for (NodeEnvironment.NodePath nodePath : nodeEnvironment.nodePaths()) {
-                for (String indexUUID : nodeEnvironment.availableIndexFoldersForPath(nodePath)) {
+            for (NodeEnvironment.DataPath dataPath : nodeEnvironment.dataPaths()) {
+                for (String indexUUID : nodeEnvironment.availableIndexFoldersForPath(dataPath)) {
                     for (ShardId shardId : nodeEnvironment.findAllShardIds(new Index("_unknown_", indexUUID))) {
-                        final Path shardDataPath = nodePath.resolve(shardId);
+                        final Path shardDataPath = dataPath.resolve(shardId);
                         final ShardPath shardPath = new ShardPath(false, shardDataPath, shardDataPath, shardId);
                         final Path cacheDir = getShardCachePath(shardPath);
                         if (Files.isDirectory(cacheDir)) {
@@ -462,7 +462,7 @@ public class PersistentCache implements Closeable {
                         }
                     }
                 }
-                final Path cacheIndexDir = resolveCacheIndexFolder(nodePath);
+                final Path cacheIndexDir = resolveCacheIndexFolder(dataPath);
                 if (Files.isDirectory(cacheIndexDir)) {
                     logger.debug("deleting searchable snapshot lucene directory [{}]", cacheIndexDir);
                     IOUtils.rm(cacheIndexDir);
@@ -479,22 +479,22 @@ public class PersistentCache implements Closeable {
      */
     static class CacheIndexWriter implements Closeable {
 
-        private final NodeEnvironment.NodePath nodePath;
+        private final NodeEnvironment.DataPath dataPath;
         private final IndexWriter indexWriter;
         private final Directory directory;
 
-        private CacheIndexWriter(NodeEnvironment.NodePath nodePath, Directory directory, IndexWriter indexWriter) {
-            this.nodePath = nodePath;
+        private CacheIndexWriter(NodeEnvironment.DataPath dataPath, Directory directory, IndexWriter indexWriter) {
+            this.dataPath = dataPath;
             this.directory = directory;
             this.indexWriter = indexWriter;
         }
 
-        NodeEnvironment.NodePath nodePath() {
-            return nodePath;
+        NodeEnvironment.DataPath dataPath() {
+            return dataPath;
         }
 
         void updateCacheFile(CacheFile cacheFile, SortedSet<ByteRange> cacheRanges) throws IOException {
-            updateCacheFile(buildId(cacheFile), buildDocument(nodePath, cacheFile, cacheRanges));
+            updateCacheFile(buildId(cacheFile), buildDocument(dataPath, cacheFile, cacheRanges));
         }
 
         void updateCacheFile(String cacheFileId, Document cacheFileDocument) throws IOException {
@@ -532,7 +532,7 @@ public class PersistentCache implements Closeable {
 
         @Override
         public String toString() {
-            return "[persistent cache index][" + nodePath + ']';
+            return "[persistent cache index][" + dataPath + ']';
         }
     }
 
@@ -559,11 +559,11 @@ public class PersistentCache implements Closeable {
         return new Term(CACHE_ID_FIELD, cacheFileUuid);
     }
 
-    private static Document buildDocument(NodeEnvironment.NodePath nodePath, CacheFile cacheFile, SortedSet<ByteRange> cacheRanges)
+    private static Document buildDocument(NodeEnvironment.DataPath dataPath, CacheFile cacheFile, SortedSet<ByteRange> cacheRanges)
         throws IOException {
         final Document document = new Document();
         document.add(new StringField(CACHE_ID_FIELD, buildId(cacheFile), Field.Store.YES));
-        document.add(new StringField(CACHE_PATH_FIELD, nodePath.indicesPath.relativize(cacheFile.getFile()).toString(), Field.Store.YES));
+        document.add(new StringField(CACHE_PATH_FIELD, dataPath.indicesPath.relativize(cacheFile.getFile()).toString(), Field.Store.YES));
 
         try (BytesStreamOutput output = new BytesStreamOutput()) {
             output.writeVInt(cacheRanges.size());
@@ -636,8 +636,8 @@ public class PersistentCache implements Closeable {
         return unmodifiableSortedSet(cacheRanges);
     }
 
-    static Path resolveCacheIndexFolder(NodeEnvironment.NodePath nodePath) {
-        return resolveCacheIndexFolder(nodePath.path);
+    static Path resolveCacheIndexFolder(NodeEnvironment.DataPath dataPath) {
+        return resolveCacheIndexFolder(dataPath.path);
     }
 
     static Path resolveCacheIndexFolder(Path dataPath) {
@@ -647,9 +647,9 @@ public class PersistentCache implements Closeable {
     /**
      * Creates a directory for the snapshot cache Lucene index.
      */
-    private static Path createCacheIndexFolder(NodeEnvironment.NodePath nodePath) throws IOException {
+    private static Path createCacheIndexFolder(NodeEnvironment.DataPath dataPath) throws IOException {
         // "snapshot_cache" directory at the root of the specified data path
-        final Path snapshotCacheRootDir = resolveCacheIndexFolder(nodePath);
+        final Path snapshotCacheRootDir = resolveCacheIndexFolder(dataPath);
         if (Files.exists(snapshotCacheRootDir) == false) {
             logger.debug("creating new persistent cache index directory [{}]", snapshotCacheRootDir);
             Files.createDirectories(snapshotCacheRootDir);

+ 4 - 4
x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/PersistentCacheTests.java

@@ -64,7 +64,7 @@ import static org.hamcrest.Matchers.sameInstance;
 public class PersistentCacheTests extends AbstractSearchableSnapshotsTestCase {
 
     public void testCacheIndexWriter() throws Exception {
-        final NodeEnvironment.NodePath nodePath = randomFrom(nodeEnvironment.nodePaths());
+        final NodeEnvironment.DataPath dataPath = randomFrom(nodeEnvironment.dataPaths());
 
         int docId = 0;
         final Map<String, Integer> liveDocs = new HashMap<>();
@@ -72,15 +72,15 @@ public class PersistentCacheTests extends AbstractSearchableSnapshotsTestCase {
 
         for (int iter = 0; iter < 20; iter++) {
 
-            final Path snapshotCacheIndexDir = resolveCacheIndexFolder(nodePath);
+            final Path snapshotCacheIndexDir = resolveCacheIndexFolder(dataPath);
             assertThat(Files.exists(snapshotCacheIndexDir), equalTo(iter > 0));
 
             // load existing documents from persistent cache index before each iteration
             final Map<String, Document> documents = PersistentCache.loadDocuments(nodeEnvironment);
             assertThat(documents.size(), equalTo(liveDocs.size()));
 
-            try (PersistentCache.CacheIndexWriter writer = createCacheIndexWriter(nodePath)) {
-                assertThat(writer.nodePath(), sameInstance(nodePath));
+            try (PersistentCache.CacheIndexWriter writer = createCacheIndexWriter(dataPath)) {
+                assertThat(writer.dataPath(), sameInstance(dataPath));
 
                 // verify that existing documents are loaded
                 for (Map.Entry<String, Integer> liveDoc : liveDocs.entrySet()) {

+ 1 - 1
x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java

@@ -135,7 +135,7 @@ public abstract class SecuritySingleNodeTestCase extends ESSingleNodeTestCase {
         builder.put(customSettings, false); // handle secure settings separately
         builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial");
         builder.put("transport.type", "security4");
-        builder.put("path.home", customSecuritySettingsSource.nodePath(0));
+        builder.put("path.home", customSecuritySettingsSource.homePath(0));
         Settings.Builder customBuilder = Settings.builder().put(customSettings);
         if (customBuilder.getSecureSettings() != null) {
             SecuritySettingsSource.addSecureSettings(

+ 3 - 3
x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java

@@ -133,13 +133,13 @@ public class SecuritySettingsSource extends NodeConfigurationSource {
         }
     }
 
-    Path nodePath(final int nodeOrdinal) {
+    Path homePath(final int nodeOrdinal) {
         return parentFolder.resolve(subfolderPrefix + "-" + nodeOrdinal);
     }
 
     @Override
     public Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
-        final Path home = nodePath(nodeOrdinal);
+        final Path home = homePath(nodeOrdinal);
         final Path xpackConf = home.resolve("config");
         try {
             Files.createDirectories(xpackConf);
@@ -175,7 +175,7 @@ public class SecuritySettingsSource extends NodeConfigurationSource {
 
     @Override
     public Path nodeConfigPath(int nodeOrdinal) {
-        return nodePath(nodeOrdinal).resolve("config");
+        return homePath(nodeOrdinal).resolve("config");
     }
 
     @Override