
Remove "nodes/0" folder prefix from data path (#42489)

With the removal of node.max_local_storage_nodes, there is no longer any need to keep the data in
subfolders indexed by a node ordinal. This commit makes ES 8.0 store data directly in
$DATA_DIR instead of $DATA_DIR/nodes/$nodeOrdinal.

Upon startup, Elasticsearch will check to see if there is data in the old location, and automatically
move it to the new location. This automatic migration only works if $nodeOrdinal is 0, i.e., multiple
node instances have not previously run on the same data path, which would have required
node.max_local_storage_nodes to be explicitly configured.
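
In essence, the upgrade is a check-and-move over each configured data path. Below is a minimal, hypothetical sketch of that idea in plain java.nio.file terms, not the actual NodeEnvironment code; the real implementation in the diff further down also acquires node locks on both layouts, validates the folder contents, rewrites the node metadata first, and fsyncs the target directory.

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class LegacyLayoutMigrationSketch {

    /** Moves the contents of dataDir/nodes/0 up into dataDir, refusing if other ordinals exist. */
    static void migrate(Path dataDir) throws IOException {
        final Path nodesDir = dataDir.resolve("nodes");
        if (Files.isDirectory(nodesDir) == false) {
            return; // already on the new layout, nothing to do
        }
        try (DirectoryStream<Path> ordinals = Files.newDirectoryStream(nodesDir)) {
            for (Path ordinal : ordinals) {
                if (ordinal.getFileName().toString().equals("0") == false) {
                    throw new IllegalStateException("found data for another node ordinal at " + ordinal
                        + "; this data path cannot be upgraded automatically");
                }
            }
        }
        final Path legacyDir = nodesDir.resolve("0");
        try (DirectoryStream<Path> contents = Files.newDirectoryStream(legacyDir)) {
            for (Path source : contents) {
                // rename each child (indices, _state, ...) one level up; same filesystem, so this is a rename
                Files.move(source, dataDir.resolve(source.getFileName().toString()), StandardCopyOption.ATOMIC_MOVE);
            }
        }
        // (the actual implementation also removes the now-empty nodes folder afterwards)
    }
}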
Yannick Welsch, 6 years ago
commit 6e39433cd5

+ 5 - 5
docs/reference/commands/shard-tool.asciidoc

@@ -51,14 +51,14 @@ $ bin/elasticsearch-shard remove-corrupted-data --index twitter --shard-id 0
   Please make a complete backup of your index before using this tool.
 
 
-Opening Lucene index at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
+Opening Lucene index at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
 
- >> Lucene index is corrupted at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
+ >> Lucene index is corrupted at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
 
-Opening translog at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/
+Opening translog at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/
 
 
- >> Translog is clean at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/
+ >> Translog is clean at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/
 
 
   Corrupted Lucene index segments found - 32 documents will be lost.
@@ -93,7 +93,7 @@ POST /_cluster/reroute
 
 You must accept the possibility of data loss by changing parameter `accept_data_loss` to `true`.
 
-Deleted corrupt marker corrupted_FzTSBSuxT7i3Tls_TgwEag from /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
+Deleted corrupt marker corrupted_FzTSBSuxT7i3Tls_TgwEag from /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
 
 --------------------------------------------------
 

+ 22 - 0
docs/reference/migration/migrate_8_0/node.asciidoc

@@ -14,3 +14,25 @@
 The `node.max_local_storage_nodes` setting was deprecated in 7.x and
 has been removed in 8.0. Nodes should be run on separate data paths
 to ensure that each node is consistently assigned to the same data path.
+
+[float]
+==== Change of data folder layout
+
+Each node's data is now stored directly in the data directory set by the
+`path.data` setting, rather than in `${path.data}/nodes/0`, because the removal
+of the `node.max_local_storage_nodes` setting means that nodes may no longer
+share a data path. At startup, Elasticsearch will automatically migrate the data
+path to the new layout. This automatic migration will not proceed if the data
+path contains data for more than one node. You should move to a configuration in
+which each node has its own data path before upgrading.
+
+If you try to upgrade a configuration in which there is data for more than one
+node in a data path then the automatic migration will fail and Elasticsearch
+will refuse to start. To resolve this you will need to perform the migration
+manually. The data for the extra nodes are stored in folders named
+`${path.data}/nodes/1`, `${path.data}/nodes/2` and so on, and you should move
+each of these folders to an appropriate location and then configure the
+corresponding node to use this location for its data path. If your nodes each
+have more than one data path in their `path.data` settings then you should move
+all the corresponding subfolders in parallel. Each node uses the same subfolder
+(e.g. `nodes/2`) across all its data paths.
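
For a concrete picture of the manual migration described in the added documentation above, here is a hypothetical sketch assuming made-up locations: a previously shared data path /var/data/es that still contains nodes/0 and nodes/1, and a new dedicated data path /var/data/es-node-1 for the node that used ordinal 1. Renaming the whole nodes/1 folder to the new location gives that node the new layout directly.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class MoveExtraNodeFolder {
    public static void main(String[] args) throws IOException {
        // previously shared data path holding nodes/0, nodes/1, ... (hypothetical location)
        final Path oldDataPath = Paths.get("/var/data/es");
        // new, dedicated data path for the node that previously used ordinal 1 (hypothetical location)
        final Path newDataPath = Paths.get("/var/data/es-node-1");

        Files.createDirectories(newDataPath.getParent());
        // the target must not exist yet; keep source and target on the same filesystem so this is a rename
        Files.move(oldDataPath.resolve("nodes").resolve("1"), newDataPath);
    }
}

That node would then be configured with `path.data: /var/data/es-node-1`, while the node keeping the old path retains nodes/0 and is upgraded automatically at startup. With multiple data paths per node, the same rename must be applied to the matching nodes/1 subfolder of every path.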

+ 5 - 4
qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java

@@ -51,10 +51,11 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
             Settings build = Settings.builder()
                     .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
                     .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
-            IOException exception = expectThrows(IOException.class, () -> {
+            IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
                 new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
             });
-            assertTrue(exception.getMessage(), exception.getMessage().startsWith(path.toString()));
+            assertTrue(exception.getCause().getCause().getMessage(),
+                exception.getCause().getCause().getMessage().startsWith(path.toString()));
         }
     }
 
@@ -62,7 +63,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
         assumeTrue("posix filesystem", isPosix);
         final String[] tempPaths = tmpPaths();
         Path path = PathUtils.get(randomFrom(tempPaths));
-        Path fooIndex = path.resolve("nodes").resolve("0").resolve(NodeEnvironment.INDICES_FOLDER)
+        Path fooIndex = path.resolve(NodeEnvironment.INDICES_FOLDER)
             .resolve("foo");
         Files.createDirectories(fooIndex);
         try (PosixPermissionsResetter attr = new PosixPermissionsResetter(fooIndex)) {
@@ -82,7 +83,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
         assumeTrue("posix filesystem", isPosix);
         final String[] tempPaths = tmpPaths();
         Path path = PathUtils.get(randomFrom(tempPaths));
-        Path fooIndex = path.resolve("nodes").resolve("0").resolve(NodeEnvironment.INDICES_FOLDER)
+        Path fooIndex = path.resolve(NodeEnvironment.INDICES_FOLDER)
             .resolve("foo");
         Path fooShard = fooIndex.resolve("0");
         Path fooShardIndex = fooShard.resolve("index");

+ 150 - 20
server/src/main/java/org/elasticsearch/env/NodeEnvironment.java

@@ -35,6 +35,7 @@ import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.CheckedRunnable;
 import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.UUIDs;
@@ -45,6 +46,7 @@ import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.gateway.MetaDataStateFormat;
@@ -81,6 +83,7 @@ import java.util.Set;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Function;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -90,9 +93,9 @@ import java.util.stream.Stream;
  */
 public final class NodeEnvironment  implements Closeable {
     public static class NodePath {
-        /* ${data.paths}/nodes/0 */
+        /* ${data.paths} */
         public final Path path;
-        /* ${data.paths}/nodes/0/indices */
+        /* ${data.paths}/indices */
         public final Path indicesPath;
         /** Cached FileStore from path */
         public final FileStore fileStore;
@@ -115,7 +118,7 @@ public final class NodeEnvironment  implements Closeable {
 
         /**
          * Resolves the given shards directory against this NodePath
-         * ${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id}
+         * ${data.paths}/indices/{index.uuid}/{shard.id}
          */
         public Path resolve(ShardId shardId) {
             return resolve(shardId.getIndex()).resolve(Integer.toString(shardId.id()));
@@ -123,7 +126,7 @@ public final class NodeEnvironment  implements Closeable {
 
         /**
          * Resolves index directory against this NodePath
-         * ${data.paths}/nodes/{node.id}/indices/{index.uuid}
+         * ${data.paths}/indices/{index.uuid}
          */
         public Path resolve(Index index) {
             return resolve(index.getUUID());
@@ -170,7 +173,6 @@ public final class NodeEnvironment  implements Closeable {
     public static final Setting<Boolean> ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING =
         Setting.boolSetting("node.enable_lucene_segment_infos_trace", false, Property.NodeScope);
 
-    public static final String NODES_FOLDER = "nodes";
     public static final String INDICES_FOLDER = "indices";
     public static final String NODE_LOCK_FILENAME = "node.lock";
 
@@ -179,20 +181,28 @@ public final class NodeEnvironment  implements Closeable {
         private final Lock[] locks;
         private final NodePath[] nodePaths;
 
+
+        public NodeLock(final Logger logger,
+                        final Environment environment,
+                        final CheckedFunction<Path, Boolean, IOException> pathFunction) throws IOException {
+            this(logger, environment, pathFunction, Function.identity());
+        }
+
         /**
          * Tries to acquire a node lock for a node id, throws {@code IOException} if it is unable to acquire it
          * @param pathFunction function to check node path before attempt of acquiring a node lock
          */
         public NodeLock(final Logger logger,
                         final Environment environment,
-                        final CheckedFunction<Path, Boolean, IOException> pathFunction) throws IOException {
+                        final CheckedFunction<Path, Boolean, IOException> pathFunction,
+                        final Function<Path, Path> subPathMapping) throws IOException {
             nodePaths = new NodePath[environment.dataFiles().length];
             locks = new Lock[nodePaths.length];
             try {
                 final Path[] dataPaths = environment.dataFiles();
                 for (int dirIndex = 0; dirIndex < dataPaths.length; dirIndex++) {
                     Path dataDir = dataPaths[dirIndex];
-                    Path dir = resolveNodePath(dataDir);
+                    Path dir = subPathMapping.apply(dataDir);
                     if (pathFunction.apply(dir) == false) {
                         continue;
                     }
@@ -247,7 +257,7 @@ public final class NodeEnvironment  implements Closeable {
             sharedDataPath = environment.sharedDataFile();
 
             for (Path path : environment.dataFiles()) {
-                Files.createDirectories(resolveNodePath(path));
+                Files.createDirectories(path);
             }
 
             final NodeLock nodeLock;
@@ -264,7 +274,6 @@ public final class NodeEnvironment  implements Closeable {
 
             this.locks = nodeLock.locks;
             this.nodePaths = nodeLock.nodePaths;
-            this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths);
 
             logger.debug("using node location {}", Arrays.toString(nodePaths));
 
@@ -278,6 +287,10 @@ public final class NodeEnvironment  implements Closeable {
                 ensureAtomicMoveSupported(nodePaths);
             }
 
+            if (upgradeLegacyNodeFolders(logger, settings, environment, nodeLock)) {
+                assertCanWrite();
+            }
+
             if (DiscoveryNode.isDataNode(settings) == false) {
                 if (DiscoveryNode.isMasterNode(settings) == false) {
                     ensureNoIndexMetaData(nodePaths);
@@ -286,6 +299,8 @@ public final class NodeEnvironment  implements Closeable {
                 ensureNoShardData(nodePaths);
             }
 
+            this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths);
+
             success = true;
         } finally {
             if (success == false) {
@@ -295,13 +310,128 @@ public final class NodeEnvironment  implements Closeable {
     }
 
     /**
-     * Resolve a specific nodes/{node.id} path for the specified path and node lock id.
-     *
-     * @param path       the path
-     * @return the resolved path
+     * Upgrades all data paths that have been written to by an older ES version to the 8.0+ compatible folder layout,
+     * removing the "nodes/${lockId}" folder prefix
      */
-    public static Path resolveNodePath(final Path path) {
-        return path.resolve(NODES_FOLDER).resolve("0");
+    private static boolean upgradeLegacyNodeFolders(Logger logger, Settings settings, Environment environment,
+                                                    NodeLock nodeLock) throws IOException {
+        boolean upgradeNeeded = false;
+
+        // check if we can do an auto-upgrade
+        for (Path path : environment.dataFiles()) {
+            final Path nodesFolderPath = path.resolve("nodes");
+            if (Files.isDirectory(nodesFolderPath)) {
+                final List<Integer> nodeLockIds = new ArrayList<>();
+
+                try (DirectoryStream<Path> stream = Files.newDirectoryStream(nodesFolderPath)) {
+                    for (Path nodeLockIdPath : stream) {
+                        String fileName = nodeLockIdPath.getFileName().toString();
+                        if (Files.isDirectory(nodeLockIdPath) && fileName.chars().allMatch(Character::isDigit)) {
+                            int nodeLockId = Integer.parseInt(fileName);
+                            nodeLockIds.add(nodeLockId);
+                        } else if (FileSystemUtils.isDesktopServicesStore(nodeLockIdPath) == false) {
+                            throw new IllegalStateException("unexpected file/folder encountered during data folder upgrade: " +
+                                nodeLockIdPath);
+                        }
+                    }
+                }
+
+                if (nodeLockIds.isEmpty() == false) {
+                    upgradeNeeded = true;
+
+                    if (nodeLockIds.equals(Arrays.asList(0)) == false) {
+                        throw new IllegalStateException("data path " + nodesFolderPath + " cannot be upgraded automatically because it " +
+                            "contains data from nodes with ordinals " + nodeLockIds + ", due to previous use of the now obsolete " +
+                            "[node.max_local_storage_nodes] setting. Please check the breaking changes docs for the current version of " +
+                            "Elasticsearch to find an upgrade path");
+                    }
+                }
+            }
+        }
+
+        if (upgradeNeeded == false) {
+            logger.trace("data folder upgrade not required");
+            return false;
+        }
+
+        logger.info("upgrading legacy data folders: {}", Arrays.toString(environment.dataFiles()));
+
+        // acquire locks on legacy path for duration of upgrade (to ensure there is no older ES version running on this path)
+        final NodeLock legacyNodeLock;
+        try {
+            legacyNodeLock = new NodeLock(logger, environment, dir -> true, path -> path.resolve("nodes").resolve("0"));
+        } catch (IOException e) {
+            final String message = String.format(
+                Locale.ROOT,
+                "failed to obtain legacy node locks, tried %s;" +
+                    " maybe these locations are not writable or multiple nodes were started on the same data path?",
+                Arrays.toString(environment.dataFiles()));
+            throw new IllegalStateException(message, e);
+        }
+
+        // move contents from legacy path to new path
+        assert nodeLock.getNodePaths().length == legacyNodeLock.getNodePaths().length;
+        try {
+            final List<CheckedRunnable<IOException>> upgradeActions = new ArrayList<>();
+            for (int i = 0; i < legacyNodeLock.getNodePaths().length; i++) {
+                final NodePath legacyNodePath = legacyNodeLock.getNodePaths()[i];
+                final NodePath nodePath = nodeLock.getNodePaths()[i];
+
+                // determine folders to move and check that there are no extra files/folders
+                final Set<String> folderNames = new HashSet<>();
+
+                try (DirectoryStream<Path> stream = Files.newDirectoryStream(legacyNodePath.path)) {
+                    for (Path subFolderPath : stream) {
+                        final String fileName = subFolderPath.getFileName().toString();
+                        if (FileSystemUtils.isDesktopServicesStore(subFolderPath)) {
+                            // ignore
+                        } else if (FileSystemUtils.isAccessibleDirectory(subFolderPath, logger)) {
+                            if (fileName.equals(INDICES_FOLDER) == false && // indices folder
+                                fileName.equals(MetaDataStateFormat.STATE_DIR_NAME) == false) { // global metadata & node state folder
+                                throw new IllegalStateException("unexpected folder encountered during data folder upgrade: " +
+                                    subFolderPath);
+                            }
+                            final Path targetSubFolderPath = nodePath.path.resolve(fileName);
+                            if (Files.exists(targetSubFolderPath)) {
+                                throw new IllegalStateException("target folder already exists during data folder upgrade: " +
+                                    targetSubFolderPath);
+                            }
+                            folderNames.add(fileName);
+                        } else if (fileName.equals(NODE_LOCK_FILENAME) == false &&
+                                   fileName.equals(TEMP_FILE_NAME) == false) {
+                            throw new IllegalStateException("unexpected file/folder encountered during data folder upgrade: " +
+                                subFolderPath);
+                        }
+                    }
+                }
+
+                assert Sets.difference(folderNames, Sets.newHashSet(INDICES_FOLDER, MetaDataStateFormat.STATE_DIR_NAME)).isEmpty() :
+                    "expected indices and/or state dir folder but was " + folderNames;
+
+                upgradeActions.add(() -> {
+                    for (String folderName : folderNames) {
+                        final Path sourceSubFolderPath = legacyNodePath.path.resolve(folderName);
+                        final Path targetSubFolderPath = nodePath.path.resolve(folderName);
+                        Files.move(sourceSubFolderPath, targetSubFolderPath, StandardCopyOption.ATOMIC_MOVE);
+                        logger.info("data folder upgrade: moved from [{}] to [{}]", sourceSubFolderPath, targetSubFolderPath);
+                    }
+                    IOUtils.fsync(nodePath.path, true);
+                });
+            }
+            // now do the actual upgrade. start by upgrading the node metadata file before moving anything, since a downgrade in an
+            // intermediate state would be pretty disastrous
+            loadOrCreateNodeMetaData(settings, logger, legacyNodeLock.getNodePaths());
+            for (CheckedRunnable<IOException> upgradeAction : upgradeActions) {
+                upgradeAction.run();
+            }
+        } finally {
+            legacyNodeLock.close();
+        }
+
+        // upgrade successfully completed, remove legacy nodes folders
+        IOUtils.rm(Stream.of(environment.dataFiles()).map(path -> path.resolve("nodes")).toArray(Path[]::new));
+
+        return true;
     }
 
     private void maybeLogPathDetails() throws IOException {
@@ -801,14 +931,14 @@ public final class NodeEnvironment  implements Closeable {
     }
 
     /**
-     * Returns all folder names in ${data.paths}/nodes/{node.id}/indices folder
+     * Returns all folder names in ${data.paths}/indices folder
      */
     public Set<String> availableIndexFolders() throws IOException {
         return availableIndexFolders(p -> false);
     }
 
     /**
-     * Returns folder names in ${data.paths}/nodes/{node.id}/indices folder that don't match the given predicate.
+     * Returns folder names in ${data.paths}/indices folder that don't match the given predicate.
      * @param excludeIndexPathIdsPredicate folder names to exclude
      */
     public Set<String> availableIndexFolders(Predicate<String> excludeIndexPathIdsPredicate) throws IOException {
@@ -825,7 +955,7 @@ public final class NodeEnvironment  implements Closeable {
     }
 
     /**
-     * Return all directory names in the nodes/{node.id}/indices directory for the given node path.
+     * Return all directory names in the indices directory for the given node path.
      *
      * @param nodePath the path
      * @return all directories that could be indices for the given node path.
@@ -836,7 +966,7 @@ public final class NodeEnvironment  implements Closeable {
     }
 
     /**
-     * Return directory names in the nodes/{node.id}/indices directory for the given node path that don't match the given predicate.
+     * Return directory names in the indices directory for the given node path that don't match the given predicate.
      *
      * @param nodePath the path
      * @param excludeIndexPathIdsPredicate folder names to exclude
@@ -865,7 +995,7 @@ public final class NodeEnvironment  implements Closeable {
     }
 
     /**
-     * Resolves all existing paths to <code>indexFolderName</code> in ${data.paths}/nodes/{node.id}/indices
+     * Resolves all existing paths to <code>indexFolderName</code> in ${data.paths}/indices
      */
     public Path[] resolveIndexFolder(String indexFolderName) {
         if (nodePaths == null || locks == null) {

+ 1 - 4
server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java

@@ -140,17 +140,14 @@ public class RemoveCorruptedShardDataCommand extends EnvironmentAwareCommand {
                 IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, shardParent);
 
             final String shardIdFileName = path.getFileName().toString();
-            final String nodeIdFileName = shardParentParent.getParent().getFileName().toString();
             if (Files.isDirectory(path) && shardIdFileName.chars().allMatch(Character::isDigit) // SHARD-ID path element check
                 && NodeEnvironment.INDICES_FOLDER.equals(shardParentParent.getFileName().toString()) // `indices` check
-                && nodeIdFileName.chars().allMatch(Character::isDigit) // NODE-ID check
-                && NodeEnvironment.NODES_FOLDER.equals(shardParentParent.getParent().getParent().getFileName().toString()) // `nodes` check
             ) {
                 shardId = Integer.parseInt(shardIdFileName);
                 indexName = indexMetaData.getIndex().getName();
             } else {
                 throw new ElasticsearchException("Unable to resolve shard id. Wrong folder structure at [ " + path.toString()
-                    + " ], expected .../nodes/[NODE-ID]/indices/[INDEX-UUID]/[SHARD-ID]");
+                    + " ], expected .../indices/[INDEX-UUID]/[SHARD-ID]");
             }
         } else {
             // otherwise resolve shardPath based on the index name and shard id

+ 8 - 9
server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java

@@ -18,6 +18,12 @@
  */
 package org.elasticsearch.bwcompat;
 
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ESIntegTestCase;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.file.DirectoryStream;
@@ -26,13 +32,6 @@ import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.test.ESIntegTestCase;
-
 import static org.hamcrest.Matchers.containsString;
 
 @LuceneTestCase.SuppressCodecs("*")
@@ -69,8 +68,8 @@ public class RecoveryWithUnsupportedIndicesIT extends ESIntegTestCase {
             }
             throw new IllegalStateException(builder.toString());
         }
-        Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER);
-        Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER);
+        Path src = list[0].resolve("nodes");
+        Path dest = dataDir.resolve("nodes");
         assertTrue(Files.exists(src));
         Files.move(src, dest);
         assertFalse(Files.exists(src));

+ 85 - 0
server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java

@@ -21,13 +21,24 @@ package org.elasticsearch.env;
 
 import org.elasticsearch.Version;
 import org.elasticsearch.common.CheckedConsumer;
+import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.InternalTestCluster;
 
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.util.List;
+import java.util.stream.Collectors;
 
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.endsWith;
@@ -123,4 +134,78 @@ public class NodeEnvironmentIT extends ESIntegTestCase {
         assertThat(illegalStateException.getMessage(),
             allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]")));
     }
+
+    public void testUpgradeDataFolder() throws IOException, InterruptedException {
+        String node = internalCluster().startNode();
+        prepareCreate("test").get();
+        indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("{}", XContentType.JSON));
+        String nodeId = client().admin().cluster().prepareState().get().getState().nodes().getMasterNodeId();
+
+        final Settings dataPathSettings = internalCluster().dataPathSettings(node);
+        internalCluster().stopRandomDataNode();
+
+        // simulate older data path layout by moving data under "nodes/0" folder
+        final List<Path> dataPaths = Environment.PATH_DATA_SETTING.get(dataPathSettings)
+            .stream().map(PathUtils::get).collect(Collectors.toList());
+        dataPaths.forEach(path -> {
+                final Path targetPath = path.resolve("nodes").resolve("0");
+                try {
+                    Files.createDirectories(targetPath);
+
+                    try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
+                        for (Path subPath : stream) {
+                            String fileName = subPath.getFileName().toString();
+                            Path targetSubPath = targetPath.resolve(fileName);
+                            if (fileName.equals("nodes") == false) {
+                                Files.move(subPath, targetSubPath, StandardCopyOption.ATOMIC_MOVE);
+                            }
+                        }
+                    }
+                } catch (IOException e) {
+                    throw new UncheckedIOException(e);
+                }
+            });
+
+        dataPaths.forEach(path -> assertTrue(Files.exists(path.resolve("nodes"))));
+
+        // create extra file/folder, and check that upgrade fails
+        if (dataPaths.isEmpty() == false) {
+            final Path badFileInNodesDir = Files.createTempFile(randomFrom(dataPaths).resolve("nodes"), "bad", "file");
+            IllegalStateException ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings));
+            assertThat(ise.getMessage(), containsString("unexpected file/folder encountered during data folder upgrade"));
+            Files.delete(badFileInNodesDir);
+
+            final Path badFolderInNodesDir = Files.createDirectories(randomFrom(dataPaths).resolve("nodes").resolve("bad-folder"));
+            ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings));
+            assertThat(ise.getMessage(), containsString("unexpected file/folder encountered during data folder upgrade"));
+            Files.delete(badFolderInNodesDir);
+
+            final Path badFile = Files.createTempFile(randomFrom(dataPaths).resolve("nodes").resolve("0"), "bad", "file");
+            ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings));
+            assertThat(ise.getMessage(), containsString("unexpected file/folder encountered during data folder upgrade"));
+            Files.delete(badFile);
+
+            final Path badFolder = Files.createDirectories(randomFrom(dataPaths).resolve("nodes").resolve("0").resolve("bad-folder"));
+            ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings));
+            assertThat(ise.getMessage(), containsString("unexpected folder encountered during data folder upgrade"));
+            Files.delete(badFolder);
+
+            final Path conflictingFolder = randomFrom(dataPaths).resolve("indices");
+            if (Files.exists(conflictingFolder) == false) {
+                Files.createDirectories(conflictingFolder);
+                ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings));
+                assertThat(ise.getMessage(), containsString("target folder already exists during data folder upgrade"));
+                Files.delete(conflictingFolder);
+            }
+        }
+
+        // check that upgrade works
+        dataPaths.forEach(path -> assertTrue(Files.exists(path.resolve("nodes"))));
+        internalCluster().startNode(dataPathSettings);
+        dataPaths.forEach(path -> assertFalse(Files.exists(path.resolve("nodes"))));
+        assertEquals(nodeId, client().admin().cluster().prepareState().get().getState().nodes().getMasterNodeId());
+        assertTrue(client().admin().indices().prepareExists("test").get().isExists());
+        ensureYellow("test");
+        assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
+    }
 }

+ 6 - 6
server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java

@@ -373,10 +373,10 @@ public class NodeEnvironmentTests extends ESTestCase {
 
         assertThat("shard paths with a custom data_path should contain only regular paths",
                 env.availableShardPaths(sid),
-                equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID() + "/0")));
+                equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID() + "/0")));
 
         assertThat("index paths uses the regular template",
-                env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID())));
+                env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID())));
 
         IndexSettings s3 = new IndexSettings(s2.getIndexMetaData(), Settings.builder().build());
 
@@ -385,10 +385,10 @@ public class NodeEnvironmentTests extends ESTestCase {
 
         assertThat("shard paths with a custom data_path should contain only regular paths",
                 env.availableShardPaths(sid),
-                equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID() + "/0")));
+                equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID() + "/0")));
 
         assertThat("index paths uses the regular template",
-                env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID())));
+                env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID())));
 
         env.close();
     }
@@ -418,7 +418,7 @@ public class NodeEnvironmentTests extends ESTestCase {
         String[] paths = tmpPaths();
         // simulate some previous left over temp files
         for (String path : randomSubsetOf(randomIntBetween(1, paths.length), paths)) {
-            final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path));
+            final Path nodePath = PathUtils.get(path);
             Files.createDirectories(nodePath);
             Files.createFile(nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME));
             if (randomBoolean()) {
@@ -433,7 +433,7 @@ public class NodeEnvironmentTests extends ESTestCase {
 
         // check we clean up
         for (String path: paths) {
-            final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path));
+            final Path nodePath = PathUtils.get(path);
             final Path tempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME);
             assertFalse(tempFile + " should have been cleaned", Files.exists(tempFile));
             final Path srcTempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".src");

+ 1 - 1
server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java

@@ -90,7 +90,7 @@ public class NewPathForShardTests extends ESTestCase {
 
         @Override
         public FileStore getFileStore(Path path) throws IOException {
-            if (path.toString().contains(aPathPart)) {
+            if (path.toString().contains(aPathPart) || (path.toString() + path.getFileSystem().getSeparator()).contains(aPathPart)) {
                 return aFileStore;
             } else {
                 return bFileStore;

+ 2 - 3
server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java

@@ -94,8 +94,7 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase {
                 .putList(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath().toString()).build());
 
         // create same directory structure as prod does
-        final Path path = NodeEnvironment.resolveNodePath(dataDir);
-        Files.createDirectories(path);
+        Files.createDirectories(dataDir);
         settings = Settings.builder()
             .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
@@ -103,7 +102,7 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase {
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
             .build();
 
-        final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(path);
+        final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(dataDir);
         shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
         final IndexMetaData.Builder metaData = IndexMetaData.builder(routing.getIndexName())
             .settings(settings)