
Improve Test Coverage for Old Repository Metadata Formats (#57915)

Use the hack from `CorruptedBlobStoreRepositoryIT` in more snapshot
failure tests to verify that BwC repository metadata is handled properly
in scenarios that have so far not been covered by tests.
Also includes some minor related dry-up of snapshot tests.

Relates #57798
Armin Braun, 5 years ago
parent
commit f3b53d9dfe
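
For context on how the extracted helpers are meant to be called from the tests touched below, here is a minimal usage sketch, assuming a test class extending `AbstractSnapshotIntegTestCase`; the class and method names are illustrative only, while `initWithSnapshotVersion`, `maybeInitWithOldSnapshotVersion`, `createRepository` and `randomRepoPath` are the helpers added or already present in that base class:

```java
package org.elasticsearch.snapshots;

import java.nio.file.Path;

// Illustrative class and test names; the real callers are the ITs changed in this commit.
public class OldFormatRepositoryMetadataIT extends AbstractSnapshotIntegTestCase {

    public void testScenarioAgainstOldFormatRepository() throws Exception {
        final String repoName = "test-repo";
        final Path repoPath = randomRepoPath();
        createRepository(repoName, "fs", repoPath);

        // Deterministically seed the empty repository with RepositoryData written in the old snapshot format.
        // The returned snapshot name identifies the placeholder "old version" snapshot created by the hack.
        final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT);

        // Alternatively, only seed old-format metadata on some runs so both formats get exercised:
        // maybeInitWithOldSnapshotVersion(repoName, repoPath);

        // Re-register the repository so the in-memory RepositoryData cache is dropped
        // and the downgraded index-N blob is actually re-read from disk.
        client().admin().cluster().prepareDeleteRepository(repoName).get();
        createRepository(repoName, "fs", repoPath);

        // ... exercise the snapshot failure scenario under test here ...
    }
}
```

The deterministic variant is used where a test explicitly targets the old metadata format (as in `CorruptedBlobStoreRepositoryIT`), while `maybeInitWithOldSnapshotVersion` randomly mixes old- and new-format repositories across runs of the broader snapshot failure tests; the placeholder snapshots are cleaned up automatically in the base class teardown via the `old-version-snapshot-*` wildcard delete.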

+ 11 - 60
server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java

@@ -28,15 +28,10 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.metadata.RepositoriesMetadata;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.xcontent.DeprecationHandler;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry;
-import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.repositories.IndexMetaDataGenerations;
 import org.elasticsearch.repositories.RepositoriesService;
@@ -96,8 +91,7 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
             equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
 
         logger.info("--> move index-N blob to next generation");
-        final RepositoryData repositoryData =
-            getRepositoryData(internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(repoName));
+        final RepositoryData repositoryData = getRepositoryData(repoName);
         Files.move(repo.resolve("index-" + repositoryData.getGenId()), repo.resolve("index-" + (repositoryData.getGenId() + 1)));
 
         assertRepositoryBlocked(client, repoName, snapshot);
@@ -156,7 +150,7 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
         final Repository repository = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName);
 
         logger.info("--> move index-N blob to next generation");
-        final RepositoryData repositoryData = getRepositoryData(repository);
+        final RepositoryData repositoryData = getRepositoryData(repoName);
         final long beforeMoveGen = repositoryData.getGenId();
         Files.move(repo.resolve("index-" + beforeMoveGen), repo.resolve("index-" + (beforeMoveGen + 1)));
 
@@ -189,16 +183,14 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
         internalCluster().fullRestart();
         ensureGreen();
 
-        Repository repositoryAfterRestart = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName);
-
         logger.info("--> verify index-N blob is found at the new location");
-        assertThat(getRepositoryData(repositoryAfterRestart).getGenId(), is(beforeMoveGen + 1));
+        assertThat(getRepositoryData(repoName).getGenId(), is(beforeMoveGen + 1));
 
         logger.info("--> delete snapshot");
         client().admin().cluster().prepareDeleteSnapshot(repoName, snapshot).get();
 
         logger.info("--> verify index-N blob is found at the expected location");
-        assertThat(getRepositoryData(repositoryAfterRestart).getGenId(), is(beforeMoveGen + 2));
+        assertThat(getRepositoryData(repoName).getGenId(), is(beforeMoveGen + 2));
 
         logger.info("--> make sure snapshot doesn't exist");
         expectThrows(SnapshotMissingException.class, () -> client().admin().cluster().prepareGetSnapshots(repoName)
@@ -229,8 +221,7 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
             assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
                 equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
         }
-        final Repository repository = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName);
-        final RepositoryData repositoryData = getRepositoryData(repository);
+        final RepositoryData repositoryData = getRepositoryData(repoName);
 
         final SnapshotId snapshotToCorrupt = randomFrom(repositoryData.getSnapshotIds());
         logger.info("--> delete root level snapshot metadata blob for snapshot [{}]", snapshotToCorrupt);
@@ -254,7 +245,7 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
         final ThreadPool threadPool = internalCluster().getCurrentMasterNodeInstance(ThreadPool.class);
         assertThat(PlainActionFuture.get(f -> threadPool.generic().execute(
             ActionRunnable.supply(f, () ->
-                snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repository), null)))),
+                snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null)))),
             is(SnapshotsService.OLD_SNAPSHOT_FORMAT));
 
         logger.info("--> verify that snapshot with missing root level metadata can be deleted");
@@ -263,9 +254,9 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
         logger.info("--> verify that repository is assumed in new metadata format after removing corrupted snapshot");
         assertThat(PlainActionFuture.get(f -> threadPool.generic().execute(
             ActionRunnable.supply(f, () ->
-                snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repository), null)))),
+                snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null)))),
             is(Version.CURRENT));
-        final RepositoryData finalRepositoryData = getRepositoryData(repository);
+        final RepositoryData finalRepositoryData = getRepositoryData(repoName);
         for (SnapshotId snapshotId : finalRepositoryData.getSnapshotIds()) {
             assertThat(finalRepositoryData.getVersion(snapshotId), is(Version.CURRENT));
         }
@@ -295,7 +286,7 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
 
         logger.info("--> corrupt index-N blob");
         final Repository repository = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName);
-        final RepositoryData repositoryData = getRepositoryData(repository);
+        final RepositoryData repositoryData = getRepositoryData(repoName);
         Files.write(repo.resolve("index-" + repositoryData.getGenId()), randomByteArrayOfLength(randomIntBetween(1, 100)));
 
         logger.info("--> verify loading repository data throws RepositoryException");
@@ -317,27 +308,7 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
         final String repoName = "test-repo";
         final Path repoPath = randomRepoPath();
         createRepository(repoName, "fs", repoPath);
-
-        // Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard
-        // generations (the existence of which would short-circuit checks for the repo containing old version snapshots)
-        final String oldVersionSnapshot = "old-version-snapshot";
-        final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster()
-                .prepareCreateSnapshot(repoName, oldVersionSnapshot).setIndices().setWaitForCompletion(true).get();
-        assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), is(0));
-
-        logger.info("--> writing downgraded RepositoryData");
-        final RepositoryData repositoryData = getRepositoryData(repoName);
-        final XContentBuilder jsonBuilder = JsonXContent.contentBuilder();
-        repositoryData.snapshotsToXContent(jsonBuilder, SnapshotsService.OLD_SNAPSHOT_FORMAT);
-        final RepositoryData downgradedRepoData = RepositoryData.snapshotsFromXContent(JsonXContent.jsonXContent.createParser(
-                NamedXContentRegistry.EMPTY,
-                DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
-                Strings.toString(jsonBuilder).replace(Version.CURRENT.toString(), SnapshotsService.OLD_SNAPSHOT_FORMAT.toString())),
-                repositoryData.getGenId(), randomBoolean());
-        Files.write(repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData.getGenId()),
-                BytesReference.toBytes(BytesReference.bytes(
-                        downgradedRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), SnapshotsService.OLD_SNAPSHOT_FORMAT))),
-                StandardOpenOption.TRUNCATE_EXISTING);
+        final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT);
 
         logger.info("--> recreating repository to clear caches");
         client().admin().cluster().prepareDeleteRepository(repoName).get();
@@ -367,27 +338,7 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
         final String repoName = "test-repo";
         final Path repoPath = randomRepoPath();
         createRepository(repoName, "fs", repoPath);
-
-        // Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard
-        // generations (the existence of which would short-circuit checks for the repo containing old version snapshots)
-        final String oldVersionSnapshot = "old-version-snapshot";
-        final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster()
-                .prepareCreateSnapshot(repoName, oldVersionSnapshot).setIndices().setWaitForCompletion(true).get();
-        assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), is(0));
-
-        logger.info("--> writing downgraded RepositoryData");
-        final RepositoryData repositoryData = getRepositoryData(repoName);
-        final XContentBuilder jsonBuilder = JsonXContent.contentBuilder();
-        repositoryData.snapshotsToXContent(jsonBuilder, SnapshotsService.OLD_SNAPSHOT_FORMAT);
-        final RepositoryData downgradedRepoData = RepositoryData.snapshotsFromXContent(JsonXContent.jsonXContent.createParser(
-                NamedXContentRegistry.EMPTY,
-                DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
-                Strings.toString(jsonBuilder).replace(Version.CURRENT.toString(), SnapshotsService.OLD_SNAPSHOT_FORMAT.toString())),
-                repositoryData.getGenId(), randomBoolean());
-        Files.write(repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData.getGenId()),
-                BytesReference.toBytes(BytesReference.bytes(
-                        downgradedRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), SnapshotsService.OLD_SNAPSHOT_FORMAT))),
-                StandardOpenOption.TRUNCATE_EXISTING);
+        final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT);
 
         logger.info("--> recreating repository to clear caches");
         client().admin().cluster().prepareDeleteRepository(repoName).get();

+ 21 - 7
server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java

@@ -385,14 +385,16 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
 
         logger.info("--> creating repository");
+        final Path repoPath = randomRepoPath();
         AcknowledgedResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
                 .setType("mock").setSettings(
                         Settings.builder()
-                                .put("location", randomRepoPath())
+                                .put("location", repoPath)
                                 .put("random", randomAlphaOfLength(10))
                                 .put("wait_after_unblock", 200)
                 ).get();
         assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+        maybeInitWithOldSnapshotVersion("test-repo", repoPath);
 
         // Pick one node and block it
         String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
@@ -803,11 +805,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         final Client client = client();
 
         logger.info("-->  creating repository");
+        final Path repoPath = randomRepoPath();
         assertAcked(client.admin().cluster().preparePutRepository("test-repo")
                 .setType("fs").setSettings(Settings.builder()
-                        .put("location", randomRepoPath())
+                        .put("location", repoPath)
                         .put("compress", randomBoolean())
                         .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+        maybeInitWithOldSnapshotVersion("test-repo", repoPath);
 
         assertAcked(prepareCreate("test-idx", 0, Settings.builder().put("number_of_shards", between(1, 20))
                 .put("number_of_replicas", 0)));
@@ -840,7 +844,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             assertTrue(snapshotInfo.state().completed());
         }, 1, TimeUnit.MINUTES);
 
-        logger.info("--> verify that snapshot was succesful");
+        logger.info("--> verify that snapshot was successful");
 
         GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster().prepareGetSnapshots("test-repo")
             .setSnapshots("test-snap").get();
@@ -858,11 +862,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         final Client client = client();
 
         logger.info("-->  creating repository");
+        final Path repoPath = randomRepoPath();
         assertAcked(client.admin().cluster().preparePutRepository("test-repo")
             .setType("mock").setSettings(Settings.builder()
-                .put("location", randomRepoPath())
+                .put("location", repoPath)
                 .put("compress", randomBoolean())
                 .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+        maybeInitWithOldSnapshotVersion("test-repo", repoPath);
 
         assertAcked(prepareCreate("test-idx", 0, Settings.builder().put("number_of_shards", between(1, 20))
             .put("number_of_replicas", 0)));
@@ -919,11 +925,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startDataOnlyNodes(2);
 
         logger.info("-->  creating repository");
+        final Path repoPath = randomRepoPath();
         assertAcked(client().admin().cluster().preparePutRepository("test-repo")
             .setType("mock").setSettings(Settings.builder()
-                .put("location", randomRepoPath())
+                .put("location", repoPath)
                 .put("compress", randomBoolean())
                 .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+        maybeInitWithOldSnapshotVersion("test-repo", repoPath);
 
         assertAcked(prepareCreate("test-idx", 0, Settings.builder()
             .put("number_of_shards", 6).put("number_of_replicas", 0)));
@@ -1237,11 +1245,14 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startMasterOnlyNode();
         internalCluster().startDataOnlyNodes(2);
         logger.info("-->  creating repository");
+        final Path repoPath = randomRepoPath();
         assertAcked(client().admin().cluster().preparePutRepository("test-repo")
             .setType("mock").setSettings(Settings.builder()
-                .put("location", randomRepoPath())
+                .put("location", repoPath)
                 .put("compress", randomBoolean())
                 .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+        maybeInitWithOldSnapshotVersion("test-repo", repoPath);
+
         assertAcked(prepareCreate("test-idx", 0, Settings.builder()
             .put("number_of_shards", 5).put("number_of_replicas", 0)));
         ensureGreen();
@@ -1285,11 +1296,14 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startMasterOnlyNode();
         final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
         logger.info("-->  creating repository");
+        final Path repoPath = randomRepoPath();
         assertAcked(client().admin().cluster().preparePutRepository("test-repo")
             .setType("mock").setSettings(Settings.builder()
-                .put("location", randomRepoPath())
+                .put("location", repoPath)
                 .put("compress", randomBoolean())
                 .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+        maybeInitWithOldSnapshotVersion("test-repo", repoPath);
+
         assertAcked(prepareCreate("test-idx", 0, Settings.builder()
             .put("number_of_shards", 2).put("number_of_replicas", 0)));
         ensureGreen();

+ 4 - 14
server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java

@@ -81,7 +81,6 @@ import org.elasticsearch.ingest.IngestTestPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.repositories.RepositoriesService;
-import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.repositories.RepositoryData;
 import org.elasticsearch.repositories.RepositoryException;
 import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
@@ -1433,10 +1432,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         assertThat(snapshotInfo.successfulShards(), greaterThan(0));
         assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards()));
 
-        RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName());
-        Repository repository = service.repository("test-repo");
-
-        final Map<String, IndexId> indexIds = getRepositoryData(repository).getIndices();
+        final Map<String, IndexId> indexIds = getRepositoryData("test-repo").getIndices();
         final Path indicesPath = repo.resolve("indices");
 
         logger.info("--> delete index metadata and shard metadata");
@@ -1444,7 +1440,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
             Path shardZero = indicesPath.resolve(indexIds.get(index).getId()).resolve("0");
             if (randomBoolean()) {
                 Files.delete(
-                    shardZero.resolve("index-" + getRepositoryData(repository).shardGenerations().getShardGen(indexIds.get(index), 0)));
+                    shardZero.resolve("index-" + getRepositoryData("test-repo").shardGenerations().getShardGen(indexIds.get(index), 0)));
             }
             Files.delete(shardZero.resolve("snap-" + snapshotInfo.snapshotId().getUUID() + ".dat"));
         }
@@ -2878,10 +2874,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards()));
         assertThat(snapshotInfo.indices(), hasSize(nbIndices));
 
-        RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName());
-        Repository repository = service.repository("test-repo");
-
-        final RepositoryData repositoryData = getRepositoryData(repository);
+        final RepositoryData repositoryData = getRepositoryData("test-repo");
         final Map<String, IndexId> indexIds = repositoryData.getIndices();
         assertThat(indexIds.size(), equalTo(nbIndices));
 
@@ -2954,10 +2947,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards()));
         assertThat(snapshotInfo.indices(), hasSize(1));
 
-        RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName());
-        Repository repository = service.repository("test-repo");
-
-        final RepositoryData repositoryData = getRepositoryData(repository);
+        final RepositoryData repositoryData = getRepositoryData("test-repo");
         final Map<String, IndexId> indexIds = repositoryData.getIndices();
         assertThat(indexIds.size(), equalTo(1));
 

+ 1 - 5
server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java

@@ -30,8 +30,6 @@ import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.core.internal.io.IOUtils;
-import org.elasticsearch.repositories.RepositoriesService;
-import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
 
 import java.io.IOException;
@@ -164,9 +162,7 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase {
             client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get();
 
         logger.info("--> delete shard-level snap-${uuid}.dat file for one shard in this snapshot to simulate concurrent delete");
-        final RepositoriesService service = internalCluster().getMasterNodeInstance(RepositoriesService.class);
-        final Repository repository = service.repository("test-repo");
-        final String indexRepoId = getRepositoryData(repository).resolveIndexId(response.getSnapshotInfo().indices().get(0)).getId();
+        final String indexRepoId = getRepositoryData("test-repo").resolveIndexId(response.getSnapshotInfo().indices().get(0)).getId();
         IOUtils.rm(repoPath.resolve("indices").resolve(indexRepoId).resolve("0").resolve(
             BlobStoreRepository.SNAPSHOT_PREFIX + response.getSnapshotInfo().snapshotId().getUUID() + ".dat"));
 

+ 54 - 0
server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java

@@ -18,19 +18,30 @@
  */
 package org.elasticsearch.snapshots;
 
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.DeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.repositories.RepositoryData;
+import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
 import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil;
 import org.elasticsearch.snapshots.mockstore.MockRepository;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.VersionUtils;
 import org.junit.After;
 
 import java.io.IOException;
@@ -38,6 +49,7 @@ import java.nio.file.FileVisitResult;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.SimpleFileVisitor;
+import java.nio.file.StandardOpenOption;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -47,10 +59,14 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
 
 public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
 
+    private static final String OLD_VERSION_SNAPSHOT_PREFIX = "old-version-snapshot-";
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder().put(super.nodeSettings(nodeOrdinal))
@@ -78,6 +94,7 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
             client().admin().cluster().prepareGetRepositories().get().repositories().forEach(repositoryMetadata -> {
                 final String name = repositoryMetadata.name();
                 if (repositoryMetadata.settings().getAsBoolean("readonly", false) == false) {
+                    client().admin().cluster().prepareDeleteSnapshot(name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get();
                     client().admin().cluster().prepareCleanupRepository(name).get();
                 }
                 BlobStoreTestUtil.assertRepoConsistency(internalCluster(), name);
@@ -243,4 +260,41 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
                 .setType(type)
                 .setSettings(Settings.builder().put("location", location)));
     }
+
+    /**
+     * Randomly write an empty snapshot of an older version to an empty repository to simulate an older repository metadata format.
+     */
+    protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) throws IOException {
+        if (randomBoolean() && randomBoolean()) {
+            initWithSnapshotVersion(repoName, repoPath, VersionUtils.randomIndexCompatibleVersion(random()));
+        }
+    }
+
+    /**
+     * Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard
+     * generations (the existence of which would short-circuit checks for the repo containing old version snapshots)
+     */
+    protected String initWithSnapshotVersion(String repoName, Path repoPath, Version version) throws IOException {
+        assertThat("This hack only works on an empty repository", getRepositoryData(repoName).getSnapshotIds(), empty());
+        final String oldVersionSnapshot = OLD_VERSION_SNAPSHOT_PREFIX + version.id;
+        final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster()
+                .prepareCreateSnapshot(repoName, oldVersionSnapshot).setIndices("does-not-exist-for-sure-*")
+                .setWaitForCompletion(true).get();
+        assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), is(0));
+
+        logger.info("--> writing downgraded RepositoryData for repository metadata version [{}]", version);
+        final RepositoryData repositoryData = getRepositoryData(repoName);
+        final XContentBuilder jsonBuilder = JsonXContent.contentBuilder();
+        repositoryData.snapshotsToXContent(jsonBuilder, version);
+        final RepositoryData downgradedRepoData = RepositoryData.snapshotsFromXContent(JsonXContent.jsonXContent.createParser(
+                NamedXContentRegistry.EMPTY,
+                DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
+                Strings.toString(jsonBuilder).replace(Version.CURRENT.toString(), version.toString())),
+                repositoryData.getGenId(), randomBoolean());
+        Files.write(repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData.getGenId()),
+                BytesReference.toBytes(BytesReference.bytes(
+                        downgradedRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), version))),
+                StandardOpenOption.TRUNCATE_EXISTING);
+        return oldVersionSnapshot;
+    }
 }