
Abort writes in repo analyzer (#72077)

We rely on the repository implementation correctly handling the case where a
write is aborted before it completes. This is not guaranteed for third-party
repositories.

This commit adds a rare action during analysis which aborts the write
just before it completes and verifies that the target blob is not found
by any node.
David Turner 4 years ago
parent
commit
1c4791e398
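
As a rough illustration of how the new behaviour is exercised, here is a sketch of a caller driving the request API that this commit adds. It reuses the AbstractSnapshotIntegTestCase scaffolding from RepositoryAnalysisFailureIT below; the test name is hypothetical, but every request method shown appears in the diff.

// Hypothetical test method; all request setters are taken from this commit's diff.
public void testAnalysisWithFrequentAbortedWrites() {
    final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo");
    request.maxBlobSize(new ByteSizeValue(10L));
    request.rareActionProbability(0.7);    // "rare" actions, including aborted writes, become common
    // request.abortWritePermitted(false); // opts out of aborted writes entirely
    client().execute(RepositoryAnalyzeAction.INSTANCE, request).actionGet(30L, TimeUnit.SECONDS);
}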

+ 13 - 3
docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc

@@ -151,6 +151,11 @@ uploads. Similarly, the reading nodes will use a variety of different methods
 to read the data back again. For instance they may read the entire blob from
 start to end, or may read only a subset of the data.
 
+For some blob-level tasks, the executing node will abort the write before it is
+complete. In this case it still instructs some of the other nodes in the
+cluster to attempt to read the blob, but all of these reads must fail to find
+the blob.
+
 [[repo-analysis-api-path-params]]
 ==== {api-path-parms-title}
 
@@ -200,8 +205,8 @@ operation while writing each blob. Defaults to `2`. Early read operations are
 only rarely performed.
 
 `rare_action_probability`::
-(Optional, double) The probability of performing a rare action (an early read
-or an overwrite) on each blob. Defaults to `0.02`.
+(Optional, double) The probability of performing a rare action (an early read,
+an overwrite, or an aborted write) on each blob. Defaults to `0.02`.
 
 `seed`::
 (Optional, integer) The seed for the pseudo-random number generator used to
@@ -215,6 +220,10 @@ always happen in the same order on each run.
 information for every operation performed during the analysis. Defaults to
 `false`, meaning to return only a summary of the analysis.
 
+`rarely_abort_writes`::
+(Optional, boolean) Whether to rarely abort some write requests. Defaults to
+`true`.
+
 [role="child_attributes"]
 [[repo-analysis-api-response-body]]
 ==== {api-response-body-title}
@@ -529,7 +538,8 @@ complete. Omitted if `false`.
 `found`::
 (boolean)
 Whether the blob was found by this read operation or not. May be `false` if the
-read was started before the write completed.
+read was started before the write completed, or the write was aborted before
+completion.
 
 `first_byte_time`::
 (string)
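
A loose sketch of how `rare_action_probability` and `rarely_abort_writes` interact per blob, modelled on the `rarely(random)` helper and the flag choices added in RepositoryAnalyzeAction.java further down. It is simplified (the real code also gates the overwrite on repository capabilities), and the class and method names here are illustrative only.

// Simplified per-blob decision, mirroring RepositoryAnalyzeAction.java below.
import java.util.Random;

final class RareActionSketch {
    static boolean rarely(Random random, double rareActionProbability) {
        return random.nextDouble() < rareActionProbability;
    }

    // smallBlob: the blob is within the atomic-write size limit
    static void chooseRareActions(Random random, double p, boolean smallBlob, boolean abortWritePermitted) {
        final boolean abortWrite = smallBlob && abortWritePermitted && rarely(random, p);
        final boolean readEarly = smallBlob && rarely(random, p);
        // an aborted write and an overwrite are mutually exclusive on the same blob
        final boolean writeAndOverwrite = smallBlob && rarely(random, p) && abortWrite == false;
        System.out.printf("readEarly=%b writeAndOverwrite=%b abortWrite=%b%n", readEarly, writeAndOverwrite, abortWrite);
    }
}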

+ 4 - 0
x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/test/resources/rest-api-spec/api/snapshot.repository_analyze.json

@@ -62,6 +62,10 @@
       "detailed":{
         "type":"boolean",
         "description":"Whether to return detailed results or a summary. Defaults to 'false' so that only the summary is returned."
+      },
+      "rarely_abort_writes":{
+        "type":"boolean",
+        "description":"Whether to rarely abort writes before they complete. Defaults to 'true'."
       }
     }
   }

+ 10 - 10
x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/test/resources/rest-api-spec/test/10_analyze.yml

@@ -30,8 +30,8 @@ setup:
 ---
 "Analysis fails on readonly repositories":
   - skip:
-      version: "- 7.11.99"
-      reason: "introduced in 7.12"
+      version: "- 7.99.99"
+      reason: "abortWrites flag introduced in 7.14, and mixed-cluster support not required"
 
   - do:
       catch: bad_request
@@ -46,8 +46,8 @@ setup:
 ---
 "Analysis without details":
   - skip:
-      version: "- 7.11.99"
-      reason: "introduced in 7.12"
+      version: "- 7.99.99"
+      reason: "abortWrites flag introduced in 7.14, and mixed-cluster support not required"
 
   - do:
       snapshot.repository_analyze:
@@ -101,8 +101,8 @@ setup:
 ---
 "Analysis with details":
   - skip:
-      version: "- 7.11.99"
-      reason: "introduced in 7.12"
+      version: "- 7.99.99"
+      reason: "abortWrites flag introduced in 7.14, and mixed-cluster support not required"
 
   - do:
       snapshot.repository_analyze:
@@ -132,8 +132,8 @@ setup:
 ---
 "Analysis with ?human=false":
   - skip:
-      version: "- 7.11.99"
-      reason: "introduced in 7.12"
+      version: "- 7.99.99"
+      reason: "abortWrites flag introduced in 7.14, and mixed-cluster support not required"
 
   - do:
       snapshot.repository_analyze:
@@ -158,8 +158,8 @@ setup:
 ---
 "Timeout with large blobs":
   - skip:
-      version: "- 7.11.99"
-      reason: "introduced in 7.12"
+      version: "- 7.99.99"
+      reason: "abortWrites flag introduced in 7.14, and mixed-cluster support not required"
 
   - do:
       catch: request

+ 55 - 2
x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java

@@ -18,6 +18,7 @@ import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetadata;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@@ -49,6 +50,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
@@ -97,6 +99,7 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
     public void testFailsOnReadError() {
         final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo");
         request.maxBlobSize(new ByteSizeValue(10L));
+        request.abortWritePermitted(false);
 
         final CountDown countDown = new CountDown(between(1, request.getBlobCount()));
         blobStore.setDisruption(new Disruption() {
@@ -118,6 +121,7 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
     public void testFailsOnNotFoundAfterWrite() {
         final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo");
         request.maxBlobSize(new ByteSizeValue(10L));
+        request.abortWritePermitted(false);
         request.rareActionProbability(0.0); // not found on an early read or an overwrite is ok
 
         final CountDown countDown = new CountDown(between(1, request.getBlobCount()));
@@ -138,6 +142,7 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
     public void testFailsOnChecksumMismatch() {
         final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo");
         request.maxBlobSize(new ByteSizeValue(10L));
+        request.abortWritePermitted(false);
 
         final CountDown countDown = new CountDown(between(1, request.getBlobCount()));
 
@@ -159,6 +164,7 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
     public void testFailsOnWriteException() {
         final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo");
         request.maxBlobSize(new ByteSizeValue(10L));
+        request.abortWritePermitted(false);
 
         final CountDown countDown = new CountDown(between(1, request.getBlobCount()));
 
@@ -182,6 +188,7 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
     public void testFailsOnIncompleteListing() {
         final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo");
         request.maxBlobSize(new ByteSizeValue(10L));
+        request.abortWritePermitted(false);
 
         blobStore.setDisruption(new Disruption() {
 
@@ -200,6 +207,7 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
     public void testFailsOnListingException() {
         final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo");
         request.maxBlobSize(new ByteSizeValue(10L));
+        request.abortWritePermitted(false);
 
         final CountDown countDown = new CountDown(1);
         blobStore.setDisruption(new Disruption() {
@@ -219,6 +227,7 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
     public void testFailsOnDeleteException() {
         final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo");
         request.maxBlobSize(new ByteSizeValue(10L));
+        request.abortWritePermitted(false);
 
         blobStore.setDisruption(new Disruption() {
             @Override
@@ -233,6 +242,7 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
     public void testFailsOnIncompleteDelete() {
         final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo");
         request.maxBlobSize(new ByteSizeValue(10L));
+        request.abortWritePermitted(false);
 
         blobStore.setDisruption(new Disruption() {
 
@@ -257,6 +267,28 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
         expectThrows(RepositoryVerificationException.class, () -> analyseRepository(request));
     }
 
+    public void testFailsIfBlobCreatedOnAbort() {
+        final RepositoryAnalyzeAction.Request request = new RepositoryAnalyzeAction.Request("test-repo");
+        request.maxBlobSize(new ByteSizeValue(10L));
+        request.rareActionProbability(0.7); // abort writes quite often
+
+        final AtomicBoolean writeWasAborted = new AtomicBoolean();
+        blobStore.setDisruption(new Disruption() {
+            @Override
+            public boolean createBlobOnAbort() {
+                writeWasAborted.set(true);
+                return true;
+            }
+        });
+
+        try {
+            analyseRepository(request);
+            assertFalse(writeWasAborted.get());
+        } catch (RepositoryVerificationException e) {
+            assertTrue(writeWasAborted.get());
+        }
+    }
+
     private RepositoryAnalyzeAction.Response analyseRepository(RepositoryAnalyzeAction.Request request) {
         return client().execute(RepositoryAnalyzeAction.INSTANCE, request).actionGet(30L, TimeUnit.SECONDS);
     }
@@ -360,6 +392,10 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
         }
 
         default void onDelete() throws IOException {}
+
+        default boolean createBlobOnAbort() {
+            return false;
+        }
     }
 
     static class DisruptableBlobContainer implements BlobContainer {
@@ -418,7 +454,16 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
 
         @Override
         public void writeBlobAtomic(String blobName, BytesReference bytes, boolean failIfAlreadyExists) throws IOException {
-            writeBlobAtomic(blobName, bytes.streamInput(), failIfAlreadyExists);
+            final StreamInput inputStream;
+            try {
+                inputStream = bytes.streamInput();
+            } catch (BlobWriteAbortedException e) {
+                if (disruption.createBlobOnAbort()) {
+                    blobs.put(blobName, new byte[0]);
+                }
+                throw e;
+            }
+            writeBlobAtomic(blobName, inputStream, failIfAlreadyExists);
         }
 
         private void writeBlobAtomic(String blobName, InputStream inputStream, boolean failIfAlreadyExists) throws IOException {
@@ -426,7 +471,15 @@ public class RepositoryAnalysisFailureIT extends AbstractSnapshotIntegTestCase {
                 throw new FileAlreadyExistsException(blobName);
             }
 
-            final byte[] contents = inputStream.readAllBytes();
+            final byte[] contents;
+            try {
+                contents = inputStream.readAllBytes();
+            } catch (BlobWriteAbortedException e) {
+                if (disruption.createBlobOnAbort()) {
+                    blobs.put(blobName, new byte[0]);
+                }
+                throw e;
+            }
             disruption.onWrite();
             blobs.put(blobName, contents);
         }

+ 47 - 8
x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java

@@ -10,6 +10,7 @@ package org.elasticsearch.repositories.blobstore.testkit;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionListenerResponseHandler;
 import org.elasticsearch.action.ActionRequest;
@@ -273,9 +274,9 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
 
         void run() {
             writeRandomBlob(
-                request.readEarly || (request.targetLength <= MAX_ATOMIC_WRITE_SIZE && random.nextBoolean()),
+                request.readEarly || request.getAbortWrite() || (request.targetLength <= MAX_ATOMIC_WRITE_SIZE && random.nextBoolean()),
                 true,
-                this::doReadBeforeWriteComplete,
+                this::onLastReadForInitialWrite,
                 write1Step
             );
 
@@ -321,7 +322,11 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
                         }
                     };
                     if (atomic) {
-                        blobContainer.writeBlobAtomic(request.blobName, bytesReference, failIfExists);
+                        try {
+                            blobContainer.writeBlobAtomic(request.blobName, bytesReference, failIfExists);
+                        } catch (BlobWriteAbortedException e) {
+                            assert request.getAbortWrite() : "write unexpectedly aborted";
+                        }
                     } else {
                         blobContainer.writeBlob(request.blobName, bytesReference, failIfExists);
                     }
@@ -345,13 +350,16 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
             });
         }
 
-        private void doReadBeforeWriteComplete() {
+        private void onLastReadForInitialWrite() {
             if (earlyReadNodes.isEmpty() == false) {
                 if (logger.isTraceEnabled()) {
                     logger.trace("sending read request to [{}] for [{}] before write complete", earlyReadNodes, request.getDescription());
                 }
                 readOnNodes(earlyReadNodes, true);
             }
+            if (request.getAbortWrite()) {
+                throw new BlobWriteAbortedException();
+            }
         }
 
         private void doReadAfterWrite() {
@@ -479,12 +487,13 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
                 expectedChecksumDescription = write1Details.checksum + " or " + write2Details.checksum;
             }
 
+            boolean anyFound = false;
             RepositoryVerificationException failure = null;
             for (final NodeResponse nodeResponse : responses) {
                 final GetBlobChecksumAction.Response response = nodeResponse.response;
                 final RepositoryVerificationException nodeFailure;
                 if (response.isNotFound()) {
-                    if (request.readEarly) {
+                    if (request.readEarly || request.getAbortWrite()) {
                         nodeFailure = null; // "not found" is legitimate iff we tried to read it before the write completed
                     } else {
                         nodeFailure = new RepositoryVerificationException(
@@ -493,6 +502,7 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
                         );
                     }
                 } else {
+                    anyFound = true;
                     final long actualChecksum = response.getChecksum();
                     if (response.getBytesRead() == checksumLength && checksumPredicate.test(actualChecksum)) {
                         nodeFailure = null; // checksum ok
@@ -524,6 +534,19 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
                     }
                 }
             }
+
+            if (request.getAbortWrite() && anyFound) {
+                final RepositoryVerificationException atomicityFailure = new RepositoryVerificationException(
+                    request.getRepositoryName(),
+                    "upload of blob was aborted, but blob was erroneously found by at least one node"
+                );
+                if (failure == null) {
+                    failure = atomicityFailure;
+                } else {
+                    failure.addSuppressed(atomicityFailure);
+                }
+            }
+
             if (failure != null) {
                 cleanUpAndReturnFailure(failure);
                 return;
@@ -609,6 +632,7 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
         private final int earlyReadNodeCount;
         private final boolean readEarly;
         private final boolean writeAndOverwrite;
+        private final boolean abortWrite;
 
         Request(
             String repositoryName,
@@ -620,10 +644,12 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
             int readNodeCount,
             int earlyReadNodeCount,
             boolean readEarly,
-            boolean writeAndOverwrite
+            boolean writeAndOverwrite,
+            boolean abortWrite
         ) {
             assert 0 < targetLength;
             assert targetLength <= MAX_ATOMIC_WRITE_SIZE || (readEarly == false && writeAndOverwrite == false) : "oversized atomic write";
+            assert writeAndOverwrite == false || abortWrite == false : "cannot set writeAndOverwrite and abortWrite";
             this.repositoryName = repositoryName;
             this.blobPath = blobPath;
             this.blobName = blobName;
@@ -634,6 +660,7 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
             this.earlyReadNodeCount = earlyReadNodeCount;
             this.readEarly = readEarly;
             this.writeAndOverwrite = writeAndOverwrite;
+            this.abortWrite = abortWrite;
         }
 
         Request(StreamInput in) throws IOException {
@@ -648,6 +675,11 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
             earlyReadNodeCount = in.readVInt();
             readEarly = in.readBoolean();
             writeAndOverwrite = in.readBoolean();
+            if (in.getVersion().onOrAfter(Version.V_8_0_0)) {
+                abortWrite = in.readBoolean();
+            } else {
+                abortWrite = false;
+            }
         }
 
         @Override
@@ -663,6 +695,11 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
             out.writeVInt(earlyReadNodeCount);
             out.writeBoolean(readEarly);
             out.writeBoolean(writeAndOverwrite);
+            if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
+                out.writeBoolean(abortWrite);
+            } else if (abortWrite) {
+                throw new IllegalStateException("cannot send abortWrite request to node of version [" + out.getVersion() + "]");
+            }
         }
 
         @Override
@@ -686,6 +723,8 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
                 + readEarly
                 + ", writeAndOverwrite="
                 + writeAndOverwrite
+                + ", abortWrite="
+                + abortWrite
                 + "]";
         }
 
@@ -720,8 +759,8 @@ public class BlobAnalyzeAction extends ActionType<BlobAnalyzeAction.Response> {
             return targetLength;
         }
 
-        public long getSeed() {
-            return seed;
+        public boolean getAbortWrite() {
+            return abortWrite;
         }
 
     }

+ 14 - 0
x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobWriteAbortedException.java

@@ -0,0 +1,14 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.repositories.blobstore.testkit;
+
+public class BlobWriteAbortedException extends RuntimeException {
+    public BlobWriteAbortedException() {
+        super("write aborted");
+    }
+}
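
For third-party repository implementations, the contract being tested is that this exception propagates out of the write call without the blob being created. A toy sketch of a conforming in-memory store follows; it is illustrative only and does not use Elasticsearch's BlobContainer API.

// Toy in-memory store, illustrative only. The analyzer aborts a write by throwing
// BlobWriteAbortedException from the InputStream it supplies; a conforming store must
// let the exception propagate and must not leave a blob behind (not even an empty one),
// otherwise the follow-up reads will find it and the analysis fails.
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class ToyBlobStore {
    private final Map<String, byte[]> blobs = new ConcurrentHashMap<>();

    void writeBlob(String blobName, InputStream inputStream) throws IOException {
        final byte[] contents = inputStream.readAllBytes(); // may propagate BlobWriteAbortedException
        blobs.put(blobName, contents); // only reached when the write completed normally
    }

    boolean blobExists(String blobName) {
        return blobs.containsKey(blobName);
    }
}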

+ 34 - 3
x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java

@@ -13,6 +13,7 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionListenerResponseHandler;
 import org.elasticsearch.action.ActionRequest;
@@ -453,6 +454,7 @@ public class RepositoryAnalyzeAction extends ActionType<RepositoryAnalyzeAction.
             for (int i = 0; i < request.getBlobCount(); i++) {
                 final long targetLength = blobSizes.get(i);
                 final boolean smallBlob = targetLength <= MAX_ATOMIC_WRITE_SIZE; // avoid the atomic API for larger blobs
+                final boolean abortWrite = smallBlob && request.isAbortWritePermitted() && rarely(random);
                 final VerifyBlobTask verifyBlobTask = new VerifyBlobTask(
                     nodes.get(random.nextInt(nodes.size())),
                     new BlobAnalyzeAction.Request(
@@ -464,11 +466,13 @@ public class RepositoryAnalyzeAction extends ActionType<RepositoryAnalyzeAction.
                         nodes,
                         request.getReadNodeCount(),
                         request.getEarlyReadNodeCount(),
-                        smallBlob && random.nextDouble() < request.getRareActionProbability(),
+                        smallBlob && rarely(random),
                         repository.supportURLRepo()
                             && repository.hasAtomicOverwrites()
                             && smallBlob
-                            && random.nextDouble() < request.getRareActionProbability()
+                            && rarely(random)
+                            && abortWrite == false,
+                        abortWrite
                     )
                 );
                 queue.add(verifyBlobTask);
@@ -479,6 +483,10 @@ public class RepositoryAnalyzeAction extends ActionType<RepositoryAnalyzeAction.
             }
         }
 
+        private boolean rarely(Random random) {
+            return random.nextDouble() < request.getRareActionProbability();
+        }
+
         private void processNextTask() {
             final VerifyBlobTask thisTask = queue.poll();
             if (isRunning() == false || thisTask == null) {
@@ -500,7 +508,9 @@ public class RepositoryAnalyzeAction extends ActionType<RepositoryAnalyzeAction.
                         @Override
                         public void handleResponse(BlobAnalyzeAction.Response response) {
                             logger.trace("finished [{}]", thisTask);
-                            expectedBlobs.add(thisTask.request.getBlobName()); // each task cleans up its own mess on failure
+                            if (thisTask.request.getAbortWrite() == false) {
+                                expectedBlobs.add(thisTask.request.getBlobName()); // each task cleans up its own mess on failure
+                            }
                             if (request.detailed) {
                                 synchronized (responses) {
                                     responses.add(response);
@@ -672,6 +682,7 @@ public class RepositoryAnalyzeAction extends ActionType<RepositoryAnalyzeAction.
         private ByteSizeValue maxTotalDataSize = ByteSizeValue.ofGb(1);
         private boolean detailed = false;
         private DiscoveryNode reroutedFrom = null;
+        private boolean abortWritePermitted = true;
 
         public Request(String repositoryName) {
             this.repositoryName = repositoryName;
@@ -691,6 +702,11 @@ public class RepositoryAnalyzeAction extends ActionType<RepositoryAnalyzeAction.
             maxTotalDataSize = new ByteSizeValue(in);
             detailed = in.readBoolean();
             reroutedFrom = in.readOptionalWriteable(DiscoveryNode::new);
+            if (in.getVersion().onOrAfter(Version.V_8_0_0)) {
+                abortWritePermitted = in.readBoolean();
+            } else {
+                abortWritePermitted = false;
+            }
         }
 
         @Override
@@ -713,6 +729,11 @@ public class RepositoryAnalyzeAction extends ActionType<RepositoryAnalyzeAction.
             maxTotalDataSize.writeTo(out);
             out.writeBoolean(detailed);
             out.writeOptionalWriteable(reroutedFrom);
+            if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
+                out.writeBoolean(abortWritePermitted);
+            } else if (abortWritePermitted) {
+                throw new IllegalStateException("cannot send abortWritePermitted request to node of version [" + out.getVersion() + "]");
+            }
         }
 
         @Override
@@ -839,6 +860,14 @@ public class RepositoryAnalyzeAction extends ActionType<RepositoryAnalyzeAction.
             return rareActionProbability;
         }
 
+        public void abortWritePermitted(boolean abortWritePermitted) {
+            this.abortWritePermitted = abortWritePermitted;
+        }
+
+        public boolean isAbortWritePermitted() {
+            return abortWritePermitted;
+        }
+
         @Override
         public String toString() {
             return "Request{" + getDescription() + '}';
@@ -868,6 +897,8 @@ public class RepositoryAnalyzeAction extends ActionType<RepositoryAnalyzeAction.
                 + maxTotalDataSize
                 + ", detailed="
                 + detailed
+                + ", abortWritePermitted="
+                + abortWritePermitted
                 + "]";
         }
 

+ 3 - 0
x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RestRepositoryAnalyzeAction.java

@@ -51,6 +51,9 @@ public class RestRepositoryAnalyzeAction extends BaseRestHandler {
         );
         analyzeRepositoryRequest.timeout(request.paramAsTime("timeout", analyzeRepositoryRequest.getTimeout()));
         analyzeRepositoryRequest.detailed(request.paramAsBoolean("detailed", analyzeRepositoryRequest.getDetailed()));
+        analyzeRepositoryRequest.abortWritePermitted(
+            request.paramAsBoolean("rarely_abort_writes", analyzeRepositoryRequest.isAbortWritePermitted())
+        );
 
         RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel());
         return channel -> cancelClient.execute(