Explore the Code

Don't mark backing indices of overlapping data streams as conflicts. (#69666)

Forward port #69625 to master.

Today, when upgrading from a 7.9.x or 7.10.x version to 7.11.x or later, if
two (or more) data streams exist that have an overlapping prefix, and one
data stream name ends with a date suffix that matches the backing index
date pattern (uuuu.MM.dd), then newly upgraded nodes may refuse to join,
essentially preventing the cluster upgrade from continuing.

In this case the validation logic in `Metadata#validateDataStreams(...)` confuses
backing indices of one data stream as regular indices and thinks these indices
collide with another data stream.

Note that this validation only incorrectly fails if both {data-stream-name} and
{data-stream-name}-{uuuu.MM.dd} exist, the latter has been rolled over more
times than the former, and the cluster is then upgraded to 7.11+.

A 7.10.2 cluster with:

Data stream 1: logs-foobar
Backing indices: logs-foobar-000001

Data stream 2: logs-foobar-2021.01.13
Backing indices: logs-foobar-2021.01.13-000001, logs-foobar-2021.01.13-000002

When upgrading, then the new node will not join, because it thinks that
'logs-foobar-2021.01.13-000002' index collides with the backing index space
of data stream 'logs-foobar'.

This change tries to address this.
Martijn van Groningen 4 years ago
parent
commit
2f49109175

+ 17 - 0
server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java

@@ -1454,6 +1454,23 @@ public class Metadata implements Iterable<IndexMetadata>, Diffable<Metadata>, To
                             .keySet().stream()
                             .filter(s -> BACKING_INDEX_SUFFIX.matcher(s.substring(prefix.length())).matches())
                             .filter(s -> IndexMetadata.parseIndexNameCounter(s) > ds.getGeneration())
+                            .filter(indexName -> {
+                                // Logic to avoid marking backing indices of other data streams as conflict:
+
+                                // Backing index pattern is either .ds-[ds-name]-[date]-[generation] for 7.11 and up or
+                                // .ds-[ds-name]-[generation] for 7.9 to 7.10.2. So two step process to capture the data stream name:
+                                String dataStreamName =
+                                    indexName.substring(DataStream.BACKING_INDEX_PREFIX.length(), indexName.lastIndexOf('-'));
+                                if (dsMetadata.dataStreams().containsKey(dataStreamName)) {
+                                    return false;
+                                }
+                                dataStreamName = indexName.substring(0, indexName.lastIndexOf('-'));
+                                if (dsMetadata.dataStreams().containsKey(dataStreamName)) {
+                                    return false;
+                                } else {
+                                    return true;
+                                }
+                            })
                             .collect(Collectors.toSet());
 
                     if (conflicts.size() > 0) {

+ 33 - 0
server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java

@@ -35,6 +35,7 @@ import org.elasticsearch.test.ESTestCase;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -1013,6 +1014,38 @@ public class MetadataTests extends ESTestCase {
         assertThat(metadata.dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName));
     }
 
+    // Regression test for #69625/#69666: two data streams whose names overlap such that
+    // one name equals the other plus a date suffix (uuuu.MM.dd) matching the backing
+    // index date pattern must not trigger a false conflict in
+    // Metadata#validateDataStreams(...) when Metadata.Builder#build() runs validation.
+    public void testOverlappingDataStreamNamesWithBackingIndexDatePattern() {
+        // Data stream 1: name ends with a date that looks like a backing index date suffix.
+        final String dataStreamName1 = "logs-foobar-2021.01.13";
+        Metadata.Builder b = Metadata.builder();
+        // Backing indices use the pre-7.11 naming scheme (.ds-[name]-[generation]),
+        // which is the scenario that previously confused the validation logic.
+        IndexMetadata ds1Index1 = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName1, 1, Version.V_7_10_2))
+            .settings(settings(Version.CURRENT))
+            .numberOfShards(1)
+            .numberOfReplicas(1)
+            .build();
+        b.put(ds1Index1, false);
+        // Second generation: this index's counter (2) exceeds data stream 2's generation (1),
+        // which is what used to make it look like a conflict with "logs-foobar".
+        IndexMetadata ds1Index2 = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName1, 2, Version.V_7_10_2))
+            .settings(settings(Version.CURRENT))
+            .numberOfShards(1)
+            .numberOfReplicas(1)
+            .build();
+        b.put(ds1Index2, false);
+        b.put(new DataStream(dataStreamName1, createTimestampField("@timestamp"),
+            Arrays.asList(ds1Index1.getIndex(), ds1Index2.getIndex()), 2, null));
+
+        // Data stream 2: a prefix of data stream 1's name, with a single backing index.
+        final String dataStreamName2 = "logs-foobar";
+        IndexMetadata ds2Index1 = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName2, 1, Version.V_7_10_2))
+            .settings(settings(Version.CURRENT))
+            .numberOfShards(1)
+            .numberOfReplicas(1)
+            .build();
+        b.put(ds2Index1, false);
+        b.put(new DataStream(dataStreamName2, createTimestampField("@timestamp"),
+            Collections.singletonList(ds2Index1.getIndex()), 1, null));
+
+        // build() runs metadata validation; before the fix this threw an
+        // IllegalStateException reporting a backing-index name conflict.
+        Metadata metadata = b.build();
+        assertThat(metadata.dataStreams().size(), equalTo(2));
+    }
+
     public void testBuildIndicesLookupForDataStreams() {
         Metadata.Builder b = Metadata.builder();
         int numDataStreams = randomIntBetween(2, 8);

+ 7 - 0
x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataStreamsUpgradeIT.java

@@ -49,12 +49,19 @@ public class DataStreamsUpgradeIT extends AbstractUpgradeTestCase {
                 b.append("{\"create\":{\"_index\":\"").append("logs-foobar").append("\"}}\n");
                 b.append("{\"@timestamp\":\"2020-12-12\",\"test\":\"value").append(i).append("\"}\n");
             }
+
+            b.append("{\"create\":{\"_index\":\"").append("logs-foobar-2021.01.13").append("\"}}\n");
+            b.append("{\"@timestamp\":\"2020-12-12\",\"test\":\"value").append(0).append("\"}\n");
+
             Request bulk = new Request("POST", "/_bulk");
             bulk.addParameter("refresh", "true");
             bulk.addParameter("filter_path", "errors");
             bulk.setJsonEntity(b.toString());
             Response response = client().performRequest(bulk);
             assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8));
+
+            Request rolloverRequest = new Request("POST", "/logs-foobar-2021.01.13/_rollover");
+            client().performRequest(rolloverRequest);
         } else if (CLUSTER_TYPE == ClusterType.MIXED) {
             long nowMillis = System.currentTimeMillis();
             Request rolloverRequest = new Request("POST", "/logs-foobar/_rollover");