upgrade to Lucene 8.6.0 snapshot (#56175)

Ignacio Vera, 5 years ago
parent
commit e62fb090fa
66 files changed, 228 additions and 315 deletions
  1. 1 1
      buildSrc/version.properties
  2. 2 2
      docs/Versions.asciidoc
  3. 0 1
      modules/lang-expression/licenses/lucene-expressions-8.5.1.jar.sha1
  4. 1 0
      modules/lang-expression/licenses/lucene-expressions-8.6.0-snapshot-6c9024f7735.jar.sha1
  5. 19 0
      modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java
  6. 0 1
      plugins/analysis-icu/licenses/lucene-analyzers-icu-8.5.1.jar.sha1
  7. 1 0
      plugins/analysis-icu/licenses/lucene-analyzers-icu-8.6.0-snapshot-6c9024f7735.jar.sha1
  8. 0 1
      plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.5.1.jar.sha1
  9. 1 0
      plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.6.0-snapshot-6c9024f7735.jar.sha1
  10. 0 1
      plugins/analysis-nori/licenses/lucene-analyzers-nori-8.5.1.jar.sha1
  11. 1 0
      plugins/analysis-nori/licenses/lucene-analyzers-nori-8.6.0-snapshot-6c9024f7735.jar.sha1
  12. 0 1
      plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.5.1.jar.sha1
  13. 1 0
      plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.6.0-snapshot-6c9024f7735.jar.sha1
  14. 0 1
      plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.5.1.jar.sha1
  15. 1 0
      plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.6.0-snapshot-6c9024f7735.jar.sha1
  16. 0 1
      plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.5.1.jar.sha1
  17. 1 0
      plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.6.0-snapshot-6c9024f7735.jar.sha1
  18. 0 1
      plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.5.1.jar.sha1
  19. 1 0
      plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.6.0-snapshot-6c9024f7735.jar.sha1
  20. 116 20
      qa/evil-tests/src/test/java/org/elasticsearch/index/engine/EvilInternalEngineTests.java
  21. 0 1
      server/licenses/lucene-analyzers-common-8.5.1.jar.sha1
  22. 1 0
      server/licenses/lucene-analyzers-common-8.6.0-snapshot-6c9024f7735.jar.sha1
  23. 0 1
      server/licenses/lucene-backward-codecs-8.5.1.jar.sha1
  24. 1 0
      server/licenses/lucene-backward-codecs-8.6.0-snapshot-6c9024f7735.jar.sha1
  25. 0 1
      server/licenses/lucene-core-8.5.1.jar.sha1
  26. 1 0
      server/licenses/lucene-core-8.6.0-snapshot-6c9024f7735.jar.sha1
  27. 0 1
      server/licenses/lucene-grouping-8.5.1.jar.sha1
  28. 1 0
      server/licenses/lucene-grouping-8.6.0-snapshot-6c9024f7735.jar.sha1
  29. 0 1
      server/licenses/lucene-highlighter-8.5.1.jar.sha1
  30. 1 0
      server/licenses/lucene-highlighter-8.6.0-snapshot-6c9024f7735.jar.sha1
  31. 0 1
      server/licenses/lucene-join-8.5.1.jar.sha1
  32. 1 0
      server/licenses/lucene-join-8.6.0-snapshot-6c9024f7735.jar.sha1
  33. 0 1
      server/licenses/lucene-memory-8.5.1.jar.sha1
  34. 1 0
      server/licenses/lucene-memory-8.6.0-snapshot-6c9024f7735.jar.sha1
  35. 0 1
      server/licenses/lucene-misc-8.5.1.jar.sha1
  36. 1 0
      server/licenses/lucene-misc-8.6.0-snapshot-6c9024f7735.jar.sha1
  37. 0 1
      server/licenses/lucene-queries-8.5.1.jar.sha1
  38. 1 0
      server/licenses/lucene-queries-8.6.0-snapshot-6c9024f7735.jar.sha1
  39. 0 1
      server/licenses/lucene-queryparser-8.5.1.jar.sha1
  40. 1 0
      server/licenses/lucene-queryparser-8.6.0-snapshot-6c9024f7735.jar.sha1
  41. 0 1
      server/licenses/lucene-sandbox-8.5.1.jar.sha1
  42. 1 0
      server/licenses/lucene-sandbox-8.6.0-snapshot-6c9024f7735.jar.sha1
  43. 0 1
      server/licenses/lucene-spatial-extras-8.5.1.jar.sha1
  44. 1 0
      server/licenses/lucene-spatial-extras-8.6.0-snapshot-6c9024f7735.jar.sha1
  45. 0 1
      server/licenses/lucene-spatial3d-8.5.1.jar.sha1
  46. 1 0
      server/licenses/lucene-spatial3d-8.6.0-snapshot-6c9024f7735.jar.sha1
  47. 0 1
      server/licenses/lucene-suggest-8.5.1.jar.sha1
  48. 1 0
      server/licenses/lucene-suggest-8.6.0-snapshot-6c9024f7735.jar.sha1
  49. 1 1
      server/src/main/java/org/elasticsearch/Version.java
  50. 0 1
      server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
  51. 0 3
      server/src/main/java/org/elasticsearch/index/IndexSettings.java
  52. 5 6
      server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java
  53. 2 33
      server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
  54. 2 7
      server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
  55. 0 79
      server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
  56. 8 3
      server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
  57. 0 8
      test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java
  58. 0 4
      test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
  59. 0 81
      test/framework/src/test/java/org/elasticsearch/test/CorruptionUtilsTests.java
  60. 0 1
      x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java
  61. 6 6
      x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java
  62. 1 4
      x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java
  63. 19 9
      x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CachedBlobContainerIndexInput.java
  64. 23 23
      x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/direct/DirectBlobContainerIndexInput.java
  65. 0 1
      x-pack/plugin/sql/sql-action/licenses/lucene-core-8.5.1.jar.sha1
  66. 1 0
      x-pack/plugin/sql/sql-action/licenses/lucene-core-8.6.0-snapshot-6c9024f7735.jar.sha1

+ 1 - 1
buildSrc/version.properties

@@ -1,5 +1,5 @@
 elasticsearch     = 8.0.0
-lucene            = 8.5.1
+lucene            = 8.6.0-snapshot-6c9024f7735
 
 bundled_jdk_vendor = adoptopenjdk
 bundled_jdk = 14+36

+ 2 - 2
docs/Versions.asciidoc

@@ -1,8 +1,8 @@
 
 include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[]
 
-:lucene_version:        8.5.1
-:lucene_version_path:   8_5_1
+:lucene_version:        8.6.0
+:lucene_version_path:   8_6_0
 :jdk:                   11.0.2
 :jdk_major:             11
 :build_flavor:          default

+ 0 - 1
modules/lang-expression/licenses/lucene-expressions-8.5.1.jar.sha1

@@ -1 +0,0 @@
-666436c6624adac8af49623e7ac58d565bd88902

+ 1 - 0
modules/lang-expression/licenses/lucene-expressions-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+f7e94697a2f2e65aa19056118ddaa1a00df7ebbc

+ 19 - 0
modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java

@@ -19,6 +19,7 @@
 package org.elasticsearch.percolator;
 
 import org.apache.lucene.document.BinaryRange;
+import org.apache.lucene.index.PrefixCodedTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.BlendedTermQuery;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -38,6 +39,7 @@ import org.apache.lucene.search.spans.SpanOrQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.automaton.ByteRunAutomaton;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
 import org.elasticsearch.index.query.DateRangeIncludingNowQuery;
@@ -49,6 +51,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
 final class QueryAnalyzer {
@@ -204,6 +207,22 @@ final class QueryAnalyzer {
             }
         }
 
+        @Override
+        public void consumeTermsMatching(Query query, String field, Supplier<ByteRunAutomaton> automaton) {
+            if (query instanceof TermInSetQuery) {
+                TermInSetQuery q = (TermInSetQuery) query;
+                PrefixCodedTerms.TermIterator ti = q.getTermData().iterator();
+                BytesRef term;
+                Set<QueryExtraction> qe = new HashSet<>();
+                while ((term = ti.next()) != null) {
+                    qe.add(new QueryExtraction(new Term(field, term)));
+                }
+                this.terms.add(new Result(true, qe, 1));
+            } else {
+                super.consumeTermsMatching(query, field, automaton);
+            }
+        }
+
     }
 
     private static Result pointRangeQuery(PointRangeQuery query) {
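
Note: the new override hooks into QueryVisitor's consumeTermsMatching callback and special-cases TermInSetQuery, walking its prefix-coded term data directly instead of expanding terms through the automaton. A minimal standalone sketch of that term extraction (the helper class and names are illustrative, not part of this commit):

    import org.apache.lucene.index.PrefixCodedTerms;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.TermInSetQuery;
    import org.apache.lucene.util.BytesRef;

    import java.util.ArrayList;
    import java.util.List;

    class TermInSetQueryTerms {
        // Collect every term of a TermInSetQuery for the given field, as the visitor above does.
        static List<Term> extract(String field, TermInSetQuery query) {
            List<Term> terms = new ArrayList<>();
            PrefixCodedTerms.TermIterator it = query.getTermData().iterator();
            for (BytesRef term = it.next(); term != null; term = it.next()) {
                terms.add(new Term(field, BytesRef.deepCopyOf(term)));
            }
            return terms;
        }
    }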

+ 0 - 1
plugins/analysis-icu/licenses/lucene-analyzers-icu-8.5.1.jar.sha1

@@ -1 +0,0 @@
-0a8422b9b8a1b936ff354add5fa70e8e74497b30

+ 1 - 0
plugins/analysis-icu/licenses/lucene-analyzers-icu-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+891b25ddde3edffe59f9f25345762078203ddb29

+ 0 - 1
plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.5.1.jar.sha1

@@ -1 +0,0 @@
-87015734d14c46347fac8b6e5f52ea972082a34e

+ 1 - 0
plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+56f0262ae595875e6e163b8e31d8fcc464c208fe

+ 0 - 1
plugins/analysis-nori/licenses/lucene-analyzers-nori-8.5.1.jar.sha1

@@ -1 +0,0 @@
-bb3a59f0e68d659d677a9534282b94a3caaf20be

+ 1 - 0
plugins/analysis-nori/licenses/lucene-analyzers-nori-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+0b97440d8349b6e19059ef1f8566ea8753166e81

+ 0 - 1
plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.5.1.jar.sha1

@@ -1 +0,0 @@
-f281e8f6446250e0b2ef93768b9f822f4a2dc7b5

+ 1 - 0
plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+2b65d6825a96eb2bea79d76606fdd76a789a3cd4

+ 0 - 1
plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.5.1.jar.sha1

@@ -1 +0,0 @@
-d6f919075b16eb42461500838367227c467b633c

+ 1 - 0
plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+f88549ade9a0f77856d3b7808920e105e9d61bc4

+ 0 - 1
plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.5.1.jar.sha1

@@ -1 +0,0 @@
-6af456327323cf6897a5fe64ba9628556665094b

+ 1 - 0
plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+ea5671e66acb6f70a6c7cd16276b24ed0751dbf5

+ 0 - 1
plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.5.1.jar.sha1

@@ -1 +0,0 @@
-1994c5719e4a6e39aaffdb2b5832511d87fbc675

+ 1 - 0
plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+bac12d02041e93e9c73c99ac0e7798a0382453c7

+ 116 - 20
qa/evil-tests/src/test/java/org/elasticsearch/index/engine/EvilInternalEngineTests.java

@@ -19,14 +19,20 @@
 
 package org.elasticsearch.index.engine;
 
+import org.apache.lucene.index.FilterMergePolicy;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.MergeScheduler;
+import org.apache.lucene.index.MergeTrigger;
 import org.apache.lucene.index.SegmentCommitInfo;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.store.Directory;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.index.mapper.ParsedDocument;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
@@ -54,31 +60,72 @@ public class EvilInternalEngineTests extends EngineTestCase {
             });
             final AtomicReference<List<SegmentCommitInfo>> segmentsReference = new AtomicReference<>();
 
+            final FilterMergePolicy mergePolicy = new FilterMergePolicy(newMergePolicy()) {
+                @Override
+                public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount,
+                                                           Map<SegmentCommitInfo, Boolean> segmentsToMerge,
+                                                           MergeContext mergeContext) throws IOException {
+                    final List<SegmentCommitInfo> segments = segmentsReference.get();
+                    if (segments != null) {
+                        final MergeSpecification spec = new MergeSpecification();
+                        spec.add(new OneMerge(segments));
+                        return spec;
+                    }
+                    return super.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, mergeContext);
+                }
+
+                @Override
+                public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos,
+                                                     MergeContext mergeContext) throws IOException {
+                    final List<SegmentCommitInfo> segments = segmentsReference.get();
+                    if (segments != null) {
+                        final MergeSpecification spec = new MergeSpecification();
+                        spec.add(new OneMerge(segments));
+                        return spec;
+                    }
+                    return super.findMerges(mergeTrigger, segmentInfos, mergeContext);
+                }
+            };
+
             try (Engine e = createEngine(
                     defaultSettings,
                     store,
                     primaryTranslogDir,
-                    newMergePolicy(),
-                    (directory, iwc) -> new IndexWriter(directory, iwc) {
-                        @Override
-                        public void merge(final MergePolicy.OneMerge merge) throws IOException {
-                            throw new OutOfMemoryError("640K ought to be enough for anybody");
-                        }
-
-                        @Override
-                        public synchronized MergePolicy.OneMerge getNextMerge() {
-                            /*
-                             * This will be called when we flush when we will not be ready to return the segments. After the segments are on
-                             * disk, we can only return them from here once or the merge scheduler will be stuck in a loop repeatedly
-                             * peeling off the same segments to schedule for merging.
-                             */
-                            if (segmentsReference.get() == null) {
-                                return super.getNextMerge();
-                            } else {
-                                final List<SegmentCommitInfo> segments = segmentsReference.getAndSet(null);
-                                return new MergePolicy.OneMerge(segments);
+                    mergePolicy,
+                    (directory, iwc) -> {
+                        final MergeScheduler mergeScheduler = iwc.getMergeScheduler();
+                        assertNotNull(mergeScheduler);
+                        iwc.setMergeScheduler(new FilterMergeScheduler(mergeScheduler) {
+                            @Override
+                            public void merge(MergeSource mergeSource, MergeTrigger trigger) throws IOException {
+                                final FilterMergeSource wrappedMergeSource = new FilterMergeSource(mergeSource) {
+                                    @Override
+                                    public MergePolicy.OneMerge getNextMerge() {
+                                        synchronized (mergeSource) {
+                                            /*
+                                             * This will be called when we flush when we will not be ready to return the segments.
+                                             * After the segments are on disk, we can only return them from here once or the merge
+                                             * scheduler will be stuck in a loop repeatedly peeling off the same segments to schedule
+                                             * for merging.
+                                             */
+                                            if (segmentsReference.get() == null) {
+                                                return super.getNextMerge();
+                                            } else {
+                                                final List<SegmentCommitInfo> segments = segmentsReference.getAndSet(null);
+                                                return new MergePolicy.OneMerge(segments);
+                                            }
+                                        }
+                                    }
+
+                                    @Override
+                                    public void merge(MergePolicy.OneMerge merge) {
+                                        throw new OutOfMemoryError("640K ought to be enough for anybody");
+                                    }
+                                };
+                                super.merge(wrappedMergeSource, trigger);
                             }
-                        }
+                        });
+                        return new IndexWriter(directory, iwc);
                     },
                     null,
                     null)) {
@@ -105,5 +152,54 @@ public class EvilInternalEngineTests extends EngineTestCase {
         }
     }
 
+    static class FilterMergeScheduler extends MergeScheduler {
+        private final MergeScheduler delegate;
+
+        FilterMergeScheduler(MergeScheduler delegate) {
+            this.delegate = delegate;
+        }
+
+        @Override
+        public Directory wrapForMerge(MergePolicy.OneMerge merge, Directory in) {
+            return delegate.wrapForMerge(merge, in);
+        }
+
+        @Override
+        public void merge(MergeSource mergeSource, MergeTrigger trigger) throws IOException {
+            delegate.merge(mergeSource, trigger);
+        }
+
+        @Override
+        public void close() throws IOException {
+            delegate.close();
+        }
+    }
 
+    static class FilterMergeSource implements MergeScheduler.MergeSource {
+        private final MergeScheduler.MergeSource delegate;
+
+        FilterMergeSource(MergeScheduler.MergeSource delegate) {
+            this.delegate = delegate;
+        }
+
+        @Override
+        public MergePolicy.OneMerge getNextMerge() {
+            return delegate.getNextMerge();
+        }
+
+        @Override
+        public void onMergeFinished(MergePolicy.OneMerge merge) {
+            delegate.onMergeFinished(merge);
+        }
+
+        @Override
+        public boolean hasPendingMerges() {
+            return delegate.hasPendingMerges();
+        }
+
+        @Override
+        public void merge(MergePolicy.OneMerge merge) throws IOException {
+            delegate.merge(merge);
+        }
+    }
 }
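
Note: with this upgrade the merge plumbing moves off IndexWriter: a MergeScheduler is handed a MergeScheduler.MergeSource, which is why the test now wraps the scheduler and merge source instead of overriding IndexWriter#merge and IndexWriter#getNextMerge. As a rough sketch of the new contract (illustrative class, not from this commit), a scheduler that simply drains pending merges on the calling thread looks like this:

    import org.apache.lucene.index.MergePolicy;
    import org.apache.lucene.index.MergeScheduler;
    import org.apache.lucene.index.MergeTrigger;

    import java.io.IOException;

    class DrainingMergeScheduler extends MergeScheduler {
        @Override
        public void merge(MergeSource mergeSource, MergeTrigger trigger) throws IOException {
            // Pull merges from the source and run them inline until none are pending.
            MergePolicy.OneMerge merge;
            while ((merge = mergeSource.getNextMerge()) != null) {
                mergeSource.merge(merge);
            }
        }

        @Override
        public void close() throws IOException {
            // nothing to clean up in this sketch
        }
    }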

+ 0 - 1
server/licenses/lucene-analyzers-common-8.5.1.jar.sha1

@@ -1 +0,0 @@
-704685ddf536e1af4da025b6e6f4e50b9846ef18

+ 1 - 0
server/licenses/lucene-analyzers-common-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+e7fb1f82ecc255e015ae6fe8bb77c8ec8b8748ce

+ 0 - 1
server/licenses/lucene-backward-codecs-8.5.1.jar.sha1

@@ -1 +0,0 @@
-0ab12c24a7c33ef5dfe8b57f17f67fec4a3fee1c

+ 1 - 0
server/licenses/lucene-backward-codecs-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+637bd4a785df39cf7720160e3d7ef40eabad13fc

+ 0 - 1
server/licenses/lucene-core-8.5.1.jar.sha1

@@ -1 +0,0 @@
-24212de43c19269f5211f3e79eb2f414c4a0254b

+ 1 - 0
server/licenses/lucene-core-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+ec8401dfd1b41113eba06f3d626a1f2fdf589335

+ 0 - 1
server/licenses/lucene-grouping-8.5.1.jar.sha1

@@ -1 +0,0 @@
-4404f3ff6341b7518843d09141df743bf91a8284

+ 1 - 0
server/licenses/lucene-grouping-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+138abf463c27088a50a2b7c4a267694386c3a0cb

+ 0 - 1
server/licenses/lucene-highlighter-8.5.1.jar.sha1

@@ -1 +0,0 @@
-142f5f249aa0803f8283a3d08615e37a56f40e8a

+ 1 - 0
server/licenses/lucene-highlighter-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+d8d618145b7eff4ea1a4081ef1918e8967b5ec8e

+ 0 - 1
server/licenses/lucene-join-8.5.1.jar.sha1

@@ -1 +0,0 @@
-b0a48846662fc504bd7796b5506dad94981fca08

+ 1 - 0
server/licenses/lucene-join-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+4b09c5c1a69ce6fe22328006d9129585009eb41a

+ 0 - 1
server/licenses/lucene-memory-8.5.1.jar.sha1

@@ -1 +0,0 @@
-ba9e24b90323aacc98a4ac661ac34bfbf0ed66d8

+ 1 - 0
server/licenses/lucene-memory-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+1491780984dc014d34d3d1d0c6656630ba67ca98

+ 0 - 1
server/licenses/lucene-misc-8.5.1.jar.sha1

@@ -1 +0,0 @@
-a0418e9bc16fc876448accb828a6ca38ed63d4a8

+ 1 - 0
server/licenses/lucene-misc-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+08a78a91082bd6ae7e4e5535060a1e59a51d8983

+ 0 - 1
server/licenses/lucene-queries-8.5.1.jar.sha1

@@ -1 +0,0 @@
-269c67a4ee9b806cfdacddc211744243cbcbd127

+ 1 - 0
server/licenses/lucene-queries-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+a58e4fa4d7390d0c2dfaa7697702e0c4ab5add48

+ 0 - 1
server/licenses/lucene-queryparser-8.5.1.jar.sha1

@@ -1 +0,0 @@
-ee5ba0e07a178a32987b0a92da149f2104e26dd9

+ 1 - 0
server/licenses/lucene-queryparser-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+745ed85193fa82ef75ac92be524e90d89ead7345

+ 0 - 1
server/licenses/lucene-sandbox-8.5.1.jar.sha1

@@ -1 +0,0 @@
-f1461680109e499d8c58dcaf5d314aeeef41d99a

+ 1 - 0
server/licenses/lucene-sandbox-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+e46dc362bfbac1609f4ecbd7838acd5dae1aa960

+ 0 - 1
server/licenses/lucene-spatial-extras-8.5.1.jar.sha1

@@ -1 +0,0 @@
-eece1ef3f919634c79b9ae9d99264ac9efa4276c

+ 1 - 0
server/licenses/lucene-spatial-extras-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+167ea704134a3e5bd6dd93283c030c21d3360c63

+ 0 - 1
server/licenses/lucene-spatial3d-8.5.1.jar.sha1

@@ -1 +0,0 @@
-a8fb2771ac562d60a3c945a4cef0e3742c390329

+ 1 - 0
server/licenses/lucene-spatial3d-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+f20507834f2b8c6103ffdc08ac687bdf73d45a09

+ 0 - 1
server/licenses/lucene-suggest-8.5.1.jar.sha1

@@ -1 +0,0 @@
-b5613f4995836fd9edae5925ed38559460721492

+ 1 - 0
server/licenses/lucene-suggest-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+ac6c604ce977f2e44a13159021ba4133594ccc40

+ 1 - 1
server/src/main/java/org/elasticsearch/Version.java

@@ -78,7 +78,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_7_7_0 = new Version(7070099, org.apache.lucene.util.Version.LUCENE_8_5_1);
     public static final Version V_7_8_0 = new Version(7080099, org.apache.lucene.util.Version.LUCENE_8_5_1);
     public static final Version V_7_9_0 = new Version(7090099, org.apache.lucene.util.Version.LUCENE_8_5_1);
-    public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_5_1);
+    public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_6_0);
     public static final Version CURRENT = V_8_0_0;
 
     private static final ImmutableOpenIntMap<Version> idToVersion;

+ 0 - 1
server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java

@@ -165,7 +165,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
             IndexSettings.DEFAULT_PIPELINE,
             IndexSettings.FINAL_PIPELINE,
             MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING,
-            IndexSettings.ON_HEAP_ID_TERMS_INDEX,
             ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING,
 
             // validate that built-in similarities don't get redefined

+ 0 - 3
server/src/main/java/org/elasticsearch/index/IndexSettings.java

@@ -84,9 +84,6 @@ public final class IndexSettings {
                         "[true, false, checksum] but was: " + s);
             }
         }, Property.IndexScope);
-    // This setting is undocumented as it is considered as an escape hatch.
-    public static final Setting<Boolean> ON_HEAP_ID_TERMS_INDEX =
-            Setting.boolSetting("index.force_memory_id_terms_dictionary", false, Property.IndexScope);
 
     /**
      * Index setting describing the maximum value of from + size on a query.

+ 5 - 6
server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java

@@ -21,7 +21,6 @@ package org.elasticsearch.index.engine;
 
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.MergeScheduler;
 import org.apache.lucene.index.OneMergeHelper;
@@ -80,7 +79,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
     }
 
     @Override
-    protected void doMerge(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
+    protected void doMerge(MergeSource mergeSource, MergePolicy.OneMerge merge) throws IOException {
         int totalNumDocs = merge.totalNumDocs();
         long totalSizeInBytes = merge.totalBytesSize();
         long timeNS = System.nanoTime();
@@ -98,7 +97,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
         }
         try {
             beforeMerge(onGoingMerge);
-            super.doMerge(writer, merge);
+            super.doMerge(mergeSource, merge);
         } finally {
             long tookMS = TimeValue.nsecToMSec(System.nanoTime() - timeNS);
 
@@ -162,14 +161,14 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
     }
 
     @Override
-    protected boolean maybeStall(IndexWriter writer) {
+    protected boolean maybeStall(MergeSource mergeSource) {
         // Don't stall here, because we do our own index throttling (in InternalEngine.IndexThrottle) when merges can't keep up
         return true;
     }
 
     @Override
-    protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
-        MergeThread thread = super.getMergeThread(writer, merge);
+    protected MergeThread getMergeThread(MergeSource mergeSource, MergePolicy.OneMerge merge) throws IOException {
+        MergeThread thread = super.getMergeThread(mergeSource, merge);
         thread.setName(EsExecutors.threadName(indexSettings, "[" + shardId.getIndexName() + "][" + shardId.id() + "]: " +
             thread.getName()));
         return thread;
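
Note: the same API change surfaces in ConcurrentMergeScheduler, whose protected hooks (doMerge, maybeStall, getMergeThread) now receive a MergeSource rather than an IndexWriter. A minimal subclass against the new signatures might look like this sketch (illustrative only, not part of the commit):

    import org.apache.lucene.index.ConcurrentMergeScheduler;
    import org.apache.lucene.index.MergePolicy;

    import java.io.IOException;

    class TimingMergeScheduler extends ConcurrentMergeScheduler {
        @Override
        protected void doMerge(MergeSource mergeSource, MergePolicy.OneMerge merge) throws IOException {
            long startNanos = System.nanoTime();
            try {
                super.doMerge(mergeSource, merge);
            } finally {
                long tookMillis = (System.nanoTime() - startNanos) / 1_000_000L;
                System.out.println("merged " + merge.totalNumDocs() + " docs in " + tookMillis + "ms");
            }
        }
    }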

+ 2 - 33
server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java

@@ -20,8 +20,6 @@
 package org.elasticsearch.index.engine;
 
 import org.apache.logging.log4j.Logger;
-import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
-import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader.FSTLoadMode;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.index.DirectoryReader;
@@ -29,7 +27,6 @@ import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.LiveIndexWriterConfig;
 import org.apache.lucene.index.MergePolicy;
@@ -50,9 +47,7 @@ import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FilterDirectory;
 import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.store.MMapDirectory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.InfoStream;
 import org.elasticsearch.Assertions;
@@ -91,7 +86,6 @@ import org.elasticsearch.index.seqno.SeqNoStats;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.ElasticsearchMergePolicy;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.store.FsDirectoryFactory;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.index.translog.TranslogConfig;
 import org.elasticsearch.index.translog.TranslogCorruptedException;
@@ -103,7 +97,6 @@ import org.elasticsearch.threadpool.ThreadPool;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
@@ -2102,24 +2095,10 @@ public class InternalEngine extends Engine {
         }
     }
 
-    static Map<String, String> getReaderAttributes(Directory directory, IndexSettings indexSettings) {
-        Directory unwrap = FilterDirectory.unwrap(directory);
-        boolean defaultOffHeap = FsDirectoryFactory.isHybridFs(unwrap) || unwrap instanceof MMapDirectory;
-        Map<String, String> attributes = new HashMap<>();
-        attributes.put(BlockTreeTermsReader.FST_MODE_KEY, defaultOffHeap ? FSTLoadMode.OFF_HEAP.name() : FSTLoadMode.ON_HEAP.name());
-        if (IndexSettings.ON_HEAP_ID_TERMS_INDEX.exists(indexSettings.getSettings())) {
-            final boolean idOffHeap = IndexSettings.ON_HEAP_ID_TERMS_INDEX.get(indexSettings.getSettings()) == false;
-            attributes.put(BlockTreeTermsReader.FST_MODE_KEY + "." + IdFieldMapper.NAME,
-                    idOffHeap ? FSTLoadMode.OFF_HEAP.name() : FSTLoadMode.ON_HEAP.name());
-        }
-        return Collections.unmodifiableMap(attributes);
-    }
-
     private IndexWriterConfig getIndexWriterConfig() {
         final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
         iwc.setCommitOnClose(false); // we by default don't commit on close
         iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
-        iwc.setReaderAttributes(getReaderAttributes(store.directory(), engineConfig.getIndexSettings()));
         iwc.setIndexDeletionPolicy(combinedDeletionPolicy);
         // with tests.verbose, lucene sets this up: plumb to align with filesystem stream
         boolean verbose = false;
@@ -2274,7 +2253,7 @@ public class InternalEngine extends Engine {
         }
 
         @Override
-        protected void handleMergeException(final Directory dir, final Throwable exc) {
+        protected void handleMergeException(final Throwable exc) {
             engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
                 @Override
                 public void onFailure(Exception e) {
@@ -2288,7 +2267,7 @@ public class InternalEngine extends Engine {
                      * confidence that the call stack does not contain catch statements that would cause the error that might be thrown
                      * here from being caught and never reaching the uncaught exception handler.
                      */
-                    failEngine("merge failed", new MergePolicy.MergeException(exc, dir));
+                    failEngine("merge failed", new MergePolicy.MergeException(exc));
                 }
             });
         }
@@ -2535,16 +2514,6 @@ public class InternalEngine extends Engine {
             super(d, conf);
         }
 
-        @Override
-        public long updateDocument(Term term, Iterable<? extends IndexableField> doc) {
-            throw new AssertionError("must not hard update document");
-        }
-
-        @Override
-        public long updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) {
-            throw new AssertionError("must not hard update documents");
-        }
-
         @Override
         public long deleteDocuments(Term... terms) {
             throw new AssertionError("must not hard delete documents");
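
Note: the deleted getReaderAttributes plumbing steered the terms-index FST load mode (on-heap vs. off-heap) through reader attributes; with this upgrade the engine stops setting those attributes entirely, and MergePolicy.MergeException no longer carries a Directory. A stripped-down sketch of the writer config after the change (simplified and illustrative, not the engine's real configuration):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriterConfig;

    class MinimalWriterConfig {
        static IndexWriterConfig create() {
            IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
            iwc.setCommitOnClose(false);                        // the engine commits explicitly
            iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); // reuse the existing index
            return iwc;                                         // no setReaderAttributes(...) call anymore
        }
    }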

+ 2 - 7
server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java

@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.index.engine;
 
-import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexWriter;
@@ -47,9 +46,7 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.function.BiFunction;
 import java.util.function.Function;
@@ -68,8 +65,6 @@ public class ReadOnlyEngine extends Engine {
      * Reader attributes used for read only engines. These attributes prevent loading term dictionaries on-heap even if the field is an
      * ID field.
      */
-    private static final Map<String, String> OFF_HEAP_READER_ATTRIBUTES = Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY,
-        BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name());
     private final SegmentInfos lastCommittedSegmentInfos;
     private final SeqNoStats seqNoStats;
     private final ElasticsearchReaderManager readerManager;
@@ -183,7 +178,7 @@ public class ReadOnlyEngine extends Engine {
 
     protected DirectoryReader open(IndexCommit commit) throws IOException {
         assert Transports.assertNotTransportThread("opening index commit of a read-only engine");
-        return new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(commit, OFF_HEAP_READER_ATTRIBUTES), Lucene.SOFT_DELETES_FIELD);
+        return new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(commit), Lucene.SOFT_DELETES_FIELD);
     }
 
     @Override
@@ -494,7 +489,7 @@ public class ReadOnlyEngine extends Engine {
 
     protected static DirectoryReader openDirectory(Directory directory) throws IOException {
         assert Transports.assertNotTransportThread("opening directory reader of a read-only engine");
-        final DirectoryReader reader = DirectoryReader.open(directory, OFF_HEAP_READER_ATTRIBUTES);
+        final DirectoryReader reader = DirectoryReader.open(directory);
         return new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD);
     }
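
Note: the OFF_HEAP_READER_ATTRIBUTES map is gone as well, so the read-only engine now calls DirectoryReader.open with just the commit or directory and still wraps the result to respect soft deletes. A self-contained sketch of that open-and-wrap pattern (the helper class and field parameter are illustrative):

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexCommit;
    import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper;

    import java.io.IOException;

    class ReadOnlyReaders {
        // Open an index commit and hide soft-deleted documents, as the engine does above.
        static DirectoryReader open(IndexCommit commit, String softDeletesField) throws IOException {
            return new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(commit), softDeletesField);
        }
    }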
 

+ 0 - 79
server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java

@@ -61,11 +61,8 @@ import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FilterDirectory;
 import org.apache.lucene.store.Lock;
-import org.apache.lucene.store.MMapDirectory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -105,7 +102,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.util.concurrent.ReleasableLock;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.core.internal.io.IOUtils;
-import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.codec.CodecService;
@@ -126,9 +122,7 @@ import org.elasticsearch.index.seqno.RetentionLeases;
 import org.elasticsearch.index.seqno.SeqNoStats;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.shard.ShardPath;
 import org.elasticsearch.index.shard.ShardUtils;
-import org.elasticsearch.index.store.FsDirectoryFactory;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.SnapshotMatchers;
 import org.elasticsearch.index.translog.TestTranslog;
@@ -5570,79 +5564,6 @@ public class InternalEngineTests extends EngineTestCase {
         assertThat(engine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L));
     }
 
-    public void testGetReaderAttributes() throws IOException {
-        Settings.Builder settingsBuilder = Settings.builder()
-                .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT);
-        Settings settings = settingsBuilder.build();
-        IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings);
-
-        try(BaseDirectoryWrapper dir = newFSDirectory(createTempDir())) {
-            Directory unwrap = FilterDirectory.unwrap(dir);
-            boolean isMMap = unwrap instanceof MMapDirectory;
-            Map<String, String> readerAttributes = InternalEngine.getReaderAttributes(dir, indexSettings);
-            assertEquals(Collections.singletonMap("blocktree.terms.fst", isMMap ? "OFF_HEAP" : "ON_HEAP"), readerAttributes);
-        }
-
-        try(MMapDirectory dir = new MMapDirectory(createTempDir())) {
-            Map<String, String> readerAttributes =
-                InternalEngine.getReaderAttributes(randomBoolean() ? dir :
-                    new MockDirectoryWrapper(random(), dir), indexSettings);
-            assertEquals(Collections.singletonMap("blocktree.terms.fst", "OFF_HEAP"), readerAttributes);
-        }
-
-        FsDirectoryFactory service = new FsDirectoryFactory();
-        Path tempDir = createTempDir().resolve(indexSettings.getUUID()).resolve("0");
-        ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(indexSettings.getIndex(), 0));
-        try (Directory directory = service.newDirectory(indexSettings, path)) {
-
-            Map<String, String> readerAttributes =
-                InternalEngine.getReaderAttributes(randomBoolean() ? directory :
-                    new MockDirectoryWrapper(random(), directory), indexSettings);
-            assertEquals(1, readerAttributes.size());
-
-            switch (IndexModule.defaultStoreType(true)) {
-                case HYBRIDFS:
-                case MMAPFS:
-                    assertEquals("OFF_HEAP", readerAttributes.get("blocktree.terms.fst"));
-                    break;
-                case NIOFS:
-                case SIMPLEFS:
-                case FS:
-                    assertEquals("ON_HEAP", readerAttributes.get("blocktree.terms.fst"));
-                    break;
-                default:
-                    fail("unknownw type");
-            }
-        }
-
-        settingsBuilder.put(IndexSettings.ON_HEAP_ID_TERMS_INDEX.getKey(), true);
-        settings = settingsBuilder.build();
-        indexSettings = IndexSettingsModule.newIndexSettings("foo", settings);
-        try (Directory directory = service.newDirectory(indexSettings, path)) {
-
-            Map<String, String> readerAttributes =
-                InternalEngine.getReaderAttributes(randomBoolean() ? directory :
-                    new MockDirectoryWrapper(random(), directory), indexSettings);
-            assertEquals(2, readerAttributes.size());
-
-            switch (IndexModule.defaultStoreType(true)) {
-                case HYBRIDFS:
-                case MMAPFS:
-                    assertEquals("OFF_HEAP", readerAttributes.get("blocktree.terms.fst"));
-                    assertEquals("ON_HEAP", readerAttributes.get("blocktree.terms.fst._id"));
-                    break;
-                case NIOFS:
-                case SIMPLEFS:
-                case FS:
-                    assertEquals("ON_HEAP", readerAttributes.get("blocktree.terms.fst"));
-                    assertEquals("ON_HEAP", readerAttributes.get("blocktree.terms.fst._id"));
-                    break;
-                default:
-                    fail("unknownw type");
-            }
-        }
-    }
-
     public void testPruneAwayDeletedButRetainedIds() throws Exception {
         IOUtils.close(engine, store);
         store = createStore(defaultSettings, newDirectory());

+ 8 - 3
server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java

@@ -105,6 +105,7 @@ import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
@@ -645,8 +646,8 @@ public class QueryPhaseTests extends IndexShardTestCase {
         MapperService mapperService = mock(MapperService.class);
         when(mapperService.fieldType(fieldNameLong)).thenReturn(fieldTypeLong);
         when(mapperService.fieldType(fieldNameDate)).thenReturn(fieldTypeDate);
-
-        final int numDocs = 7000;
+        // enough docs to have a tree with several leaf nodes
+        final int numDocs = 3500 * 20;
         Directory dir = newDirectory();
         IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null));
         for (int i = 1; i <= numDocs; ++i) {
@@ -811,7 +812,11 @@ public class QueryPhaseTests extends IndexShardTestCase {
 
     // assert score docs are in order and their number is as expected
     private void assertSortResults(TopDocs topDocs, long expectedNumDocs, boolean isDoubleSort) {
-        assertEquals(topDocs.totalHits.value, expectedNumDocs);
+        if (topDocs.totalHits.relation == TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO) {
+            assertThat(topDocs.totalHits.value, lessThanOrEqualTo(expectedNumDocs));
+        } else {
+            assertEquals(topDocs.totalHits.value, expectedNumDocs);
+        }
         long cur1, cur2;
         long prev1 = Long.MIN_VALUE;
         long prev2 = Long.MIN_VALUE;
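
Note: the assertion now tolerates TopDocs.totalHits being reported as a lower bound (TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO) rather than an exact count. A tiny illustrative check of the same logic outside the test (hypothetical helper):

    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.search.TotalHits;

    class TotalHitsCheck {
        static boolean matches(TopDocs topDocs, long expectedNumDocs) {
            if (topDocs.totalHits.relation == TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO) {
                return topDocs.totalHits.value <= expectedNumDocs; // only a lower bound was tracked
            }
            return topDocs.totalHits.value == expectedNumDocs;     // exact count
        }
    }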

+ 0 - 8
test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java

@@ -79,14 +79,6 @@ public final class CorruptionUtils {
             }
             try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
                 long maxPosition = raf.size();
-
-                if (fileToCorrupt.getFileName().toString().endsWith(".cfs") && maxPosition > 4) {
-                    // TODO: it is known that Lucene does not check the checksum of CFS file (CompoundFileS, like an archive)
-                    // see note at https://github.com/elastic/elasticsearch/pull/33911
-                    // so far, don't corrupt crc32 part of checksum (last 4 bytes) of cfs file
-                    // checksum is 8 bytes: first 4 bytes have to be zeros, while crc32 value is not verified
-                    maxPosition -= 4;
-                }
                 final int position = random.nextInt((int) Math.min(Integer.MAX_VALUE, maxPosition));
                 corruptAt(fileToCorrupt, raf, position);
             }

+ 0 - 4
test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java

@@ -444,10 +444,6 @@ public abstract class ESIntegTestCase extends ESTestCase {
                     RandomNumbers.randomIntBetween(random, 1, 15) + "ms");
         }
 
-        if (random.nextBoolean()) {
-            builder.put(IndexSettings.ON_HEAP_ID_TERMS_INDEX.getKey(), random.nextBoolean());
-        }
-
         return builder;
     }
 

+ 0 - 81
test/framework/src/test/java/org/elasticsearch/test/CorruptionUtilsTests.java

@@ -1,81 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.test;
-
-import org.apache.lucene.index.CheckIndex;
-import org.apache.lucene.store.SimpleFSDirectory;
-import org.elasticsearch.action.admin.indices.flush.FlushRequest;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.IndexShardTestCase;
-import org.elasticsearch.index.shard.ShardPath;
-
-import java.nio.channels.FileChannel;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardOpenOption;
-import java.util.stream.Stream;
-
-import static org.elasticsearch.test.CorruptionUtils.corruptAt;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.lessThan;
-
-public class CorruptionUtilsTests extends IndexShardTestCase {
-
-    /**
-     * There is a dependency on Lucene bug fix
-     * https://github.com/elastic/elasticsearch/pull/33911
-     */
-    public void testLuceneCheckIndexIgnoresLast4Bytes() throws Exception {
-        final IndexShard indexShard = newStartedShard(true);
-
-        final long numDocs = between(10, 100);
-        for (long i = 0; i < numDocs; i++) {
-            indexDoc(indexShard, "_doc", Long.toString(i), "{}");
-        }
-        indexShard.flush(new FlushRequest());
-        closeShards(indexShard);
-
-        final ShardPath shardPath = indexShard.shardPath();
-
-        final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME);
-
-        final Path cfsFile;
-        try (Stream<Path> paths = Files.walk(indexPath)) {
-            cfsFile = paths.filter(p -> p.getFileName().toString().endsWith(".cfs")).findFirst()
-                .orElseThrow(() -> new IllegalStateException("CFS file has to be there"));
-        }
-
-        try (FileChannel raf = FileChannel.open(cfsFile, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
-            assertThat(raf.size(), lessThan(Integer.MAX_VALUE * 1L));
-            final int maxPosition = (int) raf.size();
-            // corrupt only last 4 bytes!
-            final int position = randomIntBetween(maxPosition - 4, maxPosition - 1);
-            corruptAt(cfsFile, raf, position);
-        }
-
-        final CheckIndex.Status status;
-        try (CheckIndex checkIndex = new CheckIndex(new SimpleFSDirectory(indexPath))) {
-            status = checkIndex.checkIndex();
-        }
-
-        assertThat("That's a good news! "
-                + "Lucene now validates CRC32 of CFS file: time to drop workaround at CorruptionUtils (and this test)",
-            status.clean, equalTo(true));
-    }
-}

+ 0 - 1
x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java

@@ -402,7 +402,6 @@ public class TransportResumeFollowAction extends TransportMasterNodeAction<Resum
             IndexSettings.INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING,
             IndexSettings.INDEX_GC_DELETES_SETTING,
             IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD,
-            IndexSettings.ON_HEAP_ID_TERMS_INDEX,
             IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING,
             BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING,
             SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,

+ 6 - 6
x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java

@@ -6,7 +6,6 @@
 package org.elasticsearch.snapshots;
 
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
 import org.apache.lucene.index.CheckIndex;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocValuesType;
@@ -39,6 +38,7 @@ import org.apache.lucene.store.TrackingDirectoryWrapper;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.StringHelper;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.core.internal.io.IOUtils;
@@ -96,8 +96,7 @@ public class SourceOnlySnapshot {
         List<String> createdFiles = new ArrayList<>();
         String segmentFileName;
         try (Lock writeLock = targetDirectory.obtainLock(IndexWriter.WRITE_LOCK_NAME);
-             StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(commit,
-                 Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name()))) {
+             StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(commit)) {
             SegmentInfos segmentInfos = reader.getSegmentInfos().clone();
             DirectoryReader wrappedReader = wrapReader(reader);
             List<SegmentCommitInfo> newInfos = new ArrayList<>();
@@ -116,7 +115,7 @@ public class SourceOnlySnapshot {
             String pendingSegmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS,
                 "", segmentInfos.getGeneration());
             try (IndexOutput segnOutput = targetDirectory.createOutput(pendingSegmentFileName, IOContext.DEFAULT)) {
-                segmentInfos.write(targetDirectory, segnOutput);
+                segmentInfos.write(segnOutput);
             }
             targetDirectory.sync(Collections.singleton(pendingSegmentFileName));
             targetDirectory.sync(createdFiles);
@@ -219,7 +218,7 @@ public class SourceOnlySnapshot {
                 SegmentInfo newSegmentInfo = new SegmentInfo(targetDirectory, si.getVersion(), si.getMinVersion(), si.name, si.maxDoc(),
                     false, si.getCodec(), si.getDiagnostics(), si.getId(), si.getAttributes(), null);
                 // we drop the sort on purpose since the field we sorted on doesn't exist in the target index anymore.
-                newInfo = new SegmentCommitInfo(newSegmentInfo, 0, 0, -1, -1, -1);
+                newInfo = new SegmentCommitInfo(newSegmentInfo, 0, 0, -1, -1, -1, StringHelper.randomId());
                 List<FieldInfo> fieldInfoCopy = new ArrayList<>(fieldInfos.size());
                 for (FieldInfo fieldInfo : fieldInfos) {
                     fieldInfoCopy.add(new FieldInfo(fieldInfo.name, fieldInfo.number,
@@ -254,7 +253,8 @@ public class SourceOnlySnapshot {
                 assert newInfo.getDelCount() == 0 || assertLiveDocs(liveDocs.bits, liveDocs.numDeletes);
                 codec.liveDocsFormat().writeLiveDocs(liveDocs.bits, trackingDir, newInfo, liveDocs.numDeletes - newInfo.getDelCount(),
                     IOContext.DEFAULT);
-                SegmentCommitInfo info = new SegmentCommitInfo(newInfo.info, liveDocs.numDeletes, 0, newInfo.getNextDelGen(), -1, -1);
+                SegmentCommitInfo info = new SegmentCommitInfo(newInfo.info, liveDocs.numDeletes, 0, newInfo.getNextDelGen(),
+                    -1, -1, StringHelper.randomId());
                 info.setFieldInfosFiles(newInfo.getFieldInfosFiles());
                 info.info.setFiles(trackingDir.getCreatedFiles());
                 newInfo = info;
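
Note: two Lucene signature changes appear in this file: SegmentInfos.write now takes only the output, and the SegmentCommitInfo constructor requires an explicit id, supplied here via StringHelper.randomId(). A minimal sketch of the new constructor call (standalone and illustrative, not the snapshot logic):

    import org.apache.lucene.index.SegmentCommitInfo;
    import org.apache.lucene.index.SegmentInfo;
    import org.apache.lucene.util.StringHelper;

    class SegmentCommitInfos {
        // Wrap a SegmentInfo in a commit with no deletes and a freshly generated random id.
        static SegmentCommitInfo freshCommit(SegmentInfo si) {
            // args: info, delCount, softDelCount, delGen, fieldInfosGen, docValuesGen, id
            return new SegmentCommitInfo(si, 0, 0, -1, -1, -1, StringHelper.randomId());
        }
    }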

+ 1 - 4
x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java

@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.snapshots;
 
-import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.SegmentInfos;
@@ -46,7 +45,6 @@ import java.io.UncheckedIOException;
 import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.function.Function;
@@ -161,8 +159,7 @@ public final class SourceOnlySnapshotRepository extends FilterRepository {
             tempStore.bootstrapNewHistory(maxDoc, maxDoc);
             store.incRef();
             toClose.add(store::decRef);
-            DirectoryReader reader = DirectoryReader.open(tempStore.directory(),
-                Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name()));
+            DirectoryReader reader = DirectoryReader.open(tempStore.directory());
             toClose.add(reader);
             IndexCommit indexCommit = reader.getIndexCommit();
             super.snapshotShard(tempStore, mapperService, snapshotId, indexId, indexCommit, shardStateIdentifier, snapshotStatus,

+ 19 - 9
x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CachedBlobContainerIndexInput.java

@@ -128,16 +128,15 @@ public class CachedBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
     }
 
     @Override
-    protected void readInternal(final byte[] buffer, final int offset, final int length) throws IOException {
+    protected void readInternal(ByteBuffer b) throws IOException {
         ensureContext(ctx -> ctx != CACHE_WARMING_CONTEXT);
         final long position = getFilePointer() + this.offset;
+        final int length = b.remaining();
 
         int totalBytesRead = 0;
         while (totalBytesRead < length) {
             final long pos = position + totalBytesRead;
-            final int off = offset + totalBytesRead;
             final int len = length - totalBytesRead;
-
             int bytesRead = 0;
             try {
                 final CacheFile cacheFile = getCacheFileSafe();
@@ -146,7 +145,7 @@ public class CachedBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
                     bytesRead = cacheFile.fetchRange(
                         range.v1(),
                         range.v2(),
-                        (start, end) -> readCacheFile(cacheFile.getChannel(), end, pos, buffer, off, len),
+                        (start, end) -> readCacheFile(cacheFile.getChannel(), end, pos, b, len),
                         (start, end) -> writeCacheFile(cacheFile.getChannel(), start, end)
                     ).get();
                 }
@@ -154,7 +153,7 @@ public class CachedBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
                 if (e instanceof AlreadyClosedException || (e.getCause() != null && e.getCause() instanceof AlreadyClosedException)) {
                     try {
                         // cache file was evicted during the range fetching, read bytes directly from source
-                        bytesRead = readDirectly(pos, pos + len, buffer, off);
+                        bytesRead = readDirectly(pos, pos + len, b);
                         continue;
                     } catch (Exception inner) {
                         e.addSuppressed(inner);
@@ -319,9 +318,20 @@ public class CachedBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
         return true;
     }
 
-    private int readCacheFile(FileChannel fc, long end, long position, byte[] buffer, int offset, long length) throws IOException {
+    private int readCacheFile(FileChannel fc, long end, long position, ByteBuffer b, long length) throws IOException {
         assert assertFileChannelOpen(fc);
-        int bytesRead = Channels.readFromFileChannel(fc, position, buffer, offset, Math.toIntExact(Math.min(length, end - position)));
+        final int bytesRead;
+
+        assert b.remaining() == length;
+        if (end - position < b.remaining()) {
+            final ByteBuffer duplicate = b.duplicate();
+            duplicate.limit(b.position() + Math.toIntExact(end - position));
+            bytesRead = Channels.readFromFileChannel(fc, position, duplicate);
+            assert duplicate.position() < b.limit();
+            b.position(duplicate.position());
+        } else {
+            bytesRead = Channels.readFromFileChannel(fc, position, b);
+        }
         if (bytesRead == -1) {
             throw new EOFException(
                 String.format(Locale.ROOT, "unexpected EOF reading [%d-%d] from %s", position, position + length, cacheFileReference)
@@ -416,7 +426,7 @@ public class CachedBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
             + '}';
     }
 
-    private int readDirectly(long start, long end, byte[] buffer, int offset) throws IOException {
+    private int readDirectly(long start, long end, ByteBuffer b) throws IOException {
         final long length = end - start;
         final byte[] copyBuffer = new byte[Math.toIntExact(Math.min(COPY_BUFFER_SIZE, length))];
         logger.trace(() -> new ParameterizedMessage("direct reading of range [{}-{}] for cache file [{}]", start, end, cacheFileReference));
@@ -440,7 +450,7 @@ public class CachedBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
                         )
                     );
                 }
-                System.arraycopy(copyBuffer, 0, buffer, offset + bytesCopied, bytesRead);
+                b.put(copyBuffer, 0, bytesRead);
                 bytesCopied += bytesRead;
                 remaining -= bytesRead;
             }
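
Because InputStream can only read into a byte[], readDirectly above (and readFully further down in DirectBlobContainerIndexInput) stages bytes in a small heap array and then copies them into the destination ByteBuffer with put(byte[], int, int). A minimal, self-contained sketch of that pattern follows; copyToBuffer and SCRATCH_SIZE are hypothetical names and the 8 KiB scratch size is an assumption, not the Elasticsearch constants.

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.ByteBuffer;

    final class StreamToByteBuffer {
        private static final int SCRATCH_SIZE = 8192; // assumed scratch size for illustration

        // Copy up to length bytes from the stream into dst via a bounded scratch array;
        // each put(...) advances dst's position by the number of bytes copied.
        static int copyToBuffer(InputStream in, ByteBuffer dst, int length) throws IOException {
            final byte[] scratch = new byte[Math.min(length, SCRATCH_SIZE)];
            int copied = 0;
            while (copied < length) {
                final int read = in.read(scratch, 0, Math.min(length - copied, scratch.length));
                if (read == -1) {
                    break; // premature EOF: the caller decides whether that is an error
                }
                dst.put(scratch, 0, read);
                copied += read;
            }
            return copied;
        }
    }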

+ 23 - 23
x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/direct/DirectBlobContainerIndexInput.java

@@ -21,6 +21,7 @@ import java.io.EOFException;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.ByteBuffer;
 import java.util.Objects;
 import java.util.concurrent.atomic.LongAdder;
 
@@ -39,7 +40,7 @@ import java.util.concurrent.atomic.LongAdder;
  *
  * {@link DirectBlobContainerIndexInput} maintains a global position that indicates the current position in the Lucene file where the
  * next read will occur. In the case of a Lucene file snapshotted into multiple parts, this position is used to identify which part must
- * be read at which position (see {@link #readInternal(byte[], int, int)}. This position is also passed over to cloned and sliced input
+ * be read at which position (see {@link #readInternal(ByteBuffer)}). This position is also passed over to cloned and sliced input
  * along with the {@link FileInfo} so that they can also track their reading position.
  *
  * The {@code sequentialReadSize} constructor parameter configures the {@link DirectBlobContainerIndexInput} to perform a larger read on the
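
The part bookkeeping described in this javadoc reduces to integer division and modulo against the part size, which is what the reworked readInternal below computes from fileInfo.partSize(). A worked sketch on plain longs, assuming hypothetical 1 KiB parts and nothing Elasticsearch-specific:

    final class PartMath {
        // Which part of the snapshotted file holds the given global position.
        static int partIndex(long position, long partSizeInBytes) {
            return Math.toIntExact(position / partSizeInBytes);
        }

        // Offset inside that part at which the read must start.
        static long offsetInPart(long position, long partSizeInBytes) {
            return position % partSizeInBytes;
        }

        public static void main(String[] args) {
            final long partSize = 1024;  // assumed 1 KiB parts, for illustration only
            final long position = 2600;  // global position in the Lucene file
            System.out.println(partIndex(position, partSize));    // prints 2
            System.out.println(offsetInPart(position, partSize)); // prints 552
        }
    }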
@@ -56,6 +57,7 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
     private StreamForSequentialReads streamForSequentialReads;
     private long sequentialReadSize;
     private static final long NO_SEQUENTIAL_READ_OPTIMIZATION = 0L;
+    private static final int COPY_BUFFER_SIZE = 8192;
 
     public DirectBlobContainerIndexInput(
         BlobContainer blobContainer,
@@ -99,14 +101,12 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
     }
 
     @Override
-    protected void readInternal(byte[] b, int offset, int length) throws IOException {
+    protected void readInternal(ByteBuffer b) throws IOException {
         ensureOpen();
         if (fileInfo.numberOfParts() == 1L) {
-            readInternalBytes(0, position, b, offset, length);
+            readInternalBytes(0, position, b, b.remaining());
         } else {
-            int len = length;
-            int off = offset;
-            while (len > 0) {
+            while (b.hasRemaining()) {
                 int currentPart = Math.toIntExact(position / fileInfo.partSize().getBytes());
                 int remainingBytesInPart;
                 if (currentPart < (fileInfo.numberOfParts() - 1)) {
@@ -114,16 +114,14 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
                 } else {
                     remainingBytesInPart = Math.toIntExact(fileInfo.length() - position);
                 }
-                final int read = Math.min(len, remainingBytesInPart);
-                readInternalBytes(currentPart, position % fileInfo.partSize().getBytes(), b, off, read);
-                len -= read;
-                off += read;
+                final int read = Math.min(b.remaining(), remainingBytesInPart);
+                readInternalBytes(currentPart, position % fileInfo.partSize().getBytes(), b, read);
             }
         }
     }
 
-    private void readInternalBytes(final int part, long pos, final byte[] b, int offset, int length) throws IOException {
-        int optimizedReadSize = readOptimized(part, pos, b, offset, length);
+    private void readInternalBytes(final int part, long pos, final ByteBuffer b, int length) throws IOException {
+        int optimizedReadSize = readOptimized(part, pos, b, length);
         assert optimizedReadSize <= length;
         position += optimizedReadSize;
 
@@ -134,7 +132,6 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
                 final int directReadSize = readFully(
                     inputStream,
                     b,
-                    offset + optimizedReadSize,
                     length - optimizedReadSize,
                     () -> { throw new EOFException("Read past EOF at [" + position + "] with length [" + fileInfo.partBytes(part) + "]"); }
                 );
@@ -150,7 +147,7 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
      * Attempt to satisfy this read in an optimized fashion using {@code streamForSequentialReadsRef}.
      * @return the number of bytes read
      */
-    private int readOptimized(int part, long pos, byte[] b, int offset, int length) throws IOException {
+    private int readOptimized(int part, long pos, ByteBuffer b, int length) throws IOException {
         if (sequentialReadSize == NO_SEQUENTIAL_READ_OPTIMIZATION) {
             return 0;
         }
@@ -158,10 +155,10 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
         int read = 0;
         if (streamForSequentialReads == null) {
             // starting a new sequential read
-            read = readFromNewSequentialStream(part, pos, b, offset, length);
+            read = readFromNewSequentialStream(part, pos, b, length);
         } else if (streamForSequentialReads.canContinueSequentialRead(part, pos)) {
             // continuing a sequential read that we started previously
-            read = streamForSequentialReads.read(b, offset, length);
+            read = streamForSequentialReads.read(b, length);
             if (streamForSequentialReads.isFullyRead()) {
                 // the current stream was exhausted by this read, so it should be closed
                 streamForSequentialReads.close();
@@ -173,7 +170,7 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
 
             if (read < length) {
                 // the current stream didn't contain enough data for this read, so we must read more
-                read += readFromNewSequentialStream(part, pos + read, b, offset + read, length - read);
+                read += readFromNewSequentialStream(part, pos + read, b, length - read);
             }
         } else {
             // not a sequential read, so stop optimizing for this usage pattern and fall through to the unoptimized behaviour
@@ -196,7 +193,7 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
      * If appropriate, open a new stream for sequential reading and satisfy the given read using it.
      * @return the number of bytes read; if a new stream wasn't opened then nothing was read so the caller should perform the read directly.
      */
-    private int readFromNewSequentialStream(int part, long pos, byte[] b, int offset, int length) throws IOException {
+    private int readFromNewSequentialStream(int part, long pos, ByteBuffer b, int length) throws IOException {
 
         assert streamForSequentialReads == null : "should only be called when a new stream is needed";
         assert sequentialReadSize > 0L : "should only be called if optimizing sequential reads";
@@ -243,7 +240,7 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
             }
         }, part, pos, streamLength);
 
-        final int read = streamForSequentialReads.read(b, offset, length);
+        final int read = streamForSequentialReads.read(b, length);
         assert read == length : read + " vs " + length;
         assert streamForSequentialReads.isFullyRead() == false;
         return read;
@@ -347,15 +344,18 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
     /**
      * Fully read up to {@code length} bytes from the given {@link InputStream}
      */
-    private static int readFully(InputStream inputStream, byte[] b, int offset, int length, CheckedRunnable<IOException> onEOF)
+    private static int readFully(InputStream inputStream, final ByteBuffer b, int length, CheckedRunnable<IOException> onEOF)
         throws IOException {
         int totalRead = 0;
+        final byte[] buffer = new byte[Math.min(length, COPY_BUFFER_SIZE)];
         while (totalRead < length) {
-            final int read = inputStream.read(b, offset + totalRead, length - totalRead);
+            final int len = Math.min(length - totalRead, COPY_BUFFER_SIZE);
+            final int read = inputStream.read(buffer, 0, len);
             if (read == -1) {
                 onEOF.run();
                 break;
             }
+            b.put(buffer, 0, read);
             totalRead += read;
         }
         return totalRead > 0 ? totalRead : -1;
@@ -378,9 +378,9 @@ public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexIn
             return this.part == part && this.pos == pos;
         }
 
-        int read(byte[] b, int offset, int length) throws IOException {
+        int read(ByteBuffer b, int length) throws IOException {
             assert this.pos < maxPos : "should not try and read from a fully-read stream";
-            final int read = readFully(inputStream, b, offset, length, () -> {});
+            final int read = readFully(inputStream, b, length, () -> {});
             assert read <= length : read + " vs " + length;
             pos += read;
             return read;
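
The read(ByteBuffer, int) method above is only used while the stream is consumed strictly in order; canContinueSequentialRead, visible in the preceding hunk, accepts a follow-up read only when it targets the same part and starts exactly where the previous read stopped. A stand-alone sketch of that rule, with illustrative names rather than the real StreamForSequentialReads fields:

    final class SequentialReadTracker {
        private final int part;    // part of the snapshotted file this stream covers
        private long pos;          // next position the open stream will serve
        private final long maxPos; // end of the range opened for sequential reading

        SequentialReadTracker(int part, long pos, long maxPos) {
            this.part = part;
            this.pos = pos;
            this.maxPos = maxPos;
        }

        // A follow-up read may reuse the stream only if it is strictly sequential
        // within the same part.
        boolean canContinue(int nextPart, long nextPos) {
            return this.part == nextPart && this.pos == nextPos;
        }

        boolean isFullyRead() {
            return pos >= maxPos;
        }

        void advance(long bytesRead) {
            pos += bytesRead;
        }
    }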

+ 0 - 1
x-pack/plugin/sql/sql-action/licenses/lucene-core-8.5.1.jar.sha1

@@ -1 +0,0 @@
-24212de43c19269f5211f3e79eb2f414c4a0254b

+ 1 - 0
x-pack/plugin/sql/sql-action/licenses/lucene-core-8.6.0-snapshot-6c9024f7735.jar.sha1

@@ -0,0 +1 @@
+ec8401dfd1b41113eba06f3d626a1f2fdf589335