
Merge pull request ESQL-1500 from elastic/main

🤖 ESQL: Merge upstream
elasticsearchmachine 2 years ago
commit 0b5016ae19

+ 45 - 0
.buildkite/hooks/pre-command

@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# On some distros, this directory ends up not readable by the `elasticsearch` user that gets created during tests
+# This fixes that
+chmod 755 ~
+
+WORKSPACE="$(pwd)"
+export WORKSPACE
+
+BUILD_NUMBER="$BUILDKITE_BUILD_NUMBER"
+export BUILD_NUMBER
+
+COMPOSE_HTTP_TIMEOUT="120"
+export COMPOSE_HTTP_TIMEOUT
+
+JOB_BRANCH="$BUILDKITE_BRANCH"
+export JOB_BRANCH
+
+GRADLEW="./gradlew --parallel --scan --build-cache --no-watch-fs -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/"
+export GRADLEW
+
+GRADLEW_BAT="./gradlew.bat --parallel --scan --build-cache --no-watch-fs -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/"
+export GRADLEW_BAT
+
+export $(cat .ci/java-versions.properties | grep '=' | xargs)
+
+JAVA_HOME="$HOME/.java/$ES_BUILD_JAVA"
+export JAVA_HOME
+
+JAVA11_HOME="$HOME/.java/java11"
+export JAVA11_HOME
+
+JAVA16_HOME="$HOME/.java/openjdk16"
+export JAVA16_HOME
+
+if [[ "${ES_RUNTIME_JAVA:-}" ]]; then
+  RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA
+  export RUNTIME_JAVA_HOME
+fi
+
+GRADLE_BUILD_CACHE_USERNAME=$(vault read -field=username secret/ci/elastic-elasticsearch/migrated/gradle-build-cache)
+export GRADLE_BUILD_CACHE_USERNAME
+
+GRADLE_BUILD_CACHE_PASSWORD=$(vault read -field=password secret/ci/elastic-elasticsearch/migrated/gradle-build-cache)
+export GRADLE_BUILD_CACHE_PASSWORD

+ 52 - 0
.buildkite/pipelines/periodic.yml

@@ -0,0 +1,52 @@
+steps:
+  - group: java-fips-matrix
+    steps:
+      - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-fips-matrix"
+        command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.fips.enabled=true $$GRADLE_TASK
+        timeout_in_minutes: 180
+        matrix:
+          setup:
+            ES_RUNTIME_JAVA:
+              - openjdk17
+            GRADLE_TASK:
+              - checkPart1
+              - checkPart2
+              - checkPart3
+              - bwcTestSnapshots
+              - checkRestCompat
+        agents:
+          provider: gcp
+          image: family/elasticsearch-ubuntu-2004
+          machineType: custom-32-98304
+          buildDirectory: /dev/shm/bk
+        env:
+          ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}"
+          GRADLE_TASK: "{{matrix.GRADLE_TASK}}"
+  - group: java-matrix
+    steps:
+      - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-matrix"
+        command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true $$GRADLE_TASK
+        timeout_in_minutes: 180
+        matrix:
+          setup:
+            ES_RUNTIME_JAVA:
+              - graalvm-ce17
+              - openjdk17
+              - openjdk18
+              - openjdk19
+              - openjdk20
+              - openjdk21
+            GRADLE_TASK:
+              - checkPart1
+              - checkPart2
+              - checkPart3
+              - bwcTestSnapshots
+              - checkRestCompat
+        agents:
+          provider: gcp
+          image: family/elasticsearch-ubuntu-2004
+          machineType: custom-32-98304
+          buildDirectory: /dev/shm/bk
+        env:
+          ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}"
+          GRADLE_TASK: "{{matrix.GRADLE_TASK}}"

+ 21 - 9
.ci/init.gradle

@@ -13,14 +13,26 @@ initscript {
 boolean USE_ARTIFACTORY = false
 
 if (System.getenv('VAULT_ADDR') == null) {
+  // When trying to reproduce errors outside of CI, it can be useful to allow this to just return rather than blowing up
+  if (System.getenv('CI') == null) {
+    return
+  }
+
   throw new GradleException("You must set the VAULT_ADDR environment variable to use this init script.")
 }
 
 if (System.getenv('VAULT_ROLE_ID') == null && System.getenv('VAULT_SECRET_ID') == null && System.getenv('VAULT_TOKEN') == null) {
+  // When trying to reproduce errors outside of CI, it can be useful to allow this to just return rather than blowing up
+  if (System.getenv('CI') == null) {
+    return
+  }
+
   throw new GradleException("You must set either the VAULT_ROLE_ID and VAULT_SECRET_ID environment variables, " +
     "or the VAULT_TOKEN environment variable to use this init script.")
 }
 
+final String vaultPathPrefix = System.getenv('VAULT_ADDR') ==~ /.+vault-ci.+\.dev.*/ ? "secret/ci/elastic-elasticsearch/migrated" : "secret/elasticsearch-ci"
+
 final String vaultToken = System.getenv('VAULT_TOKEN') ?: new Vault(
   new VaultConfig()
     .address(System.env.VAULT_ADDR)
@@ -44,7 +56,7 @@ final Vault vault = new Vault(
 
 if (USE_ARTIFACTORY) {
   final Map<String, String> artifactoryCredentials = vault.logical()
-    .read("secret/elasticsearch-ci/artifactory.elstc.co")
+    .read("${vaultPathPrefix}/artifactory.elstc.co")
     .getData()
   logger.info("Using elastic artifactory repos")
   Closure configCache = {
@@ -78,10 +90,10 @@ if (USE_ARTIFACTORY) {
   }
 }
 
-projectsLoaded {
-  rootProject {
-    project.pluginManager.withPlugin('com.gradle.build-scan') {
-      buildScan.server = 'https://gradle-enterprise.elastic.co'
+gradle.settingsEvaluated { settings ->
+  settings.pluginManager.withPlugin("com.gradle.enterprise") {
+    settings.gradleEnterprise {
+      server = 'https://gradle-enterprise.elastic.co'
     }
   }
 }
@@ -91,8 +103,8 @@ final String buildCacheUrl = System.getProperty('org.elasticsearch.build.cache.u
 final boolean buildCachePush = Boolean.valueOf(System.getProperty('org.elasticsearch.build.cache.push', 'false'))
 
 if (buildCacheUrl) {
-  final Map<String, String> buildCacheCredentials = vault.logical()
-    .read("secret/elasticsearch-ci/gradle-build-cache")
+  final Map<String, String> buildCacheCredentials = System.getenv("GRADLE_BUILD_CACHE_USERNAME") ? [:] : vault.logical()
+    .read("${vaultPathPrefix}/gradle-build-cache")
     .getData()
   gradle.settingsEvaluated { settings ->
     settings.buildCache {
@@ -104,8 +116,8 @@ if (buildCacheUrl) {
         url = buildCacheUrl
         push = buildCachePush
         credentials {
-          username = buildCacheCredentials.get("username")
-          password = buildCacheCredentials.get("password")
+          username = System.getenv("GRADLE_BUILD_CACHE_USERNAME") ?: buildCacheCredentials.get("username")
+          password = System.getenv("GRADLE_BUILD_CACHE_PASSWORD") ?: buildCacheCredentials.get("password")
         }
       }
     }
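
With this change, credentials injected by the Buildkite pre-command hook take precedence, and the Vault read is skipped entirely when GRADLE_BUILD_CACHE_USERNAME is set (the `? [:] :` ternary above). A minimal Java sketch of that precedence rule, hypothetical and for illustration only (the real logic is the Groovy shown above):

import java.util.Map;

/** Sketch: resolve a credential from the environment first, then from a Vault payload. */
class CredentialPrecedence {
    static String resolve(String envKey, Map<String, String> vaultData, String vaultKey) {
        String fromEnv = System.getenv(envKey); // set by .buildkite/hooks/pre-command on CI
        return fromEnv != null ? fromEnv : vaultData.get(vaultKey); // legacy Vault fallback
    }
}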

+ 1 - 0
.ci/scripts/packaging-test.sh

@@ -39,6 +39,7 @@ if [ -f "/etc/os-release" ] ; then
         # Work around incorrect lintian version
         #  https://github.com/elastic/elasticsearch/issues/48573
         if [ $VERSION_ID == 10 ] ; then
+            sudo apt-get update -y
             sudo apt-get install -y --allow-downgrades lintian=2.15.0
         fi
     fi

+ 1 - 0
.ci/scripts/run-gradle.sh

@@ -1,6 +1,7 @@
 #!/bin/bash
 # drop page cache and kernel slab objects on linux
 [[ -x /usr/local/sbin/drop-caches ]] && sudo /usr/local/sbin/drop-caches
+
 rm -Rfv ~/.gradle/init.d
 mkdir -p ~/.gradle/init.d && cp -v $WORKSPACE/.ci/init.gradle ~/.gradle/init.d
 if [ "$(uname -m)" = "arm64" ] || [ "$(uname -m)" = "aarch64" ]; then

+ 1 - 1
build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle

@@ -16,7 +16,7 @@ buildScan {
   String buildKiteUrl = System.getenv('BUILDKITE_BUILD_URL') ? System.getenv('BUILDKITE_BUILD_URL') : null
 
   // Automatically publish scans from Elasticsearch CI
-  if (jenkinsUrl?.host?.endsWith('elastic.co') || jenkinsUrl?.host?.endsWith('elastic.dev')) {
+  if (jenkinsUrl?.host?.endsWith('elastic.co') || jenkinsUrl?.host?.endsWith('elastic.dev') || System.getenv('BUILDKITE') == 'true') {
     publishAlways()
     buildScan.server = 'https://gradle-enterprise.elastic.co'
   }

+ 8 - 0
build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/SnippetsTaskTests.java

@@ -51,5 +51,13 @@ public class SnippetsTaskTests {
         assertTrue(source.getMatches());
         assertEquals("console", source.getLanguage());
         assertEquals("snippet-name-1", source.getName());
+
+        source = SnippetsTask.matchSource("[source.merge.styled,esql]");
+        assertTrue(source.getMatches());
+        assertEquals("esql", source.getLanguage());
+
+        source = SnippetsTask.matchSource("[source.merge.styled,foo-bar]");
+        assertTrue(source.getMatches());
+        assertEquals("foo-bar", source.getLanguage());
     }
 }
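
The new assertions cover block headers that carry dotted styles before the language. For illustration, a hypothetical regex of the kind such a matcher needs; the real pattern lives in SnippetsTask and may differ:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

class SourceHeaderSketch {
    // "[source" + optional ".style" segments + "," + language + optional ",attrs" + "]"
    private static final Pattern SOURCE = Pattern.compile("\\[source(?:\\.[^,\\]]+)*,\\s*([-\\w]+)(?:,.*)?\\]");

    static String language(String header) {
        Matcher m = SOURCE.matcher(header);
        return m.matches() ? m.group(1) : null;
    }

    public static void main(String[] args) {
        System.out.println(language("[source.merge.styled,esql]"));    // esql
        System.out.println(language("[source.merge.styled,foo-bar]")); // foo-bar
    }
}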

+ 44 - 3
catalog-info.yaml

@@ -16,13 +16,14 @@ spec:
     apiVersion: buildkite.elastic.dev/v1
     kind: Pipeline
     metadata:
-      description: ':elasticsearch: Update elasticsearch submodule in elasticsearch-serverless'
+      description: ":elasticsearch: Update elasticsearch submodule in elasticsearch-serverless"
       name: elasticsearch / update serverless submodule
     spec:
       repository: elastic/elasticsearch
       pipeline_file: .buildkite/update-es-serverless.yml
       teams:
         elasticsearch-team: {}
+        ml-core: {}
         everyone:
           access_level: READ_ONLY
       provider_settings:
@@ -30,7 +31,7 @@ spec:
       schedules:
         daily promotion:
           branch: main
-          cronline: '@daily'
+          cronline: "@daily"
 ---
 # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json
 apiVersion: backstage.io/v1alpha1
@@ -49,7 +50,7 @@ spec:
     apiVersion: buildkite.elastic.dev/v1
     kind: Pipeline
     metadata:
-      description: ':elasticsearch: Validate elasticsearch changes against serverless'
+      description: ":elasticsearch: Validate elasticsearch changes against serverless"
       name: elasticsearch / check serverless submodule
     spec:
       repository: elastic/elasticsearch
@@ -57,8 +58,48 @@ spec:
       branch_configuration: main
       teams:
         elasticsearch-team: {}
+        ml-core: {}
         everyone:
           access_level: READ_ONLY
       provider_settings:
         build_pull_requests: false
         publish_commit_status: false
+---
+# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json
+apiVersion: backstage.io/v1alpha1
+kind: Resource
+metadata:
+  name: buildkite-pipeline-elasticsearch-periodic
+  description: Elasticsearch tests and checks that are run a few times daily
+  links:
+    - title: Pipeline
+      url: https://buildkite.com/elastic/elasticsearch-periodic
+spec:
+  type: buildkite-pipeline
+  system: buildkite
+  owner: group:elasticsearch-team
+  implementation:
+    apiVersion: buildkite.elastic.dev/v1
+    kind: Pipeline
+    metadata:
+      description: ":elasticsearch: Tests and checks that are run a few times daily"
+      name: elasticsearch / periodic
+    spec:
+      repository: elastic/elasticsearch
+      pipeline_file: .buildkite/pipelines/periodic.yml
+      branch_configuration: main
+      teams:
+        elasticsearch-team: {}
+        ml-core: {}
+        everyone:
+          access_level: READ_ONLY
+      provider_settings:
+        build_branches: false
+        build_pull_requests: false
+        publish_commit_status: false
+        trigger_mode: none
+      schedules:
+        Periodically on main:
+          branch: main
+          cronline: "0 0,8,16 * * * America/New_York"
+          message: "Tests and checks that are run 3x daily"

+ 1 - 0
distribution/packages/build.gradle

@@ -308,6 +308,7 @@ Closure commonDebConfig(String architecture) {
 
     into('/usr/share/lintian/overrides') {
       from('src/deb/lintian/elasticsearch')
+      fileMode 0644
     }
   }
 }

+ 35 - 2
server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java

@@ -16,6 +16,8 @@ import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.MockScriptPlugin;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptType;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.util.Collection;
@@ -28,6 +30,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDI
 import static org.elasticsearch.index.query.QueryBuilders.scriptQuery;
 import static org.elasticsearch.search.SearchTimeoutIT.ScriptedTimeoutPlugin.SCRIPT_NAME;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE)
 public class SearchTimeoutIT extends ESIntegTestCase {
@@ -42,18 +45,48 @@ public class SearchTimeoutIT extends ESIntegTestCase {
         return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).build();
     }
 
-    public void testSimpleTimeout() throws Exception {
+    private void indexDocs() {
         for (int i = 0; i < 32; i++) {
             client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get();
         }
         refresh("test");
+    }
+
+    public void testTopHitsTimeout() {
+        indexDocs();
+        SearchResponse searchResponse = client().prepareSearch("test")
+            .setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS))
+            .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap())))
+            .get();
+        assertThat(searchResponse.isTimedOut(), equalTo(true));
+        assertEquals(0, searchResponse.getShardFailures().length);
+        assertEquals(0, searchResponse.getFailedShards());
+        assertThat(searchResponse.getSuccessfulShards(), greaterThan(0));
+        assertEquals(searchResponse.getSuccessfulShards(), searchResponse.getTotalShards());
+        assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L));
+        assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
+    }
 
+    public void testAggsTimeout() {
+        indexDocs();
         SearchResponse searchResponse = client().prepareSearch("test")
             .setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS))
+            .setSize(0)
             .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap())))
-            .setAllowPartialSearchResults(true)
+            .addAggregation(new TermsAggregationBuilder("terms").field("field.keyword"))
             .get();
         assertThat(searchResponse.isTimedOut(), equalTo(true));
+        assertEquals(0, searchResponse.getShardFailures().length);
+        assertEquals(0, searchResponse.getFailedShards());
+        assertThat(searchResponse.getSuccessfulShards(), greaterThan(0));
+        assertEquals(searchResponse.getSuccessfulShards(), searchResponse.getTotalShards());
+        assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L));
+        assertEquals(searchResponse.getHits().getHits().length, 0);
+        StringTerms terms = searchResponse.getAggregations().get("terms");
+        assertEquals(1, terms.getBuckets().size());
+        StringTerms.Bucket bucket = terms.getBuckets().get(0);
+        assertEquals("value", bucket.getKeyAsString());
+        assertThat(bucket.getDocCount(), greaterThan(0L));
     }
 
     public void testPartialResultsIntolerantTimeout() throws Exception {
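
Both tests trigger the timeout through a scripted query; the plugin body sits outside this hunk, but it follows the standard MockScriptPlugin shape. A hypothetical sketch (script name, sleep duration, and java.util / java.util.function imports are assumptions, not taken from this diff):

public static class ScriptedTimeoutPlugin extends MockScriptPlugin {
    static final String SCRIPT_NAME = "search-timeout"; // referenced via SCRIPT_NAME above

    @Override
    public Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
        // Sleep on every document so the shard reliably exceeds the 10ms search timeout.
        return Collections.singletonMap(SCRIPT_NAME, params -> {
            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            return true; // match the document, slowly
        });
    }
}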

+ 322 - 0
server/src/test/java/org/elasticsearch/search/query/QueryPhaseTimeoutTests.java

@@ -0,0 +1,322 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.KnnByteVectorField;
+import org.apache.lucene.document.KnnFloatVectorField;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.PointValues;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.BulkScorer;
+import org.apache.lucene.search.ConstantScoreScorer;
+import org.apache.lucene.search.ConstantScoreWeight;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryVisitor;
+import org.apache.lucene.search.Scorable;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.tests.index.RandomIndexWriter;
+import org.apache.lucene.tests.util.LuceneTestCase;
+import org.apache.lucene.util.Bits;
+import org.elasticsearch.action.search.SearchShardTask;
+import org.elasticsearch.core.CheckedConsumer;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardTestCase;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.TestSearchContext;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.util.Collections;
+
+public class QueryPhaseTimeoutTests extends IndexShardTestCase {
+
+    private static Directory dir;
+    private static IndexReader reader;
+    private static int numDocs;
+    private IndexShard indexShard;
+
+    @BeforeClass
+    public static void init() throws Exception {
+        dir = newDirectory();
+        IndexWriterConfig iwc = new IndexWriterConfig();
+        RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+        // the upper bound is higher than 2048 so that in some cases we time out after the first batch of bulk scoring, but before
+        // getting to the end of the first segment
+        numDocs = scaledRandomIntBetween(500, 2500);
+        for (int i = 0; i < numDocs; ++i) {
+            Document doc = new Document();
+            doc.add(new StringField("field", Integer.toString(i), Field.Store.NO));
+            doc.add(new LongPoint("long", i));
+            doc.add(new KnnByteVectorField("byte_vector", new byte[] { 1, 2, 3 }));
+            doc.add(new KnnFloatVectorField("float_vector", new float[] { 1, 2, 3 }));
+            w.addDocument(doc);
+        }
+        w.close();
+        reader = DirectoryReader.open(dir);
+    }
+
+    @AfterClass
+    public static void destroy() throws Exception {
+        if (reader != null) {
+            reader.close();
+        }
+        dir.close();
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        indexShard = newShard(true);
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        closeShards(indexShard);
+    }
+
+    private static ContextIndexSearcher newContextSearcher(IndexReader reader) throws IOException {
+        return new ContextIndexSearcher(
+            reader,
+            IndexSearcher.getDefaultSimilarity(),
+            IndexSearcher.getDefaultQueryCache(),
+            LuceneTestCase.MAYBE_CACHE_POLICY,
+            true
+        );
+    }
+
+    public void testScorerTimeoutTerms() throws IOException {
+        assumeTrue("Test requires more than one segment", reader.leaves().size() > 1);
+        int size = randomBoolean() ? 0 : randomIntBetween(100, 500);
+        scorerTimeoutTest(size, context -> {
+            final TermsEnum termsEnum = context.reader().terms("field").iterator();
+            termsEnum.next();
+        });
+    }
+
+    public void testScorerTimeoutPoints() throws IOException {
+        assumeTrue("Test requires more than one segment", reader.leaves().size() > 1);
+        int size = randomBoolean() ? 0 : randomIntBetween(100, 500);
+        scorerTimeoutTest(size, context -> {
+            PointValues pointValues = context.reader().getPointValues("long");
+            pointValues.size();
+        });
+    }
+
+    private void scorerTimeoutTest(int size, CheckedConsumer<LeafReaderContext, IOException> timeoutTrigger) throws IOException {
+        {
+            TimeoutQuery query = newMatchAllScorerTimeoutQuery(timeoutTrigger, false);
+            SearchContext context = createSearchContext(query, size);
+            QueryPhase.executeQuery(context);
+            assertFalse(context.queryResult().searchTimedOut());
+            assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value);
+            assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length);
+        }
+        {
+            TimeoutQuery query = newMatchAllScorerTimeoutQuery(timeoutTrigger, true);
+            SearchContext context = createSearchContextWithTimeout(query, size);
+            QueryPhase.executeQuery(context);
+            assertTrue(context.queryResult().searchTimedOut());
+            int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc();
+            assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value);
+            assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length);
+        }
+    }
+
+    private static TimeoutQuery newMatchAllScorerTimeoutQuery(
+        CheckedConsumer<LeafReaderContext, IOException> timeoutTrigger,
+        boolean isTimeoutExpected
+    ) {
+        return new TimeoutQuery() {
+            @Override
+            public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
+                return new MatchAllWeight(this, boost, scoreMode) {
+                    boolean firstSegment = true;
+
+                    @Override
+                    public Scorer scorer(LeafReaderContext context) throws IOException {
+                        if (firstSegment == false && isTimeoutExpected) {
+                            shouldTimeout = true;
+                        }
+                        timeoutTrigger.accept(context);
+                        assert shouldTimeout == false : "should have already timed out";
+                        firstSegment = false;
+                        return super.scorer(context);
+                    }
+                };
+            }
+        };
+    }
+
+    public void testBulkScorerTimeout() throws IOException {
+        int size = randomBoolean() ? 0 : randomIntBetween(100, 500);
+        {
+            TimeoutQuery query = newMatchAllBulkScorerTimeoutQuery(false);
+            SearchContext context = createSearchContext(query, size);
+            QueryPhase.executeQuery(context);
+            assertFalse(context.queryResult().searchTimedOut());
+            assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value);
+            assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length);
+        }
+        {
+            TimeoutQuery query = newMatchAllBulkScorerTimeoutQuery(true);
+            SearchContext context = createSearchContextWithTimeout(query, size);
+            QueryPhase.executeQuery(context);
+            assertTrue(context.queryResult().searchTimedOut());
+            int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc();
+            assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value);
+            assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length);
+        }
+    }
+
+    private static TimeoutQuery newMatchAllBulkScorerTimeoutQuery(boolean timeoutExpected) {
+        return new TimeoutQuery() {
+            @Override
+            public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
+                return new MatchAllWeight(this, boost, scoreMode) {
+                    @Override
+                    public BulkScorer bulkScorer(LeafReaderContext context) {
+                        final float score = score();
+                        final int maxDoc = context.reader().maxDoc();
+                        return new BulkScorer() {
+                            @Override
+                            public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
+                                assert shouldTimeout == false : "should have already timed out";
+                                max = Math.min(max, maxDoc);
+                                ScoreAndDoc scorer = new ScoreAndDoc();
+                                scorer.score = score;
+                                collector.setScorer(scorer);
+                                for (int doc = min; doc < max; ++doc) {
+                                    scorer.doc = doc;
+                                    if (acceptDocs == null || acceptDocs.get(doc)) {
+                                        collector.collect(doc);
+                                    }
+                                }
+                                if (timeoutExpected) {
+                                    // timeout after collecting the first batch of documents from the 1st segment, or the entire 1st segment
+                                    shouldTimeout = true;
+                                }
+                                return max == maxDoc ? DocIdSetIterator.NO_MORE_DOCS : max;
+                            }
+
+                            @Override
+                            public long cost() {
+                                return 0;
+                            }
+                        };
+                    }
+                };
+            }
+        };
+    }
+
+    private TestSearchContext createSearchContextWithTimeout(TimeoutQuery query, int size) throws IOException {
+        TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader)) {
+            @Override
+            public long getRelativeTimeInMillis() {
+                return query.shouldTimeout ? 1L : 0L;
+            }
+        };
+        context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+        context.parsedQuery(new ParsedQuery(query));
+        context.setSize(size);
+        return context;
+    }
+
+    private TestSearchContext createSearchContext(Query query, int size) throws IOException {
+        TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader));
+        context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+        context.parsedQuery(new ParsedQuery(query));
+        context.setSize(size);
+        return context;
+    }
+
+    private static class ScoreAndDoc extends Scorable {
+        float score;
+        int doc = -1;
+
+        @Override
+        public int docID() {
+            return doc;
+        }
+
+        @Override
+        public float score() {
+            return score;
+        }
+    }
+
+    /**
+     * Query that makes it possible to artificially simulate a timeout error thrown at different stages of query execution.
+     * Used in combination with {@link MatchAllWeight}.
+     */
+    private abstract static class TimeoutQuery extends Query {
+        boolean shouldTimeout = false;
+
+        @Override
+        public final String toString(String field) {
+            return "timeout query";
+        }
+
+        @Override
+        public final boolean equals(Object o) {
+            return sameClassAs(o);
+        }
+
+        @Override
+        public final int hashCode() {
+            return classHash();
+        }
+
+        @Override
+        public final void visit(QueryVisitor visitor) {
+            visitor.visitLeaf(this);
+        }
+    }
+
+    /**
+     * Weight that has similar behaviour to that exposed by {@link org.apache.lucene.search.MatchAllDocsQuery}, but it is not cacheable,
+     * and it does not override {@link org.apache.lucene.search.Weight#count(LeafReaderContext)}, which is important to be able to
+     * accurately simulate timeout errors.
+     */
+    private abstract static class MatchAllWeight extends ConstantScoreWeight {
+        private final ScoreMode scoreMode;
+
+        protected MatchAllWeight(Query query, float score, ScoreMode scoreMode) {
+            super(query, score);
+            this.scoreMode = scoreMode;
+        }
+
+        @Override
+        public Scorer scorer(LeafReaderContext context) throws IOException {
+            return new ConstantScoreScorer(this, score(), scoreMode, DocIdSetIterator.all(context.reader().maxDoc()));
+        }
+
+        @Override
+        public final boolean isCacheable(LeafReaderContext ctx) {
+            return false;
+        }
+    }
+}
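
The assertions above lean on bulk scoring being driven in windows, the first of which covers up to 2048 documents: when the clock trips after that window, only its hits are counted. A standalone sketch of that batch-then-check pattern, with hypothetical names and a fixed window size rather than the actual Elasticsearch implementation:

import java.util.function.LongSupplier;

class BatchedScoringSketch {
    static final int WINDOW = 2048; // size of the first bulk-scoring batch in these tests

    /** Collects docs window by window, stopping once the relative clock passes the timeout. */
    static int collectUntilTimeout(int maxDoc, LongSupplier relativeTimeMs, long timeoutMs) {
        int collected = 0;
        for (int min = 0; min < maxDoc; min += WINDOW) {
            if (relativeTimeMs.getAsLong() > timeoutMs) {
                return collected; // partial results; searchTimedOut() reports true
            }
            collected += Math.min(min + WINDOW, maxDoc) - min; // stand-in for collector.collect(doc)
        }
        return collected;
    }
}

This is why scorerTimeoutTest and testBulkScorerTimeout expect Math.min(2048, firstSegmentMaxDoc) total hits: the timeout is observed between batches, not mid-batch.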

+ 0 - 2
test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java

@@ -55,8 +55,6 @@ import java.util.Map;
 import static java.util.Collections.emptyMap;
 
 public class TestSearchContext extends SearchContext {
-    public static final SearchShardTarget SHARD_TARGET = new SearchShardTarget("test", new ShardId("test", "test", 0), null);
-
     final IndexService indexService;
     final BitsetFilterCache fixedBitSetFilterCache;
     final IndexShard indexShard;