
Fix HTTP corner-case response leaks (#105617)

Enhances `Netty4PipeliningIT` to demonstrate that the pipelined requests
do run concurrently, and to explore some corner cases around failures
(both client-side and server-side). This extra testing found two
response leaks: one when the channel has closed before even starting to
process a request, and a second when we throw an exception during
serialization of a chunk in a chunked response with other pipelined
responses enqueued for transmission behind it.
David Turner, 1 year ago · commit ac08fe6076
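
For context, in HTTP/1.1 pipelining the client writes several requests on one connection before reading any responses, and the server must return the responses in request order even when it processes the requests concurrently. Below is a minimal, hypothetical sketch of that interaction against a locally running server (the localhost:9200 address and the 5-second timeout are assumptions; this snippet is illustrative only and is not part of this commit):

import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.nio.charset.StandardCharsets;

// Hypothetical sketch of HTTP/1.1 pipelining: write several requests
// back-to-back, then read the responses, which must arrive in request order.
public class PipeliningSketch {
    public static void main(String[] args) throws Exception {
        try (Socket socket = new Socket("localhost", 9200)) {
            socket.setSoTimeout(5000);
            OutputStream out = socket.getOutputStream();
            for (String path : new String[] { "/", "/_cluster/health", "/_nodes" }) {
                out.write(("GET " + path + " HTTP/1.1\r\nHost: localhost\r\n\r\n").getBytes(StandardCharsets.UTF_8));
            }
            out.flush();
            // A real client would parse the HTTP framing; here we just dump
            // whatever arrives until the read times out.
            InputStream in = socket.getInputStream();
            byte[] buffer = new byte[8192];
            try {
                int read;
                while ((read = in.read(buffer)) != -1) {
                    System.out.write(buffer, 0, read);
                }
            } catch (SocketTimeoutException e) {
                // no more data within the timeout; done for this sketch
            }
        }
    }
}

The test changes in this commit drive exactly this pattern through Netty4HttpClient, then assert that the responses come back in order and that none of them leak.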

+ 5 - 0
docs/changelog/105617.yaml

@@ -0,0 +1,5 @@
+pr: 105617
+summary: Fix HTTP corner-case response leaks
+area: Network
+type: bug
+issues: []

+ 230 - 15
modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java

@@ -8,43 +8,134 @@
 
 package org.elasticsearch.http.netty4;
 
-import io.netty.handler.codec.http.FullHttpResponse;
 import io.netty.util.ReferenceCounted;
 
+import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ESNetty4IntegTestCase;
-import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.CountDownActionListener;
+import org.elasticsearch.action.support.SubscribableListener;
+import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
+import org.elasticsearch.common.bytes.ZeroBytesReference;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.IndexScopedSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.core.Strings;
+import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.http.HttpServerTransport;
-import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
-import org.elasticsearch.test.ESIntegTestCase.Scope;
+import org.elasticsearch.plugins.ActionPlugin;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.ChunkedRestResponseBody;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestHandler;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.action.RestToXContentListener;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xcontent.ToXContentObject;
 
+import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collection;
+import java.util.List;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
 
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;
+import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
-@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
 public class Netty4PipeliningIT extends ESNetty4IntegTestCase {
 
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return CollectionUtils.concatLists(List.of(CountDown3Plugin.class, ChunkAndFailPlugin.class), super.nodePlugins());
+    }
+
+    private static final int MAX_PIPELINE_EVENTS = 10;
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal, otherSettings))
+            .put(SETTING_PIPELINING_MAX_EVENTS.getKey(), MAX_PIPELINE_EVENTS)
+            .build();
+    }
+
     @Override
     protected boolean addMockHttpTransport() {
         return false; // enable http
     }
 
     public void testThatNettyHttpServerSupportsPipelining() throws Exception {
-        String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" };
+        runPipeliningTest(
+            CountDown3Plugin.ROUTE,
+            "/_nodes",
+            "/_nodes/stats",
+            CountDown3Plugin.ROUTE,
+            "/_cluster/health",
+            "/_cluster/state",
+            CountDown3Plugin.ROUTE,
+            "/_cat/shards"
+        );
+    }
+
+    public void testChunkingFailures() throws Exception {
+        runPipeliningTest(0, ChunkAndFailPlugin.randomRequestUri());
+        runPipeliningTest(0, ChunkAndFailPlugin.randomRequestUri(), "/_cluster/state");
+        runPipeliningTest(
+            -1, // typically get the first 2 responses, but we can hit the failing chunk and close the channel soon enough to lose them too
+            CountDown3Plugin.ROUTE,
+            CountDown3Plugin.ROUTE,
+            ChunkAndFailPlugin.randomRequestUri(),
+            "/_cluster/health",
+            CountDown3Plugin.ROUTE
+        );
+    }
 
-        HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
-        TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses();
-        TransportAddress transportAddress = randomFrom(boundAddresses);
+    public void testPipelineOverflow() throws Exception {
+        final var routes = new String[1 // the first request which never returns a response so doesn't consume a spot in the queue
+            + MAX_PIPELINE_EVENTS // the responses which fill up the queue
+            + 1 // to cause the overflow
+            + between(0, 5) // for good measure, to e.g. make sure we don't leak these responses
+        ];
+        Arrays.fill(routes, "/_cluster/health");
+        routes[0] = CountDown3Plugin.ROUTE; // never returns
+        runPipeliningTest(0, routes);
+    }
 
-        try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
-            Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests);
-            try {
-                assertThat(responses, hasSize(5));
+    private void runPipeliningTest(String... routes) throws InterruptedException {
+        runPipeliningTest(routes.length, routes);
+    }
 
-                Collection<String> opaqueIds = Netty4HttpClient.returnOpaqueIds(responses);
-                assertOpaqueIdsInOrder(opaqueIds);
+    private void runPipeliningTest(int expectedResponseCount, String... routes) throws InterruptedException {
+        try (var client = new Netty4HttpClient()) {
+            final var responses = client.get(
+                randomFrom(internalCluster().getInstance(HttpServerTransport.class).boundAddress().boundAddresses()).address(),
+                routes
+            );
+            try {
+                logger.info("response codes: {}", responses.stream().mapToInt(r -> r.status().code()).toArray());
+                if (expectedResponseCount >= 0) {
+                    assertThat(responses, hasSize(expectedResponseCount));
+                }
+                assertThat(responses.size(), lessThanOrEqualTo(routes.length));
+                assertTrue(responses.stream().allMatch(r -> r.status().code() == 200));
+                assertOpaqueIdsInOrder(Netty4HttpClient.returnOpaqueIds(responses));
             } finally {
                 responses.forEach(ReferenceCounted::release);
             }
@@ -60,4 +151,128 @@ public class Netty4PipeliningIT extends ESNetty4IntegTestCase {
         }
     }
 
+    private static final ToXContentObject EMPTY_RESPONSE = (builder, params) -> builder.startObject().endObject();
+
+    /**
+     * Adds an HTTP route that waits for 3 concurrent executions before returning any of them
+     */
+    public static class CountDown3Plugin extends Plugin implements ActionPlugin {
+
+        static final String ROUTE = "/_test/countdown_3";
+
+        @Override
+        public Collection<RestHandler> getRestHandlers(
+            Settings settings,
+            NamedWriteableRegistry namedWriteableRegistry,
+            RestController restController,
+            ClusterSettings clusterSettings,
+            IndexScopedSettings indexScopedSettings,
+            SettingsFilter settingsFilter,
+            IndexNameExpressionResolver indexNameExpressionResolver,
+            Supplier<DiscoveryNodes> nodesInCluster,
+            Predicate<NodeFeature> clusterSupportsFeature
+        ) {
+            return List.of(new BaseRestHandler() {
+                private final SubscribableListener<ToXContentObject> subscribableListener = new SubscribableListener<>();
+                private final CountDownActionListener countDownActionListener = new CountDownActionListener(
+                    3,
+                    subscribableListener.map(v -> EMPTY_RESPONSE)
+                );
+
+                private void addListener(ActionListener<ToXContentObject> listener) {
+                    subscribableListener.addListener(listener);
+                    countDownActionListener.onResponse(null);
+                }
+
+                @Override
+                public String getName() {
+                    return ROUTE;
+                }
+
+                @Override
+                public List<Route> routes() {
+                    return List.of(new Route(GET, ROUTE));
+                }
+
+                @Override
+                protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
+                    return channel -> addListener(new RestToXContentListener<>(channel));
+                }
+            });
+        }
+    }
+
+    /**
+     * Adds an HTTP route that responds with a chunked body but throws a simulated exception while encoding a chunk,
+     * after emitting the number of bytes given by the {@code fail_after_bytes} request parameter
+     */
+    public static class ChunkAndFailPlugin extends Plugin implements ActionPlugin {
+
+        static final String ROUTE = "/_test/chunk_and_fail";
+        static final String FAIL_AFTER_BYTES_PARAM = "fail_after_bytes";
+
+        static String randomRequestUri() {
+            return ROUTE + '?' + FAIL_AFTER_BYTES_PARAM + '=' + between(0, ByteSizeUnit.MB.toIntBytes(2));
+        }
+
+        @Override
+        public Collection<RestHandler> getRestHandlers(
+            Settings settings,
+            NamedWriteableRegistry namedWriteableRegistry,
+            RestController restController,
+            ClusterSettings clusterSettings,
+            IndexScopedSettings indexScopedSettings,
+            SettingsFilter settingsFilter,
+            IndexNameExpressionResolver indexNameExpressionResolver,
+            Supplier<DiscoveryNodes> nodesInCluster,
+            Predicate<NodeFeature> clusterSupportsFeature
+        ) {
+            return List.of(new BaseRestHandler() {
+                @Override
+                public String getName() {
+                    return ROUTE;
+                }
+
+                @Override
+                public List<Route> routes() {
+                    return List.of(new Route(GET, ROUTE));
+                }
+
+                @Override
+                protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
+                    final var failAfterBytes = request.paramAsInt(FAIL_AFTER_BYTES_PARAM, -1);
+                    if (failAfterBytes < 0) {
+                        throw new IllegalArgumentException("[" + FAIL_AFTER_BYTES_PARAM + "] must be present and non-negative");
+                    }
+                    return channel -> client.threadPool()
+                        .executor(randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC))
+                        .execute(() -> channel.sendResponse(RestResponse.chunked(RestStatus.OK, new ChunkedRestResponseBody() {
+                            int bytesRemaining = failAfterBytes;
+
+                            @Override
+                            public boolean isDone() {
+                                return false;
+                            }
+
+                            @Override
+                            public ReleasableBytesReference encodeChunk(int sizeHint, Recycler<BytesRef> recycler) throws IOException {
+                                assert bytesRemaining >= 0 : "already failed";
+                                if (bytesRemaining == 0) {
+                                    bytesRemaining = -1;
+                                    throw new IOException("simulated failure");
+                                } else {
+                                    final var bytesToSend = between(1, bytesRemaining);
+                                    bytesRemaining -= bytesToSend;
+                                    return ReleasableBytesReference.wrap(new ZeroBytesReference(bytesToSend));
+                                }
+                            }
+
+                            @Override
+                            public String getResponseContentTypeString() {
+                                return RestResponse.TEXT_CONTENT_TYPE;
+                            }
+                        }, null)));
+                }
+            });
+        }
+    }
 }

+ 42 - 50
modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java

@@ -28,6 +28,7 @@ import io.netty.util.concurrent.PromiseCombiner;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.ReleasableBytesReference;
 import org.elasticsearch.core.Booleans;
 import org.elasticsearch.core.Nullable;
@@ -41,9 +42,7 @@ import org.elasticsearch.transport.netty4.NettyAllocator;
 import java.io.IOException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayDeque;
-import java.util.ArrayList;
 import java.util.Comparator;
-import java.util.List;
 import java.util.PriorityQueue;
 import java.util.Queue;
 
@@ -165,8 +164,10 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
         } catch (IllegalStateException e) {
             ctx.channel().close();
         } finally {
-            if (success == false) {
-                promise.setFailure(new ClosedChannelException());
+            if (success == false && promise.isDone() == false) {
+                // The preceding failure may already have failed the promise; use tryFailure() to avoid log noise about double-completion,
+                // but also check isDone() first to avoid even constructing another exception in most cases.
+                promise.tryFailure(new ClosedChannelException());
             }
         }
     }
@@ -190,7 +191,7 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
         SPLIT_THRESHOLD = (int) (NettyAllocator.suggestedMaxAllocationSize() * 0.99);
     }
 
-    private void doWrite(ChannelHandlerContext ctx, Netty4HttpResponse readyResponse, ChannelPromise promise) throws IOException {
+    private void doWrite(ChannelHandlerContext ctx, Netty4HttpResponse readyResponse, ChannelPromise promise) {
         assert currentChunkedWrite == null : "unexpected existing write [" + currentChunkedWrite + "]";
         assert readyResponse != null : "cannot write null response";
         assert readyResponse.getSequence() == writeSequence;
@@ -216,8 +217,7 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
         writeSequence++;
     }
 
-    private void doWriteChunkedResponse(ChannelHandlerContext ctx, Netty4ChunkedHttpResponse readyResponse, ChannelPromise promise)
-        throws IOException {
+    private void doWriteChunkedResponse(ChannelHandlerContext ctx, Netty4ChunkedHttpResponse readyResponse, ChannelPromise promise) {
         final PromiseCombiner combiner = new PromiseCombiner(ctx.executor());
         final ChannelPromise first = ctx.newPromise();
         combiner.add((Future<Void>) first);
@@ -228,7 +228,7 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
             // We were able to write out the first chunk directly, try writing out subsequent chunks until the channel becomes unwritable.
             // NB "writable" means there's space in the downstream ChannelOutboundBuffer, we aren't trying to saturate the physical channel.
             while (ctx.channel().isWritable()) {
-                if (writeChunk(ctx, combiner, responseBody)) {
+                if (writeChunk(ctx, currentChunkedWrite)) {
                     finishChunkedWrite();
                     return;
                 }
@@ -237,12 +237,15 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
     }
 
     private void finishChunkedWrite() {
-        assert currentChunkedWrite != null;
+        if (currentChunkedWrite == null) {
+            // failure during chunked response serialization, we're closing the channel
+            return;
+        }
         assert currentChunkedWrite.responseBody().isDone();
         final var finishingWrite = currentChunkedWrite;
         currentChunkedWrite = null;
         writeSequence++;
-        finishingWrite.combiner.finish(finishingWrite.onDone());
+        finishingWrite.combiner().finish(finishingWrite.onDone());
     }
 
     private void splitAndWrite(ChannelHandlerContext ctx, Netty4FullHttpResponse msg, ChannelPromise promise) {
@@ -286,7 +289,7 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
         assert ctx.executor().inEventLoop();
         final Channel channel = ctx.channel();
         if (channel.isActive() == false) {
-            failQueuedWrites();
+            failQueuedWrites(ctx);
             return false;
         }
         while (channel.isWritable()) {
@@ -302,7 +305,7 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
             if (currentWrite == null) {
                 // no bytes were found queued, check if a chunked message might have become writable
                 if (currentChunkedWrite != null) {
-                    if (writeChunk(ctx, currentChunkedWrite.combiner, currentChunkedWrite.responseBody())) {
+                    if (writeChunk(ctx, currentChunkedWrite)) {
                         finishChunkedWrite();
                     }
                     continue;
@@ -313,17 +316,21 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
         }
         ctx.flush();
         if (channel.isActive() == false) {
-            failQueuedWrites();
+            failQueuedWrites(ctx);
         }
         return true;
     }
 
-    private boolean writeChunk(ChannelHandlerContext ctx, PromiseCombiner combiner, ChunkedRestResponseBody body) throws IOException {
+    private boolean writeChunk(ChannelHandlerContext ctx, ChunkedWrite chunkedWrite) {
+        final var body = chunkedWrite.responseBody();
+        final var combiner = chunkedWrite.combiner();
         assert body.isDone() == false : "should not continue to try and serialize once done";
-        final ReleasableBytesReference bytes = body.encodeChunk(
-            Netty4WriteThrottlingHandler.MAX_BYTES_PER_WRITE,
-            serverTransport.recycler()
-        );
+        final ReleasableBytesReference bytes;
+        try {
+            bytes = body.encodeChunk(Netty4WriteThrottlingHandler.MAX_BYTES_PER_WRITE, serverTransport.recycler());
+        } catch (Exception e) {
+            return handleChunkingFailure(ctx, chunkedWrite, e);
+        }
         final ByteBuf content = Netty4Utils.toByteBuf(bytes);
         final boolean done = body.isDone();
         final ChannelFuture f = ctx.write(done ? new DefaultLastHttpContent(content) : new DefaultHttpContent(content));
@@ -332,39 +339,30 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
         return done;
     }
 
-    private void failQueuedWrites() {
+    private boolean handleChunkingFailure(ChannelHandlerContext ctx, ChunkedWrite chunkedWrite, Exception e) {
+        logger.error(Strings.format("caught exception while encoding response chunk, closing connection %s", ctx.channel()), e);
+        assert currentChunkedWrite == chunkedWrite;
+        currentChunkedWrite = null;
+        chunkedWrite.combiner().add(ctx.channel().close());
+        chunkedWrite.combiner().add(ctx.newFailedFuture(e));
+        chunkedWrite.combiner().finish(chunkedWrite.onDone());
+        return true;
+    }
+
+    private void failQueuedWrites(ChannelHandlerContext ctx) {
         WriteOperation queuedWrite;
         while ((queuedWrite = queuedWrites.poll()) != null) {
             queuedWrite.failAsClosedChannel();
         }
         if (currentChunkedWrite != null) {
-            safeFailPromise(currentChunkedWrite.onDone, new ClosedChannelException());
-            currentChunkedWrite = null;
-        }
-    }
-
-    @Override
-    public void close(ChannelHandlerContext ctx, ChannelPromise promise) {
-        if (currentChunkedWrite != null) {
-            safeFailPromise(currentChunkedWrite.onDone, new ClosedChannelException());
+            final var chunkedWrite = currentChunkedWrite;
             currentChunkedWrite = null;
+            chunkedWrite.combiner().add(ctx.newFailedFuture(new ClosedChannelException()));
+            chunkedWrite.combiner().finish(chunkedWrite.onDone());
         }
-        List<Tuple<? extends Netty4HttpResponse, ChannelPromise>> inflightResponses = removeAllInflightResponses();
-
-        if (inflightResponses.isEmpty() == false) {
-            ClosedChannelException closedChannelException = new ClosedChannelException();
-            for (Tuple<? extends Netty4HttpResponse, ChannelPromise> inflightResponse : inflightResponses) {
-                safeFailPromise(inflightResponse.v2(), closedChannelException);
-            }
-        }
-        ctx.close(promise);
-    }
-
-    private void safeFailPromise(ChannelPromise promise, Exception ex) {
-        try {
-            promise.setFailure(ex);
-        } catch (RuntimeException e) {
-            logger.error("unexpected error while releasing pipelined http responses", e);
+        Tuple<? extends Netty4HttpResponse, ChannelPromise> pipelinedWrite;
+        while ((pipelinedWrite = outboundHoldingQueue.poll()) != null) {
+            pipelinedWrite.v2().tryFailure(new ClosedChannelException());
         }
     }
 
@@ -398,12 +396,6 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
         }
     }
 
-    private List<Tuple<? extends Netty4HttpResponse, ChannelPromise>> removeAllInflightResponses() {
-        ArrayList<Tuple<? extends Netty4HttpResponse, ChannelPromise>> responses = new ArrayList<>(outboundHoldingQueue);
-        outboundHoldingQueue.clear();
-        return responses;
-    }
-
     private record WriteOperation(HttpObject msg, ChannelPromise promise) {
 
         void failAsClosedChannel() {
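
The reworked failure paths above lean on two Netty contracts: ChannelPromise.tryFailure() returns false instead of throwing if the promise is already complete (hence the comment about avoiding log noise in writeMessage), and PromiseCombiner aggregates several futures into one promise that fails if any constituent failed, which is how handleChunkingFailure() propagates the encoding exception to onDone(). A minimal standalone sketch of both contracts, assuming Netty 4.1 and using an EmbeddedChannel purely for illustration (not part of this commit):

import io.netty.channel.ChannelPromise;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.util.concurrent.PromiseCombiner;

import java.nio.channels.ClosedChannelException;

// Sketch of the two Netty contracts relied on above (assumed Netty 4.1 APIs).
public class PromiseContractsSketch {
    public static void main(String[] args) {
        EmbeddedChannel channel = new EmbeddedChannel();

        // 1. tryFailure() on an already-completed promise returns false rather than
        //    throwing, unlike setFailure(), which throws IllegalStateException.
        ChannelPromise completed = channel.newPromise();
        completed.trySuccess();
        System.out.println("tryFailure on done promise: " + completed.tryFailure(new ClosedChannelException())); // false

        // 2. A PromiseCombiner's aggregate promise fails if any combined future
        //    failed, so a chunk-encoding exception reaches the response's onDone().
        PromiseCombiner combiner = new PromiseCombiner(channel.eventLoop());
        combiner.add(channel.newSucceededFuture());
        combiner.add(channel.newFailedFuture(new RuntimeException("simulated chunk encoding failure")));
        ChannelPromise aggregate = channel.newPromise();
        combiner.finish(aggregate);
        System.out.println("aggregate failed with: " + aggregate.cause()); // the simulated failure
    }
}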

+ 22 - 6
modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java

@@ -18,6 +18,7 @@ import io.netty.channel.ChannelOption;
 import io.netty.channel.SimpleChannelInboundHandler;
 import io.netty.channel.nio.NioEventLoopGroup;
 import io.netty.channel.socket.SocketChannel;
+import io.netty.handler.codec.PrematureChannelClosureException;
 import io.netty.handler.codec.http.DefaultFullHttpRequest;
 import io.netty.handler.codec.http.FullHttpRequest;
 import io.netty.handler.codec.http.FullHttpResponse;
@@ -31,8 +32,8 @@ import io.netty.handler.codec.http.HttpRequest;
 import io.netty.handler.codec.http.HttpResponse;
 import io.netty.handler.codec.http.HttpVersion;
 
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.transport.netty4.NettyAllocator;
@@ -173,10 +174,9 @@ class Netty4HttpClient implements Closeable {
 
         @Override
         protected void initChannel(SocketChannel ch) {
-            final int maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt();
             ch.pipeline().addLast(new HttpClientCodec());
             ch.pipeline().addLast(new HttpContentDecompressor());
-            ch.pipeline().addLast(new HttpObjectAggregator(maxContentLength));
+            ch.pipeline().addLast(new HttpObjectAggregator(ByteSizeUnit.MB.toIntBytes(100)));
             ch.pipeline().addLast(new SimpleChannelInboundHandler<HttpObject>() {
                 @Override
                 protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
@@ -189,9 +189,25 @@ class Netty4HttpClient implements Closeable {
                 }
 
                 @Override
-                public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
-                    super.exceptionCaught(ctx, cause);
-                    latch.countDown();
+                public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
+                    if (cause instanceof PrematureChannelClosureException) {
+                        // no more requests coming, so fast-forward the latch
+                        fastForward();
+                    } else {
+                        ExceptionsHelper.maybeDieOnAnotherThread(new AssertionError(cause));
+                    }
+                }
+
+                @Override
+                public void channelInactive(ChannelHandlerContext ctx) throws Exception {
+                    fastForward();
+                    super.channelInactive(ctx);
+                }
+
+                private void fastForward() {
+                    while (latch.getCount() > 0) {
+                        latch.countDown();
+                    }
                 }
             });
         }

+ 8 - 1
server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java

@@ -424,7 +424,14 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo
             // The channel may not be present if the close listener (set in serverAcceptedChannel) runs before this method because the
             // connection closed early
             if (trackingChannel == null) {
-                logger.warn("http channel [{}] missing tracking channel", httpChannel);
+                httpRequest.release();
+                logger.warn(
+                    "http channel [{}] closed before starting to handle [{}][{}][{}]",
+                    httpChannel,
+                    httpRequest.header(Task.X_OPAQUE_ID_HTTP_HEADER),
+                    httpRequest.method(),
+                    httpRequest.uri()
+                );
                 return;
             }
             trackingChannel.incomingRequest();
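
The added httpRequest.release() is the actual leak fix in this hunk: the incoming request wraps reference-counted Netty buffers, so a request that is dropped before any handler takes ownership must still be released exactly once. A tiny sketch of the underlying reference-counting contract, assuming Netty's ByteBuf API (illustrative, not part of this commit):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

import java.nio.charset.StandardCharsets;

// Sketch of Netty's reference-counting contract: whoever drops a reference-counted
// message without passing it on must release it, or its memory is leaked.
public class ReleaseSketch {
    public static void main(String[] args) {
        ByteBuf requestBody = Unpooled.buffer(16).writeBytes("request body".getBytes(StandardCharsets.UTF_8));
        System.out.println("refCnt before release: " + requestBody.refCnt()); // 1
        boolean deallocated = requestBody.release(); // the channel closed early, so nobody else will release it
        System.out.println("deallocated: " + deallocated + ", refCnt now: " + requestBody.refCnt()); // true, 0
    }
}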

+ 1 - 3
server/src/main/java/org/elasticsearch/rest/action/RestActionListener.java

@@ -21,9 +21,7 @@ import org.elasticsearch.tasks.TaskCancelledException;
  */
 public abstract class RestActionListener<Response> implements ActionListener<Response> {
 
-    // we use static here so we won't have to pass the actual logger each time for a very rare case of logging
-    // where the settings don't matter that much
-    private static final Logger logger = LogManager.getLogger(RestResponseListener.class);
+    private static final Logger logger = LogManager.getLogger(RestActionListener.class);
 
     protected final RestChannel channel;