Version in base suite: 4.1.48-10 Base version: netty_4.1.48-10 Target version: netty_4.1.48-10+deb13u1 Base file: /srv/ftp-master.debian.org/ftp/pool/main/n/netty/netty_4.1.48-10.dsc Target file: /srv/ftp-master.debian.org/policy/pool/main/n/netty/netty_4.1.48-10+deb13u1.dsc changelog | 46 + patches/CVE-2025-55163_1.patch | 145 +++++ patches/CVE-2025-55163_2.patch | 492 +++++++++++++++++ patches/CVE-2025-55163_before-1.patch | 91 +++ patches/CVE-2025-58056.patch | 724 ++++++++++++++++++++++++++ patches/CVE-2025-58057.patch | 930 ++++++++++++++++++++++++++++++++++ patches/CVE-2025-59419 | 180 ++++++ patches/CVE-2025-59419.patch | 180 ++++++ patches/CVE-2025-67735.patch | 389 ++++++++++++++ patches/series | 7 salsa-ci.yml | 12 11 files changed, 3196 insertions(+) dpkg-source: warning: cannot verify inline signature for /srv/release.debian.org/tmp/tmpwzf0l9rv/netty_4.1.48-10.dsc: no acceptable signature found dpkg-source: warning: cannot verify inline signature for /srv/release.debian.org/tmp/tmpwzf0l9rv/netty_4.1.48-10+deb13u1.dsc: no acceptable signature found diff -Nru netty-4.1.48/debian/changelog netty-4.1.48/debian/changelog --- netty-4.1.48/debian/changelog 2024-05-12 19:20:10.000000000 +0000 +++ netty-4.1.48/debian/changelog 2026-02-09 10:26:12.000000000 +0000 @@ -1,3 +1,49 @@ +netty (1:4.1.48-10+deb13u1) trixie-security; urgency=high + + * Team upload + * Fix CVE-2025-55163 (Closes: #1111105) + Netty is vulnerable to MadeYouReset DDoS. + This is a logical vulnerability in the HTTP/2 protocol, + that uses malformed HTTP/2 control frames in order to break + the max concurrent streams limit, which results in resource + exhaustion and distributed denial of service. + * Fix CVE-2025-58056 (Closes: #1113994) + when supplied with specially crafted input, BrotliDecoder and + certain other decompression decoders will allocate a large + number of reachable byte buffers, which can lead to + denial of service. 
BrotliDecoder.decompress has + no limit in how often it calls pull, decompressing + data 64K bytes at a time. The buffers are saved in + the output list, and remain reachable until OOM is hit. + * Fix CVE-2025-58057: + When supplied with specially crafted input, BrotliDecoder + and certain other decompression decoders will allocate + a large number of reachable byte buffers, which can lead + to denial of service. BrotliDecoder.decompress has no limit + in how often it calls pull, decompressing data 64K bytes at + a time. The buffers are saved in the output list, and remain + reachable until OOM is hit. + (Closes: #1113994) + * Fix CVE-2025-59419 (Closes: #1118282) + SMTP Command Injection Vulnerability Allowing Email Forgery + An SMTP Command Injection (CRLF Injection) vulnerability + in Netty's SMTP codec allows a remote attacker who can control + SMTP command parameters (e.g., an email recipient) + to forge arbitrary emails from the trusted server. + This bypasses standard email authentication and can + be used to impersonate executives and forge high-stakes + corporate communications. + * Fix CVE-2025-67735 (Closes: #1123606) + `io.netty.handler.codec.http.HttpRequestEncoder` + has a CRLF injection with the request URI when constructing + a request. This leads to request smuggling when + `HttpRequestEncoder` is used without proper sanitization + of the URI. Any application / framework using `HttpRequestEncoder` + can be subject to be abused to perform request smuggling using + CRLF injection + + -- Bastien Roucariès Mon, 09 Feb 2026 11:26:12 +0100 + netty (1:4.1.48-10) unstable; urgency=high * Team upload. 
diff -Nru netty-4.1.48/debian/patches/CVE-2025-55163_1.patch netty-4.1.48/debian/patches/CVE-2025-55163_1.patch --- netty-4.1.48/debian/patches/CVE-2025-55163_1.patch 1970-01-01 00:00:00.000000000 +0000 +++ netty-4.1.48/debian/patches/CVE-2025-55163_1.patch 2026-02-09 10:23:38.000000000 +0000 @@ -0,0 +1,145 @@ +From: Norman Maurer +Date: Mon, 28 Jul 2025 08:25:35 -1000 +Subject: =?utf-8?q?HTTP2=3A_Http2ConnectionHandler_should_always_use_Http2C?= + =?utf-8?q?onnectionEncode=E2=80=A6_=28=2315518=29?= +MIME-Version: 1.0 +Content-Type: text/plain; charset="utf-8" +Content-Transfer-Encoding: 8bit + +…r (#15516) + +Motivation: + +We sometimes directly used the Http2FrameWriter which is not correct as +someone might have supplied a custom Http2ConnectionEncoder + +Modifications: + +Use Http2ConnectionEncoder when writing RST frames + +Result: + +Don't by-pass Http2ConnectionEncoder + +origin: backport, https://github.com/netty/netty/commit/be53dc3c9acd9af2e20d0c3c07cd77115a594cf1 +bug: https://github.com/netty/netty/security/advisories/GHSA-prj3-ccx8-p6x4 +bug-github-pull: https://github.com/netty/netty/pull/15516 +--- + .../codec/http2/Http2ConnectionHandler.java | 6 +++--- + .../codec/http2/Http2ConnectionHandlerTest.java | 22 +++++++++++----------- + 2 files changed, 14 insertions(+), 14 deletions(-) + +--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java ++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java +@@ -706,7 +706,7 @@ + try { + stream = encoder.connection().remote().createStream(streamId, true); + } catch (Http2Exception e) { +- resetUnknownStream(ctx, streamId, http2Ex.error().code(), ctx.newPromise()); ++ encoder().writeRstStream(ctx, streamId, http2Ex.error().code(), ctx.newPromise()); + return; + } + } +@@ -723,10 +723,10 @@ + + if (stream == null) { + if (!outbound || connection().local().mayHaveCreatedStream(streamId)) { +- resetUnknownStream(ctx, streamId, http2Ex.error().code(), 
ctx.newPromise()); ++ encoder().writeRstStream(ctx, streamId, http2Ex.error().code(), ctx.newPromise()); + } + } else { +- resetStream(ctx, stream, http2Ex.error().code(), ctx.newPromise()); ++ encoder().writeRstStream(ctx, streamId, http2Ex.error().code(), ctx.newPromise()); + } + } + +--- a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java ++++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java +@@ -403,7 +403,7 @@ + when(connection.isServer()).thenReturn(true); + when(stream.isHeadersSent()).thenReturn(false); + when(remote.lastStreamCreated()).thenReturn(STREAM_ID); +- when(frameWriter.writeRstStream(eq(ctx), eq(STREAM_ID), ++ when(encoder.writeRstStream(eq(ctx), eq(STREAM_ID), + eq(PROTOCOL_ERROR.code()), eq(promise))).thenReturn(future); + + handler.exceptionCaught(ctx, e); +@@ -413,7 +413,7 @@ + captor.capture(), eq(padding), eq(true), eq(promise)); + Http2Headers headers = captor.getValue(); + assertEquals(HttpResponseStatus.REQUEST_HEADER_FIELDS_TOO_LARGE.codeAsText(), headers.status()); +- verify(frameWriter).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise); ++ verify(encoder).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise); + } + + @Test +@@ -427,14 +427,14 @@ + when(connection.isServer()).thenReturn(true); + when(stream.isHeadersSent()).thenReturn(false); + when(remote.lastStreamCreated()).thenReturn(STREAM_ID); +- when(frameWriter.writeRstStream(eq(ctx), eq(STREAM_ID), ++ when(encoder.writeRstStream(eq(ctx), eq(STREAM_ID), + eq(PROTOCOL_ERROR.code()), eq(promise))).thenReturn(future); + + handler.exceptionCaught(ctx, e); + + verify(encoder, never()).writeHeaders(eq(ctx), eq(STREAM_ID), + any(Http2Headers.class), eq(padding), eq(true), eq(promise)); +- verify(frameWriter).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise); ++ verify(encoder).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise); + } + + @Test +@@ -448,14 +448,14 
@@ + when(connection.isServer()).thenReturn(false); + when(stream.isHeadersSent()).thenReturn(false); + when(remote.lastStreamCreated()).thenReturn(STREAM_ID); +- when(frameWriter.writeRstStream(eq(ctx), eq(STREAM_ID), ++ when(encoder.writeRstStream(eq(ctx), eq(STREAM_ID), + eq(PROTOCOL_ERROR.code()), eq(promise))).thenReturn(future); + + handler.exceptionCaught(ctx, e); + + verify(encoder, never()).writeHeaders(eq(ctx), eq(STREAM_ID), + any(Http2Headers.class), eq(padding), eq(true), eq(promise)); +- verify(frameWriter).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise); ++ verify(encoder).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise); + } + + @Test +@@ -484,14 +484,14 @@ + when(connection.isServer()).thenReturn(true); + when(stream.isHeadersSent()).thenReturn(true); + when(remote.lastStreamCreated()).thenReturn(STREAM_ID); +- when(frameWriter.writeRstStream(eq(ctx), eq(STREAM_ID), ++ when(encoder.writeRstStream(eq(ctx), eq(STREAM_ID), + eq(PROTOCOL_ERROR.code()), eq(promise))).thenReturn(future); + handler.exceptionCaught(ctx, e); + + verify(encoder, never()).writeHeaders(eq(ctx), eq(STREAM_ID), + any(Http2Headers.class), eq(padding), eq(true), eq(promise)); + +- verify(frameWriter).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise); ++ verify(encoder).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise); + } + + @Test +@@ -508,15 +508,15 @@ + when(connection.isServer()).thenReturn(true); + when(stream.isHeadersSent()).thenReturn(false); + when(remote.lastStreamCreated()).thenReturn(STREAM_ID); +- when(frameWriter.writeRstStream(eq(ctx), eq(STREAM_ID), +- eq(Http2Error.PROTOCOL_ERROR.code()), eq(promise))).thenReturn(future); ++ when(encoder.writeRstStream(eq(ctx), eq(STREAM_ID), ++ eq(PROTOCOL_ERROR.code()), eq(promise))).thenReturn(future); + handler.exceptionCaught(ctx, e); + + verify(remote).createStream(STREAM_ID, true); + verify(encoder).writeHeaders(eq(ctx), eq(STREAM_ID), + any(Http2Headers.class), 
eq(padding), eq(true), eq(promise)); + +- verify(frameWriter).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise); ++ verify(encoder).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise); + } + + @Test diff -Nru netty-4.1.48/debian/patches/CVE-2025-55163_2.patch netty-4.1.48/debian/patches/CVE-2025-55163_2.patch --- netty-4.1.48/debian/patches/CVE-2025-55163_2.patch 1970-01-01 00:00:00.000000000 +0000 +++ netty-4.1.48/debian/patches/CVE-2025-55163_2.patch 2026-02-09 10:23:38.000000000 +0000 @@ -0,0 +1,492 @@ +From: Norman Maurer +Date: Wed, 13 Aug 2025 14:14:00 +0200 +Subject: [PATCH] Merge commit from fork + +* Enforce the maximum number of RST frames that can be sent in window of time + +Motivation: + +A remote peer might be able to trigger an instance to generate and send RST frames by sending invalid frames on an existing stream. This can cause high resource usage and so might be abused by a remote peer. + +Modifications: + +Limit the number of RSTs that we allow to be generated and so send in a specific time window. If this limit is reached a GO_AWAY frame is send and the connection be closed. 
+ +Result: + +Fix high resource usage that can be caused by a remote peer by trigger RST frames + +* Adjust testing + +* Address comments + +origin: backport, https://github.com/netty/netty/commit/009bd17b38a39fb1eecf9d22ea8ae8108afaac59 +bug: https://github.com/netty/netty/security/advisories/GHSA-prj3-ccx8-p6x4 +--- + .../AbstractHttp2ConnectionHandlerBuilder.java | 54 +++++- + .../codec/http2/Http2ConnectionHandlerBuilder.java | 8 + + .../codec/http2/Http2FrameCodecBuilder.java | 6 + + .../codec/http2/Http2MaxRstFrameLimitEncoder.java | 85 ++++++++ + .../codec/http2/Http2MultiplexCodecBuilder.java | 6 + + .../http2/Http2MaxRstFrameLimitEncoderTest.java | 213 +++++++++++++++++++++ + 6 files changed, 367 insertions(+), 5 deletions(-) + create mode 100644 codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MaxRstFrameLimitEncoder.java + create mode 100644 codec-http2/src/test/java/io/netty/handler/codec/http2/Http2MaxRstFrameLimitEncoderTest.java + +diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java +index fc70dfe..572da55 100644 +--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java ++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java +@@ -112,7 +112,10 @@ public abstract class AbstractHttp2ConnectionHandlerBuilder 0 && maxEncodedRstFramesSecondsPerWindow > 0) { ++ encoder = new Http2MaxRstFrameLimitEncoder( ++ encoder, maxEncodedRstFrames, maxEncodedRstFramesSecondsPerWindow); ++ } + if (encoderEnforceMaxConcurrentStreams) { + if (connection.isServer()) { + encoder.close(); +@@ -575,8 +608,19 @@ public abstract class AbstractHttp2ConnectionHandlerBuilder 0 && secondsPerWindow > 0) { +- decoder = new Http2MaxRstFrameDecoder(decoder, maxRstFrames, secondsPerWindow); ++ final int maxDecodedRstFrames; 
++ if (maxDecodedRstFramesPerWindow == null) { ++ // Only enable by default on the server. ++ if (isServer()) { ++ maxDecodedRstFrames = DEFAULT_MAX_RST_FRAMES_PER_CONNECTION_FOR_SERVER; ++ } else { ++ maxDecodedRstFrames = 0; ++ } ++ } else { ++ maxDecodedRstFrames = maxDecodedRstFramesPerWindow; ++ } ++ if (maxDecodedRstFrames > 0 && maxDecodedRstFramesSecondsPerWindow > 0) { ++ decoder = new Http2MaxRstFrameDecoder(decoder, maxDecodedRstFrames, maxDecodedRstFramesSecondsPerWindow); + } + final T handler; + try { +diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandlerBuilder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandlerBuilder.java +index c6d1ce7..1d2ac71 100644 +--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandlerBuilder.java ++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandlerBuilder.java +@@ -19,6 +19,8 @@ package io.netty.handler.codec.http2; + import io.netty.handler.codec.http2.Http2HeadersEncoder.SensitivityDetector; + import io.netty.util.internal.UnstableApi; + ++import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; ++ + /** + * Builder which builds {@link Http2ConnectionHandler} objects. 
+ */ +@@ -98,6 +100,12 @@ public final class Http2ConnectionHandlerBuilder + return super.decoupleCloseAndGoAway(decoupleCloseAndGoAway); + } + ++ @Override ++ public Http2ConnectionHandlerBuilder encoderEnforceMaxRstFramesPerWindow( ++ int maxRstFramesPerWindow, int secondsPerWindow) { ++ return super.encoderEnforceMaxRstFramesPerWindow(maxRstFramesPerWindow, secondsPerWindow); ++ } ++ + @Override + public Http2ConnectionHandler build() { + return super.build(); +diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodecBuilder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodecBuilder.java +index ab183e5..ef0cffe 100644 +--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodecBuilder.java ++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodecBuilder.java +@@ -183,6 +183,12 @@ public class Http2FrameCodecBuilder extends + return super.decoderEnforceMaxRstFramesPerWindow(maxRstFramesPerWindow, secondsPerWindow); + } + ++ @Override ++ public Http2FrameCodecBuilder encoderEnforceMaxRstFramesPerWindow( ++ int maxRstFramesPerWindow, int secondsPerWindow) { ++ return super.encoderEnforceMaxRstFramesPerWindow(maxRstFramesPerWindow, secondsPerWindow); ++ } ++ + /** + * Build a {@link Http2FrameCodec} object. + */ +diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MaxRstFrameLimitEncoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MaxRstFrameLimitEncoder.java +new file mode 100644 +index 0000000..a36b3d7 +--- /dev/null ++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MaxRstFrameLimitEncoder.java +@@ -0,0 +1,85 @@ ++/* ++ * Copyright 2025 The Netty Project ++ * ++ * The Netty Project licenses this file to you under the Apache License, version 2.0 (the ++ * "License"); you may not use this file except in compliance with the License. 
You may obtain a ++ * copy of the License at: ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software distributed under the License ++ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express ++ * or implied. See the License for the specific language governing permissions and limitations under ++ * the License. ++ */ ++package io.netty.handler.codec.http2; ++ ++import io.netty.channel.ChannelFuture; ++import io.netty.channel.ChannelHandlerContext; ++import io.netty.channel.ChannelPromise; ++import io.netty.util.internal.logging.InternalLogger; ++import io.netty.util.internal.logging.InternalLoggerFactory; ++ ++import java.util.concurrent.TimeUnit; ++ ++/** ++ * {@link DecoratingHttp2ConnectionEncoder} which guards against a remote peer that will trigger a massive amount ++ * of RST frames on an existing connection. ++ * This encoder will tear-down the connection once we reached the configured limit to reduce the risk of DDOS. 
++ */ ++final class Http2MaxRstFrameLimitEncoder extends DecoratingHttp2ConnectionEncoder { ++ private static final InternalLogger logger = InternalLoggerFactory.getInstance(Http2MaxRstFrameLimitEncoder.class); ++ ++ private final long nanosPerWindow; ++ private final int maxRstFramesPerWindow; ++ private long lastRstFrameNano = System.nanoTime(); ++ private int sendRstInWindow; ++ private Http2LifecycleManager lifecycleManager; ++ ++ Http2MaxRstFrameLimitEncoder(Http2ConnectionEncoder delegate, int maxRstFramesPerWindow, int secondsPerWindow) { ++ super(delegate); ++ this.maxRstFramesPerWindow = maxRstFramesPerWindow; ++ this.nanosPerWindow = TimeUnit.SECONDS.toNanos(secondsPerWindow); ++ } ++ ++ @Override ++ public void lifecycleManager(Http2LifecycleManager lifecycleManager) { ++ this.lifecycleManager = lifecycleManager; ++ super.lifecycleManager(lifecycleManager); ++ } ++ ++ @Override ++ public ChannelFuture writeRstStream(ChannelHandlerContext ctx, int streamId, long errorCode, ++ ChannelPromise promise) { ++ ChannelFuture future = super.writeRstStream(ctx, streamId, errorCode, promise); ++ if (countRstFrameErrorCode(errorCode)) { ++ long currentNano = System.nanoTime(); ++ if (currentNano - lastRstFrameNano >= nanosPerWindow) { ++ lastRstFrameNano = currentNano; ++ sendRstInWindow = 1; ++ } else { ++ sendRstInWindow++; ++ if (sendRstInWindow > maxRstFramesPerWindow) { ++ Http2Exception exception = Http2Exception.connectionError(Http2Error.ENHANCE_YOUR_CALM, ++ "Maximum number %d of RST frames frames reached within %d seconds", maxRstFramesPerWindow, ++ TimeUnit.NANOSECONDS.toSeconds(nanosPerWindow)); ++ ++ logger.debug("{} Maximum number {} of RST frames reached within {} seconds, " + ++ "closing connection with {} error", ctx.channel(), maxRstFramesPerWindow, ++ TimeUnit.NANOSECONDS.toSeconds(nanosPerWindow), exception.error(), ++ exception); ++ // First notify the Http2LifecycleManager and then close the connection. 
++ lifecycleManager.onError(ctx, true, exception); ++ ctx.close(); ++ } ++ } ++ } ++ ++ return future; ++ } ++ ++ private boolean countRstFrameErrorCode(long errorCode) { ++ // Don't count CANCEL and NO_ERROR as these might be ok. ++ return errorCode != Http2Error.CANCEL.code() && errorCode != Http2Error.NO_ERROR.code(); ++ } ++} +diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MultiplexCodecBuilder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MultiplexCodecBuilder.java +index 0a38d4e..da097ec 100644 +--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MultiplexCodecBuilder.java ++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MultiplexCodecBuilder.java +@@ -212,6 +212,12 @@ public class Http2MultiplexCodecBuilder + return super.decoderEnforceMaxRstFramesPerWindow(maxRstFramesPerWindow, secondsPerWindow); + } + ++ @Override ++ public Http2MultiplexCodecBuilder encoderEnforceMaxRstFramesPerWindow( ++ int maxRstFramesPerWindow, int secondsPerWindow) { ++ return super.encoderEnforceMaxRstFramesPerWindow(maxRstFramesPerWindow, secondsPerWindow); ++ } ++ + @Override + public Http2MultiplexCodec build() { + Http2FrameWriter frameWriter = this.frameWriter; +diff --git a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2MaxRstFrameLimitEncoderTest.java b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2MaxRstFrameLimitEncoderTest.java +new file mode 100644 +index 0000000..24c40fe +--- /dev/null ++++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2MaxRstFrameLimitEncoderTest.java +@@ -0,0 +1,213 @@ ++/* ++ * Copyright 2019 The Netty Project ++ * ++ * The Netty Project licenses this file to you under the Apache License, version 2.0 (the ++ * "License"); you may not use this file except in compliance with the License. 
You may obtain a ++ * copy of the License at: ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software distributed under the License ++ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express ++ * or implied. See the License for the specific language governing permissions and limitations under ++ * the License. ++ */ ++ ++package io.netty.handler.codec.http2; ++ ++import io.netty.buffer.ByteBuf; ++import io.netty.buffer.UnpooledByteBufAllocator; ++import io.netty.channel.Channel; ++import io.netty.channel.ChannelConfig; ++import io.netty.channel.ChannelFuture; ++import io.netty.channel.ChannelHandlerContext; ++import io.netty.channel.ChannelMetadata; ++import io.netty.channel.ChannelPromise; ++import io.netty.channel.DefaultChannelPromise; ++import io.netty.channel.DefaultMessageSizeEstimator; ++import io.netty.util.ReferenceCountUtil; ++import io.netty.util.concurrent.EventExecutor; ++import io.netty.util.concurrent.ImmediateEventExecutor; ++import org.junit.jupiter.api.AfterEach; ++import org.junit.jupiter.api.BeforeEach; ++import org.junit.jupiter.api.Test; ++import org.junit.jupiter.params.ParameterizedTest; ++import org.junit.jupiter.params.provider.EnumSource; ++import org.junit.jupiter.params.provider.ValueSource; ++import org.mockito.Mock; ++import org.mockito.MockitoAnnotations; ++import org.mockito.invocation.InvocationOnMock; ++import org.mockito.stubbing.Answer; ++ ++import java.util.ArrayDeque; ++import java.util.Queue; ++ ++import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_MAX_FRAME_SIZE; ++import static io.netty.handler.codec.http2.Http2Error.CANCEL; ++import static io.netty.handler.codec.http2.Http2Error.ENHANCE_YOUR_CALM; ++import static io.netty.handler.codec.http2.Http2Error.NO_ERROR; ++import static org.junit.jupiter.api.Assertions.assertTrue; ++import static org.mockito.Mockito.any; ++import static 
org.mockito.Mockito.anyInt; ++import static org.mockito.Mockito.anyLong; ++import static org.mockito.Mockito.atLeast; ++import static org.mockito.Mockito.doAnswer; ++import static org.mockito.Mockito.eq; ++import static org.mockito.Mockito.mock; ++import static org.mockito.Mockito.times; ++import static org.mockito.Mockito.verify; ++import static org.mockito.Mockito.when; ++ ++/** ++ * Tests for {@link Http2MaxRstFrameLimitEncoder}. ++ */ ++public class Http2MaxRstFrameLimitEncoderTest { ++ ++ private Http2MaxRstFrameLimitEncoder encoder; ++ ++ @Mock ++ private Http2FrameWriter writer; ++ ++ @Mock ++ private ChannelHandlerContext ctx; ++ ++ @Mock ++ private Channel channel; ++ ++ @Mock ++ private Channel.Unsafe unsafe; ++ ++ @Mock ++ private ChannelConfig config; ++ ++ @Mock ++ private EventExecutor executor; ++ ++ private final Queue goAwayPromises = new ArrayDeque(); ++ ++ /** ++ * Init fields and do mocking. ++ */ ++ @BeforeEach ++ public void setup() throws Exception { ++ MockitoAnnotations.initMocks(this); ++ Http2FrameWriter.Configuration configuration = mock(Http2FrameWriter.Configuration.class); ++ Http2FrameSizePolicy frameSizePolicy = mock(Http2FrameSizePolicy.class); ++ when(writer.configuration()).thenReturn(configuration); ++ when(configuration.frameSizePolicy()).thenReturn(frameSizePolicy); ++ when(frameSizePolicy.maxFrameSize()).thenReturn(DEFAULT_MAX_FRAME_SIZE); ++ ++ when(writer.writeRstStream(eq(ctx), anyInt(), anyLong(), any(ChannelPromise.class))) ++ .thenAnswer(new Answer() { ++ @Override ++ public ChannelFuture answer(InvocationOnMock invocationOnMock) { ++ return handlePromise(invocationOnMock, 3); ++ } ++ }); ++ when(writer.writeGoAway(any(ChannelHandlerContext.class), anyInt(), anyLong(), any(ByteBuf.class), ++ any(ChannelPromise.class))).thenAnswer(new Answer() { ++ @Override ++ public ChannelFuture answer(InvocationOnMock invocationOnMock) { ++ ReferenceCountUtil.release(invocationOnMock.getArgument(3)); ++ ChannelPromise promise = 
invocationOnMock.getArgument(4); ++ goAwayPromises.offer(promise); ++ return promise; ++ } ++ }); ++ Http2Connection connection = new DefaultHttp2Connection(false); ++ connection.remote().flowController(new DefaultHttp2RemoteFlowController(connection)); ++ connection.local().flowController(new DefaultHttp2LocalFlowController(connection).frameWriter(writer)); ++ ++ DefaultHttp2ConnectionEncoder defaultEncoder = ++ new DefaultHttp2ConnectionEncoder(connection, writer); ++ encoder = new Http2MaxRstFrameLimitEncoder(defaultEncoder, 2, 1); ++ DefaultHttp2ConnectionDecoder decoder = ++ new DefaultHttp2ConnectionDecoder(connection, encoder, mock(Http2FrameReader.class)); ++ Http2ConnectionHandler handler = new Http2ConnectionHandlerBuilder() ++ .frameListener(mock(Http2FrameListener.class)) ++ .codec(decoder, encoder).build(); ++ ++ // Set LifeCycleManager on encoder and decoder ++ when(ctx.channel()).thenReturn(channel); ++ when(ctx.alloc()).thenReturn(UnpooledByteBufAllocator.DEFAULT); ++ when(channel.alloc()).thenReturn(UnpooledByteBufAllocator.DEFAULT); ++ when(executor.inEventLoop()).thenReturn(true); ++ doAnswer(new Answer() { ++ @Override ++ public ChannelPromise answer(InvocationOnMock invocation) throws Throwable { ++ return newPromise(); ++ } ++ }).when(ctx).newPromise(); ++ when(ctx.executor()).thenReturn(executor); ++ when(channel.isActive()).thenReturn(false); ++ when(channel.config()).thenReturn(config); ++ when(channel.isWritable()).thenReturn(true); ++ when(channel.bytesBeforeUnwritable()).thenReturn(Long.MAX_VALUE); ++ when(config.getWriteBufferHighWaterMark()).thenReturn(Integer.MAX_VALUE); ++ when(config.getMessageSizeEstimator()).thenReturn(DefaultMessageSizeEstimator.DEFAULT); ++ ChannelMetadata metadata = new ChannelMetadata(false, 16); ++ when(channel.metadata()).thenReturn(metadata); ++ when(channel.unsafe()).thenReturn(unsafe); ++ handler.handlerAdded(ctx); ++ } ++ ++ private ChannelPromise handlePromise(InvocationOnMock invocationOnMock, int 
promiseIdx) { ++ ChannelPromise promise = invocationOnMock.getArgument(promiseIdx); ++ return promise.setSuccess(); ++ } ++ ++ @AfterEach ++ public void teardown() { ++ // Close and release any buffered frames. ++ encoder.close(); ++ ++ // Notify all goAway ChannelPromise instances now as these will also release the retained ByteBuf for the ++ // debugData. ++ for (;;) { ++ ChannelPromise promise = goAwayPromises.poll(); ++ if (promise == null) { ++ break; ++ } ++ promise.setSuccess(); ++ } ++ } ++ ++ @ParameterizedTest ++ @EnumSource(Http2Error.class) ++ public void testLimitRst(Http2Error error) { ++ assertTrue(encoder.writeRstStream(ctx, 1, error.code(), newPromise()).isSuccess()); ++ assertTrue(encoder.writeRstStream(ctx, 1, error.code(), newPromise()).isSuccess()); ++ verifyFlushAndClose(0, false); ++ assertTrue(encoder.writeRstStream(ctx, 1, error.code(), newPromise()).isSuccess()); ++ if (error == CANCEL || error == NO_ERROR) { ++ // CANCEL and NO_ERROR are ignored as these will not be caused by a stream error. 
++ verifyFlushAndClose(0, false); ++ } else { ++ verifyFlushAndClose(1, true); ++ } ++ } ++ ++ @ParameterizedTest ++ @EnumSource(Http2Error.class) ++ public void testLimitRstReset(Http2Error error) throws Exception { ++ assertTrue(encoder.writeRstStream(ctx, 1, error.code(), newPromise()).isSuccess()); ++ assertTrue(encoder.writeRstStream(ctx, 1, error.code(), newPromise()).isSuccess()); ++ verifyFlushAndClose(0, false); ++ Thread.sleep(1000); ++ assertTrue(encoder.writeRstStream(ctx, 1, error.code(), newPromise()).isSuccess()); ++ verifyFlushAndClose(0, false); ++ } ++ ++ private void verifyFlushAndClose(int invocations, boolean failed) { ++ verify(ctx, atLeast(invocations)).flush(); ++ verify(ctx, times(invocations)).close(); ++ if (failed) { ++ verify(writer, times(1)).writeGoAway(eq(ctx), eq(Integer.MAX_VALUE), eq(ENHANCE_YOUR_CALM.code()), ++ any(ByteBuf.class), any(ChannelPromise.class)); ++ } ++ } ++ ++ private ChannelPromise newPromise() { ++ return new DefaultChannelPromise(channel, ImmediateEventExecutor.INSTANCE); ++ } ++} diff -Nru netty-4.1.48/debian/patches/CVE-2025-55163_before-1.patch netty-4.1.48/debian/patches/CVE-2025-55163_before-1.patch --- netty-4.1.48/debian/patches/CVE-2025-55163_before-1.patch 1970-01-01 00:00:00.000000000 +0000 +++ netty-4.1.48/debian/patches/CVE-2025-55163_before-1.patch 2026-02-09 10:23:38.000000000 +0000 @@ -0,0 +1,91 @@ +commit 9b80d081ff3478c46152b012ae0e21f939467ac3 +Author: Norman Maurer +Date: Sat Oct 28 20:49:44 2023 +0200 + +Only enable the RST limit for servers by default (#13671) + +Motivation: + +We dont need to limit the number of RST frames per connection when we +are bulding a codec for the client side. + +Modifications: + +Dont limit the numbers of RST frames per connection when building a +codec for the client side. 
+ +Result: + +Only add limit where needed + +origin: backport, https://github.com/netty/netty/commit/9b80d081ff3478c46152b012ae0e21f939467ac3 +bug-github-pull: https://github.com/netty/netty/pull/13671 + +--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java ++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java +@@ -78,6 +78,8 @@ + + private static final SensitivityDetector DEFAULT_HEADER_SENSITIVITY_DETECTOR = Http2HeadersEncoder.NEVER_SENSITIVE; + ++ private static final int DEFAULT_MAX_RST_FRAMES_PER_CONNECTION_FOR_SERVER = 200; ++ + // The properties that can always be set. + private Http2Settings initialSettings = Http2Settings.defaultSettings(); + private Http2FrameListener frameListener; +@@ -109,7 +111,7 @@ + private boolean autoAckPingFrame = true; + private int maxQueuedControlFrames = Http2CodecUtil.DEFAULT_MAX_QUEUED_CONTROL_FRAMES; + private int maxConsecutiveEmptyFrames = 2; +- private int maxRstFramesPerWindow = 200; ++ private Integer maxRstFramesPerWindow; + private int secondsPerWindow = 30; + + /** +@@ -562,8 +564,19 @@ + if (maxConsecutiveEmptyDataFrames > 0) { + decoder = new Http2EmptyDataFrameConnectionDecoder(decoder, maxConsecutiveEmptyDataFrames); + } +- if (maxRstFramesPerWindow > 0 && secondsPerWindow > 0) { +- decoder = new Http2MaxRstFrameDecoder(decoder, maxRstFramesPerWindow, secondsPerWindow); ++ final int maxRstFrames; ++ if (maxRstFramesPerWindow == null) { ++ // Only enable by default on the server. 
++ if (isServer()) { ++ maxRstFrames = DEFAULT_MAX_RST_FRAMES_PER_CONNECTION_FOR_SERVER; ++ } else { ++ maxRstFrames = 0; ++ } ++ } else { ++ maxRstFrames = maxRstFramesPerWindow; ++ } ++ if (maxRstFrames > 0 && secondsPerWindow > 0) { ++ decoder = new Http2MaxRstFrameDecoder(decoder, maxRstFrames, secondsPerWindow); + } + final T handler; + try { +--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodecBuilder.java ++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodecBuilder.java +@@ -179,8 +179,8 @@ + + @Override + public Http2FrameCodecBuilder decoderEnforceMaxRstFramesPerWindow( +- int maxConsecutiveEmptyFrames, int secondsPerWindow) { +- return super.decoderEnforceMaxRstFramesPerWindow(maxConsecutiveEmptyFrames, secondsPerWindow); ++ int maxRstFramesPerWindow, int secondsPerWindow) { ++ return super.decoderEnforceMaxRstFramesPerWindow(maxRstFramesPerWindow, secondsPerWindow); + } + + /** +--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MultiplexCodecBuilder.java ++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MultiplexCodecBuilder.java +@@ -208,8 +208,8 @@ + + @Override + public Http2MultiplexCodecBuilder decoderEnforceMaxRstFramesPerWindow( +- int maxConsecutiveEmptyFrames, int secondsPerWindow) { +- return super.decoderEnforceMaxRstFramesPerWindow(maxConsecutiveEmptyFrames, secondsPerWindow); ++ int maxRstFramesPerWindow, int secondsPerWindow) { ++ return super.decoderEnforceMaxRstFramesPerWindow(maxRstFramesPerWindow, secondsPerWindow); + } + + @Override diff -Nru netty-4.1.48/debian/patches/CVE-2025-58056.patch netty-4.1.48/debian/patches/CVE-2025-58056.patch --- netty-4.1.48/debian/patches/CVE-2025-58056.patch 1970-01-01 00:00:00.000000000 +0000 +++ netty-4.1.48/debian/patches/CVE-2025-58056.patch 2026-02-09 10:23:38.000000000 +0000 @@ -0,0 +1,724 @@ +From 39d3ecf8f0c57a7469ba927b2163d4cb4314b138 Mon Sep 17 00:00:00 2001 +From: Chris Vest +Date: Tue, 2 Sep 2025 23:25:09 -0700 
+Subject: [PATCH] Merge commit from fork + +* Prevent HTTP request/response smuggling via chunk encoding + +Motivation: +Transfer-Encoding: chunked has some strict rules around parsing CR LF delimiters. +If we are too lenient, it can cause request/response smuggling issues when combined with proxies that are lenient in different ways. +See https://w4ke.info/2025/06/18/funky-chunks.html for the details. + +Modification: +- Make sure that we reject chunks with chunk-extensions that contain lone Line Feed octets without their preceding Carriage Return octet. +- Make sure that we issue HttpContent objects with decoding failures, if we decode a chunk and it isn't immediately followed by a CR LF octet pair. + +Result: +Smuggling requests/responses is no longer possible. + +Fixes https://github.com/netty/netty/issues/15522 + +* Enforce CR LF line separators for HTTP messages by default + +But also make it configurable through `HttpDecoderConfig`, and add a system property opt-out to change the default back. + +* Remove property for the name of the strict line parsing property + +* Remove HeaderParser.parse overload that only takes a buffer argument + +Origin: backport, https://github.com/netty/netty/commit/39d3ecf8f0c57a7469ba927b2163d4cb4314b138 + +[Ubuntu note: This patch uses a new constructor to configure strict line +parsing (since HttpDecoderConfig.java does not exist), and uses a new field to +pass strictCRLFCheck from HeaderParser.parse() to HeaderParser.process(). 
+-- Edwin Jiang rIdx) { +- byte next = buffer.getByte(rIdx++); +- if (next == HttpConstants.LF) { ++ if (buffer.readableBytes() >= 2) { ++ int rIdx = buffer.readerIndex(); ++ if (buffer.getByte(rIdx) == HttpConstants.CR && ++ buffer.getByte(rIdx + 1) == HttpConstants.LF) { ++ buffer.skipBytes(2); + currentState = State.READ_CHUNK_SIZE; +- break; ++ } else { ++ out.add(invalidChunk(buffer, new InvalidChunkTerminationException())); + } + } +- buffer.readerIndex(rIdx); + return; + } + case READ_CHUNK_FOOTER: try { +@@ -560,7 +588,7 @@ + final HttpMessage message = this.message; + final HttpHeaders headers = message.headers(); + +- AppendableCharSequence line = headerParser.parse(buffer); ++ AppendableCharSequence line = headerParser.parse(buffer, defaultStrictCRLFCheck); + if (line == null) { + return null; + } +@@ -580,7 +608,7 @@ + splitHeader(line); + } + +- line = headerParser.parse(buffer); ++ line = headerParser.parse(buffer, defaultStrictCRLFCheck); + if (line == null) { + return null; + } +@@ -661,7 +689,7 @@ + } + + private LastHttpContent readTrailingHeaders(ByteBuf buffer) { +- AppendableCharSequence line = headerParser.parse(buffer); ++ AppendableCharSequence line = headerParser.parse(buffer, defaultStrictCRLFCheck); + if (line == null) { + return null; + } +@@ -701,7 +729,7 @@ + name = null; + value = null; + } +- line = headerParser.parse(buffer); ++ line = headerParser.parse(buffer, defaultStrictCRLFCheck); + if (line == null) { + return null; + } +@@ -865,14 +893,19 @@ + private final int maxLength; + private int size; + ++ private Runnable strictCRLFCheck; ++ + HeaderParser(AppendableCharSequence seq, int maxLength) { + this.seq = seq; + this.maxLength = maxLength; + } + +- public AppendableCharSequence parse(ByteBuf buffer) { ++ public AppendableCharSequence parse(ByteBuf buffer, Runnable strictCRLFCheck) { + final int oldSize = size; + seq.reset(); ++ ++ this.strictCRLFCheck = strictCRLFCheck; ++ + int i = buffer.forEachByte(this); + if (i == -1) { 
+ size = oldSize; +@@ -895,6 +928,10 @@ + if (len >= 1 && seq.charAtUnsafe(len - 1) == HttpConstants.CR) { + -- size; + seq.setLength(len - 1); ++ } else { ++ if (strictCRLFCheck != null) { ++ strictCRLFCheck.run(); ++ } + } + return false; + } +@@ -927,9 +964,9 @@ + } + + @Override +- public AppendableCharSequence parse(ByteBuf buffer) { ++ public AppendableCharSequence parse(ByteBuf buffer, Runnable strictCRLFCheck) { + reset(); +- return super.parse(buffer); ++ return super.parse(buffer, strictCRLFCheck); + } + + @Override +--- /dev/null ++++ b/codec-http/src/main/java/io/netty/handler/codec/http/InvalidChunkExtensionException.java +@@ -0,0 +1,45 @@ ++/* ++ * Copyright 2025 The Netty Project ++ * ++ * The Netty Project licenses this file to you under the Apache License, ++ * version 2.0 (the "License"); you may not use this file except in compliance ++ * with the License. You may obtain a copy of the License at: ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++ * License for the specific language governing permissions and limitations ++ * under the License. ++ */ ++package io.netty.handler.codec.http; ++ ++import io.netty.handler.codec.CorruptedFrameException; ++ ++/** ++ * Thrown when HTTP chunk extensions could not be parsed, typically due to incorrect use of CR LF delimiters. ++ *

++ * RFC 9112 ++ * specifies that chunk header lines must be terminated in a CR LF pair, ++ * and that a lone LF octet is not allowed within the chunk header line. ++ */ ++public final class InvalidChunkExtensionException extends CorruptedFrameException { ++ private static final long serialVersionUID = 536224937231200736L; ++ ++ public InvalidChunkExtensionException() { ++ super("Line Feed must be preceded by Carriage Return when terminating HTTP chunk header lines"); ++ } ++ ++ public InvalidChunkExtensionException(String message, Throwable cause) { ++ super(message, cause); ++ } ++ ++ public InvalidChunkExtensionException(String message) { ++ super(message); ++ } ++ ++ public InvalidChunkExtensionException(Throwable cause) { ++ super(cause); ++ } ++} +--- /dev/null ++++ b/codec-http/src/main/java/io/netty/handler/codec/http/InvalidChunkTerminationException.java +@@ -0,0 +1,45 @@ ++/* ++ * Copyright 2025 The Netty Project ++ * ++ * The Netty Project licenses this file to you under the Apache License, ++ * version 2.0 (the "License"); you may not use this file except in compliance ++ * with the License. You may obtain a copy of the License at: ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++ * License for the specific language governing permissions and limitations ++ * under the License. ++ */ ++package io.netty.handler.codec.http; ++ ++import io.netty.handler.codec.CorruptedFrameException; ++ ++/** ++ * Thrown when HTTP chunks could not be parsed, typically due to incorrect use of CR LF delimiters. ++ *

++ * RFC 9112 ++ * specifies that chunk bodies must be terminated in a CR LF pair, ++ * and that the delimiter must follow the given chunk-size number of octets in chunk-data. ++ */ ++public final class InvalidChunkTerminationException extends CorruptedFrameException { ++ private static final long serialVersionUID = 536224937231200736L; ++ ++ public InvalidChunkTerminationException() { ++ super("Chunk data sections must be terminated by a CR LF octet pair"); ++ } ++ ++ public InvalidChunkTerminationException(String message, Throwable cause) { ++ super(message, cause); ++ } ++ ++ public InvalidChunkTerminationException(String message) { ++ super(message); ++ } ++ ++ public InvalidChunkTerminationException(Throwable cause) { ++ super(cause); ++ } ++} +--- /dev/null ++++ b/codec-http/src/main/java/io/netty/handler/codec/http/InvalidLineSeparatorException.java +@@ -0,0 +1,48 @@ ++/* ++ * Copyright 2025 The Netty Project ++ * ++ * The Netty Project licenses this file to you under the Apache License, ++ * version 2.0 (the "License"); you may not use this file except in compliance ++ * with the License. You may obtain a copy of the License at: ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++ * License for the specific language governing permissions and limitations ++ * under the License. ++ */ ++package io.netty.handler.codec.http; ++ ++import io.netty.handler.codec.DecoderException; ++ ++/** ++ * Thrown when {@linkplain HttpDecoderConfig#isStrictLineParsing() strict line parsing} is enabled, ++ * and HTTP start- and header field-lines are not seperated by CR LF octet pairs. ++ *

++ * Strict line parsing is enabled by default since Netty 4.1.124 and 4.2.4. ++ * This default can be overridden by setting the {@value HttpObjectDecoder#PROP_DEFAULT_STRICT_LINE_PARSING} system ++ * property to {@code false}. ++ *

++ * See RFC 9112 Section 2.1. ++ */ ++public final class InvalidLineSeparatorException extends DecoderException { ++ private static final long serialVersionUID = 536224937231200736L; ++ ++ public InvalidLineSeparatorException() { ++ super("Line Feed must be preceded by Carriage Return when terminating HTTP start- and header field-lines"); ++ } ++ ++ public InvalidLineSeparatorException(String message, Throwable cause) { ++ super(message, cause); ++ } ++ ++ public InvalidLineSeparatorException(String message) { ++ super(message); ++ } ++ ++ public InvalidLineSeparatorException(Throwable cause) { ++ super(cause); ++ } ++} +--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java ++++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java +@@ -19,6 +19,7 @@ + import io.netty.buffer.Unpooled; + import io.netty.channel.embedded.EmbeddedChannel; + import io.netty.handler.codec.TooLongFrameException; ++import io.netty.handler.codec.DecoderResult; + import io.netty.util.AsciiString; + import io.netty.util.CharsetUtil; + import org.junit.Test; +@@ -43,6 +44,12 @@ + private static final byte[] CONTENT_MIXED_DELIMITERS = createContent("\r\n", "\n"); + private static final int CONTENT_LENGTH = 8; + ++ private static final int DEFAULT_MAX_INITIAL_LINE_LENGTH = 4096; ++ private static final int DEFAULT_MAX_HEADER_SIZE = 8192; ++ private static final int DEFAULT_MAX_CHUNK_SIZE = 8192; ++ private static final boolean DEFAULT_VALIDATE_HEADERS = true; ++ private static final int DEFAULT_INITIAL_BUFFER_SIZE = 128; ++ + private static byte[] createContent(String... 
lineDelimiters) { + String lineDelimiter; + String lineDelimiter2; +@@ -80,18 +87,45 @@ + testDecodeWholeRequestAtOnce(CONTENT_MIXED_DELIMITERS); + } + ++ @Test ++ public void testDecodeWholeRequestAtOnceFailesWithLFDelimiters() { ++ testDecodeWholeRequestAtOnce(CONTENT_LF_DELIMITERS, DEFAULT_MAX_HEADER_SIZE, true, true); ++ } ++ ++ @Test ++ public void testDecodeWholeRequestAtOnceFailsWithMixedDelimiters() { ++ testDecodeWholeRequestAtOnce(CONTENT_MIXED_DELIMITERS, DEFAULT_MAX_HEADER_SIZE, true, true); ++ } ++ + private static void testDecodeWholeRequestAtOnce(byte[] content) { +- EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder()); ++ testDecodeWholeRequestAtOnce(content, DEFAULT_MAX_HEADER_SIZE, false, false); ++ } ++ ++ private static void testDecodeWholeRequestAtOnce(byte[] content, int maxHeaderSize, boolean strictLineParsing, ++ boolean expectFailure) { ++ EmbeddedChannel channel = ++ new EmbeddedChannel(new HttpRequestDecoder(DEFAULT_MAX_INITIAL_LINE_LENGTH, ++ maxHeaderSize, ++ DEFAULT_MAX_CHUNK_SIZE, ++ DEFAULT_VALIDATE_HEADERS, ++ DEFAULT_INITIAL_BUFFER_SIZE, ++ strictLineParsing)); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(content))); + HttpRequest req = channel.readInbound(); + assertNotNull(req); +- checkHeaders(req.headers()); +- LastHttpContent c = channel.readInbound(); +- assertEquals(CONTENT_LENGTH, c.content().readableBytes()); +- assertEquals( +- Unpooled.wrappedBuffer(content, content.length - CONTENT_LENGTH, CONTENT_LENGTH), +- c.content().readSlice(CONTENT_LENGTH)); +- c.release(); ++ if (expectFailure) { ++ assertTrue(req.decoderResult().isFailure()); ++ assertThat(req.decoderResult().cause(), instanceOf(InvalidLineSeparatorException.class)); ++ } else { ++ assertFalse(req.decoderResult().isFailure()); ++ checkHeaders(req.headers()); ++ LastHttpContent c = channel.readInbound(); ++ assertEquals(CONTENT_LENGTH, c.content().readableBytes()); ++ assertEquals( ++ Unpooled.wrappedBuffer(content, content.length - 
CONTENT_LENGTH, CONTENT_LENGTH), ++ c.content().readSlice(CONTENT_LENGTH)); ++ c.release(); ++ } + + assertFalse(channel.finish()); + assertNull(channel.readInbound()); +@@ -116,27 +150,45 @@ + + @Test + public void testDecodeWholeRequestInMultipleStepsCRLFDelimiters() { +- testDecodeWholeRequestInMultipleSteps(CONTENT_CRLF_DELIMITERS); ++ testDecodeWholeRequestInMultipleSteps(CONTENT_CRLF_DELIMITERS, true, false); + } + + @Test + public void testDecodeWholeRequestInMultipleStepsLFDelimiters() { +- testDecodeWholeRequestInMultipleSteps(CONTENT_LF_DELIMITERS); ++ testDecodeWholeRequestInMultipleSteps(CONTENT_LF_DELIMITERS, false, false); + } + + @Test + public void testDecodeWholeRequestInMultipleStepsMixedDelimiters() { +- testDecodeWholeRequestInMultipleSteps(CONTENT_MIXED_DELIMITERS); ++ testDecodeWholeRequestInMultipleSteps(CONTENT_MIXED_DELIMITERS, false, false); + } + +- private static void testDecodeWholeRequestInMultipleSteps(byte[] content) { ++ @Test ++ public void testDecodeWholeRequestInMultipleStepsFailsWithLFDelimiters() { ++ testDecodeWholeRequestInMultipleSteps(CONTENT_LF_DELIMITERS, true, true); ++ } ++ ++ @Test ++ public void testDecodeWholeRequestInMultipleStepsFailsWithMixedDelimiters() { ++ testDecodeWholeRequestInMultipleSteps(CONTENT_MIXED_DELIMITERS, true, true); ++ } ++ ++ private static void testDecodeWholeRequestInMultipleSteps( ++ byte[] content, boolean strictLineParsing, boolean expectFailure) { + for (int i = 1; i < content.length; i++) { +- testDecodeWholeRequestInMultipleSteps(content, i); ++ testDecodeWholeRequestInMultipleSteps(content, i, strictLineParsing, expectFailure); + } + } + +- private static void testDecodeWholeRequestInMultipleSteps(byte[] content, int fragmentSize) { +- EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder()); ++ private static void testDecodeWholeRequestInMultipleSteps( ++ byte[] content, int fragmentSize, boolean strictLineParsing, boolean expectFailure) { ++ EmbeddedChannel channel = 
++ new EmbeddedChannel(new HttpRequestDecoder(DEFAULT_MAX_INITIAL_LINE_LENGTH, ++ DEFAULT_MAX_HEADER_SIZE, ++ DEFAULT_MAX_CHUNK_SIZE, ++ DEFAULT_VALIDATE_HEADERS, ++ DEFAULT_INITIAL_BUFFER_SIZE, ++ strictLineParsing)); + int headerLength = content.length - CONTENT_LENGTH; + + // split up the header +@@ -158,6 +210,12 @@ + + HttpRequest req = channel.readInbound(); + assertNotNull(req); ++ if (expectFailure) { ++ assertTrue(req.decoderResult().isFailure()); ++ assertThat(req.decoderResult().cause(), instanceOf(InvalidLineSeparatorException.class)); ++ return; // No more messages will be produced. ++ } ++ assertFalse(req.decoderResult().isFailure()); + checkHeaders(req.headers()); + + for (int i = CONTENT_LENGTH; i > 1; i --) { +@@ -531,6 +589,66 @@ + } + + @Test ++ public void mustRejectImproperlyTerminatedChunkExtensions() throws Exception { ++ // See full explanation: https://w4ke.info/2025/06/18/funky-chunks.html ++ String requestStr = "GET /one HTTP/1.1\r\n" + ++ "Host: localhost\r\n" + ++ "Transfer-Encoding: chunked\r\n\r\n" + ++ "2;\n" + // Chunk size followed by illegal single newline (not preceded by carraige return) ++ "xx\r\n" + ++ "45\r\n" + ++ "0\r\n\r\n" + ++ "GET /two HTTP/1.1\r\n" + ++ "Host: localhost\r\n" + ++ "Transfer-Encoding: chunked\r\n\r\n" + ++ "0\r\n\r\n"; ++ EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder()); ++ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); ++ HttpRequest request = channel.readInbound(); ++ assertFalse(request.decoderResult().isFailure()); // We parse the headers just fine. ++ assertTrue(request.headers().names().contains("Transfer-Encoding")); ++ assertTrue(request.headers().contains("Transfer-Encoding", "chunked", false)); ++ HttpContent content = channel.readInbound(); ++ DecoderResult decoderResult = content.decoderResult(); ++ assertTrue(decoderResult.isFailure()); // But parsing the chunk must fail. 
++ assertThat(decoderResult.cause(), instanceOf(InvalidChunkExtensionException.class)); ++ content.release(); ++ assertFalse(channel.finish()); ++ } ++ ++ @Test ++ public void mustRejectImproperlyTerminatedChunkBodies() throws Exception { ++ // See full explanation: https://w4ke.info/2025/06/18/funky-chunks.html ++ String requestStr = "GET /one HTTP/1.1\r\n" + ++ "Host: localhost\r\n" + ++ "Transfer-Encoding: chunked\r\n\r\n" + ++ "5\r\n" + ++ "AAAAAXX" + // Chunk body contains extra (XX) bytes, and no CRLF terminator. ++ "45\r\n" + ++ "0\r\n" + ++ "GET /two HTTP/1.1\r\n" + ++ "Host: localhost\r\n" + ++ "Transfer-Encoding: chunked\r\n\r\n" + ++ "0\r\n\r\n"; ++ EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder()); ++ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); ++ HttpRequest request = channel.readInbound(); ++ assertFalse(request.decoderResult().isFailure()); // We parse the headers just fine. ++ assertTrue(request.headers().names().contains("Transfer-Encoding")); ++ assertTrue(request.headers().contains("Transfer-Encoding", "chunked", false)); ++ HttpContent content = channel.readInbound(); ++ assertFalse(content.decoderResult().isFailure()); // We parse the content promised by the chunk length. ++ content.release(); ++ ++ content = channel.readInbound(); ++ DecoderResult decoderResult = content.decoderResult(); ++ assertTrue(decoderResult.isFailure()); // But then parsing the chunk delimiter must fail. 
++ assertThat(decoderResult.cause(), instanceOf(InvalidChunkTerminationException.class)); ++ content.release(); ++ assertFalse(channel.finish()); ++ } ++ ++ @Test + public void testContentLengthHeaderAndChunked() { + String requestStr = "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + +--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java ++++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java +@@ -18,6 +18,7 @@ + import io.netty.buffer.ByteBuf; + import io.netty.buffer.Unpooled; + import io.netty.channel.embedded.EmbeddedChannel; ++import io.netty.handler.codec.DecoderResult; + import io.netty.handler.codec.PrematureChannelClosureException; + import io.netty.handler.codec.TooLongFrameException; + import io.netty.util.CharsetUtil; +@@ -672,6 +673,63 @@ + } + + @Test ++ public void mustRejectImproperlyTerminatedChunkExtensions() throws Exception { ++ // See full explanation: https://w4ke.info/2025/06/18/funky-chunks.html ++ String requestStr = "HTTP/1.1 200 OK\r\n" + ++ "Transfer-Encoding: chunked\r\n" + ++ "\r\n" + ++ "2;\n" + // Chunk size followed by illegal single newline (not preceded by carraige return) ++ "xx\r\n" + ++ "1D\r\n" + ++ "0\r\n\r\n" + ++ "HTTP/1.1 200 OK\r\n" + ++ "Transfer-Encoding: chunked\r\n\r\n" + ++ "0\r\n\r\n"; ++ EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder()); ++ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); ++ HttpResponse response = channel.readInbound(); ++ assertFalse(response.decoderResult().isFailure()); // We parse the headers just fine. ++ assertTrue(response.headers().names().contains("Transfer-Encoding")); ++ assertTrue(response.headers().contains("Transfer-Encoding", "chunked", false)); ++ HttpContent content = channel.readInbound(); ++ DecoderResult decoderResult = content.decoderResult(); ++ assertTrue(decoderResult.isFailure()); // But parsing the chunk must fail. 
++ assertThat(decoderResult.cause(), instanceOf(InvalidChunkExtensionException.class)); ++ content.release(); ++ assertFalse(channel.finish()); ++ } ++ ++ @Test ++ public void mustRejectImproperlyTerminatedChunkBodies() throws Exception { ++ // See full explanation: https://w4ke.info/2025/06/18/funky-chunks.html ++ String requestStr = "HTTP/1.1 200 OK\r\n" + ++ "Transfer-Encoding: chunked\r\n\r\n" + ++ "5\r\n" + ++ "AAAAXX" + // Chunk body contains extra (XX) bytes, and no CRLF terminator. ++ "1D\r\n" + ++ "0\r\n" + ++ "HTTP/1.1 200 OK\r\n" + ++ "Transfer-Encoding: chunked\r\n\r\n" + ++ "0\r\n\r\n"; ++ EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder()); ++ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); ++ HttpResponse response = channel.readInbound(); ++ assertFalse(response.decoderResult().isFailure()); // We parse the headers just fine. ++ assertTrue(response.headers().names().contains("Transfer-Encoding")); ++ assertTrue(response.headers().contains("Transfer-Encoding", "chunked", false)); ++ HttpContent content = channel.readInbound(); ++ assertFalse(content.decoderResult().isFailure()); // We parse the content promised by the chunk length. ++ content.release(); ++ ++ content = channel.readInbound(); ++ DecoderResult decoderResult = content.decoderResult(); ++ assertTrue(decoderResult.isFailure()); // But then parsing the chunk delimiter must fail. 
++ assertThat(decoderResult.cause(), instanceOf(InvalidChunkTerminationException.class)); ++ content.release(); ++ assertFalse(channel.finish()); ++ } ++ ++ @Test + public void testConnectionClosedBeforeHeadersReceived() { + EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder()); + String responseInitialLine = +@@ -718,7 +776,7 @@ + EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder()); + String requestStr = "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding : chunked\r\n" + +- "Host: netty.io\n\r\n"; ++ "Host: netty.io\r\n\r\n"; + + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); + HttpResponse response = channel.readInbound(); +@@ -787,7 +845,7 @@ + testHeaderNameEndsWithControlChar(0x0c); + } + +- private void testHeaderNameEndsWithControlChar(int controlChar) { ++ private static void testHeaderNameEndsWithControlChar(int controlChar) { + ByteBuf responseBuffer = Unpooled.buffer(); + responseBuffer.writeCharSequence("HTTP/1.1 200 OK\r\n" + + "Host: netty.io\r\n", CharsetUtil.US_ASCII); +--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestDecoder.java ++++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestDecoder.java +@@ -81,6 +81,15 @@ + super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true, validateHeaders, initialBufferSize); + } + ++ public HttpRequestDecoder( ++ int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders, ++ int initialBufferSize, boolean strictLineParsing) { ++ super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true, ++ validateHeaders, initialBufferSize, ++ HttpObjectDecoder.DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS, ++ strictLineParsing); ++ } ++ + @Override + protected HttpMessage createMessage(String[] initialLine) throws Exception { + return new DefaultHttpRequest( +--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseDecoder.java ++++ 
b/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseDecoder.java +@@ -112,6 +112,15 @@ + super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true, validateHeaders, initialBufferSize); + } + ++ public HttpResponseDecoder( ++ int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders, ++ int initialBufferSize, boolean strictLineParsing) { ++ super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true, ++ validateHeaders, initialBufferSize, ++ HttpObjectDecoder.DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS, ++ strictLineParsing); ++ } ++ + @Override + protected HttpMessage createMessage(String[] initialLine) { + return new DefaultHttpResponse( diff -Nru netty-4.1.48/debian/patches/CVE-2025-58057.patch netty-4.1.48/debian/patches/CVE-2025-58057.patch --- netty-4.1.48/debian/patches/CVE-2025-58057.patch 1970-01-01 00:00:00.000000000 +0000 +++ netty-4.1.48/debian/patches/CVE-2025-58057.patch 2026-02-09 10:23:38.000000000 +0000 @@ -0,0 +1,930 @@ +From: Norman Maurer +Date: Wed, 3 Sep 2025 10:35:05 +0200 +Subject: [PATCH] Merge commit from fork (#15612) + +Motivation: + +We should ensure our decompressing decoders will fire their buffers +through the pipeliner as fast as possible and so allow the user to take +ownership of these as fast as possible. This is needed to reduce the +risk of OOME as otherwise a small input might produce a large amount of +data that can't be processed until all the data was decompressed in a +loop. Beside this we also should ensure that other handlers that uses +these decompressors will not buffer all of the produced data before +processing it, which was true for HTTP and HTTP2. + +Modifications: + +- Adjust affected decoders (Brotli, Zstd and ZLib) to fire buffers +through the pipeline as soon as possible +- Adjust HTTP / HTTP2 decompressors to do the same +- Add testcase. 
+ +Result: + +Less risk of OOME when doing decompressing + +Co-authored-by: yawkat + +origin: backport, https://github.com/netty/netty/commit/34894ac73b02efefeacd9c0972780b32dc3de04f +--- + .../handler/codec/http/HttpContentDecoder.java | 239 +++++++++++---------- + .../codec/http/HttpContentDecompressorTest.java | 88 ++++++++ + .../http2/DelegatingDecompressorFrameListener.java | 177 +++++++-------- + .../handler/codec/compression/JZlibDecoder.java | 32 ++- + .../handler/codec/compression/JdkZlibDecoder.java | 45 ++-- + .../codec/compression/AbstractIntegrationTest.java | 62 ++++++ + 6 files changed, 416 insertions(+), 227 deletions(-) + +diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java +index d2513e4..3c43900 100644 +--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java ++++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java +@@ -17,6 +17,7 @@ package io.netty.handler.codec.http; + + import io.netty.buffer.ByteBuf; + import io.netty.channel.ChannelHandlerContext; ++import io.netty.channel.ChannelInboundHandlerAdapter; + import io.netty.channel.embedded.EmbeddedChannel; + import io.netty.handler.codec.CodecException; + import io.netty.handler.codec.DecoderResult; +@@ -52,125 +53,136 @@ public abstract class HttpContentDecoder extends MessageToMessageDecoder out) throws Exception { +- try { +- if (msg instanceof HttpResponse && ((HttpResponse) msg).status().code() == 100) { ++ needRead = true; ++ if (msg instanceof HttpResponse && ((HttpResponse) msg).status().code() == 100) { + +- if (!(msg instanceof LastHttpContent)) { +- continueResponse = true; +- } +- // 100-continue response must be passed through. +- out.add(ReferenceCountUtil.retain(msg)); +- return; ++ if (!(msg instanceof LastHttpContent)) { ++ continueResponse = true; + } ++ // 100-continue response must be passed through. 
++ needRead = false; ++ ctx.fireChannelRead(ReferenceCountUtil.retain(msg)); ++ return; ++ } + +- if (continueResponse) { +- if (msg instanceof LastHttpContent) { +- continueResponse = false; +- } +- // 100-continue response must be passed through. +- out.add(ReferenceCountUtil.retain(msg)); +- return; ++ if (continueResponse) { ++ if (msg instanceof LastHttpContent) { ++ continueResponse = false; + } ++ needRead = false; ++ ctx.fireChannelRead(ReferenceCountUtil.retain(msg)); ++ return; ++ } + +- if (msg instanceof HttpMessage) { +- cleanup(); +- final HttpMessage message = (HttpMessage) msg; +- final HttpHeaders headers = message.headers(); ++ if (msg instanceof HttpMessage) { ++ cleanup(); ++ final HttpMessage message = (HttpMessage) msg; ++ final HttpHeaders headers = message.headers(); + +- // Determine the content encoding. +- String contentEncoding = headers.get(HttpHeaderNames.CONTENT_ENCODING); +- if (contentEncoding != null) { +- contentEncoding = contentEncoding.trim(); ++ // Determine the content encoding. ++ String contentEncoding = headers.get(HttpHeaderNames.CONTENT_ENCODING); ++ if (contentEncoding != null) { ++ contentEncoding = contentEncoding.trim(); ++ } else { ++ String transferEncoding = headers.get(HttpHeaderNames.TRANSFER_ENCODING); ++ if (transferEncoding != null) { ++ int idx = transferEncoding.indexOf(","); ++ if (idx != -1) { ++ contentEncoding = transferEncoding.substring(0, idx).trim(); ++ } else { ++ contentEncoding = transferEncoding.trim(); ++ } + } else { + contentEncoding = IDENTITY; + } +- decoder = newContentDecoder(contentEncoding); +- +- if (decoder == null) { +- if (message instanceof HttpContent) { +- ((HttpContent) message).retain(); +- } +- out.add(message); +- return; +- } ++ } ++ decoder = newContentDecoder(contentEncoding); + +- // Remove content-length header: +- // the correct value can be set only after all chunks are processed/decoded. 
+- // If buffering is not an issue, add HttpObjectAggregator down the chain, it will set the header. +- // Otherwise, rely on LastHttpContent message. +- if (headers.contains(HttpHeaderNames.CONTENT_LENGTH)) { +- headers.remove(HttpHeaderNames.CONTENT_LENGTH); +- headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); ++ if (decoder == null) { ++ if (message instanceof HttpContent) { ++ ((HttpContent) message).retain(); + } +- // Either it is already chunked or EOF terminated. +- // See https://github.com/netty/netty/issues/5892 ++ needRead = false; ++ ctx.fireChannelRead(message); ++ return; ++ } ++ decoder.pipeline().addLast(forwarder); + +- // set new content encoding, +- CharSequence targetContentEncoding = getTargetContentEncoding(contentEncoding); +- if (HttpHeaderValues.IDENTITY.contentEquals(targetContentEncoding)) { +- // Do NOT set the 'Content-Encoding' header if the target encoding is 'identity' +- // as per: http://tools.ietf.org/html/rfc2616#section-14.11 +- headers.remove(HttpHeaderNames.CONTENT_ENCODING); +- } else { +- headers.set(HttpHeaderNames.CONTENT_ENCODING, targetContentEncoding); +- } ++ // Remove content-length header: ++ // the correct value can be set only after all chunks are processed/decoded. ++ // If buffering is not an issue, add HttpObjectAggregator down the chain, it will set the header. ++ // Otherwise, rely on LastHttpContent message. ++ if (headers.contains(HttpHeaderNames.CONTENT_LENGTH)) { ++ headers.remove(HttpHeaderNames.CONTENT_LENGTH); ++ headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); ++ } ++ // Either it is already chunked or EOF terminated. ++ // See https://github.com/netty/netty/issues/5892 + +- if (message instanceof HttpContent) { +- // If message is a full request or response object (headers + data), don't copy data part into out. +- // Output headers only; data part will be decoded below. 
+- // Note: "copy" object must not be an instance of LastHttpContent class, +- // as this would (erroneously) indicate the end of the HttpMessage to other handlers. +- HttpMessage copy; +- if (message instanceof HttpRequest) { +- HttpRequest r = (HttpRequest) message; // HttpRequest or FullHttpRequest +- copy = new DefaultHttpRequest(r.protocolVersion(), r.method(), r.uri()); +- } else if (message instanceof HttpResponse) { +- HttpResponse r = (HttpResponse) message; // HttpResponse or FullHttpResponse +- copy = new DefaultHttpResponse(r.protocolVersion(), r.status()); +- } else { +- throw new CodecException("Object of class " + message.getClass().getName() + +- " is not an HttpRequest or HttpResponse"); +- } +- copy.headers().set(message.headers()); +- copy.setDecoderResult(message.decoderResult()); +- out.add(copy); +- } else { +- out.add(message); +- } ++ // set new content encoding, ++ CharSequence targetContentEncoding = getTargetContentEncoding(contentEncoding); ++ if (HttpHeaderValues.IDENTITY.contentEquals(targetContentEncoding)) { ++ // Do NOT set the 'Content-Encoding' header if the target encoding is 'identity' ++ // as per: https://tools.ietf.org/html/rfc2616#section-14.11 ++ headers.remove(HttpHeaderNames.CONTENT_ENCODING); ++ } else { ++ headers.set(HttpHeaderNames.CONTENT_ENCODING, targetContentEncoding); + } + +- if (msg instanceof HttpContent) { +- final HttpContent c = (HttpContent) msg; +- if (decoder == null) { +- out.add(c.retain()); ++ if (message instanceof HttpContent) { ++ // If message is a full request or response object (headers + data), don't copy data part into out. ++ // Output headers only; data part will be decoded below. ++ // Note: "copy" object must not be an instance of LastHttpContent class, ++ // as this would (erroneously) indicate the end of the HttpMessage to other handlers. 
++ HttpMessage copy; ++ if (message instanceof HttpRequest) { ++ HttpRequest r = (HttpRequest) message; // HttpRequest or FullHttpRequest ++ copy = new DefaultHttpRequest(r.protocolVersion(), r.method(), r.uri()); ++ } else if (message instanceof HttpResponse) { ++ HttpResponse r = (HttpResponse) message; // HttpResponse or FullHttpResponse ++ copy = new DefaultHttpResponse(r.protocolVersion(), r.status()); + } else { +- decodeContent(c, out); ++ throw new CodecException("Object of class " + message.getClass().getName() + ++ " is not an HttpRequest or HttpResponse"); + } ++ copy.headers().set(message.headers()); ++ copy.setDecoderResult(message.decoderResult()); ++ needRead = false; ++ ctx.fireChannelRead(copy); ++ } else { ++ needRead = false; ++ ctx.fireChannelRead(message); + } +- } finally { +- needRead = out.isEmpty(); + } +- } +- +- private void decodeContent(HttpContent c, List out) { +- ByteBuf content = c.content(); +- +- decode(content, out); +- +- if (c instanceof LastHttpContent) { +- finishDecode(out); + +- LastHttpContent last = (LastHttpContent) c; +- // Generate an additional chunk if the decoder produced +- // the last product on closure, +- HttpHeaders headers = last.trailingHeaders(); +- if (headers.isEmpty()) { +- out.add(LastHttpContent.EMPTY_LAST_CONTENT); ++ if (msg instanceof HttpContent) { ++ final HttpContent c = (HttpContent) msg; ++ if (decoder == null) { ++ needRead = false; ++ ctx.fireChannelRead(c.retain()); + } else { +- out.add(new ComposedLastHttpContent(headers, DecoderResult.SUCCESS)); ++ // call retain here as it will call release after its written to the channel ++ decoder.writeInbound(c.content().retain()); ++ ++ if (c instanceof LastHttpContent) { ++ boolean notEmpty = decoder.finish(); ++ decoder = null; ++ assert !notEmpty; ++ LastHttpContent last = (LastHttpContent) c; ++ // Generate an additional chunk if the decoder produced ++ // the last product on closure, ++ HttpHeaders headers = last.trailingHeaders(); ++ needRead = 
false; ++ if (headers.isEmpty()) { ++ ctx.fireChannelRead(LastHttpContent.EMPTY_LAST_CONTENT); ++ } else { ++ ctx.fireChannelRead(new ComposedLastHttpContent(headers, DecoderResult.SUCCESS)); ++ } ++ } + } + } + } +@@ -228,6 +240,7 @@ public abstract class HttpContentDecoder extends MessageToMessageDecoder out) { +- // call retain here as it will call release after its written to the channel +- decoder.writeInbound(in.retain()); +- fetchDecoderOutput(out); +- } ++ private final class ByteBufForwarder extends ChannelInboundHandlerAdapter { ++ ++ private final ChannelHandlerContext targetCtx; + +- private void finishDecode(List out) { +- if (decoder.finish()) { +- fetchDecoderOutput(out); ++ ByteBufForwarder(ChannelHandlerContext targetCtx) { ++ this.targetCtx = targetCtx; + } +- decoder = null; +- } + +- private void fetchDecoderOutput(List out) { +- for (;;) { +- ByteBuf buf = decoder.readInbound(); +- if (buf == null) { +- break; +- } ++ @Override ++ public boolean isSharable() { ++ // We need to mark the handler as sharable as we will add it to every EmbeddedChannel that is ++ // generated. 
++ return true; ++ } ++ ++ @Override ++ public void channelRead(ChannelHandlerContext ctx, Object msg) { ++ ByteBuf buf = (ByteBuf) msg; + if (!buf.isReadable()) { + buf.release(); +- continue; ++ return; + } +- out.add(new DefaultHttpContent(buf)); ++ needRead = false; ++ targetCtx.fireChannelRead(new DefaultHttpContent(buf)); + } + } + } +diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java +index 4a659fa..f54e98d 100644 +--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java ++++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java +@@ -15,6 +15,8 @@ + */ + package io.netty.handler.codec.http; + ++import io.netty.buffer.ByteBuf; ++import io.netty.buffer.PooledByteBufAllocator; + import io.netty.buffer.Unpooled; + import io.netty.channel.ChannelHandlerContext; + import io.netty.channel.ChannelInboundHandlerAdapter; +@@ -23,6 +25,8 @@ + import org.junit.Assert; + import org.junit.Test; + ++import java.util.ArrayList; ++import java.util.List; + import java.util.concurrent.atomic.AtomicInteger; + + public class HttpContentDecompressorTest { +@@ -67,4 +71,92 @@ + Assert.assertEquals(2, readCalled.get()); + Assert.assertFalse(channel.finishAndReleaseAll()); + } ++ ++ @Test ++ public void testZipBombGzip() { ++ testZipBomb("gzip"); ++ } ++ ++ @Test ++ public void testZipBombDeflate() { ++ testZipBomb("deflate"); ++ } ++ ++ @Test ++ public void testZipBombSnappy() { ++ testZipBomb("snappy"); ++ } ++ ++ private static void testZipBomb(String encoding) { ++ int chunkSize = 1024 * 1024; ++ int numberOfChunks = 256; ++ int memoryLimit = chunkSize * 128; ++ ++ EmbeddedChannel compressionChannel = new EmbeddedChannel(new HttpContentCompressor()); ++ DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); ++ 
req.headers().set(HttpHeaderNames.ACCEPT_ENCODING, encoding); ++ compressionChannel.writeInbound(req); ++ ++ DefaultHttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); ++ response.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); ++ compressionChannel.writeOutbound(response); ++ ++ for (int i = 0; i < numberOfChunks; i++) { ++ ByteBuf buffer = compressionChannel.alloc().buffer(chunkSize); ++ buffer.writeZero(chunkSize); ++ compressionChannel.writeOutbound(new DefaultHttpContent(buffer)); ++ } ++ compressionChannel.writeOutbound(LastHttpContent.EMPTY_LAST_CONTENT); ++ compressionChannel.finish(); ++ compressionChannel.releaseInbound(); ++ ++ ByteBuf compressed = compressionChannel.alloc().buffer(); ++ HttpMessage message = null; ++ while (true) { ++ HttpObject obj = compressionChannel.readOutbound(); ++ if (obj == null) { ++ break; ++ } ++ if (obj instanceof HttpMessage) { ++ message = (HttpMessage) obj; ++ } ++ if (obj instanceof HttpContent) { ++ HttpContent content = (HttpContent) obj; ++ compressed.writeBytes(content.content()); ++ content.release(); ++ } ++ } ++ ++ PooledByteBufAllocator allocator = new PooledByteBufAllocator(false); ++ ++ ZipBombIncomingHandler incomingHandler = new ZipBombIncomingHandler(memoryLimit); ++ EmbeddedChannel decompressChannel = new EmbeddedChannel(new HttpContentDecompressor(), incomingHandler); ++ decompressChannel.config().setAllocator(allocator); ++ decompressChannel.writeInbound(message); ++ decompressChannel.writeInbound(new DefaultLastHttpContent(compressed)); ++ ++ Assert.assertEquals((long) chunkSize * numberOfChunks, incomingHandler.total); ++ } ++ ++ private static final class ZipBombIncomingHandler extends ChannelInboundHandlerAdapter { ++ final int memoryLimit; ++ long total; ++ ++ ZipBombIncomingHandler(int memoryLimit) { ++ this.memoryLimit = memoryLimit; ++ } ++ ++ @Override ++ public void channelRead(ChannelHandlerContext ctx, Object msg) { ++ 
PooledByteBufAllocator allocator = (PooledByteBufAllocator) ctx.alloc(); ++ Assert.assertTrue(allocator.metric().usedHeapMemory() < memoryLimit); ++ Assert.assertTrue(allocator.metric().usedDirectMemory() < memoryLimit); ++ ++ if (msg instanceof HttpContent) { ++ HttpContent buf = (HttpContent) msg; ++ total += buf.content().readableBytes(); ++ buf.release(); ++ } ++ } ++ } + } +diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java +index 6793f28..af15318 100644 +--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java ++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java +@@ -17,6 +17,7 @@ package io.netty.handler.codec.http2; + import io.netty.buffer.ByteBuf; + import io.netty.buffer.Unpooled; + import io.netty.channel.ChannelHandlerContext; ++import io.netty.channel.ChannelInboundHandlerAdapter; + import io.netty.channel.embedded.EmbeddedChannel; + import io.netty.handler.codec.ByteToMessageDecoder; + import io.netty.handler.codec.compression.ZlibCodecFactory; +@@ -63,7 +64,7 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor + public void onStreamRemoved(Http2Stream stream) { + final Http2Decompressor decompressor = decompressor(stream); + if (decompressor != null) { +- cleanup(decompressor); ++ decompressor.cleanup(); + } + } + }); +@@ -78,66 +79,7 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor + // The decompressor may be null if no compatible encoding type was found in this stream's headers + return listener.onDataRead(ctx, streamId, data, padding, endOfStream); + } +- +- final EmbeddedChannel channel = decompressor.decompressor(); +- final int compressedBytes = data.readableBytes() + padding; +- 
decompressor.incrementCompressedBytes(compressedBytes); +- try { +- // call retain here as it will call release after its written to the channel +- channel.writeInbound(data.retain()); +- ByteBuf buf = nextReadableBuf(channel); +- if (buf == null && endOfStream && channel.finish()) { +- buf = nextReadableBuf(channel); +- } +- if (buf == null) { +- if (endOfStream) { +- listener.onDataRead(ctx, streamId, Unpooled.EMPTY_BUFFER, padding, true); +- } +- // No new decompressed data was extracted from the compressed data. This means the application could +- // not be provided with data and thus could not return how many bytes were processed. We will assume +- // there is more data coming which will complete the decompression block. To allow for more data we +- // return all bytes to the flow control window (so the peer can send more data). +- decompressor.incrementDecompressedBytes(compressedBytes); +- return compressedBytes; +- } +- try { +- Http2LocalFlowController flowController = connection.local().flowController(); +- decompressor.incrementDecompressedBytes(padding); +- for (;;) { +- ByteBuf nextBuf = nextReadableBuf(channel); +- boolean decompressedEndOfStream = nextBuf == null && endOfStream; +- if (decompressedEndOfStream && channel.finish()) { +- nextBuf = nextReadableBuf(channel); +- decompressedEndOfStream = nextBuf == null; +- } +- +- decompressor.incrementDecompressedBytes(buf.readableBytes()); +- // Immediately return the bytes back to the flow controller. ConsumedBytesConverter will convert +- // from the decompressed amount which the user knows about to the compressed amount which flow +- // control knows about. +- flowController.consumeBytes(stream, +- listener.onDataRead(ctx, streamId, buf, padding, decompressedEndOfStream)); +- if (nextBuf == null) { +- break; +- } +- +- padding = 0; // Padding is only communicated once on the first iteration. 
+- buf.release(); +- buf = nextBuf; +- } +- // We consume bytes each time we call the listener to ensure if multiple frames are decompressed +- // that the bytes are accounted for immediately. Otherwise the user may see an inconsistent state of +- // flow control. +- return 0; +- } finally { +- buf.release(); +- } +- } catch (Http2Exception e) { +- throw e; +- } catch (Throwable t) { +- throw streamError(stream.id(), INTERNAL_ERROR, t, +- "Decompressor error detected while delegating data read on streamId %d", stream.id()); +- } ++ return decompressor.decompress(ctx, stream, data, padding, endOfStream); + } + + @Override +@@ -218,7 +160,7 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor + } + final EmbeddedChannel channel = newContentDecompressor(ctx, contentEncoding); + if (channel != null) { +- decompressor = new Http2Decompressor(channel); ++ decompressor = new Http2Decompressor(channel, connection, listener); + stream.setProperty(propertyKey, decompressor); + // Decode the content and remove or replace the existing headers + // so that the message looks like a decoded message. +@@ -250,36 +192,6 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor + return stream == null ? null : (Http2Decompressor) stream.getProperty(propertyKey); + } + +- /** +- * Release remaining content from the {@link EmbeddedChannel}. +- * +- * @param decompressor The decompressor for {@code stream} +- */ +- private static void cleanup(Http2Decompressor decompressor) { +- decompressor.decompressor().finishAndReleaseAll(); +- } +- +- /** +- * Read the next decompressed {@link ByteBuf} from the {@link EmbeddedChannel} +- * or {@code null} if one does not exist. 
+- * +- * @param decompressor The channel to read from +- * @return The next decoded {@link ByteBuf} from the {@link EmbeddedChannel} or {@code null} if one does not exist +- */ +- private static ByteBuf nextReadableBuf(EmbeddedChannel decompressor) { +- for (;;) { +- final ByteBuf buf = decompressor.readInbound(); +- if (buf == null) { +- return null; +- } +- if (!buf.isReadable()) { +- buf.release(); +- continue; +- } +- return buf; +- } +- } +- + /** + * A decorator around the local flow controller that converts consumed bytes from uncompressed to compressed. + */ +@@ -360,24 +272,93 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor + */ + private static final class Http2Decompressor { + private final EmbeddedChannel decompressor; ++ + private int compressed; + private int decompressed; ++ private Http2Stream stream; ++ private int padding; ++ private boolean dataDecompressed; ++ private ChannelHandlerContext targetCtx; + +- Http2Decompressor(EmbeddedChannel decompressor) { ++ Http2Decompressor(EmbeddedChannel decompressor, ++ final Http2Connection connection, final Http2FrameListener listener) { + this.decompressor = decompressor; ++ this.decompressor.pipeline().addLast(new ChannelInboundHandlerAdapter() { ++ @Override ++ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { ++ ByteBuf buf = (ByteBuf) msg; ++ if (!buf.isReadable()) { ++ buf.release(); ++ return; ++ } ++ incrementDecompressedBytes(buf.readableBytes()); ++ // Immediately return the bytes back to the flow controller. ConsumedBytesConverter will convert ++ // from the decompressed amount which the user knows about to the compressed amount which flow ++ // control knows about. ++ connection.local().flowController().consumeBytes(stream, ++ listener.onDataRead(targetCtx, stream.id(), buf, padding, false)); ++ padding = 0; // Padding is only communicated once on the first iteration. 
++ buf.release(); ++ ++ dataDecompressed = true; ++ } ++ ++ @Override ++ public void channelInactive(ChannelHandlerContext ctx) throws Exception { ++ listener.onDataRead(targetCtx, stream.id(), Unpooled.EMPTY_BUFFER, padding, true); ++ } ++ }); + } + + /** +- * Responsible for taking compressed bytes in and producing decompressed bytes. ++ * Release remaining content from the {@link EmbeddedChannel}. + */ +- EmbeddedChannel decompressor() { +- return decompressor; ++ void cleanup() { ++ decompressor.finishAndReleaseAll(); + } + ++ int decompress(ChannelHandlerContext ctx, Http2Stream stream, ByteBuf data, int padding, boolean endOfStream) ++ throws Http2Exception { ++ final int compressedBytes = data.readableBytes() + padding; ++ incrementCompressedBytes(compressedBytes); ++ try { ++ this.stream = stream; ++ this.padding = padding; ++ this.dataDecompressed = false; ++ this.targetCtx = ctx; ++ ++ // call retain here as it will call release after its written to the channel ++ decompressor.writeInbound(data.retain()); ++ if (endOfStream) { ++ decompressor.finish(); ++ ++ if (!dataDecompressed) { ++ // No new decompressed data was extracted from the compressed data. This means the application ++ // could not be provided with data and thus could not return how many bytes were processed. ++ // We will assume there is more data coming which will complete the decompression block. ++ // To allow for more data we return all bytes to the flow control window (so the peer can ++ // send more data). ++ incrementDecompressedBytes(compressedBytes); ++ return compressedBytes; ++ } ++ } ++ // We consume bytes each time we call the listener to ensure if multiple frames are decompressed ++ // that the bytes are accounted for immediately. Otherwise the user may see an inconsistent state of ++ // flow control. ++ return 0; ++ } catch (Throwable t) { ++ // Http2Exception might be thrown by writeInbound(...) or finish(). 
++ if (t instanceof Http2Exception) { ++ throw (Http2Exception) t; ++ } ++ throw streamError(stream.id(), INTERNAL_ERROR, t, ++ "Decompressor error detected while delegating data read on streamId %d", stream.id()); ++ } ++ } + /** + * Increment the number of bytes received prior to doing any decompression. + */ +- void incrementCompressedBytes(int delta) { ++ private void incrementCompressedBytes(int delta) { + assert delta >= 0; + compressed += delta; + } +@@ -385,7 +366,7 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor + /** + * Increment the number of bytes after the decompression process. + */ +- void incrementDecompressedBytes(int delta) { ++ private void incrementDecompressedBytes(int delta) { + assert delta >= 0; + decompressed += delta; + } +diff --git a/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java b/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java +index 6c65cd5..9bc684e 100644 +--- a/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java ++++ b/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java +@@ -28,6 +28,7 @@ public class JZlibDecoder extends ZlibDecoder { + + private final Inflater z = new Inflater(); + private byte[] dictionary; ++ private boolean needsRead; + private volatile boolean finished; + + /** +@@ -125,6 +126,7 @@ public class JZlibDecoder extends ZlibDecoder { + + @Override + protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { ++ needsRead = true; + if (finished) { + // Skip data received after finished. 
+ in.skipBytes(in.readableBytes()); +@@ -166,6 +168,14 @@ public class JZlibDecoder extends ZlibDecoder { + int outputLength = z.next_out_index - oldNextOutIndex; + if (outputLength > 0) { + decompressed.writerIndex(decompressed.writerIndex() + outputLength); ++ if (maxAllocation == 0) { ++ // If we don't limit the maximum allocations we should just ++ // forward the buffer directly. ++ ByteBuf buffer = decompressed; ++ decompressed = null; ++ needsRead = false; ++ ctx.fireChannelRead(buffer); ++ } + } + + switch (resultCode) { +@@ -196,10 +206,13 @@ public class JZlibDecoder extends ZlibDecoder { + } + } finally { + in.skipBytes(z.next_in_index - oldNextInIndex); +- if (decompressed.isReadable()) { +- out.add(decompressed); +- } else { +- decompressed.release(); ++ if (decompressed != null) { ++ if (decompressed.isReadable()) { ++ needsRead = false; ++ ctx.fireChannelRead(decompressed); ++ } else { ++ decompressed.release(); ++ } + } + } + } finally { +@@ -212,6 +225,17 @@ public class JZlibDecoder extends ZlibDecoder { + } + } + ++ @Override ++ public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { ++ // Discard bytes of the cumulation buffer if needed. 
++ discardSomeReadBytes(); ++ ++ if (needsRead && !ctx.channel().config().isAutoRead()) { ++ ctx.read(); ++ } ++ ctx.fireChannelReadComplete(); ++ } ++ + @Override + protected void decompressionBufferExhausted(ByteBuf buffer) { + finished = true; +diff --git a/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java b/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java +index 7e69422..426b84e 100644 +--- a/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java ++++ b/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java +@@ -57,6 +57,7 @@ public class JdkZlibDecoder extends ZlibDecoder { + private GzipState gzipState = GzipState.HEADER_START; + private int flags = -1; + private int xlen = -1; ++ private boolean needsRead; + + private volatile boolean finished; + +@@ -178,6 +179,7 @@ public class JdkZlibDecoder extends ZlibDecoder { + + @Override + protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { ++ needsRead = true; + if (finished) { + // Skip data received after finished. + in.skipBytes(in.readableBytes()); +@@ -239,14 +241,20 @@ public class JdkZlibDecoder extends ZlibDecoder { + if (crc != null) { + crc.update(outArray, outIndex, outputLength); + } +- } else { +- if (inflater.needsDictionary()) { +- if (dictionary == null) { +- throw new DecompressionException( +- "decompression failure, unable to set dictionary as non was specified"); +- } +- inflater.setDictionary(dictionary); ++ if (maxAllocation == 0) { ++ // If we don't limit the maximum allocations we should just ++ // forward the buffer directly. 
++ ByteBuf buffer = decompressed; ++ decompressed = null; ++ needsRead = false; ++ ctx.fireChannelRead(buffer); ++ } ++ } else if (inflater.needsDictionary()) { ++ if (dictionary == null) { ++ throw new DecompressionException( ++ "decompression failure, unable to set dictionary as non was specified"); + } ++ inflater.setDictionary(dictionary); + } + + if (inflater.finished()) { +@@ -278,11 +286,13 @@ public class JdkZlibDecoder extends ZlibDecoder { + } catch (DataFormatException e) { + throw new DecompressionException("decompression failure", e); + } finally { +- +- if (decompressed.isReadable()) { +- out.add(decompressed); +- } else { +- decompressed.release(); ++ if (decompressed != null) { ++ if (decompressed.isReadable()) { ++ needsRead = false; ++ ctx.fireChannelRead(decompressed); ++ } else { ++ decompressed.release(); ++ } + } + } + } +@@ -454,4 +464,15 @@ public class JdkZlibDecoder extends ZlibDecoder { + return (cmf_flg & 0x7800) == 0x7800 && + cmf_flg % 31 == 0; + } ++ ++ @Override ++ public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { ++ // Discard bytes of the cumulation buffer if needed. 
++ discardSomeReadBytes(); ++ ++ if (needsRead && !ctx.channel().config().isAutoRead()) { ++ ctx.read(); ++ } ++ ctx.fireChannelReadComplete(); ++ } + } +diff --git a/codec/src/test/java/io/netty/handler/codec/compression/AbstractIntegrationTest.java b/codec/src/test/java/io/netty/handler/codec/compression/AbstractIntegrationTest.java +index 5eaed2f..dc05eb6 100644 +--- a/codec/src/test/java/io/netty/handler/codec/compression/AbstractIntegrationTest.java ++++ b/codec/src/test/java/io/netty/handler/codec/compression/AbstractIntegrationTest.java +@@ -17,7 +17,10 @@ + + import io.netty.buffer.ByteBuf; + import io.netty.buffer.CompositeByteBuf; ++import io.netty.buffer.PooledByteBufAllocator; + import io.netty.buffer.Unpooled; ++import io.netty.channel.ChannelHandlerContext; ++import io.netty.channel.ChannelInboundHandlerAdapter; + import io.netty.channel.embedded.EmbeddedChannel; + import io.netty.util.CharsetUtil; + import io.netty.util.ReferenceCountUtil; +@@ -166,4 +169,63 @@ + decompressed.release(); + in.release(); + } ++ ++ @Test ++ public void testHugeDecompress() { ++ int chunkSize = 1024 * 1024; ++ int numberOfChunks = 256; ++ int memoryLimit = chunkSize * 128; ++ ++ EmbeddedChannel compressChannel = createEncoder(); ++ ByteBuf compressed = compressChannel.alloc().buffer(); ++ for (int i = 0; i <= numberOfChunks; i++) { ++ if (i < numberOfChunks) { ++ ByteBuf in = compressChannel.alloc().buffer(chunkSize); ++ in.writeZero(chunkSize); ++ compressChannel.writeOutbound(in); ++ } else { ++ compressChannel.close(); ++ } ++ while (true) { ++ ByteBuf buf = compressChannel.readOutbound(); ++ if (buf == null) { ++ break; ++ } ++ compressed.writeBytes(buf); ++ buf.release(); ++ } ++ } ++ ++ PooledByteBufAllocator allocator = new PooledByteBufAllocator(false); ++ ++ HugeDecompressIncomingHandler endHandler = new HugeDecompressIncomingHandler(memoryLimit); ++ EmbeddedChannel decompressChannel = createDecoder(); ++ decompressChannel.pipeline().addLast(endHandler); ++ 
decompressChannel.config().setAllocator(allocator); ++ decompressChannel.writeInbound(compressed); ++ decompressChannel.finishAndReleaseAll(); ++ assertEquals((long) chunkSize * numberOfChunks, endHandler.total); ++ } ++ ++ private static final class HugeDecompressIncomingHandler extends ChannelInboundHandlerAdapter { ++ final int memoryLimit; ++ long total; ++ ++ HugeDecompressIncomingHandler(int memoryLimit) { ++ this.memoryLimit = memoryLimit; ++ } ++ ++ @Override ++ public void channelRead(ChannelHandlerContext ctx, Object msg) { ++ ByteBuf buf = (ByteBuf) msg; ++ total += buf.readableBytes(); ++ try { ++ PooledByteBufAllocator allocator = (PooledByteBufAllocator) ctx.alloc(); ++ assertThat(allocator.metric().usedHeapMemory(), lessThan((long) memoryLimit)); ++ assertThat(allocator.metric().usedDirectMemory(), lessThan((long) memoryLimit)); ++ } finally { ++ buf.release(); ++ } ++ } ++ } + } diff -Nru netty-4.1.48/debian/patches/CVE-2025-59419 netty-4.1.48/debian/patches/CVE-2025-59419 --- netty-4.1.48/debian/patches/CVE-2025-59419 1970-01-01 00:00:00.000000000 +0000 +++ netty-4.1.48/debian/patches/CVE-2025-59419 2026-02-09 10:26:12.000000000 +0000 @@ -0,0 +1,180 @@ +From: DepthFirst Disclosures +Date: Tue, 14 Oct 2025 01:41:47 -0700 +Subject: CVE-2025-59419: Merge commit from fork + +* Patch 1 of 3 + +* Patch 2 of 3 + +* Patch 3 of 3 + +* Fix indentation style + +* Update 2025 + +* Optimize allocations + +* Update codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpUtils.java + +Co-authored-by: Chris Vest + +--------- + +Co-authored-by: Norman Maurer +Co-authored-by: Chris Vest +origin: https://github.com/netty/netty/commit/2b3fddd3339cde1601f622b9ce5e54c39f24c3f9 +bug: https://github.com/netty/netty/security/advisories/GHSA-jq43-27x9-3v86 +bug-debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1118282 +--- + .../handler/codec/smtp/DefaultSmtpRequest.java | 2 + + .../io/netty/handler/codec/smtp/SmtpUtils.java | 44 +++++++++++++ + 
.../netty/handler/codec/smtp/SmtpRequestsTest.java | 73 ++++++++++++++++++++++ + 3 files changed, 119 insertions(+) + create mode 100644 codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestsTest.java + +--- a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/DefaultSmtpRequest.java ++++ b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/DefaultSmtpRequest.java +@@ -43,6 +43,7 @@ + */ + public DefaultSmtpRequest(SmtpCommand command, CharSequence... parameters) { + this.command = ObjectUtil.checkNotNull(command, "command"); ++ SmtpUtils.validateSMTPParameters(parameters); + this.parameters = SmtpUtils.toUnmodifiableList(parameters); + } + +@@ -55,6 +56,7 @@ + + DefaultSmtpRequest(SmtpCommand command, List parameters) { + this.command = ObjectUtil.checkNotNull(command, "command"); ++ SmtpUtils.validateSMTPParameters(parameters); + this.parameters = parameters != null ? + Collections.unmodifiableList(parameters) : Collections.emptyList(); + } +--- a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpUtils.java ++++ b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpUtils.java +@@ -28,5 +28,49 @@ + return Collections.unmodifiableList(Arrays.asList(sequences)); + } + ++ /** ++ * Validates SMTP parameters to prevent SMTP command injection. ++ * Throws IllegalArgumentException if any parameter contains CRLF sequences. ++ */ ++ static void validateSMTPParameters(CharSequence... parameters) { ++ if (parameters != null) { ++ for (CharSequence parameter : parameters) { ++ if (parameter != null) { ++ validateSMTPParameter(parameter); ++ } ++ } ++ } ++ } ++ ++ /** ++ * Validates SMTP parameters to prevent SMTP command injection. ++ * Throws IllegalArgumentException if any parameter contains CRLF sequences. 
++ */ ++ static void validateSMTPParameters(List parameters) { ++ if (parameters != null) { ++ for (CharSequence parameter : parameters) { ++ if (parameter != null) { ++ validateSMTPParameter(parameter); ++ } ++ } ++ } ++ } ++ ++ private static void validateSMTPParameter(CharSequence parameter) { ++ if (parameter instanceof String) { ++ String paramStr = (String) parameter; ++ if (paramStr.indexOf('\r') != -1 || paramStr.indexOf('\n') != -1) { ++ throw new IllegalArgumentException("SMTP parameter contains CRLF characters: " + parameter); ++ } ++ } else { ++ for (int i = 0; i < parameter.length(); i++) { ++ char c = parameter.charAt(i); ++ if (c == '\r' || c == '\n') { ++ throw new IllegalArgumentException("SMTP parameter contains CRLF characters: " + parameter); ++ } ++ } ++ } ++ } ++ + private SmtpUtils() { } + } +--- /dev/null ++++ b/codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestsTest.java +@@ -0,0 +1,73 @@ ++/* ++ * Copyright 2025 The Netty Project ++ * ++ * The Netty Project licenses this file to you under the Apache License, ++ * version 2.0 (the "License"); you may not use this file except in compliance ++ * with the License. You may obtain a copy of the License at: ++ * ++ * https://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++ * License for the specific language governing permissions and limitations ++ * under the License. 
++ */ ++package io.netty.handler.codec.smtp; ++ ++import org.junit.jupiter.api.Test; ++import org.junit.jupiter.api.function.Executable; ++ ++import static org.junit.jupiter.api.Assertions.assertThrows; ++ ++public class SmtpRequestsTest { ++ @Test ++ public void testSmtpInjectionWithCarriageReturn() { ++ assertThrows(IllegalArgumentException.class, new Executable() { ++ @Override ++ public void execute() { ++ SmtpRequests.mail("test@example.com\rQUIT"); ++ } ++ }); ++ } ++ ++ @Test ++ public void testSmtpInjectionWithLineFeed() { ++ assertThrows(IllegalArgumentException.class, new Executable() { ++ @Override ++ public void execute() { ++ SmtpRequests.mail("test@example.com\nQUIT"); ++ } ++ }); ++ } ++ ++ @Test ++ public void testSmtpInjectionWithCRLF() { ++ assertThrows(IllegalArgumentException.class, new Executable() { ++ @Override ++ public void execute() { ++ SmtpRequests.rcpt("test@example.com\r\nQUIT"); ++ } ++ }); ++ } ++ ++ @Test ++ public void testSmtpInjectionInAuthParameter() { ++ assertThrows(IllegalArgumentException.class, new Executable() { ++ @Override ++ public void execute() { ++ SmtpRequests.auth("PLAIN", "dGVzdA\rQUIT"); ++ } ++ }); ++ } ++ ++ @Test ++ public void testSmtpInjectionInHelo() { ++ assertThrows(IllegalArgumentException.class, new Executable() { ++ @Override ++ public void execute() { ++ SmtpRequests.helo("localhost\r\nQUIT"); ++ } ++ }); ++ } ++} diff -Nru netty-4.1.48/debian/patches/CVE-2025-59419.patch netty-4.1.48/debian/patches/CVE-2025-59419.patch --- netty-4.1.48/debian/patches/CVE-2025-59419.patch 1970-01-01 00:00:00.000000000 +0000 +++ netty-4.1.48/debian/patches/CVE-2025-59419.patch 2026-02-09 10:23:38.000000000 +0000 @@ -0,0 +1,180 @@ +From: DepthFirst Disclosures +Date: Tue, 14 Oct 2025 01:41:47 -0700 +Subject: CVE-2025-59419: Merge commit from fork + +* Patch 1 of 3 + +* Patch 2 of 3 + +* Patch 3 of 3 + +* Fix indentation style + +* Update 2025 + +* Optimize allocations + +* Update 
codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpUtils.java + +Co-authored-by: Chris Vest + +--------- + +Co-authored-by: Norman Maurer +Co-authored-by: Chris Vest +origin: https://github.com/netty/netty/commit/2b3fddd3339cde1601f622b9ce5e54c39f24c3f9 +bug: https://github.com/netty/netty/security/advisories/GHSA-jq43-27x9-3v86 +bug-debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1118282 +--- + .../handler/codec/smtp/DefaultSmtpRequest.java | 2 + + .../io/netty/handler/codec/smtp/SmtpUtils.java | 44 +++++++++++++ + .../netty/handler/codec/smtp/SmtpRequestsTest.java | 73 ++++++++++++++++++++++ + 3 files changed, 119 insertions(+) + create mode 100644 codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestsTest.java + +--- a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/DefaultSmtpRequest.java ++++ b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/DefaultSmtpRequest.java +@@ -43,6 +43,7 @@ + */ + public DefaultSmtpRequest(SmtpCommand command, CharSequence... parameters) { + this.command = ObjectUtil.checkNotNull(command, "command"); ++ SmtpUtils.validateSMTPParameters(parameters); + this.parameters = SmtpUtils.toUnmodifiableList(parameters); + } + +@@ -55,6 +56,7 @@ + + DefaultSmtpRequest(SmtpCommand command, List parameters) { + this.command = ObjectUtil.checkNotNull(command, "command"); ++ SmtpUtils.validateSMTPParameters(parameters); + this.parameters = parameters != null ? + Collections.unmodifiableList(parameters) : Collections.emptyList(); + } +--- a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpUtils.java ++++ b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpUtils.java +@@ -28,5 +28,49 @@ + return Collections.unmodifiableList(Arrays.asList(sequences)); + } + ++ /** ++ * Validates SMTP parameters to prevent SMTP command injection. ++ * Throws IllegalArgumentException if any parameter contains CRLF sequences. ++ */ ++ static void validateSMTPParameters(CharSequence... 
parameters) {
++        if (parameters != null) {
++            for (CharSequence parameter : parameters) {
++                if (parameter != null) {
++                    validateSMTPParameter(parameter);
++                }
++            }
++        }
++    }
++
++    /**
++     * Validates SMTP parameters to prevent SMTP command injection.
++     * Throws IllegalArgumentException if any parameter contains CRLF sequences.
++     */
++    static void validateSMTPParameters(List<CharSequence> parameters) {
++        if (parameters != null) {
++            for (CharSequence parameter : parameters) {
++                if (parameter != null) {
++                    validateSMTPParameter(parameter);
++                }
++            }
++        }
++    }
++
++    private static void validateSMTPParameter(CharSequence parameter) {
++        if (parameter instanceof String) {
++            String paramStr = (String) parameter;
++            if (paramStr.indexOf('\r') != -1 || paramStr.indexOf('\n') != -1) {
++                throw new IllegalArgumentException("SMTP parameter contains CRLF characters: " + parameter);
++            }
++        } else {
++            for (int i = 0; i < parameter.length(); i++) {
++                char c = parameter.charAt(i);
++                if (c == '\r' || c == '\n') {
++                    throw new IllegalArgumentException("SMTP parameter contains CRLF characters: " + parameter);
++                }
++            }
++        }
++    }
++
+     private SmtpUtils() { }
+ }
+--- /dev/null
++++ b/codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestsTest.java
+@@ -0,0 +1,73 @@
++/*
++ * Copyright 2025 The Netty Project
++ *
++ * The Netty Project licenses this file to you under the Apache License,
++ * version 2.0 (the "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at:
++ *
++ *   https://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++ * License for the specific language governing permissions and limitations
++ * under the License.
++ */ ++package io.netty.handler.codec.smtp; ++ ++import org.junit.jupiter.api.Test; ++import org.junit.jupiter.api.function.Executable; ++ ++import static org.junit.jupiter.api.Assertions.assertThrows; ++ ++public class SmtpRequestsTest { ++ @Test ++ public void testSmtpInjectionWithCarriageReturn() { ++ assertThrows(IllegalArgumentException.class, new Executable() { ++ @Override ++ public void execute() { ++ SmtpRequests.mail("test@example.com\rQUIT"); ++ } ++ }); ++ } ++ ++ @Test ++ public void testSmtpInjectionWithLineFeed() { ++ assertThrows(IllegalArgumentException.class, new Executable() { ++ @Override ++ public void execute() { ++ SmtpRequests.mail("test@example.com\nQUIT"); ++ } ++ }); ++ } ++ ++ @Test ++ public void testSmtpInjectionWithCRLF() { ++ assertThrows(IllegalArgumentException.class, new Executable() { ++ @Override ++ public void execute() { ++ SmtpRequests.rcpt("test@example.com\r\nQUIT"); ++ } ++ }); ++ } ++ ++ @Test ++ public void testSmtpInjectionInAuthParameter() { ++ assertThrows(IllegalArgumentException.class, new Executable() { ++ @Override ++ public void execute() { ++ SmtpRequests.auth("PLAIN", "dGVzdA\rQUIT"); ++ } ++ }); ++ } ++ ++ @Test ++ public void testSmtpInjectionInHelo() { ++ assertThrows(IllegalArgumentException.class, new Executable() { ++ @Override ++ public void execute() { ++ SmtpRequests.helo("localhost\r\nQUIT"); ++ } ++ }); ++ } ++} diff -Nru netty-4.1.48/debian/patches/CVE-2025-67735.patch netty-4.1.48/debian/patches/CVE-2025-67735.patch --- netty-4.1.48/debian/patches/CVE-2025-67735.patch 1970-01-01 00:00:00.000000000 +0000 +++ netty-4.1.48/debian/patches/CVE-2025-67735.patch 2026-02-09 10:26:12.000000000 +0000 @@ -0,0 +1,389 @@ +From: Chris Vest +Date: Thu, 11 Dec 2025 09:20:08 -0800 +Subject: Merge commit from fork * Reject encoding of HTTP URIs that have + line-breaks +MIME-Version: 1.0 +Content-Type: text/plain; charset="utf-8" +Content-Transfer-Encoding: 8bit + +Motivation: +Line-breaks in user-supplied data can 
cause security issues like request/response splitting, request smuggling, and parser desynchronization. +The URI was not being checked for containing line-breaks before encoding. + +Modification: +When encoding the URI in HttpRequestEncoder, we now also check if it contains any line-break characters, and if so, throw an IllegalArgumentException. + +Result: +Line-breaks are now being properly neutralized from the URI in HttpRequestEncoder. + +Unfortunately, the performance drops a bit from this check. + +Before: + +``` +Benchmark Mode Cnt Score Error Units +HttpRequestEncoderInsertBenchmark.newEncoder thrpt 40 10169070.498 ± 27016.445 ops/s +``` + +Now: + +``` +Benchmark Mode Cnt Score Error Units +HttpRequestEncoderInsertBenchmark.newEncoder thrpt 40 7984846.328 ± 29959.587 ops/s +``` + +* Move the request line encoding safety checks to DefaultHttpRequest + +origin: backport, https://github.com/netty/netty/commit/77e81f1e5944d98b3acf887d3aa443b252752e94 +--- + .../handler/codec/http/DefaultFullHttpRequest.java | 10 ++- + .../handler/codec/http/DefaultHttpRequest.java | 16 ++++ + .../java/io/netty/handler/codec/http/HttpUtil.java | 57 +++++++++++++- + .../handler/codec/http/DefaultHttpRequestTest.java | 88 +++++++++++++++++++++- + .../handler/codec/http/HttpRequestEncoderTest.java | 2 - + .../io/netty/handler/codec/http/HttpUtilTest.java | 60 ++++++++++++++- + 6 files changed, 222 insertions(+), 11 deletions(-) + +diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpRequest.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpRequest.java +index 117e6db..d599241 100644 +--- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpRequest.java ++++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpRequest.java +@@ -53,7 +53,15 @@ public class DefaultFullHttpRequest extends DefaultHttpRequest implements FullHt + + public DefaultFullHttpRequest(HttpVersion httpVersion, HttpMethod method, 
String uri,
+                                  ByteBuf content, HttpHeaders headers, HttpHeaders trailingHeader) {
+-        super(httpVersion, method, uri, headers);
++        this(httpVersion, method, uri, content, headers, trailingHeader, true);
++    }
++
++    /**
++     * Create a full HTTP request with the given HTTP version, method, URI, contents, and header and trailer objects.
++     */
++    public DefaultFullHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri,
++                                  ByteBuf content, HttpHeaders headers, HttpHeaders trailingHeader, boolean validateRequestLine) {
++        super(httpVersion, method, uri, headers, validateRequestLine);
+         this.content = checkNotNull(content, "content");
+         this.trailingHeader = checkNotNull(trailingHeader, "trailingHeader");
+     }
+diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpRequest.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpRequest.java
+index dbc7dd3..d0df1c0 100644
+--- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpRequest.java
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpRequest.java
+@@ -61,9 +61,25 @@ public class DefaultHttpRequest extends DefaultHttpMessage implements HttpReques
+      * @param headers    the Headers for this Request
+      */
+     public DefaultHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri, HttpHeaders headers) {
++        this(httpVersion, method, uri, headers, true);
++    }
++
++    /**
++     * Creates a new instance.
++     *
++     * @param httpVersion the HTTP version of the request
++     * @param method      the HTTP method of the request
++     * @param uri         the URI or path of the request
++     * @param headers     the Headers for this Request
++     */
++    public DefaultHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri, HttpHeaders headers,
++                              boolean validateRequestLine) {
+         super(httpVersion, headers);
+         this.method = checkNotNull(method, "method");
+         this.uri = checkNotNull(uri, "uri");
++        if (validateRequestLine) {
++            HttpUtil.validateRequestLineTokens(httpVersion, method, uri);
++        }
+     }
+ 
+     @Override
+diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java
+index afa3ec4..512d841 100644
+--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java
+@@ -41,12 +41,13 @@ public final class HttpUtil {
+     private static final AsciiString CHARSET_EQUALS = AsciiString.of(HttpHeaderValues.CHARSET + "=");
+     private static final AsciiString SEMICOLON = AsciiString.cached(";");
+     private static final String COMMA_STRING = String.valueOf(COMMA);
++    private static final long ILLEGAL_REQUEST_LINE_TOKEN_OCTET_MASK = 1L << '\n' | 1L << '\r' | 1L << ' ';
+ 
+     private HttpUtil() { }
+ 
+     /**
+      * Determine if a uri is in origin-form according to
+-     * rfc7230, 5.3.
++     * RFC 9112, 3.2.1.
+      */
+     public static boolean isOriginForm(URI uri) {
+         return uri.getScheme() == null && uri.getSchemeSpecificPart() == null &&
+@@ -54,8 +55,8 @@
+     }
+ 
+     /**
+-     * Determine if a uri is in asterisk-form according to
+-     * rfc7230, 5.3.
++     * Determine if a uri is in asterisk-form according to
++     * RFC 9112, 3.2.1.
+ */ + public static boolean isAsteriskForm(URI uri) { + return "*".equals(uri.getPath()) && +@@ -475,6 +476,56 @@ public final class HttpUtil { + return null; + } + ++ static void validateRequestLineTokens(HttpVersion httpVersion, HttpMethod method, String uri) { ++ // The HttpVersion class does its own validation, and it's not possible for subclasses to circumvent it. ++ // The HttpMethod class does its own validation, but subclasses might circumvent it. ++ if (method.getClass() != HttpMethod.class) { ++ if (!isEncodingSafeStartLineToken(method.asciiName())) { ++ throw new IllegalArgumentException( ++ "The HTTP method name contain illegal characters: " + method.asciiName()); ++ } ++ } ++ ++ if (!isEncodingSafeStartLineToken(uri)) { ++ throw new IllegalArgumentException("The URI contain illegal characters: " + uri); ++ } ++ } ++ ++ /** ++ * Validate that the given request line token is safe for verbatim encoding to the network. ++ * This does not fully check that the token – HTTP method, version, or URI – is valid and formatted correctly. ++ * Only that the token does not contain characters that would break or ++ * desynchronize HTTP message parsing of the start line wherein the token would be included. ++ *

++     * See RFC 9112, 3.
++     *
++     * @param token The token to check.
++     * @return {@code true} if the token is safe to encode verbatim into the HTTP message output stream,
++     * otherwise {@code false}.
++     */
++    public static boolean isEncodingSafeStartLineToken(CharSequence token) {
++        int i = 0;
++        int lenBytes = token.length();
++        int modulo = lenBytes % 4;
++        int lenInts = modulo == 0 ? lenBytes : lenBytes - modulo;
++        for (; i < lenInts; i += 4) {
++            long chars = (token.charAt(i) < 64 ? 1L << token.charAt(i) : 0) |
++                    (token.charAt(i + 1) < 64 ? 1L << token.charAt(i + 1) : 0) |
++                    (token.charAt(i + 2) < 64 ? 1L << token.charAt(i + 2) : 0) |
++                    (token.charAt(i + 3) < 64 ? 1L << token.charAt(i + 3) : 0);
++            if ((chars & ILLEGAL_REQUEST_LINE_TOKEN_OCTET_MASK) != 0) {
++                return false;
++            }
++        }
++        for (; i < lenBytes; i++) {
++            long ch = token.charAt(i) < 64 ? 1L << token.charAt(i) : 0;
++            if ((ch & ILLEGAL_REQUEST_LINE_TOKEN_OCTET_MASK) != 0) {
++                return false;
++            }
++        }
++        return true;
++    }
++
+     /**
+      * Fetch MIME type part from message's Content-Type header as a char sequence.
+      *
+diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpRequestTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpRequestTest.java
+index cf0fa92..7f6e1a1 100644
+--- a/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpRequestTest.java
++++ b/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpRequestTest.java
+@@ -22,8 +22,94 @@ import static io.netty.handler.codec.http.HttpHeadersTestUtils.of;
+ import static org.junit.Assert.assertNull;
+ import static org.junit.Assert.assertTrue;
+ 
+-public class DefaultHttpRequestTest {
++import org.junit.runner.RunWith;
++import org.junit.runners.Parameterized;
++import org.junit.runners.Parameterized.Parameters;
++
++import java.util.Arrays;
++import java.util.Collection;
++
++import static org.junit.Assert.fail;
++
++@RunWith(Parameterized.class)
++class DefaultHttpRequestIllegalMethodTest {
++
++    private final String method;
++
++    public DefaultHttpRequestIllegalMethodTest(String method) {
++        this.method = method;
++    }
++
++    
@Parameters(name = "{index}: method={0}") ++ public static Collection data() { ++ return Arrays.asList(new Object[][] { ++ {"GET "}, {" GET"}, {"G ET"}, {" GET "}, {"GET\r"}, {"GET\n"}, ++ {"GET\r\n"}, {"GE\rT"}, {"GE\nT"}, {"GE\r\nT"}, {"\rGET"}, ++ {"\nGET"}, {"\r\nGET"}, {" \r\nGET"}, {"\r \nGET"}, {"\r\n GET"}, ++ {"\r\nGET "}, {"\nGET "}, {"\rGET "}, {"\r GET"}, {" \rGET"}, ++ {"\nGET "}, {"\n GET"}, {" \nGET"}, {"GET \n"}, {"GET \r"}, ++ {" GET\r"}, {" GET\r"}, {"GET \n"}, {" GET\n"}, {" GET\n"}, ++ {"GE\nT "}, {"GE\rT "}, {" GE\rT"}, {" GE\rT"}, {"GE\nT "}, ++ {" GE\nT"}, {" GE\nT"} ++ }); ++ } ++ ++ @Test ++ public void constructorMustRejectIllegalHttpMethodByDefault() { ++ try { ++ new DefaultHttpRequest(HttpVersion.HTTP_1_0, new HttpMethod("GET") { ++ public AsciiString asciiName() { ++ return new AsciiString(method); ++ } ++ }, "/"); ++ fail("Expected IllegalArgumentException for method: " + method); ++ } catch (IllegalArgumentException e) { ++ // Expected exception ++ } ++ } ++} + ++@RunWith(Parameterized.class) ++class DefaultHttpRequestIllegalUriTest { ++ ++ private final String uri; ++ ++ public DefaultHttpRequestIllegalUriTest(String uri) { ++ this.uri = uri; ++ } ++ ++ @Parameters(name = "{index}: uri={0}") ++ public static Collection data() { ++ return Arrays.asList(new Object[][] { ++ {"http://localhost/\r\n"}, ++ {"/r\r\n?q=1"}, ++ {"http://localhost/\r\n?q=1"}, ++ {"/r\r\n/?q=1"}, ++ {"http://localhost/\r\n/?q=1"}, ++ {"/r\r\n"}, ++ {"http://localhost/ HTTP/1.1\r\n\r\nPOST /p HTTP/1.1\r\n\r\n"}, ++ {"/r HTTP/1.1\r\n\r\nPOST /p HTTP/1.1\r\n\r\n"}, ++ {"/ path"}, ++ {"/path "}, ++ {" /path"}, ++ {"http://localhost/ "}, ++ {" http://localhost/"}, ++ {"http://local host/"} ++ }); ++ } ++ ++ @Test ++ public void constructorMustRejectIllegalUrisByDefault() { ++ try { ++ new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri); ++ fail("Expected IllegalArgumentException for URI: " + uri); ++ } catch (IllegalArgumentException e) { ++ // 
Expected exception ++ } ++ } ++} ++ ++public class DefaultHttpRequestTest { + @Test + public void testHeaderRemoval() { + HttpMessage m = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); +diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestEncoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestEncoderTest.java +index 2f866f7..7008b4b 100644 +--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestEncoderTest.java ++++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestEncoderTest.java +@@ -32,8 +32,6 @@ import static org.hamcrest.Matchers.instanceOf; + import static org.hamcrest.Matchers.is; + import static org.junit.Assert.*; + +-/** +- */ + public class HttpRequestEncoderTest { + + @SuppressWarnings("deprecation") +diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpUtilTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpUtilTest.java +index 186b498..332f57f 100644 +--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpUtilTest.java ++++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpUtilTest.java +@@ -15,17 +15,22 @@ + */ + package io.netty.handler.codec.http; + ++import io.netty.util.CharsetUtil; ++import io.netty.util.ReferenceCountUtil; ++import io.netty.handler.codec.http.HttpHeaders; + import java.net.InetAddress; + import java.net.InetSocketAddress; ++import java.net.URI; + import java.nio.charset.StandardCharsets; + import java.util.ArrayList; ++import java.util.Arrays; ++import java.util.Collection; + import java.util.Collections; + import java.util.List; +- +-import io.netty.util.CharsetUtil; +-import io.netty.util.ReferenceCountUtil; ++import org.junit.runner.RunWith; ++import org.junit.runners.Parameterized; ++import org.junit.runners.Parameterized.Parameters; + import org.junit.Test; +- + import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; + import static 
org.junit.Assert.assertEquals; + import static org.junit.Assert.assertFalse; +@@ -33,6 +38,41 @@ import static org.junit.Assert.assertNull; + import static org.junit.Assert.assertTrue; + import static org.junit.Assert.fail; + ++@RunWith(Parameterized.class) ++class HttpUtilRequestLineTokenValidationTest { ++ ++ private final String token; ++ ++ public HttpUtilRequestLineTokenValidationTest(String token) { ++ this.token = token; ++ } ++ ++ @Parameters(name = "{index}: token={0}") ++ public static Collection data() { ++ return Arrays.asList(new Object[][] { ++ {"http://localhost/\r\n"}, ++ {"/r\r\n?q=1"}, ++ {"http://localhost/\r\n?q=1"}, ++ {"/r\r\n/?q=1"}, ++ {"http://localhost/\r\n/?q=1"}, ++ {"/r\r\n"}, ++ {"http://localhost/ HTTP/1.1\r\n\r\nPOST /p HTTP/1.1\r\n\r\n"}, ++ {"/r HTTP/1.1\r\n\r\nPOST /p HTTP/1.1\r\n\r\n"}, ++ {"GET "}, ++ {" GET"}, ++ {"HTTP/ 1.1"}, ++ {"HTTP/\r0.9"}, ++ {"HTTP/\n1.1"} ++ }); ++ } ++ ++ @Test ++ public void requestLineTokenValidationMustRejectInvalidTokens() throws Exception { ++ assertFalse(HttpUtil.isEncodingSafeStartLineToken(token)); ++ } ++} ++ ++ + public class HttpUtilTest { + + @Test +@@ -59,6 +99,18 @@ public class HttpUtilTest { + assertEquals("2", values.get(1)); + } + ++ @Test ++ public void testRecognizesAsteriskForm() { ++ // Asterisk form: https://tools.ietf.org/html/rfc7230#section-5.3.4 ++ assertTrue(HttpUtil.isAsteriskForm(URI.create("*"))); ++ // Origin form: https://tools.ietf.org/html/rfc7230#section-5.3.1 ++ assertFalse(HttpUtil.isAsteriskForm(URI.create("/where?q=now"))); ++ // Absolute form: https://tools.ietf.org/html/rfc7230#section-5.3.2 ++ assertFalse(HttpUtil.isAsteriskForm(URI.create("http://www.example.org/pub/WWW/TheProject.html"))); ++ // Authority form: https://tools.ietf.org/html/rfc7230#section-5.3.3 ++ assertFalse(HttpUtil.isAsteriskForm(URI.create("www.example.com:80"))); ++ } ++ + @Test + public void testGetCharsetAsRawCharSequence() { + String QUOTES_CHARSET_CONTENT_TYPE = "text/html; 
charset=\"utf8\""; diff -Nru netty-4.1.48/debian/patches/series netty-4.1.48/debian/patches/series --- netty-4.1.48/debian/patches/series 2024-05-12 19:20:10.000000000 +0000 +++ netty-4.1.48/debian/patches/series 2026-02-09 10:26:12.000000000 +0000 @@ -25,3 +25,10 @@ CVE-2023-44487.patch 22-java-21.patch CVE-2024-29025.patch +CVE-2025-59419.patch +CVE-2025-55163_before-1.patch +CVE-2025-55163_1.patch +CVE-2025-55163_2.patch +CVE-2025-58057.patch +CVE-2025-58056.patch +CVE-2025-67735.patch diff -Nru netty-4.1.48/debian/salsa-ci.yml netty-4.1.48/debian/salsa-ci.yml --- netty-4.1.48/debian/salsa-ci.yml 1970-01-01 00:00:00.000000000 +0000 +++ netty-4.1.48/debian/salsa-ci.yml 2026-02-09 10:26:12.000000000 +0000 @@ -0,0 +1,12 @@ +--- +include: + - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/recipes/debian.yml + +variables: + # disable irrelevant jobs as this package only builds arch:all + RELEASE: 'trixie' + SALSA_CI_DISABLE_BLHC: 1 + SALSA_CI_DISABLE_BUILD_PACKAGE_I386: 1 + SALSA_CI_DISABLE_CROSSBUILD_ARM64: 1 + SALSA_CI_DISABLE_BUILD_PACKAGE_ALL: 1 + SALSA_CI_DISABLE_BUILD_PACKAGE_ANY: 1