This commit is contained in:
lei_ju 2020-12-04 15:27:56 +08:00
parent 416fa0eca9
commit 2d430d5a23
7 changed files with 1027 additions and 1 deletions

98
CVE-2019-16869.patch Normal file
View File

@ -0,0 +1,98 @@
From: Norman Maurer <norman_maurer@apple.com>
Date: Fri, 20 Sep 2019 21:02:11 +0200
Subject: Correctly handle whitespaces in HTTP header names as defined by
RFC7230#section-3.2.4 (#9585)
Origin: https://github.com/netty/netty/commit/39cafcb05c99f2aa9fce7e6597664c9ed6a63a95
Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-16869
Bug-Debian: https://bugs.debian.org/941266
Bug: https://github.com/netty/netty/issues/9571
Motivation:
When parsing HTTP headers special care needs to be taken when a whitespace is detected in the header name.
Modifications:
- Ignore whitespace when decoding response (just like before)
- Throw exception when whitespace is detected during parsing
- Add unit tests
Result:
Fixes https://github.com/netty/netty/issues/9571
[Salvatore Bonaccorso: Backport to 4.1.7 for context changes in
HttpObjectDecoder.java]
---
.../handler/codec/http/HttpObjectDecoder.java | 16 +++++++++++++++-
.../codec/http/HttpRequestDecoderTest.java | 14 ++++++++++++++
.../codec/http/HttpResponseDecoderTest.java | 15 +++++++++++++++
3 files changed, 44 insertions(+), 1 deletion(-)
--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
+++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
@@ -727,7 +727,21 @@ public abstract class HttpObjectDecoder
nameStart = findNonWhitespace(sb, 0);
for (nameEnd = nameStart; nameEnd < length; nameEnd ++) {
char ch = sb.charAt(nameEnd);
- if (ch == ':' || Character.isWhitespace(ch)) {
+ // https://tools.ietf.org/html/rfc7230#section-3.2.4
+ //
+ // No whitespace is allowed between the header field-name and colon. In
+ // the past, differences in the handling of such whitespace have led to
+ // security vulnerabilities in request routing and response handling. A
+ // server MUST reject any received request message that contains
+ // whitespace between a header field-name and colon with a response code
+ // of 400 (Bad Request). A proxy MUST remove any such whitespace from a
+ // response message before forwarding the message downstream.
+ if (ch == ':' ||
+ // In case of decoding a request we will just continue processing and header validation
+ // is done in the DefaultHttpHeaders implementation.
+ //
+ // In the case of decoding a response we will "skip" the whitespace.
+ (!isDecodingRequest() && Character.isWhitespace(ch))) {
break;
}
}
--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
+++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
@@ -270,4 +270,18 @@ public class HttpRequestDecoderTest {
cnt.release();
assertFalse(channel.finishAndReleaseAll());
}
+
+ @Test
+ public void testWhitespace() {
+ EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder());
+ String requestStr = "GET /some/path HTTP/1.1\r\n" +
+ "Transfer-Encoding : chunked\r\n" +
+ "Host: netty.io\n\r\n";
+
+ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
+ HttpRequest request = channel.readInbound();
+ assertTrue(request.decoderResult().isFailure());
+ assertTrue(request.decoderResult().cause() instanceof IllegalArgumentException);
+ assertFalse(channel.finish());
+ }
}
--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java
+++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java
@@ -661,4 +661,19 @@ public class HttpResponseDecoderTest {
assertThat(message.decoderResult().cause(), instanceOf(PrematureChannelClosureException.class));
assertNull(channel.readInbound());
}
+
+ @Test
+ public void testWhitespace() {
+ EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder());
+ String requestStr = "HTTP/1.1 200 OK\r\n" +
+ "Transfer-Encoding : chunked\r\n" +
+ "Host: netty.io\n\r\n";
+
+ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
+ HttpResponse response = channel.readInbound();
+ assertFalse(response.decoderResult().isFailure());
+ assertEquals(HttpHeaderValues.CHUNKED.toString(), response.headers().get(HttpHeaderNames.TRANSFER_ENCODING));
+ assertEquals("netty.io", response.headers().get(HttpHeaderNames.HOST));
+ assertFalse(channel.finish());
+ }
}

60
CVE-2019-20444.patch Normal file
View File

@ -0,0 +1,60 @@
From a7c18d44b46e02dadfe3da225a06e5091f5f328e Mon Sep 17 00:00:00 2001
From: Norman Maurer <norman_maurer@apple.com>
Date: Wed, 11 Dec 2019 15:49:07 +0100
Subject: [PATCH] Detect missing colon when parsing http headers with no value
(#9871)
Motivation:
Technically speaking it's valid to have HTTP headers with no values, so we should support it. That said, we need to detect if these are "generated" because of an "invalid" fold.
Modifications:
- Detect if a colon is missing when parsing headers.
- Add unit test
Result:
Fixes https://github.com/netty/netty/issues/9866
---
.../handler/codec/http/HttpObjectDecoder.java | 5 +++++
.../codec/http/HttpRequestDecoderTest.java | 16 ++++++++++++++++
2 files changed, 21 insertions(+)
--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
+++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
@@ -746,6 +746,11 @@
}
}
+ if (nameEnd == length) {
+ // There was no colon present at all.
+ throw new IllegalArgumentException("No colon found");
+ }
+
for (colonEnd = nameEnd; colonEnd < length; colonEnd ++) {
if (sb.charAt(colonEnd) == ':') {
colonEnd ++;
--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
+++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
@@ -284,4 +284,20 @@
assertTrue(request.decoderResult().cause() instanceof IllegalArgumentException);
assertFalse(channel.finish());
}
+
+ @Test
+ public void testHeaderWithNoValueAndMissingColon() {
+ EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder());
+ String requestStr = "GET /some/path HTTP/1.1\r\n" +
+ "Content-Length: 0\r\n" +
+ "Host:\r\n" +
+ "netty.io\r\n\r\n";
+
+ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
+ HttpRequest request = channel.readInbound();
+ System.err.println(request.headers().names().toString());
+ assertTrue(request.decoderResult().isFailure());
+ assertTrue(request.decoderResult().cause() instanceof IllegalArgumentException);
+ assertFalse(channel.finish());
+ }
}

186
CVE-2019-20445-1.patch Normal file
View File

@ -0,0 +1,186 @@
From 8494b046ec7e4f28dbd44bc699cc4c4c92251729 Mon Sep 17 00:00:00 2001
From: Norman Maurer <norman_maurer@apple.com>
Date: Fri, 13 Dec 2019 08:53:19 +0100
Subject: [PATCH] Verify we do not receive multiple content-length headers or a
content-length and transfer-encoding: chunked header when using HTTP/1.1
(#9865)
Motivation:
RFC7230 states that we should not accept multiple content-length headers and also should not accept a content-length header in combination with transfer-encoding: chunked
Modifications:
- Check for multiple content-length headers and if found mark message as invalid
- Check if we found a content-length header and also a transfer-encoding: chunked and if so mark the message as invalid
- Add unit test
Result:
Fixes https://github.com/netty/netty/issues/9861
---
.../handler/codec/http/HttpObjectDecoder.java | 50 +++++++++++++--
.../codec/http/HttpRequestDecoderTest.java | 64 ++++++++++++++++---
2 files changed, 99 insertions(+), 15 deletions(-)
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
index 28f048252fe..768bd3b26f5 100644
--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
+++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
@@ -600,23 +600,61 @@ private State readHeaders(ByteBuf buffer) {
if (name != null) {
headers.add(name, value);
}
+
// reset name and value fields
name = null;
value = null;
- State nextState;
+ List<String> values = headers.getAll(HttpHeaderNames.CONTENT_LENGTH);
+ int contentLengthValuesCount = values.size();
+
+ if (contentLengthValuesCount > 0) {
+ // Guard against multiple Content-Length headers as stated in
+ // https://tools.ietf.org/html/rfc7230#section-3.3.2:
+ //
+ // If a message is received that has multiple Content-Length header
+ // fields with field-values consisting of the same decimal value, or a
+ // single Content-Length header field with a field value containing a
+ // list of identical decimal values (e.g., "Content-Length: 42, 42"),
+ // indicating that duplicate Content-Length header fields have been
+ // generated or combined by an upstream message processor, then the
+ // recipient MUST either reject the message as invalid or replace the
+ // duplicated field-values with a single valid Content-Length field
+ // containing that decimal value prior to determining the message body
+ // length or forwarding the message.
+ if (contentLengthValuesCount > 1 && message.protocolVersion() == HttpVersion.HTTP_1_1) {
+ throw new IllegalArgumentException("Multiple Content-Length headers found");
+ }
+ contentLength = Long.parseLong(values.get(0));
+ }
if (isContentAlwaysEmpty(message)) {
HttpUtil.setTransferEncodingChunked(message, false);
- nextState = State.SKIP_CONTROL_CHARS;
+ return State.SKIP_CONTROL_CHARS;
} else if (HttpUtil.isTransferEncodingChunked(message)) {
- nextState = State.READ_CHUNK_SIZE;
+ // See https://tools.ietf.org/html/rfc7230#section-3.3.3
+ //
+ // If a message is received with both a Transfer-Encoding and a
+ // Content-Length header field, the Transfer-Encoding overrides the
+ // Content-Length. Such a message might indicate an attempt to
+ // perform request smuggling (Section 9.5) or response splitting
+ // (Section 9.4) and ought to be handled as an error. A sender MUST
+ // remove the received Content-Length field prior to forwarding such
+ // a message downstream.
+ //
+ // This is also what http_parser does:
+ // https://github.com/nodejs/http-parser/blob/v2.9.2/http_parser.c#L1769
+ if (contentLengthValuesCount > 0 && message.protocolVersion() == HttpVersion.HTTP_1_1) {
+ throw new IllegalArgumentException(
+ "Both 'Content-Length: " + contentLength + "' and 'Transfer-Encoding: chunked' found");
+ }
+
+ return State.READ_CHUNK_SIZE;
} else if (contentLength() >= 0) {
- nextState = State.READ_FIXED_LENGTH_CONTENT;
+ return State.READ_FIXED_LENGTH_CONTENT;
} else {
- nextState = State.READ_VARIABLE_LENGTH_CONTENT;
+ return State.READ_VARIABLE_LENGTH_CONTENT;
}
- return nextState;
}
private long contentLength() {
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
index 8a2345837fe..1e780b7959f 100644
--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
+++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
@@ -323,29 +323,75 @@ public void testTooLargeHeaders() {
@Test
public void testWhitespace() {
- EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder());
String requestStr = "GET /some/path HTTP/1.1\r\n" +
"Transfer-Encoding : chunked\r\n" +
"Host: netty.io\n\r\n";
-
- assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
- HttpRequest request = channel.readInbound();
- assertTrue(request.decoderResult().isFailure());
- assertTrue(request.decoderResult().cause() instanceof IllegalArgumentException);
- assertFalse(channel.finish());
+ testInvalidHeaders0(requestStr);
}
@Test
public void testHeaderWithNoValueAndMissingColon() {
- EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder());
String requestStr = "GET /some/path HTTP/1.1\r\n" +
"Content-Length: 0\r\n" +
"Host:\r\n" +
"netty.io\r\n\r\n";
+ testInvalidHeaders0(requestStr);
+ }
+
+ @Test
+ public void testMultipleContentLengthHeaders() {
+ String requestStr = "GET /some/path HTTP/1.1\r\n" +
+ "Content-Length: 1\r\n" +
+ "Content-Length: 0\r\n\r\n" +
+ "b";
+ testInvalidHeaders0(requestStr);
+ }
+
+ @Test
+ public void testMultipleContentLengthHeaders2() {
+ String requestStr = "GET /some/path HTTP/1.1\r\n" +
+ "Content-Length: 1\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 0\r\n\r\n" +
+ "b";
+ testInvalidHeaders0(requestStr);
+ }
+
+ @Test
+ public void testContentLengthHeaderWithCommaValue() {
+ String requestStr = "GET /some/path HTTP/1.1\r\n" +
+ "Content-Length: 1,1\r\n\r\n" +
+ "b";
+ testInvalidHeaders0(requestStr);
+ }
+ @Test
+ public void testMultipleContentLengthHeadersWithFolding() {
+ String requestStr = "POST / HTTP/1.1\r\n" +
+ "Host: example.com\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 5\r\n" +
+ "Content-Length:\r\n" +
+ "\t6\r\n\r\n" +
+ "123456";
+ testInvalidHeaders0(requestStr);
+ }
+
+ @Test
+ public void testContentLengthHeaderAndChunked() {
+ String requestStr = "POST / HTTP/1.1\r\n" +
+ "Host: example.com\r\n" +
+ "Connection: close\r\n" +
+ "Content-Length: 5\r\n" +
+ "Transfer-Encoding: chunked\r\n\r\n" +
+ "0\r\n\r\n";
+ testInvalidHeaders0(requestStr);
+ }
+
+ private static void testInvalidHeaders0(String requestStr) {
+ EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder());
assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
HttpRequest request = channel.readInbound();
- System.err.println(request.headers().names().toString());
assertTrue(request.decoderResult().isFailure());
assertTrue(request.decoderResult().cause() instanceof IllegalArgumentException);
assertFalse(channel.finish());

127
CVE-2019-20445-2.patch Normal file
View File

@ -0,0 +1,127 @@
From 629034624626b722128e0fcc6b3ec9d406cb3706 Mon Sep 17 00:00:00 2001
From: Bennett Lynch <bennett.lynch@gmail.com>
Date: Mon, 10 Feb 2020 01:41:57 -0800
Subject: [PATCH] =?UTF-8?q?Remove=20"Content-Length"=20when=20decoding=20H?=
=?UTF-8?q?TTP/1.1=20message=20with=20both=20"Tra=E2=80=A6=20(#10003)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Motivation
As part of a recent commit for issue
https://github.com/netty/netty/issues/9861 the HttpObjectDecoder was
changed to throw an IllegalArgumentException (and produce a failed
decoder result) when decoding a message with both "Transfer-Encoding:
chunked" and "Content-Length".
While it seems correct for Netty to try to sanitize these types of
messages, the spec explicitly mentions that the Content-Length header
should be *removed* in this scenario.
Both Nginx 1.15.9 and Tomcat 9.0.31 also opt to remove the header:
https://github.com/apache/tomcat/blob/b693d7c1981fa7f51e58bc8c8e72e3fe80b7b773/java/org/apache/coyote/http11/Http11Processor.java#L747-L755
https://github.com/nginx/nginx/blob/0ad4393e30c119d250415cb769e3d8bc8dce5186/src/http/ngx_http_request.c#L1946-L1953
Modifications
* Change the default behavior from throwing an IllegalArgumentException
to removing the "Content-Length" header
* Extract the behavior to a new protected method,
handleChunkedEncodingWithContentLength(), that can be overridden to
change this behavior (or capture metrics)
Result
Messages of this nature will now be successfully decoded and have their
"Content-Length" header removed, rather than creating invalid messages
(decoder result failures). Users will be allowed to override and
configure this behavior.
---
.../handler/codec/http/HttpObjectDecoder.java | 42 ++++++++++++-------
.../codec/http/HttpRequestDecoderTest.java | 10 ++++-
2 files changed, 36 insertions(+), 16 deletions(-)
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
index 768bd3b26f5..04861043590 100644
--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
+++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
@@ -632,23 +632,9 @@ private State readHeaders(ByteBuf buffer) {
HttpUtil.setTransferEncodingChunked(message, false);
return State.SKIP_CONTROL_CHARS;
} else if (HttpUtil.isTransferEncodingChunked(message)) {
- // See https://tools.ietf.org/html/rfc7230#section-3.3.3
- //
- // If a message is received with both a Transfer-Encoding and a
- // Content-Length header field, the Transfer-Encoding overrides the
- // Content-Length. Such a message might indicate an attempt to
- // perform request smuggling (Section 9.5) or response splitting
- // (Section 9.4) and ought to be handled as an error. A sender MUST
- // remove the received Content-Length field prior to forwarding such
- // a message downstream.
- //
- // This is also what http_parser does:
- // https://github.com/nodejs/http-parser/blob/v2.9.2/http_parser.c#L1769
if (contentLengthValuesCount > 0 && message.protocolVersion() == HttpVersion.HTTP_1_1) {
- throw new IllegalArgumentException(
- "Both 'Content-Length: " + contentLength + "' and 'Transfer-Encoding: chunked' found");
+ handleTransferEncodingChunkedWithContentLength(message);
}
-
return State.READ_CHUNK_SIZE;
} else if (contentLength() >= 0) {
return State.READ_FIXED_LENGTH_CONTENT;
@@ -657,6 +643,32 @@ private State readHeaders(ByteBuf buffer) {
}
}
+ /**
+ * Invoked when a message with both a "Transfer-Encoding: chunked" and a "Content-Length" header field is detected.
+ * The default behavior is to <i>remove</i> the Content-Length field, but this method could be overridden
+ * to change the behavior (to, e.g., throw an exception and produce an invalid message).
+ * <p>
+ * See: https://tools.ietf.org/html/rfc7230#section-3.3.3
+ * <pre>
+ * If a message is received with both a Transfer-Encoding and a
+ * Content-Length header field, the Transfer-Encoding overrides the
+ * Content-Length. Such a message might indicate an attempt to
+ * perform request smuggling (Section 9.5) or response splitting
+ * (Section 9.4) and ought to be handled as an error. A sender MUST
+ * remove the received Content-Length field prior to forwarding such
+ * a message downstream.
+ * </pre>
+ * Also see:
+ * https://github.com/apache/tomcat/blob/b693d7c1981fa7f51e58bc8c8e72e3fe80b7b773/
+ * java/org/apache/coyote/http11/Http11Processor.java#L747-L755
+ * https://github.com/nginx/nginx/blob/0ad4393e30c119d250415cb769e3d8bc8dce5186/
+ * src/http/ngx_http_request.c#L1946-L1953
+ */
+ protected void handleTransferEncodingChunkedWithContentLength(HttpMessage message) {
+ message.headers().remove(HttpHeaderNames.CONTENT_LENGTH);
+ contentLength = Long.MIN_VALUE;
+ }
+
private long contentLength() {
if (contentLength == Long.MIN_VALUE) {
contentLength = HttpUtil.getContentLength(message, -1L);
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
index 2548af0e2af..f92a32d0ad9 100644
--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
+++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
@@ -408,7 +408,15 @@ public void testContentLengthHeaderAndChunked() {
"Content-Length: 5\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
"0\r\n\r\n";
- testInvalidHeaders0(requestStr);
+
+ EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder());
+ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
+ HttpRequest request = channel.readInbound();
+ assertFalse(request.decoderResult().isFailure());
+ assertTrue(request.headers().contains("Transfer-Encoding", "chunked", false));
+ assertFalse(request.headers().contains("Content-Length"));
+ LastHttpContent c = channel.readInbound();
+ assertFalse(channel.finish());
}
private static void testInvalidHeaders0(String requestStr) {

58
CVE-2019-20445-3.patch Normal file
View File

@ -0,0 +1,58 @@
From 5f68897880467c00f29495b0aa46ed19bf7a873c Mon Sep 17 00:00:00 2001
From: Artem Smotrakov <artem.smotrakov@gmail.com>
Date: Wed, 5 Feb 2020 14:33:28 +0100
Subject: [PATCH] Added tests for Transfer-Encoding header with whitespace
(#9997)
Motivation:
Need tests to ensure that CVE-2020-7238 is fixed.
Modifications:
Added two test cases into HttpRequestDecoderTest which check that
no whitespace is allowed before the Transfer-Encoding header.
Result:
Improved test coverage for #9861
---
.../codec/http/HttpRequestDecoderTest.java | 25 ++++++++++++++++++-
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
index 1e780b7959f..2548af0e2af 100644
--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
+++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
@@ -325,7 +325,30 @@ public void testTooLargeHeaders() {
public void testWhitespace() {
String requestStr = "GET /some/path HTTP/1.1\r\n" +
"Transfer-Encoding : chunked\r\n" +
- "Host: netty.io\n\r\n";
+ "Host: netty.io\r\n\r\n";
+ testInvalidHeaders0(requestStr);
+ }
+
+ @Test
+ public void testWhitespaceBeforeTransferEncoding01() {
+ String requestStr = "GET /some/path HTTP/1.1\r\n" +
+ " Transfer-Encoding : chunked\r\n" +
+ "Content-Length: 1\r\n" +
+ "Host: netty.io\r\n\r\n" +
+ "a";
+ testInvalidHeaders0(requestStr);
+ }
+
+ @Test
+ public void testWhitespaceBeforeTransferEncoding02() {
+ String requestStr = "POST / HTTP/1.1" +
+ " Transfer-Encoding : chunked\r\n" +
+ "Host: target.com" +
+ "Content-Length: 65\r\n\r\n" +
+ "0\r\n\r\n" +
+ "GET /maliciousRequest HTTP/1.1\r\n" +
+ "Host: evilServer.com\r\n" +
+ "Foo: x";
testInvalidHeaders0(requestStr);
}

488
CVE-2020-11612.patch Normal file
View File

@ -0,0 +1,488 @@
From ad6830f1e90735407ae153a0ee9bf8e223e40d14 Mon Sep 17 00:00:00 2001
From: Rich DiCroce <Rich.DiCroce@scientificgames.com>
Date: Fri, 31 Jan 2020 06:11:06 -0500
Subject: [PATCH] Allow a limit to be set on the decompressed buffer size
for ZlibDecoders (#9924)
Motivation:
It is impossible to know in advance how much memory will be needed to
decompress a stream of bytes that was compressed using the DEFLATE
algorithm. In theory, up to 1032 times the compressed size could be
needed. For untrusted input, an attacker could exploit this to exhaust
the memory pool.
Modifications:
ZlibDecoder and its subclasses now support an optional limit on the size
of the decompressed buffer. By default, if the limit is reached,
decompression stops and a DecompressionException is thrown. Behavior
upon reaching the limit is modifiable by subclasses in case they desire
something else.
Result:
The decompressed buffer can now be limited to a configurable size, thus
mitigating the possibility of memory pool exhaustion.
---
.../codec/compression/JZlibDecoder.java | 57 +++++++++++++++-
.../codec/compression/JdkZlibDecoder.java | 58 +++++++++++++++--
.../codec/compression/ZlibDecoder.java | 65 +++++++++++++++++++
.../handler/codec/compression/JZlibTest.java | 4 +-
.../codec/compression/JdkZlibTest.java | 4 +-
.../codec/compression/ZlibCrossTest1.java | 4 +-
.../codec/compression/ZlibCrossTest2.java | 4 +-
.../handler/codec/compression/ZlibTest.java | 57 +++++++++++++++-
8 files changed, 235 insertions(+), 18 deletions(-)
diff --git a/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java b/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java
index 5d23bb8..cfb104e 100644
--- a/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java
+++ b/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java
@@ -18,6 +18,7 @@ package io.netty.handler.codec.compression;
import com.jcraft.jzlib.Inflater;
import com.jcraft.jzlib.JZlib;
import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.ChannelHandlerContext;
import java.util.List;
@@ -34,7 +35,21 @@ public class JZlibDecoder extends ZlibDecoder {
* @throws DecompressionException if failed to initialize zlib
*/
public JZlibDecoder() {
- this(ZlibWrapper.ZLIB);
+ this(ZlibWrapper.ZLIB, 0);
+ }
+
+ /**
+ * Creates a new instance with the default wrapper ({@link ZlibWrapper#ZLIB})
+ * and specified maximum buffer allocation.
+ *
+ * @param maxAllocation
+ * Maximum size of the decompression buffer. Must be &gt;= 0.
+ * If zero, maximum size is decided by the {@link ByteBufAllocator}.
+ *
+ * @throws DecompressionException if failed to initialize zlib
+ */
+ public JZlibDecoder(int maxAllocation) {
+ this(ZlibWrapper.ZLIB, maxAllocation);
}
/**
@@ -43,6 +58,21 @@ public class JZlibDecoder extends ZlibDecoder {
* @throws DecompressionException if failed to initialize zlib
*/
public JZlibDecoder(ZlibWrapper wrapper) {
+ this(wrapper, 0);
+ }
+
+ /**
+ * Creates a new instance with the specified wrapper and maximum buffer allocation.
+ *
+ * @param maxAllocation
+ * Maximum size of the decompression buffer. Must be &gt;= 0.
+ * If zero, maximum size is decided by the {@link ByteBufAllocator}.
+ *
+ * @throws DecompressionException if failed to initialize zlib
+ */
+ public JZlibDecoder(ZlibWrapper wrapper, int maxAllocation) {
+ super(maxAllocation);
+
if (wrapper == null) {
throw new NullPointerException("wrapper");
}
@@ -61,6 +91,22 @@ public class JZlibDecoder extends ZlibDecoder {
* @throws DecompressionException if failed to initialize zlib
*/
public JZlibDecoder(byte[] dictionary) {
+ this(dictionary, 0);
+ }
+
+ /**
+ * Creates a new instance with the specified preset dictionary and maximum buffer allocation.
+ * The wrapper is always {@link ZlibWrapper#ZLIB} because it is the only format that
+ * supports the preset dictionary.
+ *
+ * @param maxAllocation
+ * Maximum size of the decompression buffer. Must be &gt;= 0.
+ * If zero, maximum size is decided by the {@link ByteBufAllocator}.
+ *
+ * @throws DecompressionException if failed to initialize zlib
+ */
+ public JZlibDecoder(byte[] dictionary, int maxAllocation) {
+ super(maxAllocation);
if (dictionary == null) {
throw new NullPointerException("dictionary");
}
@@ -110,11 +156,11 @@ public class JZlibDecoder extends ZlibDecoder {
final int oldNextInIndex = z.next_in_index;
// Configure output.
- ByteBuf decompressed = ctx.alloc().heapBuffer(inputLength << 1);
+ ByteBuf decompressed = prepareDecompressBuffer(ctx, null, inputLength << 1);
try {
loop: for (;;) {
- decompressed.ensureWritable(z.avail_in << 1);
+ decompressed = prepareDecompressBuffer(ctx, decompressed, z.avail_in << 1);
z.avail_out = decompressed.writableBytes();
z.next_out = decompressed.array();
z.next_out_index = decompressed.arrayOffset() + decompressed.writerIndex();
@@ -170,4 +216,9 @@ public class JZlibDecoder extends ZlibDecoder {
z.next_out = null;
}
}
+
+ @Override
+ protected void decompressionBufferExhausted(ByteBuf buffer) {
+ finished = true;
+ }
}
diff --git a/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java b/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java
index d9b4b91..74e6d5f 100644
--- a/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java
+++ b/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java
@@ -16,6 +16,7 @@
package io.netty.handler.codec.compression;
import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.ChannelHandlerContext;
import java.util.List;
@@ -63,7 +64,19 @@ public class JdkZlibDecoder extends ZlibDecoder {
* Creates a new instance with the default wrapper ({@link ZlibWrapper#ZLIB}).
*/
public JdkZlibDecoder() {
- this(ZlibWrapper.ZLIB, null);
+ this(ZlibWrapper.ZLIB, null, 0);
+ }
+
+ /**
+ * Creates a new instance with the default wrapper ({@link ZlibWrapper#ZLIB})
+ * and the specified maximum buffer allocation.
+ *
+ * @param maxAllocation
+ * Maximum size of the decompression buffer. Must be &gt;= 0.
+ * If zero, maximum size is decided by the {@link ByteBufAllocator}.
+ */
+ public JdkZlibDecoder(int maxAllocation) {
+ this(ZlibWrapper.ZLIB, null, maxAllocation);
}
/**
@@ -72,7 +85,20 @@ public class JdkZlibDecoder extends ZlibDecoder {
* supports the preset dictionary.
*/
public JdkZlibDecoder(byte[] dictionary) {
- this(ZlibWrapper.ZLIB, dictionary);
+ this(ZlibWrapper.ZLIB, dictionary, 0);
+ }
+
+ /**
+ * Creates a new instance with the specified preset dictionary and maximum buffer allocation.
+ * The wrapper is always {@link ZlibWrapper#ZLIB} because it is the only format that
+ * supports the preset dictionary.
+ *
+ * @param maxAllocation
+ * Maximum size of the decompression buffer. Must be &gt;= 0.
+ * If zero, maximum size is decided by the {@link ByteBufAllocator}.
+ */
+ public JdkZlibDecoder(byte[] dictionary, int maxAllocation) {
+ this(ZlibWrapper.ZLIB, dictionary, maxAllocation);
}
/**
@@ -81,10 +107,25 @@ public class JdkZlibDecoder extends ZlibDecoder {
* supported atm.
*/
public JdkZlibDecoder(ZlibWrapper wrapper) {
- this(wrapper, null);
+ this(wrapper, null, 0);
}
- private JdkZlibDecoder(ZlibWrapper wrapper, byte[] dictionary) {
+ /**
+ * Creates a new instance with the specified wrapper and maximum buffer allocation.
+ * Be aware that only {@link ZlibWrapper#GZIP}, {@link ZlibWrapper#ZLIB} and {@link ZlibWrapper#NONE} are
+ * supported atm.
+ *
+ * @param maxAllocation
+ * Maximum size of the decompression buffer. Must be &gt;= 0.
+ * If zero, maximum size is decided by the {@link ByteBufAllocator}.
+ */
+ public JdkZlibDecoder(ZlibWrapper wrapper, int maxAllocation) {
+ this(wrapper, null, maxAllocation);
+ }
+
+ private JdkZlibDecoder(ZlibWrapper wrapper, byte[] dictionary, int maxAllocation) {
+ super(maxAllocation);
+
if (wrapper == null) {
throw new NullPointerException("wrapper");
}
@@ -167,7 +208,7 @@ public class JdkZlibDecoder extends ZlibDecoder {
inflater.setInput(array);
}
- ByteBuf decompressed = ctx.alloc().heapBuffer(inflater.getRemaining() << 1);
+ ByteBuf decompressed = prepareDecompressBuffer(ctx, null,inflater.getRemaining() << 1);
try {
boolean readFooter = false;
while (!inflater.needsInput()) {
@@ -198,7 +239,7 @@ public class JdkZlibDecoder extends ZlibDecoder {
}
break;
} else {
- decompressed.ensureWritable(inflater.getRemaining() << 1);
+ decompressed = prepareDecompressBuffer(ctx, decompressed,inflater.getRemaining() << 1);
}
}
@@ -222,6 +263,11 @@ public class JdkZlibDecoder extends ZlibDecoder {
}
}
+ @Override
+ protected void decompressionBufferExhausted(ByteBuf buffer) {
+ finished = true;
+ }
+
@Override
protected void handlerRemoved0(ChannelHandlerContext ctx) throws Exception {
super.handlerRemoved0(ctx);
diff --git a/codec/src/main/java/io/netty/handler/codec/compression/ZlibDecoder.java b/codec/src/main/java/io/netty/handler/codec/compression/ZlibDecoder.java
index d01bc6b..26fd3e7 100644
--- a/codec/src/main/java/io/netty/handler/codec/compression/ZlibDecoder.java
+++ b/codec/src/main/java/io/netty/handler/codec/compression/ZlibDecoder.java
@@ -16,6 +16,8 @@
package io.netty.handler.codec.compression;
import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
+import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;
/**
@@ -23,9 +25,72 @@ import io.netty.handler.codec.ByteToMessageDecoder;
*/
public abstract class ZlibDecoder extends ByteToMessageDecoder {
+ /**
+ * Maximum allowed size of the decompression buffer.
+ */
+ protected final int maxAllocation;
+
+ /**
+ * Same as {@link #ZlibDecoder(int)} with maxAllocation = 0.
+ */
+ public ZlibDecoder() {
+ this(0);
+ }
+
+ /**
+ * Construct a new ZlibDecoder.
+ * @param maxAllocation
+ * Maximum size of the decompression buffer. Must be &gt;= 0.
+ * If zero, maximum size is decided by the {@link ByteBufAllocator}.
+ */
+ public ZlibDecoder(int maxAllocation) {
+ if (maxAllocation < 0) {
+ throw new IllegalArgumentException("maxAllocation must be >= 0");
+ }
+ this.maxAllocation = maxAllocation;
+ }
+
/**
* Returns {@code true} if and only if the end of the compressed stream
* has been reached.
*/
public abstract boolean isClosed();
+
+ /**
+ * Allocate or expand the decompression buffer, without exceeding the maximum allocation.
+ * Calls {@link #decompressionBufferExhausted(ByteBuf)} if the buffer is full and cannot be expanded further.
+ */
+ protected ByteBuf prepareDecompressBuffer(ChannelHandlerContext ctx, ByteBuf buffer, int preferredSize) {
+ if (buffer == null) {
+ if (maxAllocation == 0) {
+ return ctx.alloc().heapBuffer(preferredSize);
+ }
+
+ return ctx.alloc().heapBuffer(Math.min(preferredSize, maxAllocation), maxAllocation);
+ }
+
+ // this always expands the buffer if possible, even if the expansion is less than preferredSize
+ // we throw the exception only if the buffer could not be expanded at all
+ // this means that one final attempt to deserialize will always be made with the buffer at maxAllocation
+ if (buffer.ensureWritable(preferredSize, true) == 1) {
+ // buffer must be consumed so subclasses don't add it to output
+ // we therefore duplicate it when calling decompressionBufferExhausted() to guarantee non-interference
+ // but wait until after to consume it so the subclass can tell how much output is really in the buffer
+ decompressionBufferExhausted(buffer.duplicate());
+ buffer.skipBytes(buffer.readableBytes());
+ throw new DecompressionException("Decompression buffer has reached maximum size: " + buffer.maxCapacity());
+ }
+
+ return buffer;
+ }
+
+ /**
+ * Called when the decompression buffer cannot be expanded further.
+ * Default implementation is a no-op, but subclasses can override in case they want to
+ * do something before the {@link DecompressionException} is thrown, such as log the
+ * data that was decompressed so far.
+ */
+ protected void decompressionBufferExhausted(ByteBuf buffer) {
+ }
+
}
diff --git a/codec/src/test/java/io/netty/handler/codec/compression/JZlibTest.java b/codec/src/test/java/io/netty/handler/codec/compression/JZlibTest.java
index 28f3919..015559e 100644
--- a/codec/src/test/java/io/netty/handler/codec/compression/JZlibTest.java
+++ b/codec/src/test/java/io/netty/handler/codec/compression/JZlibTest.java
@@ -23,7 +23,7 @@ public class JZlibTest extends ZlibTest {
}
@Override
- protected ZlibDecoder createDecoder(ZlibWrapper wrapper) {
- return new JZlibDecoder(wrapper);
+ protected ZlibDecoder createDecoder(ZlibWrapper wrapper, int maxAllocation) {
+ return new JZlibDecoder(wrapper, maxAllocation);
}
}
diff --git a/codec/src/test/java/io/netty/handler/codec/compression/JdkZlibTest.java b/codec/src/test/java/io/netty/handler/codec/compression/JdkZlibTest.java
index 23f178d..fc53282 100644
--- a/codec/src/test/java/io/netty/handler/codec/compression/JdkZlibTest.java
+++ b/codec/src/test/java/io/netty/handler/codec/compression/JdkZlibTest.java
@@ -26,8 +26,8 @@ public class JdkZlibTest extends ZlibTest {
}
@Override
- protected ZlibDecoder createDecoder(ZlibWrapper wrapper) {
- return new JdkZlibDecoder(wrapper);
+ protected ZlibDecoder createDecoder(ZlibWrapper wrapper, int maxAllocation) {
+ return new JdkZlibDecoder(wrapper, maxAllocation);
}
@Test(expected = DecompressionException.class)
diff --git a/codec/src/test/java/io/netty/handler/codec/compression/ZlibCrossTest1.java b/codec/src/test/java/io/netty/handler/codec/compression/ZlibCrossTest1.java
index 9e16e1a..3c31274 100644
--- a/codec/src/test/java/io/netty/handler/codec/compression/ZlibCrossTest1.java
+++ b/codec/src/test/java/io/netty/handler/codec/compression/ZlibCrossTest1.java
@@ -23,7 +23,7 @@ public class ZlibCrossTest1 extends ZlibTest {
}
@Override
- protected ZlibDecoder createDecoder(ZlibWrapper wrapper) {
- return new JZlibDecoder(wrapper);
+ protected ZlibDecoder createDecoder(ZlibWrapper wrapper, int maxAllocation) {
+ return new JZlibDecoder(wrapper, maxAllocation);
}
}
diff --git a/codec/src/test/java/io/netty/handler/codec/compression/ZlibCrossTest2.java b/codec/src/test/java/io/netty/handler/codec/compression/ZlibCrossTest2.java
index 8717019..00c6e18 100644
--- a/codec/src/test/java/io/netty/handler/codec/compression/ZlibCrossTest2.java
+++ b/codec/src/test/java/io/netty/handler/codec/compression/ZlibCrossTest2.java
@@ -25,8 +25,8 @@ public class ZlibCrossTest2 extends ZlibTest {
}
@Override
- protected ZlibDecoder createDecoder(ZlibWrapper wrapper) {
- return new JdkZlibDecoder(wrapper);
+ protected ZlibDecoder createDecoder(ZlibWrapper wrapper, int maxAllocation) {
+ return new JdkZlibDecoder(wrapper, maxAllocation);
}
@Test(expected = DecompressionException.class)
diff --git a/codec/src/test/java/io/netty/handler/codec/compression/ZlibTest.java b/codec/src/test/java/io/netty/handler/codec/compression/ZlibTest.java
index 7c25ec4..9d79c81 100644
--- a/codec/src/test/java/io/netty/handler/codec/compression/ZlibTest.java
+++ b/codec/src/test/java/io/netty/handler/codec/compression/ZlibTest.java
@@ -15,7 +15,9 @@
*/
package io.netty.handler.codec.compression;
+import io.netty.buffer.AbstractByteBufAllocator;
import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
@@ -88,8 +90,12 @@ public abstract class ZlibTest {
rand.nextBytes(BYTES_LARGE);
}
+ protected ZlibDecoder createDecoder(ZlibWrapper wrapper) {
+ return createDecoder(wrapper, 0);
+ }
+
protected abstract ZlibEncoder createEncoder(ZlibWrapper wrapper);
- protected abstract ZlibDecoder createDecoder(ZlibWrapper wrapper);
+ protected abstract ZlibDecoder createDecoder(ZlibWrapper wrapper, int maxAllocation);
@Test
public void testGZIP2() throws Exception {
@@ -345,6 +351,25 @@ public abstract class ZlibTest {
testCompressLarge(ZlibWrapper.GZIP, ZlibWrapper.ZLIB_OR_NONE);
}
+ @Test
+ public void testMaxAllocation() throws Exception {
+ int maxAllocation = 1024;
+ ZlibDecoder decoder = createDecoder(ZlibWrapper.ZLIB, maxAllocation);
+ EmbeddedChannel chDecoder = new EmbeddedChannel(decoder);
+ TestByteBufAllocator alloc = new TestByteBufAllocator(chDecoder.alloc());
+ chDecoder.config().setAllocator(alloc);
+
+ try {
+ chDecoder.writeInbound(Unpooled.wrappedBuffer(deflate(BYTES_LARGE)));
+ fail("decompressed size > maxAllocation, so should have thrown exception");
+ } catch (DecompressionException e) {
+ assertTrue(e.getMessage().startsWith("Decompression buffer has reached maximum size"));
+ assertEquals(maxAllocation, alloc.getMaxAllocation());
+ assertTrue(decoder.isClosed());
+ assertFalse(chDecoder.finish());
+ }
+ }
+
private static byte[] gzip(byte[] bytes) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
GZIPOutputStream stream = new GZIPOutputStream(out);
@@ -360,4 +385,34 @@ public abstract class ZlibTest {
stream.close();
return out.toByteArray();
}
+
+ private static final class TestByteBufAllocator extends AbstractByteBufAllocator {
+ private ByteBufAllocator wrapped;
+ private int maxAllocation;
+
+ TestByteBufAllocator(ByteBufAllocator wrapped) {
+ this.wrapped = wrapped;
+ }
+
+ public int getMaxAllocation() {
+ return maxAllocation;
+ }
+
+ @Override
+ public boolean isDirectBufferPooled() {
+ return wrapped.isDirectBufferPooled();
+ }
+
+ @Override
+ protected ByteBuf newHeapBuffer(int initialCapacity, int maxCapacity) {
+ maxAllocation = Math.max(maxAllocation, maxCapacity);
+ return wrapped.heapBuffer(initialCapacity, maxCapacity);
+ }
+
+ @Override
+ protected ByteBuf newDirectBuffer(int initialCapacity, int maxCapacity) {
+ maxAllocation = Math.max(maxAllocation, maxCapacity);
+ return wrapped.directBuffer(initialCapacity, maxCapacity);
+ }
+ }
}
--
2.23.0

View File

@ -2,7 +2,7 @@
Name: netty Name: netty
Version: 4.1.13 Version: 4.1.13
Release: 8 Release: 9
Summary: Asynchronous event-driven network application Java framework Summary: Asynchronous event-driven network application Java framework
License: ASL 2.0 License: ASL 2.0
URL: https://netty.io/ URL: https://netty.io/
@ -11,6 +11,12 @@ Source1: codegen.bash
Patch0000: 0001-Remove-OpenSSL-parts-depending-on-tcnative.patch Patch0000: 0001-Remove-OpenSSL-parts-depending-on-tcnative.patch
Patch0001: 0002-Remove-NPN.patch Patch0001: 0002-Remove-NPN.patch
Patch0002: 0003-Remove-conscrypt-ALPN.patch Patch0002: 0003-Remove-conscrypt-ALPN.patch
Patch0003: CVE-2019-16869.patch
Patch0004: CVE-2019-20444.patch
Patch0005: CVE-2019-20445-1.patch
Patch0006: CVE-2019-20445-2.patch
Patch0007: CVE-2019-20445-3.patch
Patch0008: CVE-2020-11612.patch
BuildRequires: maven-local mvn(ant-contrib:ant-contrib) BuildRequires: maven-local mvn(ant-contrib:ant-contrib)
BuildRequires: mvn(com.jcraft:jzlib) mvn(commons-logging:commons-logging) BuildRequires: mvn(com.jcraft:jzlib) mvn(commons-logging:commons-logging)
BuildRequires: mvn(kr.motd.maven:os-maven-plugin) mvn(log4j:log4j:1.2.17) BuildRequires: mvn(kr.motd.maven:os-maven-plugin) mvn(log4j:log4j:1.2.17)
@ -127,6 +133,9 @@ export CFLAGS="$RPM_OPT_FLAGS" LDFLAGS="$RPM_LD_FLAGS"
%changelog %changelog
* Fri Dec 04 2020 caodongxia <caodongxia@huawei.com> - 4.1.13-9
- fix CVE-2019-16869 CVE-2019-20444 CVE-2019-20445 CVE-2020-11612
* Wed Aug 26 2020 yaokai <yaokai13@huawei.com> - 4.1.13-8 * Wed Aug 26 2020 yaokai <yaokai13@huawei.com> - 4.1.13-8
- Disable support for protobuf in the codecs module - Disable support for protobuf in the codecs module