tomcat/CVE-2023-45648.patch

Description: Align processing of trailer headers with standard processing
 Reject trailer headers whose names contain non-token characters or whose
 values contain control characters other than HTAB, instead of passing them
 through. Inconsistent trailer parsing could otherwise be abused for request
 smuggling (CVE-2023-45648).
Origin: upstream, https://github.com/apache/tomcat/commit/59583245639d8c42ae0009f4a4a70464d3ea70a0
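
For illustration only, a minimal, self-contained Java sketch of the character rules
this patch enforces; the class and method names below are invented and are not part
of the upstream change. The patch relies on org.apache.tomcat.util.http.parser.HttpParser
so that trailer header names consist solely of RFC 9110 token characters and trailer
header values contain no control characters other than HTAB:

    // Hypothetical helper mirroring the checks the patch adds: HttpParser.isToken()
    // for header names, HttpParser.isControl() with an HTAB exception for values.
    public class TrailerCharRules {

        // RFC 9110 tchar: DIGIT / ALPHA / the listed punctuation characters.
        static boolean isTokenChar(int c) {
            return (c >= '0' && c <= '9')
                    || (c >= 'A' && c <= 'Z')
                    || (c >= 'a' && c <= 'z')
                    || "!#$%&'*+-.^_`|~".indexOf(c) >= 0;
        }

        // RFC 9110 CTL octets: 0x00-0x1F and 0x7F.
        static boolean isControlChar(int c) {
            return c < 32 || c == 127;
        }

        // Every character of a trailer header name must be a token character.
        static boolean validName(String name) {
            return name.chars().allMatch(TrailerCharRules::isTokenChar);
        }

        // Trailer header values may contain HTAB but no other control character.
        static boolean validValue(String value) {
            return value.chars().allMatch(c -> c == '\t' || !isControlChar(c));
        }
    }
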
--- a/java/org/apache/coyote/http11/Http11InputBuffer.java
+++ b/java/org/apache/coyote/http11/Http11InputBuffer.java
@@ -818,6 +818,12 @@
      */
     private HeaderParseStatus parseHeader() throws IOException {
+        /*
+         * Implementation note: Any changes to this method probably need to be echoed in
+         * ChunkedInputFilter.parseHeader(). Why not use a common implementation? In short, this code uses non-blocking
+         * reads whereas ChunkedInputFilter using blocking reads. The code is just different enough that a common
+         * implementation wasn't viewed as practical.
+         */
         //
         // Check for blank line
         //
         byte chr = 0;
         byte prevChr = 0;
         while (headerParsePos == HeaderParsePosition.HEADER_START) {
             // Read new bytes if needed
@@ -950,7 +956,7 @@
                 } else if (prevChr == Constants.CR) {
                     // Invalid value - also need to delete header
                     return skipLine(true);
-                } else if (chr != Constants.HT && HttpParser.isControl(chr)) {
+                } else if (HttpParser.isControl(chr) && chr != Constants.HT) {
                     // Invalid value - also need to delete header
                     return skipLine(true);
                 } else if (chr == Constants.SP || chr == Constants.HT) {
--- a/java/org/apache/coyote/http11/filters/ChunkedInputFilter.java
+++ b/java/org/apache/coyote/http11/filters/ChunkedInputFilter.java
@@ -30,6 +30,7 @@
 import org.apache.coyote.http11.InputFilter;
 import org.apache.tomcat.util.buf.ByteChunk;
 import org.apache.tomcat.util.buf.HexUtils;
+import org.apache.tomcat.util.http.parser.HttpParser;
 import org.apache.tomcat.util.net.ApplicationBufferHandler;
 import org.apache.tomcat.util.res.StringManager;
@@ -443,6 +444,13 @@
     private boolean parseHeader() throws IOException {
+        /*
+         * Implementation note: Any changes to this method probably need to be echoed in
+         * Http11InputBuffer.parseHeader(). Why not use a common implementation? In short, this code uses blocking
+         * reads whereas Http11InputBuffer using non-blocking reads. The code is just different enough that a common
+         * implementation wasn't viewed as practical.
+         */
+
         Map<String,String> headers = request.getTrailerFields();
         byte chr = 0;
@@ -489,6 +497,9 @@
             if (chr == Constants.COLON) {
                 colon = true;
+            } else if (!HttpParser.isToken(chr)) {
+                // Non-token characters are illegal in header names
+                throw new IOException(sm.getString("chunkedInputFilter.invalidTrailerHeaderName"));
             } else {
                 trailingHeaders.append(chr);
             }
@@ -550,7 +561,9 @@
             if (chr == Constants.CR || chr == Constants.LF) {
                 parseCRLF(true);
                 eol = true;
-            } else if (chr == Constants.SP) {
+            } else if (HttpParser.isControl(chr) && chr != Constants.HT) {
+                throw new IOException(sm.getString("chunkedInputFilter.invalidTrailerHeaderValue"));
+            } else if (chr == Constants.SP || chr == Constants.HT) {
                 trailingHeaders.append(chr);
             } else {
                 trailingHeaders.append(chr);
--- a/java/org/apache/coyote/http11/filters/LocalStrings.properties
+++ b/java/org/apache/coyote/http11/filters/LocalStrings.properties
@@ -21,6 +21,8 @@
 chunkedInputFilter.invalidCrlfNoCR=Invalid end of line sequence (No CR before LF)
 chunkedInputFilter.invalidCrlfNoData=Invalid end of line sequence (no data available to read)
 chunkedInputFilter.invalidHeader=Invalid chunk header
+chunkedInputFilter.invalidTrailerHeaderName=Invalid trailer header name (non-token character in name)
+chunkedInputFilter.invalidTrailerHeaderValue=Invalid trailer header value (control character in value)
 chunkedInputFilter.maxExtension=maxExtensionSize exceeded
 chunkedInputFilter.maxTrailer=maxTrailerSize exceeded
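
A short usage example for the hypothetical TrailerCharRules sketch near the top of this
file, again purely illustrative: it checks one well-formed trailer and one whose value
carries a control character, the case the patched ChunkedInputFilter now rejects with
the chunkedInputFilter.invalidTrailerHeaderValue message.

    public class TrailerCharRulesDemo {
        public static void main(String[] args) {
            // Invented sample trailers; "\u000B" (vertical tab) is a control character
            // other than HTAB, so the second one would be rejected by the patched code.
            check("x-checksum", "2aae6c35c94fcfb4");
            check("x-note", "line1\u000Bline2");
        }

        static void check(String name, String value) {
            boolean ok = TrailerCharRules.validName(name) && TrailerCharRules.validValue(value);
            System.out.println(name + ": " + (ok ? "accepted" : "rejected"));
        }
    }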