Compare commits
10 Commits (2d9c667429 ... 4ea1823e9f)

| Author | SHA1 | Date |
|---|---|---|
| | 4ea1823e9f | |
| | de7abae06c | |
| | c0233bd321 | |
| | d6ed65db3d | |
| | f9dc70f006 | |
| | 7c3bed9978 | |
| | 05a023da84 | |
| | 6e8699454c | |
| | e5a2eb540b | |
| | 79813e0a42 | |
@@ -1,122 +0,0 @@
From 5921355e474ffbff2cb577c3622ce0e686e8996a Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Sat, 11 Mar 2023 05:48:14 +0000
Subject: [PATCH] Replaced clientReplyContext::tempBuffer with old_reqofs
 (#1304)

The tempBuffer data member was not actually used as a buffer. We only
used its offset field, and only for saving reqofs (which has a different
type than tempBuffer.offset!). Replaced the buffer with old_reqofs,
consistent with the rest of the "saved stale entry state" code.

Also fixed old_reqsize type to match reqsize and grouped that member
with the other private "saved stale entry state" fields.

Bad old types probably did not trigger runtime failures because the
associated saved numbers are saved at the very beginning of fetching the
entry, when all these accumulation-related counters are still small.

The remaining reqofs and reqsize types are wrong for platforms where
size_t is not uint64_t, but fixing that deserves a dedicated change. For
now, we just made the types of "old_" and "current" members consistent.

Reference: https://github.com/squid-cache/squid/commit/5921355e474ffbff2cb577c3622ce0e686e8996a
Conflict: NA
---
 src/client_side_reply.cc | 12 ++++++------
 src/client_side_reply.h  |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/client_side_reply.cc b/src/client_side_reply.cc
index 0004137cbc9..606a3ecafff 100644
--- a/src/client_side_reply.cc
+++ b/src/client_side_reply.cc
@@ -66,7 +66,6 @@ clientReplyContext::~clientReplyContext()
     /* old_entry might still be set if we didn't yet get the reply
      * code in HandleIMSReply() */
     removeStoreReference(&old_sc, &old_entry);
-    safe_free(tempBuffer.data);
     cbdataReferenceDone(http);
     HTTPMSGUNLOCK(reply);
 }
@@ -76,7 +75,6 @@ clientReplyContext::clientReplyContext(ClientHttpRequest *clientContext) :
     http(cbdataReference(clientContext)),
     headers_sz(0),
     sc(nullptr),
-    old_reqsize(0),
     reqsize(0),
     reqofs(0),
     ourNode(nullptr),
@@ -84,6 +82,8 @@ clientReplyContext::clientReplyContext(ClientHttpRequest *clientContext) :
     old_entry(nullptr),
     old_sc(nullptr),
     old_lastmod(-1),
+    old_reqofs(0),
+    old_reqsize(0),
     deleting(false),
     collapsedRevalidation(crNone)
 {
@@ -202,7 +202,7 @@ clientReplyContext::saveState()
     old_lastmod = http->request->lastmod;
     old_etag = http->request->etag;
     old_reqsize = reqsize;
-    tempBuffer.offset = reqofs;
+    old_reqofs = reqofs;
     /* Prevent accessing the now saved entries */
     http->storeEntry(nullptr);
     sc = nullptr;
@@ -219,7 +219,7 @@ clientReplyContext::restoreState()
     http->storeEntry(old_entry);
     sc = old_sc;
     reqsize = old_reqsize;
-    reqofs = tempBuffer.offset;
+    reqofs = old_reqofs;
     http->request->lastmod = old_lastmod;
     http->request->etag = old_etag;
     /* Prevent accessed the old saved entries */
@@ -228,7 +228,7 @@ clientReplyContext::restoreState()
     old_lastmod = -1;
     old_etag.clean();
     old_reqsize = 0;
-    tempBuffer.offset = 0;
+    old_reqofs = 0;
 }
 
 void
@@ -377,7 +377,7 @@ clientReplyContext::sendClientUpstreamResponse()
     http->storeEntry()->clearPublicKeyScope();
 
     /* here the data to send is the data we just received */
-    tempBuffer.offset = 0;
+    old_reqofs = 0;
     old_reqsize = 0;
     /* sendMoreData tracks the offset as well.
      * Force it back to zero */
diff --git a/src/client_side_reply.h b/src/client_side_reply.h
index 68b45715b33..32a38bc95e1 100644
--- a/src/client_side_reply.h
+++ b/src/client_side_reply.h
@@ -74,8 +74,6 @@ class clientReplyContext : public RefCountable, public StoreClient
     /// Not to be confused with ClientHttpRequest::Out::headers_sz.
     int headers_sz;
     store_client *sc; /* The store_client we're using */
-    StoreIOBuffer tempBuffer; /* For use in validating requests via IMS */
-    int old_reqsize; /* ... again, for the buffer */
     size_t reqsize;
     size_t reqofs;
     char tempbuf[HTTP_REQBUF_SZ]; ///< a temporary buffer if we need working storage
@@ -135,11 +133,13 @@ class clientReplyContext : public RefCountable, public StoreClient
     /// TODO: Exclude internal Store match bans from the "mismatch" category.
     const char *firstStoreLookup_ = nullptr;
 
+    /* (stale) cache hit information preserved during IMS revalidation */
     StoreEntry *old_entry;
-    /* ... for entry to be validated */
     store_client *old_sc;
     time_t old_lastmod;
     String old_etag;
+    size_t old_reqofs;
+    size_t old_reqsize;
 
     bool deleting;
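Editorial note: the type mismatch this commit message describes is easy to reproduce in isolation. Below is a minimal standalone sketch (illustrative only, not Squid code; assumes a 64-bit platform) of how saving a size_t counter into a narrower, differently typed field silently truncates once the counter grows:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        // A size_t offset larger than INT_MAX, as accumulation counters can become
        const std::size_t reqofs = (1ULL << 32) + 7;
        // Saving it through a narrower int field, like the old old_reqsize, truncates
        const int saved = static_cast<int>(reqofs);
        std::cout << reqofs << " != " << saved << '\n'; // typically 4294967303 != 7
    }

As the commit notes, the bug stayed latent only because the values are saved while still small.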
[One file's diff suppressed because it is too large.]
@@ -1,39 +0,0 @@
From b70f864940225dfe69f9f653f948e787f99c3810 Mon Sep 17 00:00:00 2001
From: Andreas Weigel <andreas.weigel@securepoint.de>
Date: Wed, 18 Oct 2023 04:14:31 +0000
Subject: [PATCH] Fix validation of certificates with CN=* (#1523)

The bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/
where it was filed as "Buffer UnderRead in SSL CN Parsing".

Conflict: NA
Reference: https://github.com/squid-cache/squid/commit/b70f864940225dfe69f9f653f948e787f99c3810
---
 src/anyp/Uri.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
index 3eed2366abd..ef77d4f766b 100644
--- a/src/anyp/Uri.cc
+++ b/src/anyp/Uri.cc
@@ -175,6 +175,10 @@ urlInitialize(void)
     assert(0 == matchDomainName("*.foo.com", ".foo.com", mdnHonorWildcards));
     assert(0 != matchDomainName("*.foo.com", "foo.com", mdnHonorWildcards));
 
+    assert(0 != matchDomainName("foo.com", ""));
+    assert(0 != matchDomainName("foo.com", "", mdnHonorWildcards));
+    assert(0 != matchDomainName("foo.com", "", mdnRejectSubsubDomains));
+
     /* more cases? */
 }
 
@@ -828,6 +832,8 @@ matchDomainName(const char *h, const char *d, MatchDomainNameFlags flags)
         return -1;
 
     dl = strlen(d);
+    if (dl == 0)
+        return 1;
 
     /*
     * Start at the ends of the two strings and work towards the
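Editorial note: a minimal standalone sketch (not Squid's code; simplified to an exact, end-anchored comparison) of why an empty domain string triggers a buffer under-read here. matchDomainName() walks the two strings from their ends, so its first comparison reads d[dl - 1], which with dl == 0 is d[-1]:

    #include <cassert>
    #include <cstring>

    // Returns 0 on match, nonzero otherwise. Without the dl == 0 guard,
    // the first loop iteration would read d[dl - 1] == d[-1].
    static int matchFromEnd(const char *h, const char *d)
    {
        auto hl = std::strlen(h);
        auto dl = std::strlen(d);
        if (dl == 0)
            return 1; // mirror of the patch's "if (dl == 0) return 1;"
        while (hl > 0 && dl > 0) {
            if (h[--hl] != d[--dl])
                return 1;
        }
        return (hl == 0 && dl == 0) ? 0 : 1;
    }

    int main()
    {
        assert(matchFromEnd("foo.com", "foo.com") == 0);
        assert(matchFromEnd("foo.com", "") != 0); // the CN="" trigger case
    }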
@@ -1,180 +0,0 @@
From 6cfa10d94ca15a764a1d975597d8024582ef19be Mon Sep 17 00:00:00 2001
From: Amos Jeffries <yadij@users.noreply.github.com>
Date: Fri, 13 Oct 2023 08:44:16 +0000
Subject: [PATCH] RFC 9112: Improve HTTP chunked encoding compliance (#1498)

Reference: http://www.squid-cache.org/Versions/v6/SQUID-2023_1.patch
Conflict: NA
---
 src/http/one/Parser.cc          |  8 +-------
 src/http/one/Parser.h           |  4 +---
 src/http/one/TeChunkedParser.cc | 23 ++++++++++++++++++-----
 src/parser/Tokenizer.cc         | 12 ++++++++++++
 src/parser/Tokenizer.h          |  7 +++++++
 5 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index 964371b4e..b1908316a 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -65,16 +65,10 @@ Http::One::Parser::DelimiterCharacters()
 void
 Http::One::Parser::skipLineTerminator(Tokenizer &tok) const
 {
-    if (tok.skip(Http1::CrLf()))
-        return;
-
     if (Config.onoff.relaxed_header_parser && tok.skipOne(CharacterSet::LF))
         return;
 
-    if (tok.atEnd() || (tok.remaining().length() == 1 && tok.remaining().at(0) == '\r'))
-        throw InsufficientInput();
-
-    throw TexcHere("garbage instead of CRLF line terminator");
+    tok.skipRequired("line-terminating CRLF", Http1::CrLf());
 }
 
 /// all characters except the LF line terminator
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index 5892a7a59..503c61d3f 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -124,9 +124,7 @@ protected:
      * detect and skip the CRLF or (if tolerant) LF line terminator
      * consume from the tokenizer.
      *
-     * \throws exception on bad or InsuffientInput.
-     * \retval true only if line terminator found.
-     * \retval false incomplete or missing line terminator, need more data.
+     * \throws exception on bad or InsufficientInput
      */
     void skipLineTerminator(Tokenizer &) const;
 
diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index d9138fe9a..9cce10fdc 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -91,6 +91,11 @@ Http::One::TeChunkedParser::parseChunkSize(Tokenizer &tok)
 {
     Must(theChunkSize <= 0); // Should(), really
 
+    static const SBuf bannedHexPrefixLower("0x");
+    static const SBuf bannedHexPrefixUpper("0X");
+    if (tok.skip(bannedHexPrefixLower) || tok.skip(bannedHexPrefixUpper))
+        throw TextException("chunk starts with 0x", Here());
+
     int64_t size = -1;
     if (tok.int64(size, 16, false) && !tok.atEnd()) {
         if (size < 0)
@@ -121,7 +126,7 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
     // bad or insufficient input, like in the code below. TODO: Expand up.
     try {
         parseChunkExtensions(tok); // a possibly empty chunk-ext list
-        skipLineTerminator(tok);
+        tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
         buf_ = tok.remaining();
         parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
         return true;
@@ -132,12 +137,14 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
     // other exceptions bubble up to kill message parsing
 }
 
-/// Parses the chunk-ext list (RFC 7230 section 4.1.1 and its Errata #4667):
+/// Parses the chunk-ext list (RFC 9112 section 7.1.1:
 /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
 void
-Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
+Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
 {
     do {
+        auto tok = callerTok;
+
         ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
 
         if (!tok.skip(';'))
@@ -145,6 +152,7 @@ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
 
         parseOneChunkExtension(tok);
         buf_ = tok.remaining(); // got one extension
+        callerTok = tok;
     } while (true);
 }
 
@@ -158,11 +166,14 @@ Http::One::ChunkExtensionValueParser::Ignore(Tokenizer &tok, const SBuf &extName
 /// Parses a single chunk-ext list element:
 /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
 void
-Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
+Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &callerTok)
 {
+    auto tok = callerTok;
+
     ParseBws(tok); // Bug 4492: ICAP servers send SP before chunk-ext-name
 
     const auto extName = tok.prefix("chunk-ext-name", CharacterSet::TCHAR);
+    callerTok = tok; // in case we determine that this is a valueless chunk-ext
 
     ParseBws(tok);
 
@@ -176,6 +187,8 @@ Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
         customExtensionValueParser->parse(tok, extName);
     else
         ChunkExtensionValueParser::Ignore(tok, extName);
+
+    callerTok = tok;
 }
 
 bool
@@ -209,7 +222,7 @@ Http::One::TeChunkedParser::parseChunkEnd(Tokenizer &tok)
     Must(theLeftBodySize == 0); // Should(), really
 
     try {
-        skipLineTerminator(tok);
+        tok.skipRequired("chunk CRLF", Http1::CrLf());
         buf_ = tok.remaining(); // parse checkpoint
         theChunkSize = 0; // done with the current chunk
         parsingStage_ = Http1::HTTP_PARSE_CHUNK_SZ;
diff --git a/src/parser/Tokenizer.cc b/src/parser/Tokenizer.cc
index 2bac7fb1f..2843a44af 100644
--- a/src/parser/Tokenizer.cc
+++ b/src/parser/Tokenizer.cc
@@ -147,6 +147,18 @@ Parser::Tokenizer::skipAll(const CharacterSet &tokenChars)
     return success(prefixLen);
 }
 
+void
+Parser::Tokenizer::skipRequired(const char *description, const SBuf &tokenToSkip)
+{
+    if (skip(tokenToSkip) || tokenToSkip.isEmpty())
+        return;
+
+    if (tokenToSkip.startsWith(buf_))
+        throw InsufficientInput();
+
+    throw TextException(ToSBuf("cannot skip ", description), Here());
+}
+
 bool
 Parser::Tokenizer::skipOne(const CharacterSet &chars)
 {
diff --git a/src/parser/Tokenizer.h b/src/parser/Tokenizer.h
index 7bae1ccbb..3cfa7dd6c 100644
--- a/src/parser/Tokenizer.h
+++ b/src/parser/Tokenizer.h
@@ -115,6 +115,13 @@ public:
      */
     SBuf::size_type skipAll(const CharacterSet &discardables);
 
+    /** skips a given character sequence (string);
+     * does nothing if the sequence is empty
+     *
+     * \throws exception on mismatching prefix or InsufficientInput
+     */
+    void skipRequired(const char *description, const SBuf &tokenToSkip);
+
     /** Removes a single trailing character from the set.
      *
      * \return whether a character was removed
--
2.25.1
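Editorial note: the recurring "auto tok = callerTok; ... callerTok = tok;" pattern in this patch is a copy-then-commit idiom: parsing proceeds on a copy of the tokenizer, and the caller's tokenizer is updated only after a complete element has been consumed, so a thrown InsufficientInput leaves the caller's parsing position untouched. A toy standalone sketch (simplified types and names, not Squid's Parser::Tokenizer):

    #include <cstdio>
    #include <stdexcept>
    #include <string_view>

    // Minimal stand-in for a tokenizer: a view that shrinks as input is consumed.
    struct Tokenizer {
        std::string_view buf;
        bool skip(char c) {
            if (!buf.empty() && buf.front() == c) { buf.remove_prefix(1); return true; }
            return false;
        }
    };

    struct InsufficientInput : std::runtime_error {
        InsufficientInput() : std::runtime_error("need more data") {}
    };

    // Parse ";x" all-or-nothing: work on a copy, commit only on full success.
    static void parseOneExtension(Tokenizer &callerTok)
    {
        auto tok = callerTok;          // work on a copy, as the patch does
        if (!tok.skip(';'))
            return;                    // no extension; caller position unchanged
        if (!tok.skip('x'))
            throw InsufficientInput(); // incomplete: caller may retry with more data
        callerTok = tok;               // commit only now
    }

    int main()
    {
        Tokenizer t{";x rest"};
        parseOneExtension(t);
        std::printf("remaining: '%.*s'\n", (int)t.buf.size(), t.buf.data()); // " rest"
    }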
@@ -1,42 +0,0 @@
From dc0e10bec3334053c1a5297e50dd7052ea18aef0 Mon Sep 17 00:00:00 2001
From: Alex Bason <nonsleepr@gmail.com>
Date: Sun, 15 Oct 2023 13:04:47 +0000
Subject: [PATCH] Fix stack buffer overflow when parsing Digest Authorization
 (#1517)

The bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/digest-overflow.html
where it was filed as "Stack Buffer Overflow in Digest Authentication".

Reference: http://www.squid-cache.org/Versions/v6/SQUID-2023_3.patch
Conflict: NA
---
 src/auth/digest/Config.cc | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
index f00e2ba68..3c070d242 100644
--- a/src/auth/digest/Config.cc
+++ b/src/auth/digest/Config.cc
@@ -827,11 +827,15 @@ Auth::Digest::Config::decode(char const *proxy_auth, const HttpRequest *request,
             break;
 
         case DIGEST_NC:
-            if (value.size() != 8) {
+            if (value.size() == 8) {
+                // for historical reasons, the nc value MUST be exactly 8 bytes
+                static_assert(sizeof(digest_request->nc) == 8 + 1);
+                xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
+                debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
+            } else {
                 debugs(29, 9, "Invalid nc '" << value << "' in '" << temp << "'");
+                digest_request->nc[0] = 0;
             }
-            xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
-            xstrncpy: debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
             break;
 
         case DIGEST_CNONCE:
--
2.25.1
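Editorial note: the old code logged oversized nc values but copied them anyway, overflowing the fixed-size stack field. A reduced standalone sketch of the corrected shape (field and helper names here are illustrative, not Squid's):

    #include <cstring>
    #include <string>

    struct DigestRequest {
        char nc[8 + 1]; // exactly 8 nc bytes plus the terminating NUL
    };

    // Copy only when the client-sent value is exactly 8 bytes; otherwise
    // clear the field instead of writing past the buffer.
    static void setNonceCount(DigestRequest &req, const std::string &value)
    {
        static_assert(sizeof(req.nc) == 8 + 1, "nc holds 8 bytes + NUL");
        if (value.size() == 8) {
            std::memcpy(req.nc, value.data(), 8);
            req.nc[8] = '\0';
        } else {
            req.nc[0] = '\0'; // reject: copying a longer value would overflow
        }
    }

    int main()
    {
        DigestRequest req;
        setNonceCount(req, "00000001");         // valid: exactly 8 bytes
        setNonceCount(req, "0000000000000001"); // rejected, no overflow
    }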
@@ -1,49 +0,0 @@
From 7de01969a793b2fdb476e354a9fcda272d400d27 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 25 May 2023 02:10:28 +0000
Subject: [PATCH] Fix userinfo percent-encoding (#1367)

%X expects an unsigned int, and that is what we were giving it. However,
to get to the correct unsigned int value from a (signed) char, one has
to cast to an unsigned char (or equivalent) first.

Broken since inception in commit 7b75100.

Also adjusted similar (commented out) ext_edirectory_userip_acl code.

Reference: http://www.squid-cache.org/Versions/v6/SQUID-2023_5.patch
Conflict: NA
---
 src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc | 2 +-
 src/anyp/Uri.cc                                                 | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc b/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc
index bf124d24f..e3f33e209 100644
--- a/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc
+++ b/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc
@@ -1555,7 +1555,7 @@ MainSafe(int argc, char **argv)
         /* BINARY DEBUGGING *
         local_printfx("while() -> bufa[%" PRIuSIZE "]: %s", k, bufa);
         for (i = 0; i < k; ++i)
-            local_printfx("%02X", bufa[i]);
+            local_printfx("%02X", static_cast<unsigned int>(static_cast<unsigned char>(bufa[i])));
         local_printfx("\n");
         * BINARY DEBUGGING */
         /* Check for CRLF */
diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
index e37293996..eca2c2357 100644
--- a/src/anyp/Uri.cc
+++ b/src/anyp/Uri.cc
@@ -71,7 +71,7 @@ AnyP::Uri::Encode(const SBuf &buf, const CharacterSet &ignore)
     while (!tk.atEnd()) {
         // TODO: Add Tokenizer::parseOne(void).
         const auto ch = tk.remaining()[0];
-        output.appendf("%%%02X", static_cast<unsigned int>(ch)); // TODO: Optimize using a table
+        output.appendf("%%%02X", static_cast<unsigned int>(static_cast<unsigned char>(ch))); // TODO: Optimize using a table
         (void)tk.skip(ch);
 
         if (tk.prefix(goodSection, ignore))
--
2.25.1
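Editorial note: the sign-extension effect the commit message describes is easy to see in a few lines. A standalone demonstration (the exact output depends on whether plain char is signed on the platform):

    #include <cstdio>

    int main()
    {
        const char ch = '\xe9'; // any byte >= 0x80; negative where char is signed
        std::printf("%%%02X\n", static_cast<unsigned int>(ch));
        // typically prints %FFFFFFE9: sign extension corrupts the encoding
        std::printf("%%%02X\n", static_cast<unsigned int>(static_cast<unsigned char>(ch)));
        // prints %E9: cast through unsigned char first, exactly as the patch does
    }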
@@ -1,34 +0,0 @@
From deee944f9a12c9fd399ce52f3e2526bb573a9470 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Wed, 25 Oct 2023 19:41:45 +0000
Subject: [PATCH] RFC 1123: Fix date parsing (#1538)

The bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/datetime-overflow.html
where it was filed as "1-Byte Buffer OverRead in RFC 1123 date/time
Handling".

Conflict: NA
Reference: https://github.com/squid-cache/squid/commit/deee944f9a12c9fd399ce52f3e2526bb573a9470
---
 src/time/rfc1123.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/time/rfc1123.cc b/src/time/rfc1123.cc
index d89d22262f6..7524959edb0 100644
--- a/src/time/rfc1123.cc
+++ b/src/time/rfc1123.cc
@@ -50,7 +50,13 @@ make_month(const char *s)
     char month[3];
 
     month[0] = xtoupper(*s);
+    if (!month[0])
+        return -1; // protects *(s + 1) below
+
     month[1] = xtolower(*(s + 1));
+    if (!month[1])
+        return -1; // protects *(s + 2) below
+
     month[2] = xtolower(*(s + 2));
 
     for (i = 0; i < 12; i++)
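Editorial note: reading three bytes of a month name without checking for the terminating NUL reads past the end of a 1- or 2-character input. A minimal standalone sketch of the guarded pattern (using <cctype> in place of Squid's xtoupper/xtolower; the month-table lookup is omitted):

    #include <cctype>

    static int monthPrefixOk(const char *s)
    {
        char month[3];
        month[0] = static_cast<char>(std::toupper(static_cast<unsigned char>(*s)));
        if (!month[0])
            return -1; // stop before reading *(s + 1)
        month[1] = static_cast<char>(std::tolower(static_cast<unsigned char>(*(s + 1))));
        if (!month[1])
            return -1; // stop before reading *(s + 2)
        month[2] = static_cast<char>(std::tolower(static_cast<unsigned char>(*(s + 2))));
        return month[2] ? 0 : -1; // caller would match month[] against names
    }

    int main()
    {
        return (monthPrefixOk("Jan") == 0 && monthPrefixOk("J") == -1) ? 0 : 1;
    }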
@@ -1,85 +0,0 @@
From 6014c6648a2a54a4ecb7f952ea1163e0798f9264 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Fri, 27 Oct 2023 21:27:20 +0000
Subject: [PATCH] Exit without asserting when helper process startup fails
 (#1543)

... to dup() after fork() and before execvp().

Assertions are for handling program logic errors. Helper initialization
code already handled system call errors correctly (i.e. by exiting the
newly created helper process with an error), except for a couple of
assert()s that could be triggered by dup(2) failures.

This bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/ipc-assert.html
where it was filed as 'Assertion in Squid "Helper" Process Creator'.

Conflict: NA
Reference: https://github.com/squid-cache/squid/commit/6014c6648a2a54a4ecb7f952ea1163e0798f9264
---
 src/ipc.cc | 32 ++++++++++++++++++++++++++------
 1 file changed, 26 insertions(+), 6 deletions(-)

diff --git a/src/ipc.cc b/src/ipc.cc
index 40d34b4755a..1afc4d5cf3c 100644
--- a/src/ipc.cc
+++ b/src/ipc.cc
@@ -22,6 +22,11 @@
 
 #include <chrono>
 #include <thread>
+#include <cstdlib>
+
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
 
 static const char *hello_string = "hi there\n";
 #ifndef HELLO_BUF_SZ
@@ -362,6 +367,22 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
         }
 
         PutEnvironment();
+
+        // A dup(2) wrapper that reports and exits the process on errors. The
+        // exiting logic is only suitable for this child process context.
+        const auto dupOrExit = [prog,name](const int oldFd) {
+            const auto newFd = dup(oldFd);
+            if (newFd < 0) {
+                const auto savedErrno = errno;
+                debugs(54, DBG_CRITICAL, "ERROR: Helper process initialization failure: " << name <<
+                       Debug::Extra << "helper (CHILD) PID: " << getpid() <<
+                       Debug::Extra << "helper program name: " << prog <<
+                       Debug::Extra << "dup(2) system call error for FD " << oldFd << ": " << xstrerr(savedErrno));
+                _exit(EXIT_FAILURE);
+            }
+            return newFd;
+        };
+
         /*
          * This double-dup stuff avoids problems when one of
          *  crfd, cwfd, or debug_log are in the rage 0-2.
@@ -369,17 +390,16 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
 
         do {
             /* First make sure 0-2 is occupied by something. Gets cleaned up later */
-            x = dup(crfd);
-            assert(x > -1);
-        } while (x < 3 && x > -1);
+            x = dupOrExit(crfd);
+        } while (x < 3);
 
         close(x);
 
-        t1 = dup(crfd);
+        t1 = dupOrExit(crfd);
 
-        t2 = dup(cwfd);
+        t2 = dupOrExit(cwfd);
 
-        t3 = dup(fileno(debug_log));
+        t3 = dupOrExit(fileno(debug_log));
 
         assert(t1 > 2 && t2 > 2 && t3 > 2);
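Editorial note: the general pattern the patch applies is worth stating on its own: in a freshly fork()ed child, a failing system call is an environment error, not a program logic error, so the child should report it and _exit() rather than assert(). A generic standalone sketch (POSIX assumed; not Squid's debugs() machinery):

    #include <cerrno>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <unistd.h>

    // dup() wrapper suitable only for a child-process context.
    static int dupOrExit(const int oldFd)
    {
        const int newFd = dup(oldFd);
        if (newFd < 0) {
            std::fprintf(stderr, "dup(%d) failed in child: %s\n",
                         oldFd, std::strerror(errno));
            _exit(EXIT_FAILURE); // _exit(): skip atexit/flush work inherited from the parent
        }
        return newFd;
    }

    int main()
    {
        const int copy = dupOrExit(STDOUT_FILENO);
        close(copy);
    }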
backport-CVE-2024-25111.patch (new file, 248 lines)
@@ -0,0 +1,248 @@
From 4658d0fc049738c2e6cd25fc0af10e820cf4c11a Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Tue, 31 Oct 2023 11:35:02 +0000
Subject: [PATCH] Fix infinite recursion when parsing HTTP chunks (#1553)

This change stops infinite HttpStateData recursion with at-max-capacity
inBuf. Such inBuf prevents progress in the following call chain:

* processReply()
* processReplyBody() and decodeAndWriteReplyBody()
* maybeReadVirginBody()
* maybeMakeSpaceAvailable() -- tries but fails to quit processing
* processReply()

HttpStateData::maybeMakeSpaceAvailable() no longer calls processReply(),
preventing recursion.

maybeReadVirginBody() now aborts transactions that would otherwise get
stalled due to full read buffer at its maximum capacity. This change
requires that all maybeReadVirginBody() callers do actually need more
response data to make progress. AFAICT, that (natural) invariant holds.

We moved transaction stalling check from maybeMakeSpaceAvailable() into
its previous callers. Without that move, maybeMakeSpaceAvailable() would
have to handle both abortTransaction() and delayRead() cases. Besides
increased code complexity, that would trigger some premature delayRead()
calls (at maybeReadVirginBody() time). Deciding whether to delay socket
reads is complicated, the delay mechanism is expensive, and delaying may
become unnecessary by the time the socket becomes readable, so it is
best to continue to only delayRead() at readReply() time, when there is
no other choice left.

maybeReadVirginBody() mishandled cases where progress was possible, but
not _immediately_ -- it did nothing in those cases, probably stalling
transactions when maybeMakeSpaceAvailable() returned false but did not
call processReply(). This is now fixed: maybeReadVirginBody() now starts
waiting for the socket to be ready for reading in those cases,
effectively passing control to readReply() that handles them.

maybeReadVirginBody() prematurely grew buffer for future socket reads.
As a (positive) side effect of the above refactoring, we now delay
buffer growth until the actual read(2) time, which is best for
performance. Most likely, this premature buffer growth was an accident:
maybeReadVirginBody() correctly called maybeMakeSpaceAvailable() with
doGrow set to false. However, maybeMakeSpaceAvailable() misinterpreted
doGrow as a "do not actually do it" parameter. That bug is now gone.

This recursion bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/
where it was filed as "Chunked Encoding Stack Overflow".

Conflict: NA
Reference: https://github.com/squid-cache/squid/commit/4658d0fc049738c2e6cd25fc0af10e820cf4c11a
---
 src/http.cc | 109 +++++++++++++++++++++++++++++++++++++---------------
 src/http.h  |  15 +++-----
 2 files changed, 84 insertions(+), 40 deletions(-)

diff --git a/src/http.cc b/src/http.cc
index 138c845c7b0..0829c25142f 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -54,6 +54,7 @@
 #include "RefreshPattern.h"
 #include "rfc1738.h"
 #include "SquidConfig.h"
+#include "SquidMath.h"
 #include "StatCounters.h"
 #include "Store.h"
 #include "StrList.h"
@@ -1200,16 +1201,24 @@ HttpStateData::readReply(const CommIoCbParams &io)
      * Plus, it breaks our lame *HalfClosed() detection
      */
 
-    Must(maybeMakeSpaceAvailable(true));
-    CommIoCbParams rd(this); // will be expanded with ReadNow results
-    rd.conn = io.conn;
-    rd.size = entry->bytesWanted(Range<size_t>(0, inBuf.spaceSize()));
+    const auto moreDataPermission = canBufferMoreReplyBytes();
+    if (!moreDataPermission) {
+        abortTransaction("ready to read required data, but the read buffer is full and cannot be drained");
+        return;
+    }
+
+    const auto readSizeMax = maybeMakeSpaceAvailable(moreDataPermission.value());
+    // TODO: Move this logic inside maybeMakeSpaceAvailable():
+    const auto readSizeWanted = readSizeMax ? entry->bytesWanted(Range<size_t>(0, readSizeMax)) : 0;
 
-    if (rd.size <= 0) {
+    if (readSizeWanted <= 0) {
         delayRead();
         return;
     }
 
+    CommIoCbParams rd(this); // will be expanded with ReadNow results
+    rd.conn = io.conn;
+    rd.size = readSizeWanted;
     switch (Comm::ReadNow(rd, inBuf)) {
     case Comm::INPROGRESS:
         if (inBuf.isEmpty())
@@ -1591,8 +1600,10 @@ HttpStateData::maybeReadVirginBody()
     if (!Comm::IsConnOpen(serverConnection) || fd_table[serverConnection->fd].closing())
         return;
 
-    if (!maybeMakeSpaceAvailable(false))
+    if (!canBufferMoreReplyBytes()) {
+        abortTransaction("more response bytes required, but the read buffer is full and cannot be drained");
         return;
+    }
 
     // XXX: get rid of the do_next_read flag
     // check for the proper reasons preventing read(2)
@@ -1610,40 +1621,78 @@ HttpStateData::maybeReadVirginBody()
     Comm::Read(serverConnection, call);
 }
 
-bool
-HttpStateData::maybeMakeSpaceAvailable(bool doGrow)
+/// Desired inBuf capacity based on various capacity preferences/limits:
+/// * a smaller buffer may not hold enough for look-ahead header/body parsers;
+/// * a smaller buffer may result in inefficient tiny network reads;
+/// * a bigger buffer may waste memory;
+/// * a bigger buffer may exceed SBuf storage capabilities (SBuf::maxSize);
+size_t
+HttpStateData::calcReadBufferCapacityLimit() const
 {
-    // how much we are allowed to buffer
-    const int limitBuffer = (flags.headers_parsed ? Config.readAheadGap : Config.maxReplyHeaderSize);
-
-    if (limitBuffer < 0 || inBuf.length() >= (SBuf::size_type)limitBuffer) {
-        // when buffer is at or over limit already
-        debugs(11, 7, "will not read up to " << limitBuffer << ". buffer has (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
-        debugs(11, DBG_DATA, "buffer has {" << inBuf << "}");
-        // Process next response from buffer
-        processReply();
-        return false;
+    if (!flags.headers_parsed)
+        return Config.maxReplyHeaderSize;
+
+    // XXX: Our inBuf is not used to maintain the read-ahead gap, and using
+    // Config.readAheadGap like this creates huge read buffers for large
+    // read_ahead_gap values. TODO: Switch to using tcp_recv_bufsize as the
+    // primary read buffer capacity factor.
+    //
+    // TODO: Cannot reuse throwing NaturalCast() here. Consider removing
+    // .value() dereference in NaturalCast() or add/use NaturalCastOrMax().
+    const auto configurationPreferences = NaturalSum<size_t>(Config.readAheadGap).value_or(SBuf::maxSize);
+
+    // TODO: Honor TeChunkedParser look-ahead and trailer parsing requirements
+    // (when explicit configurationPreferences are set too low).
+
+    return std::min<size_t>(configurationPreferences, SBuf::maxSize);
+}
+
+/// The maximum number of virgin reply bytes we may buffer before we violate
+/// the currently configured response buffering limits.
+/// \retval std::nullopt means that no more virgin response bytes can be read
+/// \retval 0 means that more virgin response bytes may be read later
+/// \retval >0 is the number of bytes that can be read now (subject to other constraints)
+std::optional<size_t>
+HttpStateData::canBufferMoreReplyBytes() const
+{
+#if USE_ADAPTATION
+    // If we do not check this now, we may say the final "no" prematurely below
+    // because inBuf.length() will decrease as adaptation drains buffered bytes.
+    if (responseBodyBuffer) {
+        debugs(11, 3, "yes, but waiting for adaptation to drain read buffer");
+        return 0; // yes, we may be able to buffer more (but later)
+    }
+#endif
+
+    const auto maxCapacity = calcReadBufferCapacityLimit();
+    if (inBuf.length() >= maxCapacity) {
+        debugs(11, 3, "no, due to a full buffer: " << inBuf.length() << '/' << inBuf.spaceSize() << "; limit: " << maxCapacity);
+        return std::nullopt; // no, configuration prohibits buffering more
     }
 
+    const auto maxReadSize = maxCapacity - inBuf.length(); // positive
+    debugs(11, 7, "yes, may read up to " << maxReadSize << " into " << inBuf.length() << '/' << inBuf.spaceSize());
+    return maxReadSize; // yes, can read up to this many bytes (subject to other constraints)
+}
+
+/// prepare read buffer for reading
+/// \return the maximum number of bytes the caller should attempt to read
+/// \retval 0 means that the caller should delay reading
+size_t
+HttpStateData::maybeMakeSpaceAvailable(const size_t maxReadSize)
+{
     // how much we want to read
-    const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), (limitBuffer - inBuf.length()));
+    const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), maxReadSize);
 
-    if (!read_size) {
+    if (read_size < 2) {
         debugs(11, 7, "will not read up to " << read_size << " into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
-        return false;
+        return 0;
     }
 
-    // just report whether we could grow or not, do not actually do it
-    if (doGrow)
-        return (read_size >= 2);
-
     // we may need to grow the buffer
     inBuf.reserveSpace(read_size);
-    debugs(11, 8, (!flags.do_next_read ? "will not" : "may") <<
-           " read up to " << read_size << " bytes info buf(" << inBuf.length() << "/" << inBuf.spaceSize() <<
-           ") from " << serverConnection);
-
-    return (inBuf.spaceSize() >= 2); // only read if there is 1+ bytes of space available
+    debugs(11, 7, "may read up to " << read_size << " bytes info buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
+    return read_size;
 }
 
 /// called after writing the very last request byte (body, last-chunk, etc)
diff --git a/src/http.h b/src/http.h
index 7baffe36499..4f59af90ba8 100644
--- a/src/http.h
+++ b/src/http.h
@@ -15,6 +15,8 @@
 #include "http/StateFlags.h"
 #include "sbuf/SBuf.h"
 
+#include <optional>
+
 class FwdState;
 class HttpHeader;
 class String;
@@ -114,16 +116,9 @@ class HttpStateData : public Client
 
     void abortTransaction(const char *reason) { abortAll(reason); } // abnormal termination
 
-    /**
-     * determine if read buffer can have space made available
-     * for a read.
-     *
-     * \param grow whether to actually expand the buffer
-     *
-     * \return whether the buffer can be grown to provide space
-     * regardless of whether the grow actually happened.
-     */
-    bool maybeMakeSpaceAvailable(bool grow);
+    size_t calcReadBufferCapacityLimit() const;
+    std::optional<size_t> canBufferMoreReplyBytes() const;
+    size_t maybeMakeSpaceAvailable(size_t maxReadSize);
 
     // consuming request body
     virtual void handleMoreRequestBodyAvailable();
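Editorial note: the key design point in this patch is the tri-state answer that canBufferMoreReplyBytes() gives its callers, which lets them choose between aborting, delaying, and reading without ever re-entering processReply(). A reduced standalone sketch of that contract (free function with illustrative parameter names, not Squid's member):

    #include <cstddef>
    #include <optional>

    // nullopt = stop for good (full at the hard limit), 0 = not now (retry
    // later), >0 = may read up to that many bytes right now.
    std::optional<std::size_t>
    canBufferMore(std::size_t buffered, std::size_t maxCapacity, bool adaptationDraining)
    {
        if (adaptationDraining)
            return 0;                  // yes, but only after the buffer drains
        if (buffered >= maxCapacity)
            return std::nullopt;       // no, and waiting will not help
        return maxCapacity - buffered; // yes, up to this many bytes
    }

    int main()
    {
        return canBufferMore(10, 10, false).has_value() ? 1 : 0; // full buffer: stop
    }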
backport-CVE-2024-37894.patch (new file, 25 lines)
@@ -0,0 +1,25 @@
From f411fe7d75197852f0e5ee85027a06d58dd8df4c Mon Sep 17 00:00:00 2001
From: Francesco Chemolli <kinkie@squid-cache.org>
Date: Sun, 2 Jun 2024 16:41:08 +0200
Subject: [PATCH] Force downcast in TrieNode::add

---
 lib/libTrie/TrieNode.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/libTrie/TrieNode.cc b/lib/libTrie/TrieNode.cc
index 0f991a0..d417e0f 100644
--- a/lib/libTrie/TrieNode.cc
+++ b/lib/libTrie/TrieNode.cc
@@ -32,7 +32,7 @@ TrieNode::add(char const *aString, size_t theLength, void *privatedata, TrieChar
     /* We trust that privatedata and existent keys have already been checked */
 
     if (theLength) {
-        int index = transform ? (*transform)(*aString): *aString;
+        const unsigned char index = transform ? (*transform)(*aString): *aString;
 
         if (!internal[index])
             internal[index] = new TrieNode;
--
2.41.0
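Editorial note: the forced downcast matters because, on platforms where plain char is signed, bytes >= 0x80 become negative ints and index before the start of the node's child table; converting through unsigned char maps them to 128..255 instead. A standalone demonstration:

    #include <cstdio>

    int main()
    {
        const char c = '\xc3';           // e.g. a UTF-8 lead byte
        const int asSignedInt = c;       // -61 where char is signed
        const unsigned char asIndex = c; // 195: safe index into a 256-entry table
        std::printf("%d vs %u\n", asSignedInt, static_cast<unsigned>(asIndex));
        // Indexing internal[asSignedInt] would read before the array;
        // the patched unsigned char index always lands in 0..255.
    }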
backport-squid-crash-half-closed.patch (new file, 196 lines)
@@ -0,0 +1,196 @@
From 5da786ef2a708559f5b53a05b7db6de0b64ce885 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Mon, 11 Sep 2023 07:49:36 +0000
Subject: [PATCH] Bug 4156: comm.cc "!commHasHalfClosedMonitor(fd)" assertion
 (#1443)

This bug is specific to "half_closed_clients on" configurations.

    assertion failed: ... "isOpen(fd) && !commHasHalfClosedMonitor(fd)"
    location: comm.cc:1583 in commStartHalfClosedMonitor()

Squid asserts because Server schedules comm_read() after receiving EOF:
That extra read results in another EOF notification, and an attempt to
start monitoring an already monitored half-closed connection.

Upon detecting a potentially half-closed connection,
Server::doClientRead() should clear flags.readMore to prevent Server
from scheduling another comm_read(), but it does not and cannot do that
(without significant refactoring) because

* Server does not have access to flags.readMore
* flags.readMore hack is used for more than just "read more"

We worked around the above limitation by re-detecting half-closed
conditions and clearing flags.readMore after clientParseRequests(). That
fixed the bug but further increased poor code duplication across
ConnStateData::afterClientRead() and ConnStateData::kick() methods. We
then refactored by merging and moving that duplicated code into
clientParseRequests() and renamed that method to make backports safer.

Conflict: NA
Reference: https://github.com/squid-cache/squid/commit/5da786
---
 src/client_side.cc            | 67 ++++++++++++-----------------------
 src/client_side.h             |  2 +-
 src/tests/stub_client_side.cc |  2 +-
 3 files changed, 24 insertions(+), 47 deletions(-)

diff --git a/src/client_side.cc b/src/client_side.cc
index e62bcf0fc7d..bd9cf6a7d5f 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -932,7 +932,7 @@ ConnStateData::kick()
      * We are done with the response, and we are either still receiving request
      * body (early response!) or have already stopped receiving anything.
      *
-     * If we are still receiving, then clientParseRequest() below will fail.
+     * If we are still receiving, then parseRequests() below will fail.
      * (XXX: but then we will call readNextRequest() which may succeed and
      * execute a smuggled request as we are not done with the current request).
      *
@@ -952,28 +952,12 @@ ConnStateData::kick()
      * Attempt to parse a request from the request buffer.
      * If we've been fed a pipelined request it may already
      * be in our read buffer.
-     *
-     \par
-     * This needs to fall through - if we're unlucky and parse the _last_ request
-     * from our read buffer we may never re-register for another client read.
      */
 
-    if (clientParseRequests()) {
-        debugs(33, 3, clientConnection << ": parsed next request from buffer");
-    }
+    parseRequests();
 
-    /** \par
-     * Either we need to kick-start another read or, if we have
-     * a half-closed connection, kill it after the last request.
-     * This saves waiting for half-closed connections to finished being
-     * half-closed _AND_ then, sometimes, spending "Timeout" time in
-     * the keepalive "Waiting for next request" state.
-     */
-    if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
-        debugs(33, 3, "half-closed client with no pending requests, closing");
-        clientConnection->close();
+    if (!isOpen())
         return;
-    }
 
     /** \par
      * At this point we either have a parsed request (which we've
@@ -1882,16 +1866,11 @@ ConnStateData::receivedFirstByte()
     resetReadTimeout(Config.Timeout.request);
 }
 
-/**
- * Attempt to parse one or more requests from the input buffer.
- * Returns true after completing parsing of at least one request [header]. That
- * includes cases where parsing ended with an error (e.g., a huge request).
- */
-bool
-ConnStateData::clientParseRequests()
+/// Attempt to parse one or more requests from the input buffer.
+/// May close the connection.
+void
+ConnStateData::parseRequests()
 {
-    bool parsed_req = false;
-
     debugs(33, 5, clientConnection << ": attempting to parse");
 
     // Loop while we have read bytes that are not needed for producing the body
@@ -1936,8 +1915,6 @@ ConnStateData::clientParseRequests()
 
             processParsedRequest(context);
 
-            parsed_req = true; // XXX: do we really need to parse everything right NOW ?
-
             if (context->mayUseConnection()) {
                 debugs(33, 3, "Not parsing new requests, as this request may need the connection");
                 break;
@@ -1950,8 +1927,19 @@ ConnStateData::clientParseRequests()
         }
     }
 
-    /* XXX where to 'finish' the parsing pass? */
-    return parsed_req;
+    debugs(33, 7, "buffered leftovers: " << inBuf.length());
+
+    if (isOpen() && commIsHalfClosed(clientConnection->fd)) {
+        if (pipeline.empty()) {
+            // we processed what we could parse, and no more data is coming
+            debugs(33, 5, "closing half-closed without parsed requests: " << clientConnection);
+            clientConnection->close();
+        } else {
+            // we parsed what we could, and no more data is coming
+            debugs(33, 5, "monitoring half-closed while processing parsed requests: " << clientConnection);
+            flags.readMore = false; // may already be false
+        }
+    }
 }
 
 void
@@ -1968,18 +1956,7 @@ ConnStateData::afterClientRead()
     if (pipeline.empty())
         fd_note(clientConnection->fd, "Reading next request");
 
-    if (!clientParseRequests()) {
-        if (!isOpen())
-            return;
-        // We may get here if the client half-closed after sending a partial
-        // request. See doClientRead() and shouldCloseOnEof().
-        // XXX: This partially duplicates ConnStateData::kick().
-        if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
-            debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
-            clientConnection->close();
-            return;
-        }
-    }
+    parseRequests();
 
     if (!isOpen())
         return;
@@ -3767,7 +3744,7 @@ ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
     startPinnedConnectionMonitoring();
 
     if (pipeline.empty())
-        kick(); // in case clientParseRequests() was blocked by a busy pic.connection
+        kick(); // in case parseRequests() was blocked by a busy pic.connection
 }
 
 /// Forward future client requests using the given server connection.
diff --git a/src/client_side.h b/src/client_side.h
index e37ab27da1a..9f36d864c2c 100644
--- a/src/client_side.h
+++ b/src/client_side.h
@@ -98,7 +98,6 @@ class ConnStateData:
     void doneWithControlMsg() override;
 
     /// Traffic parsing
-    bool clientParseRequests();
     void readNextRequest();
 
     /// try to make progress on a transaction or read more I/O
@@ -443,6 +442,7 @@ class ConnStateData:
 
     void checkLogging();
 
+    void parseRequests();
     void clientAfterReadingRequests();
     bool concurrentRequestQueueFilled() const;
 
diff --git a/src/tests/stub_client_side.cc b/src/tests/stub_client_side.cc
index 8c160e56340..f49d5dceeed 100644
--- a/src/tests/stub_client_side.cc
+++ b/src/tests/stub_client_side.cc
@@ -14,7 +14,7 @@
 #include "tests/STUB.h"
 
 #include "client_side.h"
-bool ConnStateData::clientParseRequests() STUB_RETVAL(false)
+void ConnStateData::parseRequests() STUB
 void ConnStateData::readNextRequest() STUB
 bool ConnStateData::isOpen() const STUB_RETVAL(false)
 void ConnStateData::kick() STUB
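Editorial note: after the refactoring, the half-closed decision lives in exactly one place at the end of parseRequests(). A standalone sketch of that single decision (illustrative types, not Squid's API): close when nothing is pending, otherwise finish the pending work but stop scheduling reads, which is what prevents the duplicate EOF read and the assertion.

    struct Connection { bool open = true, halfClosed = false; void close() { open = false; } };
    struct Flags { bool readMore = true; };

    void afterParsing(Connection &conn, Flags &flags, bool pipelineEmpty)
    {
        if (conn.open && conn.halfClosed) {
            if (pipelineEmpty)
                conn.close();           // nothing pending, no more data coming
            else
                flags.readMore = false; // keep working, but schedule no reads
        }
    }

    int main() {}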
squid-6.1.tar.xz (binary file removed; binary content not shown)
squid-6.1.tar.xz.asc (deleted, 25 lines)
@@ -1,25 +0,0 @@
File: squid-6.1.tar.xz
Date: Thu 06 Jul 2023 05:42:59 UTC
Size: 2546668
MD5 : 64841841e06ea487b0305070c33b04dd
SHA1: 4a3711e42ca9acbba580fd0c306cc2f6f84db1f8
Key : CD6DBF8EF3B17D3E <squid3@treenet.co.nz>
      B068 84ED B779 C89B 044E 64E3 CD6D BF8E F3B1 7D3E
      keyring = http://www.squid-cache.org/pgp.asc
      keyserver = pool.sks-keyservers.net
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAmSmVGsACgkQzW2/jvOx
fT6rPA/9Gkkw4w3h0EFPI7mHvjquoc2mW/zX5clfZmhoA81u9xRUazgZeaPcLydF
rwRxNPITWzZ+emqeTMLrrVndHXAqKqg8VRbHymWEAh3aqVzvaj98h9iZylvPLX1N
rdToqDBET8E4YHYUsmUdoegg5XgGiFEC3fqcS7/3eOUUoWsrFsKy6WAF2g1lS5yx
kqfRA5Nim23ckACsoRcMPfVPHxRFuyoYiLAsgMjx2cZAGDVtFos260N1QK8xdQkE
o/LEv2zp4wXFMeLSJJsvgl9SfqA7XtC91ZH8nrAvmWwj99Totnt5KEa8MiF0Wn0K
dpB2X1meb/dx8CI9AMNex9hedUspPlAMLCI8ggR8KtzW31g/7GagXpsKmJIEdk+S
Yjq4NXHS0eDmiMcI2ZDBp6Sk/ty1VrnH61GqA8FDEOTTAJGvFu6DahVgxHE6s0aj
pOw8AmM/0yP2kuafhchbRQQ9bCFBsO4z4sUyGNkHNHCjX3XimW3m4mBPSlEkDAF2
dbUdAwIjBdS8zdU0N6wB+WXy7y459bsQBkWQMc7P4TqQ02IeL+4boF2c8RpgWiDf
hHS03U60zAP36m6HlC1nSnGnMABlwvBPg928yMq/jrf75T5DQHOSEuQ69NxF61ge
SLGX+aEGwwXGsHhGfW6W9sORKaiJNI683US3vGOn33CX+L5rCIU=
=hwBL
-----END PGP SIGNATURE-----
squid-6.6.tar.xz (new binary file; binary content not shown)
squid-6.6.tar.xz.asc (new file, 25 lines)
@@ -0,0 +1,25 @@
File: squid-6.6.tar.xz
Date: Thu 07 Dec 2023 04:03:46 UTC
Size: 2554824
MD5 : 5a41134ee1b7e75f62088acdec92d2ca
SHA1: f05e06a9dd3bf7501d2844e43d9ae1bd00e9edcc
Key : CD6DBF8EF3B17D3E <squid3@treenet.co.nz>
      B068 84ED B779 C89B 044E 64E3 CD6D BF8E F3B1 7D3E
      keyring = http://www.squid-cache.org/pgp.asc
      keyserver = pool.sks-keyservers.net
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAmVxRCsACgkQzW2/jvOx
fT5VtQ/+M+mhaGYCp9YBi1GG9vyQwkkIyngL3vPpz7UxZHAR+mzk29zwlgdDgwWA
Zasaomg8S1Clq2dhNr7oo6RuZ7mKlhEeHba2WvL+1/VcBsPnazUwzYQiW7k9KxYe
n1At62duit+YnswTNnj6HJRKKK0nKlPmJycL1AThh9Tj6oHTsWBCItnSZ5eUjGX0
aKiMrkrHtq3qheWkVZPCJEFDs88ECDrJD7s9cpAhun+/0v+4ECE65uJ2bZHK4f/E
TH5OIf8vltEB8sA/SSanMM/C+gZObET3TssrgHz92j0svMOlALLtitb0aHly21JV
fEKB200Ngac2y6rq3xDNiznmMn+SeCNUsiDcdauCrsUHNW9S9FhOxeWXy/Z7JK4A
mqVnnqvN9GFvv2EEC8J9lj+cwGOdaSW6L2aPVkub8Ij5O+e2Tg+uBm4ZC8vcACYz
+1oo8YyvcfO9EmNRE0vpFTWH9Ux5ptgdvsIxv41QN40RUYN7FBbOgey59mP3uq2Q
0g/b8lr1PnrwB74OrVGcXLwREFLXtkRC9vcdNjvdchCg60KlBNWEPSGJA2adS8HJ
4AGyVpU8GCpV3q74rJxIG6FUffL85CfT+1HRmQhzYiGJDzy1AaUJmcelyS4e6cjn
urAWH3mlAaPzj87OuaeZYGAZMWh/5iAarU+VHkZn6vI2Mvl9yMA=
=oyMI
-----END PGP SIGNATURE-----
squid.spec (45 lines changed)
@@ -1,7 +1,7 @@
 %define __perl_requires %{SOURCE8}
 
 Name:    squid
-Version: 6.1
+Version: 6.6
 Release: 4
 Summary: The Squid proxy caching server
 Epoch:   7
@@ -21,14 +21,9 @@ Patch0: squid-4.0.11-config.patch
 Patch1: squid-3.1.0.9-location.patch
 Patch2: squid-3.0.STABLE1-perlpath.patch
 Patch3: backport-squid-6.1-symlink-lang-err.patch
-Patch4: backport-0001-CVE-2023-5824.patch
-Patch5: backport-0002-CVE-2023-5824.patch
-Patch6: backport-CVE-2023-46846.patch
-Patch7: backport-CVE-2023-46847.patch
-Patch8: backport-CVE-2023-46848.patch
-Patch9: backport-CVE-2023-46724.patch
-Patch10: backport-CVE-2023-49285.patch
-Patch11: backport-CVE-2023-49286.patch
+Patch4: backport-squid-crash-half-closed.patch
+Patch5: backport-CVE-2024-25111.patch
+Patch6: backport-CVE-2024-37894.patch
 
 Requires: bash
 Requires: httpd-filesystem
@@ -71,7 +66,7 @@ autoreconf -fi
     --enable-linux-netfilter --enable-removal-policies="heap,lru" \
     --enable-snmp --enable-ssl --enable-ssl-crtd \
     --enable-storeio="aufs,diskd,ufs,rock" --enable-diskio --enable-wccpv2 \
-    --enable-esi --enable-ecap --with-aio --with-default-user="squid" \
+    --disable-esi --enable-ecap --with-aio --with-default-user="squid" \
     --with-dl --with-openssl --with-pthreads --disable-arch-native \
     --disable-security-cert-validators \
     --with-tdb --disable-strict-error-checking \
@@ -251,6 +246,36 @@ fi
 chgrp squid /var/cache/samba/winbindd_privileged >/dev/null 2>&1 || :
 
 %changelog
+* Tue Oct 29 2024 xinghe <xinghe2@h-partners.com> - 7:6.6-4
+- Type:cves
+- ID:CVE-2024-45802
+- SUG:NA
+- DESC:fix CVE-2024-45802
+
+* Wed Jun 26 2024 yinyongkang <yinyongkang@kylinos.cn> - 7:6.6-3
+- Type:cves
+- ID:CVE-2024-37894
+- SUG:NA
+- DESC:fix CVE-2024-37894
+
+* Thu Mar 07 2024 xinghe <xinghe2@h-partners.com> - 7:6.6-2
+- Type:cves
+- ID:CVE-2024-25111
+- SUG:NA
+- DESC:fix CVE-2024-25111
+
+* Tue Dec 26 2023 xinghe <xinghe2@h-partners.com> - 7:6.6-1
+- Type:requirements
+- ID:NA
+- SUG:NA
+- DESC:upgrade to 6.6
+
+* Fri Dec 15 2023 xinghe <xinghe2@h-partners.com> - 7:6.1-5
+- Type:cves
+- ID:CVE-2023-50269
+- SUG:NA
+- DESC:fix CVE-2023-50269
+
 * Tue Dec 05 2023 yanglu <yanglu72@h-partners.com> - 7:6.1-4
 - Type:cves
 - ID:CVE-2023-49285 CVE-2023-49286