update version to 6.6

parent 79813e0a42
commit 6e8699454c
@@ -1,122 +0,0 @@
From 5921355e474ffbff2cb577c3622ce0e686e8996a Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Sat, 11 Mar 2023 05:48:14 +0000
Subject: [PATCH] Replaced clientReplyContext::tempBuffer with old_reqofs
 (#1304)

The tempBuffer data member was not actually used as a buffer. We only
used its offset field, and only for saving reqofs (which has a different
type than tempBuffer.offset!). Replaced the buffer with old_reqofs,
consistent with the rest of the "saved stale entry state" code.

Also fixed old_reqsize type to match reqsize and grouped that member
with the other private "saved stale entry state" fields.

Bad old types probably did not trigger runtime failures because the
associated saved numbers are saved at the very beginning of fetching the
entry, when all these accumulation-related counters are still small.

The remaining reqofs and reqsize types are wrong for platforms where
size_t is not uint64_t, but fixing that deserves a dedicated change. For
now, we just made the types of "old_" and "current" members consistent.

Reference:https://github.com/squid-cache/squid/commit/5921355e474ffbff2cb577c3622ce0e686e8996a
Conflict:NA
---
 src/client_side_reply.cc | 12 ++++++------
 src/client_side_reply.h  |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/client_side_reply.cc b/src/client_side_reply.cc
index 0004137cbc9..606a3ecafff 100644
--- a/src/client_side_reply.cc
+++ b/src/client_side_reply.cc
@@ -66,7 +66,6 @@ clientReplyContext::~clientReplyContext()
     /* old_entry might still be set if we didn't yet get the reply
      * code in HandleIMSReply() */
     removeStoreReference(&old_sc, &old_entry);
-    safe_free(tempBuffer.data);
     cbdataReferenceDone(http);
     HTTPMSGUNLOCK(reply);
 }
@@ -76,7 +75,6 @@ clientReplyContext::clientReplyContext(ClientHttpRequest *clientContext) :
     http(cbdataReference(clientContext)),
     headers_sz(0),
     sc(nullptr),
-    old_reqsize(0),
     reqsize(0),
     reqofs(0),
     ourNode(nullptr),
@@ -84,6 +82,8 @@ clientReplyContext::clientReplyContext(ClientHttpRequest *clientContext) :
     old_entry(nullptr),
     old_sc(nullptr),
     old_lastmod(-1),
+    old_reqofs(0),
+    old_reqsize(0),
     deleting(false),
     collapsedRevalidation(crNone)
 {
@@ -202,7 +202,7 @@ clientReplyContext::saveState()
     old_lastmod = http->request->lastmod;
     old_etag = http->request->etag;
     old_reqsize = reqsize;
-    tempBuffer.offset = reqofs;
+    old_reqofs = reqofs;
     /* Prevent accessing the now saved entries */
     http->storeEntry(nullptr);
     sc = nullptr;
@@ -219,7 +219,7 @@ clientReplyContext::restoreState()
     http->storeEntry(old_entry);
     sc = old_sc;
     reqsize = old_reqsize;
-    reqofs = tempBuffer.offset;
+    reqofs = old_reqofs;
     http->request->lastmod = old_lastmod;
     http->request->etag = old_etag;
     /* Prevent accessed the old saved entries */
@@ -228,7 +228,7 @@ clientReplyContext::restoreState()
     old_lastmod = -1;
     old_etag.clean();
     old_reqsize = 0;
-    tempBuffer.offset = 0;
+    old_reqofs = 0;
 }

 void
@@ -377,7 +377,7 @@ clientReplyContext::sendClientUpstreamResponse()
     http->storeEntry()->clearPublicKeyScope();

     /* here the data to send is the data we just received */
-    tempBuffer.offset = 0;
+    old_reqofs = 0;
     old_reqsize = 0;
     /* sendMoreData tracks the offset as well.
      * Force it back to zero */
diff --git a/src/client_side_reply.h b/src/client_side_reply.h
index 68b45715b33..32a38bc95e1 100644
--- a/src/client_side_reply.h
+++ b/src/client_side_reply.h
@@ -74,8 +74,6 @@ class clientReplyContext : public RefCountable, public StoreClient
     /// Not to be confused with ClientHttpRequest::Out::headers_sz.
     int headers_sz;
     store_client *sc; /* The store_client we're using */
-    StoreIOBuffer tempBuffer; /* For use in validating requests via IMS */
-    int old_reqsize; /* ... again, for the buffer */
     size_t reqsize;
     size_t reqofs;
     char tempbuf[HTTP_REQBUF_SZ]; ///< a temporary buffer if we need working storage
@@ -135,11 +133,13 @@ class clientReplyContext : public RefCountable, public StoreClient
     /// TODO: Exclude internal Store match bans from the "mismatch" category.
     const char *firstStoreLookup_ = nullptr;

+    /* (stale) cache hit information preserved during IMS revalidation */
     StoreEntry *old_entry;
-    /* ... for entry to be validated */
     store_client *old_sc;
     time_t old_lastmod;
     String old_etag;
+    size_t old_reqofs;
+    size_t old_reqsize;

     bool deleting;
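The invariant this patch restores is easy to see in isolation. Below is a minimal, self-contained sketch (not Squid code; the names only mirror the patch): every "old_" snapshot member shares the exact type of the live counter it saves, so a saveState()/restoreState() round-trip cannot narrow or truncate a value.

    #include <cstddef>

    struct RevalidationState {
        size_t reqofs = 0;      // live offset
        size_t reqsize = 0;     // live size

        size_t old_reqofs = 0;  // snapshots use the same type...
        size_t old_reqsize = 0; // ...so no narrowing conversion can occur

        void saveState() {
            old_reqofs = reqofs;
            old_reqsize = reqsize;
            reqofs = reqsize = 0;
        }
        void restoreState() {
            reqofs = old_reqofs;
            reqsize = old_reqsize;
            old_reqofs = old_reqsize = 0;
        }
    };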
(File diff suppressed because it is too large.)
@@ -1,39 +0,0 @@
From b70f864940225dfe69f9f653f948e787f99c3810 Mon Sep 17 00:00:00 2001
From: Andreas Weigel <andreas.weigel@securepoint.de>
Date: Wed, 18 Oct 2023 04:14:31 +0000
Subject: [PATCH] Fix validation of certificates with CN=* (#1523)

The bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/
where it was filed as "Buffer UnderRead in SSL CN Parsing".

Conflict:NA
Reference:https://github.com/squid-cache/squid/commit/b70f864940225dfe69f9f653f948e787f99c3810
---
 src/anyp/Uri.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
index 3eed2366abd..ef77d4f766b 100644
--- a/src/anyp/Uri.cc
+++ b/src/anyp/Uri.cc
@@ -175,6 +175,10 @@ urlInitialize(void)
     assert(0 == matchDomainName("*.foo.com", ".foo.com", mdnHonorWildcards));
     assert(0 != matchDomainName("*.foo.com", "foo.com", mdnHonorWildcards));

+    assert(0 != matchDomainName("foo.com", ""));
+    assert(0 != matchDomainName("foo.com", "", mdnHonorWildcards));
+    assert(0 != matchDomainName("foo.com", "", mdnRejectSubsubDomains));
+
     /* more cases? */
 }

@@ -828,6 +832,8 @@ matchDomainName(const char *h, const char *d, MatchDomainNameFlags flags)
         return -1;

     dl = strlen(d);
+    if (dl == 0)
+        return 1;

     /*
      * Start at the ends of the two strings and work towards the
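For context, the guard's effect can be shown with a heavily simplified stand-in for matchDomainName (the real function also handles leading dots and wildcard flags): the comparison walks both strings from their last characters backwards, so an empty domain would otherwise position the cursor one byte before the start of the buffer.

    #include <cstring>

    // Returns 0 on an exact match, non-zero otherwise (simplified sketch).
    int matchDomainNameSketch(const char *h, const char *d)
    {
        int hl = static_cast<int>(strlen(h));
        if (hl == 0)
            return -1;
        int dl = static_cast<int>(strlen(d));
        if (dl == 0)        // the fix: never start comparing at d[-1]
            return 1;
        // compare from the last characters towards the front
        while (hl > 0 && dl > 0) {
            --hl; --dl;
            if (h[hl] != d[dl])
                return h[hl] - d[dl];
        }
        return hl - dl;     // equal suffix; remaining length decides
    }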
@@ -1,180 +0,0 @@
From 6cfa10d94ca15a764a1d975597d8024582ef19be Mon Sep 17 00:00:00 2001
From: Amos Jeffries <yadij@users.noreply.github.com>
Date: Fri, 13 Oct 2023 08:44:16 +0000
Subject: [PATCH] RFC 9112: Improve HTTP chunked encoding compliance (#1498)

Reference:http://www.squid-cache.org/Versions/v6/SQUID-2023_1.patch
Conflict:NA
---
 src/http/one/Parser.cc          |  8 +-------
 src/http/one/Parser.h           |  4 +---
 src/http/one/TeChunkedParser.cc | 23 ++++++++++++++++++-----
 src/parser/Tokenizer.cc         | 12 ++++++++++++
 src/parser/Tokenizer.h          |  7 +++++++
 5 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/src/http/one/Parser.cc b/src/http/one/Parser.cc
index 964371b4e..b1908316a 100644
--- a/src/http/one/Parser.cc
+++ b/src/http/one/Parser.cc
@@ -65,16 +65,10 @@ Http::One::Parser::DelimiterCharacters()
 void
 Http::One::Parser::skipLineTerminator(Tokenizer &tok) const
 {
-    if (tok.skip(Http1::CrLf()))
-        return;
-
     if (Config.onoff.relaxed_header_parser && tok.skipOne(CharacterSet::LF))
         return;

-    if (tok.atEnd() || (tok.remaining().length() == 1 && tok.remaining().at(0) == '\r'))
-        throw InsufficientInput();
-
-    throw TexcHere("garbage instead of CRLF line terminator");
+    tok.skipRequired("line-terminating CRLF", Http1::CrLf());
 }

 /// all characters except the LF line terminator
diff --git a/src/http/one/Parser.h b/src/http/one/Parser.h
index 5892a7a59..503c61d3f 100644
--- a/src/http/one/Parser.h
+++ b/src/http/one/Parser.h
@@ -124,9 +124,7 @@ protected:
      * detect and skip the CRLF or (if tolerant) LF line terminator
      * consume from the tokenizer.
      *
-     * \throws exception on bad or InsuffientInput.
-     * \retval true only if line terminator found.
-     * \retval false incomplete or missing line terminator, need more data.
+     * \throws exception on bad or InsufficientInput
      */
     void skipLineTerminator(Tokenizer &) const;

diff --git a/src/http/one/TeChunkedParser.cc b/src/http/one/TeChunkedParser.cc
index d9138fe9a..9cce10fdc 100644
--- a/src/http/one/TeChunkedParser.cc
+++ b/src/http/one/TeChunkedParser.cc
@@ -91,6 +91,11 @@ Http::One::TeChunkedParser::parseChunkSize(Tokenizer &tok)
 {
     Must(theChunkSize <= 0); // Should(), really

+    static const SBuf bannedHexPrefixLower("0x");
+    static const SBuf bannedHexPrefixUpper("0X");
+    if (tok.skip(bannedHexPrefixLower) || tok.skip(bannedHexPrefixUpper))
+        throw TextException("chunk starts with 0x", Here());
+
     int64_t size = -1;
     if (tok.int64(size, 16, false) && !tok.atEnd()) {
         if (size < 0)
@@ -121,7 +126,7 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
     // bad or insufficient input, like in the code below. TODO: Expand up.
     try {
         parseChunkExtensions(tok); // a possibly empty chunk-ext list
-        skipLineTerminator(tok);
+        tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
         buf_ = tok.remaining();
         parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
         return true;
@@ -132,12 +137,14 @@ Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
     // other exceptions bubble up to kill message parsing
 }

-/// Parses the chunk-ext list (RFC 7230 section 4.1.1 and its Errata #4667):
+/// Parses the chunk-ext list (RFC 9112 section 7.1.1:
 /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
 void
-Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
+Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
 {
     do {
+        auto tok = callerTok;
+
         ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size

         if (!tok.skip(';'))
@@ -145,6 +152,7 @@ Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)

         parseOneChunkExtension(tok);
         buf_ = tok.remaining(); // got one extension
+        callerTok = tok;
     } while (true);
 }

@@ -158,11 +166,14 @@ Http::One::ChunkExtensionValueParser::Ignore(Tokenizer &tok, const SBuf &extName
 /// Parses a single chunk-ext list element:
 /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
 void
-Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
+Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &callerTok)
 {
+    auto tok = callerTok;
+
     ParseBws(tok); // Bug 4492: ICAP servers send SP before chunk-ext-name

     const auto extName = tok.prefix("chunk-ext-name", CharacterSet::TCHAR);
+    callerTok = tok; // in case we determine that this is a valueless chunk-ext

     ParseBws(tok);

@@ -176,6 +187,8 @@ Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
         customExtensionValueParser->parse(tok, extName);
     else
         ChunkExtensionValueParser::Ignore(tok, extName);
+
+    callerTok = tok;
 }

 bool
@@ -209,7 +222,7 @@ Http::One::TeChunkedParser::parseChunkEnd(Tokenizer &tok)
     Must(theLeftBodySize == 0); // Should(), really

     try {
-        skipLineTerminator(tok);
+        tok.skipRequired("chunk CRLF", Http1::CrLf());
         buf_ = tok.remaining(); // parse checkpoint
         theChunkSize = 0; // done with the current chunk
         parsingStage_ = Http1::HTTP_PARSE_CHUNK_SZ;
diff --git a/src/parser/Tokenizer.cc b/src/parser/Tokenizer.cc
index 2bac7fb1f..2843a44af 100644
--- a/src/parser/Tokenizer.cc
+++ b/src/parser/Tokenizer.cc
@@ -147,6 +147,18 @@ Parser::Tokenizer::skipAll(const CharacterSet &tokenChars)
     return success(prefixLen);
 }

+void
+Parser::Tokenizer::skipRequired(const char *description, const SBuf &tokenToSkip)
+{
+    if (skip(tokenToSkip) || tokenToSkip.isEmpty())
+        return;
+
+    if (tokenToSkip.startsWith(buf_))
+        throw InsufficientInput();
+
+    throw TextException(ToSBuf("cannot skip ", description), Here());
+}
+
 bool
 Parser::Tokenizer::skipOne(const CharacterSet &chars)
 {
diff --git a/src/parser/Tokenizer.h b/src/parser/Tokenizer.h
index 7bae1ccbb..3cfa7dd6c 100644
--- a/src/parser/Tokenizer.h
+++ b/src/parser/Tokenizer.h
@@ -115,6 +115,13 @@ public:
      */
     SBuf::size_type skipAll(const CharacterSet &discardables);

+    /** skips a given character sequence (string);
+     * does nothing if the sequence is empty
+     *
+     * \throws exception on mismatching prefix or InsufficientInput
+     */
+    void skipRequired(const char *description, const SBuf &tokenToSkip);
+
     /** Removes a single trailing character from the set.
     *
     * \return whether a character was removed
--
2.25.1
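Two ideas in this patch generalize well. First, skipRequired() folds the "matched / need more input / garbage" triage into one helper. Second, the extension parsers now mutate a private copy of the caller's tokenizer and assign it back only after a complete element parses, so a failed or partial parse leaves the caller's position untouched. A minimal sketch of that copy-and-commit pattern (toy Tok type and grammar, not Squid's API):

    #include <string>

    // A toy tokenizer: a view over the unparsed input.
    struct Tok { std::string buf; };

    // Parses one ";x" element; returns false without a complete element.
    static bool parseOneExtension(Tok &tok)
    {
        if (tok.buf.size() < 2 || tok.buf[0] != ';')
            return false;
        tok.buf.erase(0, 2);        // consume ";x"
        return true;
    }

    static void parseExtensions(Tok &callerTok)
    {
        for (;;) {
            Tok tok = callerTok;    // work on a private copy
            if (!parseOneExtension(tok))
                return;             // failure leaves callerTok untouched
            callerTok = tok;        // commit progress only after success
        }
    }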
@@ -1,42 +0,0 @@
From dc0e10bec3334053c1a5297e50dd7052ea18aef0 Mon Sep 17 00:00:00 2001
From: Alex Bason <nonsleepr@gmail.com>
Date: Sun, 15 Oct 2023 13:04:47 +0000
Subject: [PATCH] Fix stack buffer overflow when parsing Digest Authorization
 (#1517)

The bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/digest-overflow.html
where it was filed as "Stack Buffer Overflow in Digest Authentication".

Reference:http://www.squid-cache.org/Versions/v6/SQUID-2023_3.patch
Conflict:NA
---
 src/auth/digest/Config.cc | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
index f00e2ba68..3c070d242 100644
--- a/src/auth/digest/Config.cc
+++ b/src/auth/digest/Config.cc
@@ -827,11 +827,15 @@ Auth::Digest::Config::decode(char const *proxy_auth, const HttpRequest *request,
             break;

         case DIGEST_NC:
-            if (value.size() != 8) {
+            if (value.size() == 8) {
+                // for historical reasons, the nc value MUST be exactly 8 bytes
+                static_assert(sizeof(digest_request->nc) == 8 + 1);
+                xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
+                debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
+            } else {
                 debugs(29, 9, "Invalid nc '" << value << "' in '" << temp << "'");
+                digest_request->nc[0] = 0;
             }
-            xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
-            debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
             break;

         case DIGEST_CNONCE:
--
2.25.1
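The shape of the fix, extracted into a standalone sketch (assumed struct and function names; the real code copies from an SBuf-like value): validate the length against the fixed-size destination before copying, and reset the buffer on rejection rather than copying an attacker-chosen length.

    #include <cstring>

    struct DigestRequest {
        char nc[8 + 1];     // exactly eight digits plus the NUL terminator
    };

    void setNonceCount(DigestRequest &req, const char *value, size_t len)
    {
        static_assert(sizeof(req.nc) == 8 + 1, "nc holds 8 bytes + NUL");
        if (len == 8) {
            memcpy(req.nc, value, len);
            req.nc[len] = '\0';
        } else {
            req.nc[0] = '\0';   // reject; never copy an unchecked length
        }
    }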
@@ -1,49 +0,0 @@
From 7de01969a793b2fdb476e354a9fcda272d400d27 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Thu, 25 May 2023 02:10:28 +0000
Subject: [PATCH] Fix userinfo percent-encoding (#1367)

%X expects an unsigned int, and that is what we were giving it. However,
to get to the correct unsigned int value from a (signed) char, one has
to cast to an unsigned char (or equivalent) first.

Broken since inception in commit 7b75100.

Also adjusted similar (commented out) ext_edirectory_userip_acl code.

Reference:http://www.squid-cache.org/Versions/v6/SQUID-2023_5.patch
Conflict:NA
---
 src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc | 2 +-
 src/anyp/Uri.cc                                                 | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc b/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc
index bf124d24f..e3f33e209 100644
--- a/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc
+++ b/src/acl/external/eDirectory_userip/ext_edirectory_userip_acl.cc
@@ -1555,7 +1555,7 @@ MainSafe(int argc, char **argv)
     /* BINARY DEBUGGING *
     local_printfx("while() -> bufa[%" PRIuSIZE "]: %s", k, bufa);
     for (i = 0; i < k; ++i)
-        local_printfx("%02X", bufa[i]);
+        local_printfx("%02X", static_cast<unsigned int>(static_cast<unsigned char>(bufa[i])));
     local_printfx("\n");
     * BINARY DEBUGGING */
     /* Check for CRLF */
diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
index e37293996..eca2c2357 100644
--- a/src/anyp/Uri.cc
+++ b/src/anyp/Uri.cc
@@ -71,7 +71,7 @@ AnyP::Uri::Encode(const SBuf &buf, const CharacterSet &ignore)
     while (!tk.atEnd()) {
         // TODO: Add Tokenizer::parseOne(void).
         const auto ch = tk.remaining()[0];
-        output.appendf("%%%02X", static_cast<unsigned int>(ch)); // TODO: Optimize using a table
+        output.appendf("%%%02X", static_cast<unsigned int>(static_cast<unsigned char>(ch))); // TODO: Optimize using a table
         (void)tk.skip(ch);

         if (tk.prefix(goodSection, ignore))
--
2.25.1
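A standalone demonstration of the bug (assuming plain char is signed, as on x86): sign extension turns byte 0xE9 into a value with all high bits set, so %02X prints eight hex digits instead of two.

    #include <cstdio>

    int main()
    {
        const char ch = static_cast<char>(0xE9); // e.g. a UTF-8 byte in userinfo
        // broken: a signed char sign-extends, then converts to 0xFFFFFFE9
        std::printf("%%%02X\n", static_cast<unsigned int>(ch));            // %FFFFFFE9
        // fixed: go through unsigned char to recover the byte value 0xE9
        std::printf("%%%02X\n",
                    static_cast<unsigned int>(static_cast<unsigned char>(ch))); // %E9
        return 0;
    }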
@@ -1,34 +0,0 @@
From deee944f9a12c9fd399ce52f3e2526bb573a9470 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Wed, 25 Oct 2023 19:41:45 +0000
Subject: [PATCH] RFC 1123: Fix date parsing (#1538)

The bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/datetime-overflow.html
where it was filed as "1-Byte Buffer OverRead in RFC 1123 date/time
Handling".

Conflict:NA
Reference:https://github.com/squid-cache/squid/commit/deee944f9a12c9fd399ce52f3e2526bb573a9470
---
 src/time/rfc1123.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/time/rfc1123.cc b/src/time/rfc1123.cc
index d89d22262f6..7524959edb0 100644
--- a/src/time/rfc1123.cc
+++ b/src/time/rfc1123.cc
@@ -50,7 +50,13 @@ make_month(const char *s)
     char month[3];

     month[0] = xtoupper(*s);
+    if (!month[0])
+        return -1; // protects *(s + 1) below
+
     month[1] = xtolower(*(s + 1));
+    if (!month[1])
+        return -1; // protects *(s + 2) below
+
     month[2] = xtolower(*(s + 2));

     for (i = 0; i < 12; i++)
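The pattern, as a self-contained sketch (simplified; the real code uses Squid's xtoupper/xtolower and month table): check each character for NUL before reading the one after it, so a short token like "Ju" can never cause a read past the terminator.

    #include <cctype>

    static const char *Months[12] = {
        "Jan", "Feb", "Mar", "Apr", "May", "Jun",
        "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
    };

    int makeMonth(const char *s)
    {
        char month[3];
        month[0] = static_cast<char>(toupper(static_cast<unsigned char>(s[0])));
        if (!month[0])
            return -1;          // protects the s[1] read below
        month[1] = static_cast<char>(tolower(static_cast<unsigned char>(s[1])));
        if (!month[1])
            return -1;          // protects the s[2] read below
        month[2] = static_cast<char>(tolower(static_cast<unsigned char>(s[2])));
        for (int i = 0; i < 12; ++i)
            if (month[0] == Months[i][0] && month[1] == Months[i][1] && month[2] == Months[i][2])
                return i;
        return -1;              // not a known month
    }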
@@ -1,85 +0,0 @@
From 6014c6648a2a54a4ecb7f952ea1163e0798f9264 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Fri, 27 Oct 2023 21:27:20 +0000
Subject: [PATCH] Exit without asserting when helper process startup fails
 (#1543)

... to dup() after fork() and before execvp().

Assertions are for handling program logic errors. Helper initialization
code already handled system call errors correctly (i.e. by exiting the
newly created helper process with an error), except for a couple of
assert()s that could be triggered by dup(2) failures.

This bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/ipc-assert.html
where it was filed as 'Assertion in Squid "Helper" Process Creator'.

Conflict:NA
Reference:https://github.com/squid-cache/squid/commit/6014c6648a2a54a4ecb7f952ea1163e0798f9264
---
 src/ipc.cc | 32 ++++++++++++++++++++++++++------
 1 file changed, 26 insertions(+), 6 deletions(-)

diff --git a/src/ipc.cc b/src/ipc.cc
index 40d34b4755a..1afc4d5cf3c 100644
--- a/src/ipc.cc
+++ b/src/ipc.cc
@@ -22,6 +22,11 @@

 #include <chrono>
 #include <thread>
+#include <cstdlib>
+
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif

 static const char *hello_string = "hi there\n";
 #ifndef HELLO_BUF_SZ
@@ -362,6 +367,22 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name
     }

     PutEnvironment();
+
+    // A dup(2) wrapper that reports and exits the process on errors. The
+    // exiting logic is only suitable for this child process context.
+    const auto dupOrExit = [prog,name](const int oldFd) {
+        const auto newFd = dup(oldFd);
+        if (newFd < 0) {
+            const auto savedErrno = errno;
+            debugs(54, DBG_CRITICAL, "ERROR: Helper process initialization failure: " << name <<
+                   Debug::Extra << "helper (CHILD) PID: " << getpid() <<
+                   Debug::Extra << "helper program name: " << prog <<
+                   Debug::Extra << "dup(2) system call error for FD " << oldFd << ": " << xstrerr(savedErrno));
+            _exit(EXIT_FAILURE);
+        }
+        return newFd;
+    };
+
     /*
      * This double-dup stuff avoids problems when one of
      * crfd, cwfd, or debug_log are in the rage 0-2.
@@ -369,17 +390,16 @@ ipcCreate(int type, const char *prog, const char *const args[], const char *name

     do {
         /* First make sure 0-2 is occupied by something. Gets cleaned up later */
-        x = dup(crfd);
-        assert(x > -1);
-    } while (x < 3 && x > -1);
+        x = dupOrExit(crfd);
+    } while (x < 3);

     close(x);

-    t1 = dup(crfd);
+    t1 = dupOrExit(crfd);

-    t2 = dup(cwfd);
+    t2 = dupOrExit(cwfd);

-    t3 = dup(fileno(debug_log));
+    t3 = dupOrExit(fileno(debug_log));

     assert(t1 > 2 && t2 > 2 && t3 > 2);
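Outside Squid, the same idea looks like this (a sketch of a child-process-only helper; stderr stands in for Squid's debugs() channel): a failed dup(2) is a runtime error, so report it and _exit() the forked child instead of assert()ing; _exit() also skips atexit handlers inherited from the parent image.

    #include <cerrno>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <unistd.h>

    // Child-only: duplicate oldFd or terminate this helper process.
    static int dupOrExit(int oldFd)
    {
        const int newFd = dup(oldFd);
        if (newFd < 0) {
            std::fprintf(stderr, "helper init: dup(%d) failed: %s\n",
                         oldFd, std::strerror(errno));
            _exit(EXIT_FAILURE); // do not unwind or run inherited atexit handlers
        }
        return newFd;
    }

After this change, the remaining assert(t1 > 2 && t2 > 2 && t3 > 2) guards only program logic, not system-call failures.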
@@ -1,86 +0,0 @@
From 45b6522eb80a6d12f75630fe1c132b52fc3f1624 Mon Sep 17 00:00:00 2001
From: Thomas Leroy <32497783+p4zuu@users.noreply.github.com>
Date: Tue, 28 Nov 2023 07:35:46 +0000
Subject: [PATCH] Limit the number of allowed X-Forwarded-For hops (#1589)

Squid will ignore all X-Forwarded-For elements listed after the first 64
addresses allowed by the follow_x_forwarded_for directive. A different
limit can be specified by defining a C++ SQUID_X_FORWARDED_FOR_HOP_MAX
macro, but that macro is not a supported Squid configuration interface
and may change or disappear at any time.

Squid will log a cache.log ERROR if the hop limit has been reached.

This change works around problematic ACLChecklist and/or slow ACLs
implementation that results in immediate nonBlockingCheck() callbacks.
Such callbacks have caused many bugs and development complications. In
clientFollowXForwardedForCheck() context, they lead to indirect
recursion that was bound only by the number of allowed XFF entries,
which could reach thousands and exhaust Squid process call stack.

This recursion bug was discovered and detailed by Joshua Rogers at
https://megamansec.github.io/Squid-Security-Audit/xff-stackoverflow.html
where it was filed as "X-Forwarded-For Stack Overflow".

Conflict: NA
Reference: https://github.com/squid-cache/squid/commit/45b6522eb80a6d12f75630fe1c132b52fc3f1624
---
 src/ClientRequestContext.h |  7 ++++++-
 src/client_side_request.cc | 17 +++++++++++++++--
 2 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/src/ClientRequestContext.h b/src/ClientRequestContext.h
index 16c33bba4bc..0997f5d22b6 100644
--- a/src/ClientRequestContext.h
+++ b/src/ClientRequestContext.h
@@ -80,8 +80,13 @@ class ClientRequestContext : public RefCountable
 #if USE_OPENSSL
     bool sslBumpCheckDone = false;
 #endif
-    ErrorState *error = nullptr; ///< saved error page for centralized/delayed processing
+
     bool readNextRequest = false; ///< whether Squid should read after error handling
+    ErrorState *error = nullptr; ///< saved error page for centralized/delayed processing
+
+#if FOLLOW_X_FORWARDED_FOR
+    size_t currentXffHopNumber = 0; ///< number of X-Forwarded-For header values processed so far
+#endif
 };

 #endif /* SQUID_CLIENTREQUESTCONTEXT_H */
diff --git a/src/client_side_request.cc b/src/client_side_request.cc
index 5b5b5af8086..7f802d4219e 100644
--- a/src/client_side_request.cc
+++ b/src/client_side_request.cc
@@ -75,6 +75,11 @@
 #endif

 #if FOLLOW_X_FORWARDED_FOR
+
+#if !defined(SQUID_X_FORWARDED_FOR_HOP_MAX)
+#define SQUID_X_FORWARDED_FOR_HOP_MAX 64
+#endif
+
 static void clientFollowXForwardedForCheck(Acl::Answer answer, void *data);
 #endif /* FOLLOW_X_FORWARDED_FOR */

@@ -437,8 +442,16 @@ clientFollowXForwardedForCheck(Acl::Answer answer, void *data)
             /* override the default src_addr tested if we have to go deeper than one level into XFF */
             Filled(calloutContext->acl_checklist)->src_addr = request->indirect_client_addr;
         }
-        calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
-        return;
+        if (++calloutContext->currentXffHopNumber < SQUID_X_FORWARDED_FOR_HOP_MAX) {
+            calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
+            return;
+        }
+        const auto headerName = Http::HeaderLookupTable.lookup(Http::HdrType::X_FORWARDED_FOR).name;
+        debugs(28, DBG_CRITICAL, "ERROR: Ignoring trailing " << headerName << " addresses" <<
+               Debug::Extra << "addresses allowed by follow_x_forwarded_for: " << calloutContext->currentXffHopNumber <<
+               Debug::Extra << "last/accepted address: " << request->indirect_client_addr <<
+               Debug::Extra << "ignored trailing addresses: " << request->x_forwarded_for_iterator);
+        // fall through to resume clientAccessCheck() processing
     }
 }

--
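The defensive pattern here is worth noting on its own: when a "non-blocking" API may invoke its callback immediately, callback depth becomes input-controlled unless explicitly bounded. A minimal sketch (hypothetical names mirroring the patch):

    #include <cstddef>

    #ifndef SQUID_X_FORWARDED_FOR_HOP_MAX
    #define SQUID_X_FORWARDED_FOR_HOP_MAX 64
    #endif

    struct XffContext {                  // hypothetical stand-in for the callout context
        size_t currentXffHopNumber = 0;

        // true: schedule the next ACL check; false: stop and log, bounding
        // the input-controlled (and possibly recursive) callback chain
        bool mayFollowAnotherHop()
        {
            return ++currentXffHopNumber < SQUID_X_FORWARDED_FOR_HOP_MAX;
        }
    };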
backport-squid-crash-half-closed.patch (new file, 196 lines)
@@ -0,0 +1,196 @@
From 5da786ef2a708559f5b53a05b7db6de0b64ce885 Mon Sep 17 00:00:00 2001
From: Alex Rousskov <rousskov@measurement-factory.com>
Date: Mon, 11 Sep 2023 07:49:36 +0000
Subject: [PATCH] Bug 4156: comm.cc "!commHasHalfClosedMonitor(fd)" assertion
 (#1443)

This bug is specific to "half_closed_clients on" configurations.

    assertion failed: ... "isOpen(fd) && !commHasHalfClosedMonitor(fd)"
    location: comm.cc:1583 in commStartHalfClosedMonitor()

Squid asserts because Server schedules comm_read() after receiving EOF:
That extra read results in another EOF notification, and an attempt to
start monitoring an already monitored half-closed connection.

Upon detecting a potentially half-closed connection,
Server::doClientRead() should clear flags.readMore to prevent Server
from scheduling another comm_read(), but it does not and cannot do that
(without significant refactoring) because

* Server does not have access to flags.readMore
* flags.readMore hack is used for more than just "read more"

We worked around the above limitation by re-detecting half-closed
conditions and clearing flags.readMore after clientParseRequests(). That
fixed the bug but further increased poor code duplication across
ConnStateData::afterClientRead() and ConnStateData::kick() methods. We
then refactored by merging and moving that duplicated code into
clientParseRequests() and renamed that method to make backports safer.

Conflict: NA
Reference: https://github.com/squid-cache/squid/commit/5da786
---
 src/client_side.cc            | 67 ++++++++++++-----------------
 src/client_side.h             |  2 +-
 src/tests/stub_client_side.cc |  2 +-
 3 files changed, 24 insertions(+), 47 deletions(-)

diff --git a/src/client_side.cc b/src/client_side.cc
index e62bcf0fc7d..bd9cf6a7d5f 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -932,7 +932,7 @@ ConnStateData::kick()
      * We are done with the response, and we are either still receiving request
      * body (early response!) or have already stopped receiving anything.
      *
-     * If we are still receiving, then clientParseRequest() below will fail.
+     * If we are still receiving, then parseRequests() below will fail.
      * (XXX: but then we will call readNextRequest() which may succeed and
      * execute a smuggled request as we are not done with the current request).
      *
@@ -952,28 +952,12 @@ ConnStateData::kick()
      * Attempt to parse a request from the request buffer.
      * If we've been fed a pipelined request it may already
      * be in our read buffer.
-     *
-     \par
-     * This needs to fall through - if we're unlucky and parse the _last_ request
-     * from our read buffer we may never re-register for another client read.
      */

-    if (clientParseRequests()) {
-        debugs(33, 3, clientConnection << ": parsed next request from buffer");
-    }
+    parseRequests();

-    /** \par
-     * Either we need to kick-start another read or, if we have
-     * a half-closed connection, kill it after the last request.
-     * This saves waiting for half-closed connections to finished being
-     * half-closed _AND_ then, sometimes, spending "Timeout" time in
-     * the keepalive "Waiting for next request" state.
-     */
-    if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
-        debugs(33, 3, "half-closed client with no pending requests, closing");
-        clientConnection->close();
+    if (!isOpen())
         return;
-    }

     /** \par
      * At this point we either have a parsed request (which we've
@@ -1882,16 +1866,11 @@ ConnStateData::receivedFirstByte()
     resetReadTimeout(Config.Timeout.request);
 }

-/**
- * Attempt to parse one or more requests from the input buffer.
- * Returns true after completing parsing of at least one request [header]. That
- * includes cases where parsing ended with an error (e.g., a huge request).
- */
-bool
-ConnStateData::clientParseRequests()
+/// Attempt to parse one or more requests from the input buffer.
+/// May close the connection.
+void
+ConnStateData::parseRequests()
 {
-    bool parsed_req = false;
-
     debugs(33, 5, clientConnection << ": attempting to parse");

     // Loop while we have read bytes that are not needed for producing the body
@@ -1936,8 +1915,6 @@ ConnStateData::clientParseRequests()

         processParsedRequest(context);

-        parsed_req = true; // XXX: do we really need to parse everything right NOW ?
-
         if (context->mayUseConnection()) {
             debugs(33, 3, "Not parsing new requests, as this request may need the connection");
             break;
@@ -1950,8 +1927,19 @@ ConnStateData::clientParseRequests()
         }
     }

-    /* XXX where to 'finish' the parsing pass? */
-    return parsed_req;
+    debugs(33, 7, "buffered leftovers: " << inBuf.length());
+
+    if (isOpen() && commIsHalfClosed(clientConnection->fd)) {
+        if (pipeline.empty()) {
+            // we processed what we could parse, and no more data is coming
+            debugs(33, 5, "closing half-closed without parsed requests: " << clientConnection);
+            clientConnection->close();
+        } else {
+            // we parsed what we could, and no more data is coming
+            debugs(33, 5, "monitoring half-closed while processing parsed requests: " << clientConnection);
+            flags.readMore = false; // may already be false
+        }
+    }
 }

 void
@@ -1968,18 +1956,7 @@ ConnStateData::afterClientRead()
     if (pipeline.empty())
         fd_note(clientConnection->fd, "Reading next request");

-    if (!clientParseRequests()) {
-        if (!isOpen())
-            return;
-        // We may get here if the client half-closed after sending a partial
-        // request. See doClientRead() and shouldCloseOnEof().
-        // XXX: This partially duplicates ConnStateData::kick().
-        if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
-            debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
-            clientConnection->close();
-            return;
-        }
-    }
+    parseRequests();

     if (!isOpen())
         return;
@@ -3767,7 +3744,7 @@ ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
     startPinnedConnectionMonitoring();

     if (pipeline.empty())
-        kick(); // in case clientParseRequests() was blocked by a busy pic.connection
+        kick(); // in case parseRequests() was blocked by a busy pic.connection
 }

 /// Forward future client requests using the given server connection.
diff --git a/src/client_side.h b/src/client_side.h
index e37ab27da1a..9f36d864c2c 100644
--- a/src/client_side.h
+++ b/src/client_side.h
@@ -98,7 +98,6 @@ class ConnStateData:
     void doneWithControlMsg() override;

     /// Traffic parsing
-    bool clientParseRequests();
     void readNextRequest();

     /// try to make progress on a transaction or read more I/O
@@ -443,6 +442,7 @@ class ConnStateData:

     void checkLogging();

+    void parseRequests();
     void clientAfterReadingRequests();
     bool concurrentRequestQueueFilled() const;

diff --git a/src/tests/stub_client_side.cc b/src/tests/stub_client_side.cc
index 8c160e56340..f49d5dceeed 100644
--- a/src/tests/stub_client_side.cc
+++ b/src/tests/stub_client_side.cc
@@ -14,7 +14,7 @@
 #include "tests/STUB.h"

 #include "client_side.h"
-bool ConnStateData::clientParseRequests() STUB_RETVAL(false)
+void ConnStateData::parseRequests() STUB
 void ConnStateData::readNextRequest() STUB
 bool ConnStateData::isOpen() const STUB_RETVAL(false)
 void ConnStateData::kick() STUB
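The merged logic is small enough to restate as a sketch (hypothetical stand-in for ConnStateData; the real checks go through isOpen() and commIsHalfClosed()): after a parsing pass over a half-closed client, close immediately only when nothing parsed remains; otherwise keep the connection but stop scheduling reads, which is the flags.readMore handling that Server::doClientRead() could not do itself.

    struct ConnSketch {              // hypothetical stand-in for ConnStateData
        bool halfClosed = false;
        bool pipelineEmpty = true;
        bool readMore = true;

        void finishParsingPass() {
            if (!halfClosed)
                return;              // normal keep-alive handling applies
            if (pipelineEmpty)
                close();             // nothing left to serve; no more data coming
            else
                readMore = false;    // serve what we parsed, but read no further
        }
        void close() { /* release the connection */ }
    };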
squid-6.1.tar.xz (binary file removed; not shown)
@@ -1,25 +0,0 @@
File: squid-6.1.tar.xz
Date: Thu 06 Jul 2023 05:42:59 UTC
Size: 2546668
MD5 : 64841841e06ea487b0305070c33b04dd
SHA1: 4a3711e42ca9acbba580fd0c306cc2f6f84db1f8
Key : CD6DBF8EF3B17D3E <squid3@treenet.co.nz>
      B068 84ED B779 C89B 044E 64E3 CD6D BF8E F3B1 7D3E
      keyring = http://www.squid-cache.org/pgp.asc
      keyserver = pool.sks-keyservers.net
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAmSmVGsACgkQzW2/jvOx
fT6rPA/9Gkkw4w3h0EFPI7mHvjquoc2mW/zX5clfZmhoA81u9xRUazgZeaPcLydF
rwRxNPITWzZ+emqeTMLrrVndHXAqKqg8VRbHymWEAh3aqVzvaj98h9iZylvPLX1N
rdToqDBET8E4YHYUsmUdoegg5XgGiFEC3fqcS7/3eOUUoWsrFsKy6WAF2g1lS5yx
kqfRA5Nim23ckACsoRcMPfVPHxRFuyoYiLAsgMjx2cZAGDVtFos260N1QK8xdQkE
o/LEv2zp4wXFMeLSJJsvgl9SfqA7XtC91ZH8nrAvmWwj99Totnt5KEa8MiF0Wn0K
dpB2X1meb/dx8CI9AMNex9hedUspPlAMLCI8ggR8KtzW31g/7GagXpsKmJIEdk+S
Yjq4NXHS0eDmiMcI2ZDBp6Sk/ty1VrnH61GqA8FDEOTTAJGvFu6DahVgxHE6s0aj
pOw8AmM/0yP2kuafhchbRQQ9bCFBsO4z4sUyGNkHNHCjX3XimW3m4mBPSlEkDAF2
dbUdAwIjBdS8zdU0N6wB+WXy7y459bsQBkWQMc7P4TqQ02IeL+4boF2c8RpgWiDf
hHS03U60zAP36m6HlC1nSnGnMABlwvBPg928yMq/jrf75T5DQHOSEuQ69NxF61ge
SLGX+aEGwwXGsHhGfW6W9sORKaiJNI683US3vGOn33CX+L5rCIU=
=hwBL
-----END PGP SIGNATURE-----
squid-6.6.tar.xz (new binary file; not shown)
squid-6.6.tar.xz.asc (new file, 25 lines)
@@ -0,0 +1,25 @@
File: squid-6.6.tar.xz
Date: Thu 07 Dec 2023 04:03:46 UTC
Size: 2554824
MD5 : 5a41134ee1b7e75f62088acdec92d2ca
SHA1: f05e06a9dd3bf7501d2844e43d9ae1bd00e9edcc
Key : CD6DBF8EF3B17D3E <squid3@treenet.co.nz>
      B068 84ED B779 C89B 044E 64E3 CD6D BF8E F3B1 7D3E
      keyring = http://www.squid-cache.org/pgp.asc
      keyserver = pool.sks-keyservers.net
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAmVxRCsACgkQzW2/jvOx
fT5VtQ/+M+mhaGYCp9YBi1GG9vyQwkkIyngL3vPpz7UxZHAR+mzk29zwlgdDgwWA
Zasaomg8S1Clq2dhNr7oo6RuZ7mKlhEeHba2WvL+1/VcBsPnazUwzYQiW7k9KxYe
n1At62duit+YnswTNnj6HJRKKK0nKlPmJycL1AThh9Tj6oHTsWBCItnSZ5eUjGX0
aKiMrkrHtq3qheWkVZPCJEFDs88ECDrJD7s9cpAhun+/0v+4ECE65uJ2bZHK4f/E
TH5OIf8vltEB8sA/SSanMM/C+gZObET3TssrgHz92j0svMOlALLtitb0aHly21JV
fEKB200Ngac2y6rq3xDNiznmMn+SeCNUsiDcdauCrsUHNW9S9FhOxeWXy/Z7JK4A
mqVnnqvN9GFvv2EEC8J9lj+cwGOdaSW6L2aPVkub8Ij5O+e2Tg+uBm4ZC8vcACYz
+1oo8YyvcfO9EmNRE0vpFTWH9Ux5ptgdvsIxv41QN40RUYN7FBbOgey59mP3uq2Q
0g/b8lr1PnrwB74OrVGcXLwREFLXtkRC9vcdNjvdchCg60KlBNWEPSGJA2adS8HJ
4AGyVpU8GCpV3q74rJxIG6FUffL85CfT+1HRmQhzYiGJDzy1AaUJmcelyS4e6cjn
urAWH3mlAaPzj87OuaeZYGAZMWh/5iAarU+VHkZn6vI2Mvl9yMA=
=oyMI
-----END PGP SIGNATURE-----
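The .asc files above are detached PGP signatures over the release tarballs. Assuming the squid-cache.org keyring has been downloaded as pgp.asc (per the keyring URL listed in the signature header), verification is the standard two-step:

    gpg --import pgp.asc
    gpg --verify squid-6.6.tar.xz.asc squid-6.6.tar.xz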
squid.spec (20 lines changed)
@@ -1,8 +1,8 @@
 %define __perl_requires %{SOURCE8}

 Name: squid
-Version: 6.1
-Release: 5
+Version: 6.6
+Release: 1
 Summary: The Squid proxy caching server
 Epoch: 7
 License: GPLv2+ and (LGPLv2+ and MIT and BSD and Public Domain)
@@ -21,15 +21,7 @@ Patch0: squid-4.0.11-config.patch
 Patch1: squid-3.1.0.9-location.patch
 Patch2: squid-3.0.STABLE1-perlpath.patch
 Patch3: backport-squid-6.1-symlink-lang-err.patch
-Patch4: backport-0001-CVE-2023-5824.patch
-Patch5: backport-0002-CVE-2023-5824.patch
-Patch6: backport-CVE-2023-46846.patch
-Patch7: backport-CVE-2023-46847.patch
-Patch8: backport-CVE-2023-46848.patch
-Patch9: backport-CVE-2023-46724.patch
-Patch10: backport-CVE-2023-49285.patch
-Patch11: backport-CVE-2023-49286.patch
-Patch12: backport-CVE-2023-50269.patch
+Patch4: backport-squid-crash-half-closed.patch

 Requires: bash
 Requires: httpd-filesystem
@@ -252,6 +244,12 @@ fi
 chgrp squid /var/cache/samba/winbindd_privileged >/dev/null 2>&1 || :

 %changelog
+* Tue Dec 26 2023 xinghe <xinghe2@h-partners.com> - 7:6.6-1
+- Type:requirements
+- ID:NA
+- SUG:NA
+- DESC:upgrade to 6.6
+
 * Fri Dec 15 2023 xinghe <xinghe2@h-partners.com> - 7:6.1-5
 - Type:cves
 - ID:CVE-2023-50269