diff --git a/CVE-2019-12519.patch b/CVE-2019-12519.patch deleted file mode 100644 index d0f7c27..0000000 --- a/CVE-2019-12519.patch +++ /dev/null @@ -1,296 +0,0 @@ -commit fdd4123629320aa1ee4c3481bb392437c90d188d -Author: Amos Jeffries -Date: 2019-05-20 11:23:13 +0000 - - ESI: convert parse exceptions into 500 status response (#411) - - Produce a valid HTTP 500 status reply and continue operations when - ESI parser throws an exception. This will prevent incomplete ESI - responses reaching clients on server errors. Such responses might - have been cacheable and thus corrupted, albeit corrupted consistently - and at source by the reverse-proxy delivering them. - - ESI: throw on large stack recursions (#408) - - This reduces the impact on concurrent clients to only those - accessing the malformed resource. - - Depending on what type of recursion is being performed the - resource may appear to the client with missing segments, or - not at all. - -diff --git a/src/esi/Context.h b/src/esi/Context.h -index f3281a1..1b08cfb 100644 ---- a/src/esi/Context.h -+++ b/src/esi/Context.h -@@ -12,6 +12,7 @@ - #include "clientStream.h" - #include "err_type.h" - #include "esi/Element.h" -+#include "esi/Esi.h" - #include "esi/Parser.h" - #include "http/forward.h" - #include "http/StatusCode.h" -@@ -113,7 +114,7 @@ public: - { - - public: -- ESIElement::Pointer stack[10]; /* a stack of esi elements that are open */ -+ ESIElement::Pointer stack[ESI_STACK_DEPTH_LIMIT]; /* a stack of esi elements that are open */ - int stackdepth; /* self explanatory */ - ESIParser::Pointer theParser; - ESIElement::Pointer top(); -diff --git a/src/esi/Esi.cc b/src/esi/Esi.cc -index cc662c4..e41d593 100644 ---- a/src/esi/Esi.cc -+++ b/src/esi/Esi.cc -@@ -29,6 +29,7 @@ - #include "esi/Expression.h" - #include "esi/Segment.h" - #include "esi/VarState.h" -+#include "FadingCounter.h" - #include "fatal.h" - #include "http/Stream.h" - #include "HttpHdrSc.h" -@@ -930,13 +931,18 @@ void - 
ESIContext::addStackElement (ESIElement::Pointer element) - { - /* Put on the stack to allow skipping of 'invalid' markup */ -- assert (parserState.stackdepth <11); -+ -+ // throw an error if the stack location would be invalid -+ if (parserState.stackdepth >= ESI_STACK_DEPTH_LIMIT) -+ throw Esi::Error("ESI Too many nested elements"); -+ if (parserState.stackdepth < 0) -+ throw Esi::Error("ESI elements stack error, probable error in ESI template"); -+ - assert (!failed()); - debugs(86, 5, "ESIContext::addStackElement: About to add ESI Node " << element.getRaw()); - - if (!parserState.top()->addElement(element)) { -- debugs(86, DBG_IMPORTANT, "ESIContext::addStackElement: failed to add esi node, probable error in ESI template"); -- flags.error = 1; -+ throw Esi::Error("ESIContext::addStackElement failed, probable error in ESI template"); - } else { - /* added ok, push onto the stack */ - parserState.stack[parserState.stackdepth] = element; -@@ -1188,13 +1194,10 @@ ESIContext::addLiteral (const char *s, int len) - assert (len); - debugs(86, 5, "literal length is " << len); - /* give a literal to the current element */ -- assert (parserState.stackdepth <11); - ESIElement::Pointer element (new esiLiteral (this, s, len)); - -- if (!parserState.top()->addElement(element)) { -- debugs(86, DBG_IMPORTANT, "ESIContext::addLiteral: failed to add esi node, probable error in ESI template"); -- flags.error = 1; -- } -+ if (!parserState.top()->addElement(element)) -+ throw Esi::Error("ESIContext::addLiteral failed, probable error in ESI template"); - } - - void -@@ -1256,8 +1259,24 @@ ESIContext::parse() - - PROF_start(esiParsing); - -- while (buffered.getRaw() && !flags.error) -- parseOneBuffer(); -+ try { -+ while (buffered.getRaw() && !flags.error) -+ parseOneBuffer(); -+ -+ } catch (Esi::ErrorDetail &errMsg) { // FIXME: non-const for c_str() -+ // level-2: these are protocol/syntax errors from upstream -+ debugs(86, 2, "WARNING: ESI syntax error: " << errMsg); -+ setError(); 
-+ setErrorMessage(errMsg.c_str()); -+ -+ } catch (...) { -+ // DBG_IMPORTANT because these are local issues the admin needs to fix -+ static FadingCounter logEntries; // TODO: set horizon less than infinity -+ if (logEntries.count(1) < 100) -+ debugs(86, DBG_IMPORTANT, "ERROR: ESI parser: " << CurrentException); -+ setError(); -+ setErrorMessage("ESI parser error"); -+ } - - PROF_stop(esiParsing); - -diff --git a/src/esi/Esi.h b/src/esi/Esi.h -index 180b2c4..6fd5aac 100644 ---- a/src/esi/Esi.h -+++ b/src/esi/Esi.h -@@ -10,6 +10,11 @@ - #define SQUID_ESI_H - - #include "clientStream.h" -+#include "sbuf/SBuf.h" -+ -+#if !defined(ESI_STACK_DEPTH_LIMIT) -+#define ESI_STACK_DEPTH_LIMIT 20 -+#endif - - /* ESI.c */ - extern CSR esiStreamRead; -@@ -18,5 +23,14 @@ extern CSD esiStreamDetach; - extern CSS esiStreamStatus; - int esiEnableProcessing (HttpReply *); - -+namespace Esi -+{ -+ -+typedef SBuf ErrorDetail; -+/// prepare an Esi::ErrorDetail for throw on ESI parser internal errors -+inline Esi::ErrorDetail Error(const char *msg) { return ErrorDetail(msg); } -+ -+} // namespace Esi -+ - #endif /* SQUID_ESI_H */ - -diff --git a/src/esi/Expression.cc b/src/esi/Expression.cc -index 2b5b762..8519b03 100644 ---- a/src/esi/Expression.cc -+++ b/src/esi/Expression.cc -@@ -10,6 +10,7 @@ - - #include "squid.h" - #include "Debug.h" -+#include "esi/Esi.h" - #include "esi/Expression.h" - #include "profiler/Profiler.h" - -@@ -97,6 +98,17 @@ stackpop(stackmember * s, int *depth) - cleanmember(&s[*depth]); - } - -+static void -+stackpush(stackmember *stack, stackmember &item, int *depth) -+{ -+ if (*depth < 0) -+ throw Esi::Error("ESIExpression stack has negative size"); -+ if (*depth >= ESI_STACK_DEPTH_LIMIT) -+ throw Esi::Error("ESIExpression stack is full, cannot push"); -+ -+ stack[(*depth)++] = item; -+} -+ - static evaluate evalnegate; - static evaluate evalliteral; - static evaluate evalor; -@@ -208,6 +220,11 @@ evalnegate(stackmember * stack, int *depth, int whereAmI, 
stackmember * candidat - /* invalid stack */ - return 1; - -+ if (whereAmI < 0) -+ throw Esi::Error("negate expression location too small"); -+ if (*depth >= ESI_STACK_DEPTH_LIMIT) -+ throw Esi::Error("negate expression too complex"); -+ - if (stack[whereAmI + 1].valuetype != ESI_EXPR_EXPR) - /* invalid operand */ - return 1; -@@ -280,7 +297,7 @@ evalor(stackmember * stack, int *depth, int whereAmI, stackmember * candidate) - - srv.precedence = 1; - -- stack[(*depth)++] = srv; -+ stackpush(stack, srv, depth); - - /* we're out of way, try adding now */ - if (!addmember(stack, depth, candidate)) -@@ -327,7 +344,7 @@ evaland(stackmember * stack, int *depth, int whereAmI, stackmember * candidate) - - srv.precedence = 1; - -- stack[(*depth)++] = srv; -+ stackpush(stack, srv, depth); - - /* we're out of way, try adding now */ - if (!addmember(stack, depth, candidate)) -@@ -373,7 +390,7 @@ evallesseq(stackmember * stack, int *depth, int whereAmI, stackmember * candidat - - srv.precedence = 1; - -- stack[(*depth)++] = srv; -+ stackpush(stack, srv, depth); - - /* we're out of way, try adding now */ - if (!addmember(stack, depth, candidate)) -@@ -421,7 +438,7 @@ evallessthan(stackmember * stack, int *depth, int whereAmI, stackmember * candid - - srv.precedence = 1; - -- stack[(*depth)++] = srv; -+ stackpush(stack, srv, depth); - - /* we're out of way, try adding now */ - if (!addmember(stack, depth, candidate)) -@@ -469,7 +486,7 @@ evalmoreeq(stackmember * stack, int *depth, int whereAmI, stackmember * candidat - - srv.precedence = 1; - -- stack[(*depth)++] = srv; -+ stackpush(stack, srv, depth); - - /* we're out of way, try adding now */ - if (!addmember(stack, depth, candidate)) -@@ -517,7 +534,7 @@ evalmorethan(stackmember * stack, int *depth, int whereAmI, stackmember * candid - - srv.precedence = 1; - -- stack[(*depth)++] = srv; -+ stackpush(stack, srv, depth); - - /* we're out of way, try adding now */ - if (!addmember(stack, depth, candidate)) -@@ -566,7 +583,7 @@ 
evalequals(stackmember * stack, int *depth, int whereAmI, - - srv.precedence = 1; - -- stack[(*depth)++] = srv; -+ stackpush(stack, srv, depth); - - /* we're out of way, try adding now */ - if (!addmember(stack, depth, candidate)) -@@ -613,7 +630,7 @@ evalnotequals(stackmember * stack, int *depth, int whereAmI, stackmember * candi - - srv.precedence = 1; - -- stack[(*depth)++] = srv; -+ stackpush(stack, srv, depth); - - /* we're out of way, try adding now */ - if (!addmember(stack, depth, candidate)) -@@ -953,6 +970,9 @@ addmember(stackmember * stack, int *stackdepth, stackmember * candidate) - /* !(!(a==b))) is why thats safe */ - /* strictly less than until we unwind */ - -+ if (*stackdepth >= ESI_STACK_DEPTH_LIMIT) -+ throw Esi::Error("ESI expression too complex to add member"); -+ - if (candidate->precedence < stack[*stackdepth - 1].precedence || - candidate->precedence < stack[*stackdepth - 2].precedence) { - /* must be an operator */ -@@ -968,10 +988,10 @@ addmember(stackmember * stack, int *stackdepth, stackmember * candidate) - return 0; - } - } else { -- stack[(*stackdepth)++] = *candidate; -+ stackpush(stack, *candidate, stackdepth); - } - } else if (candidate->valuetype != ESI_EXPR_INVALID) -- stack[(*stackdepth)++] = *candidate; -+ stackpush(stack, *candidate, stackdepth); - - return 1; - } -@@ -979,7 +999,7 @@ addmember(stackmember * stack, int *stackdepth, stackmember * candidate) - int - ESIExpression::Evaluate(char const *s) - { -- stackmember stack[20]; -+ stackmember stack[ESI_STACK_DEPTH_LIMIT]; - int stackdepth = 0; - char const *end; - PROF_start(esiExpressionEval); diff --git a/CVE-2019-12528.patch b/CVE-2019-12528.patch deleted file mode 100644 index c8c42d0..0000000 --- a/CVE-2019-12528.patch +++ /dev/null @@ -1,174 +0,0 @@ -From c3972f03bed2ca25e212e52a9c216d8a3d102892 Mon Sep 17 00:00:00 2001 -From: Christos Tsantilas -Date: Fri, 20 Dec 2019 07:29:58 +0000 -Subject: [PATCH] Fix FTP buffers handling (#521) - -Fix the parsing of the received 
listing from FTP services. -Also relaxed size/filename grammar used for DOS listings: Tolerate -multiple spaces between the size and the filename. - -This is a Measurement Factory project ---- - src/clients/FtpGateway.cc | 81 +++++++++++++++++++++++------------------------ - 1 file changed, 39 insertions(+), 42 deletions(-) - -diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc -index 140c441..33286b5 100644 ---- a/src/clients/FtpGateway.cc -+++ b/src/clients/FtpGateway.cc -@@ -532,8 +532,10 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) - { - ftpListParts *p = NULL; - char *t = NULL; -- const char *ct = NULL; -- char *tokens[MAX_TOKENS]; -+ struct FtpLineToken { -+ char *token = nullptr; ///< token image copied from the received line -+ size_t pos = 0; ///< token offset on the received line -+ } tokens[MAX_TOKENS]; - int i; - int n_tokens; - static char tbuf[128]; -@@ -574,7 +576,8 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) - } - - for (t = strtok(xbuf, w_space); t && n_tokens < MAX_TOKENS; t = strtok(NULL, w_space)) { -- tokens[n_tokens] = xstrdup(t); -+ tokens[n_tokens].token = xstrdup(t); -+ tokens[n_tokens].pos = t - xbuf; - ++n_tokens; - } - -@@ -582,10 +585,10 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) - - /* locate the Month field */ - for (i = 3; i < n_tokens - 2; ++i) { -- char *size = tokens[i - 1]; -- char *month = tokens[i]; -- char *day = tokens[i + 1]; -- char *year = tokens[i + 2]; -+ const auto size = tokens[i - 1].token; -+ char *month = tokens[i].token; -+ char *day = tokens[i + 1].token; -+ char *year = tokens[i + 2].token; - - if (!is_month(month)) - continue; -@@ -599,23 +602,27 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) - if (regexec(&scan_ftp_time, year, 0, NULL, 0) != 0) /* Yr | hh:mm */ - continue; - -- snprintf(tbuf, 128, "%s %2s %5s", -- month, day, year); -+ const auto *copyFrom = buf + tokens[i].pos; - -- if 
(!strstr(buf, tbuf)) -- snprintf(tbuf, 128, "%s %2s %-5s", -- month, day, year); -+ // "MMM DD [ YYYY|hh:mm]" with at most two spaces between DD and YYYY -+ auto dateSize = snprintf(tbuf, sizeof(tbuf), "%s %2s %5s", month, day, year); -+ bool isTypeA = (dateSize == 12) && (strncmp(copyFrom, tbuf, dateSize) == 0); - -- char const *copyFrom = NULL; -+ // "MMM DD [YYYY|hh:mm]" with one space between DD and YYYY -+ dateSize = snprintf(tbuf, sizeof(tbuf), "%s %2s %-5s", month, day, year); -+ bool isTypeB = (dateSize == 12 || dateSize == 11) && (strncmp(copyFrom, tbuf, dateSize) == 0); - -- if ((copyFrom = strstr(buf, tbuf))) { -- p->type = *tokens[0]; -+ // TODO: replace isTypeA and isTypeB with a regex. -+ if (isTypeA || isTypeB) { -+ p->type = *tokens[0].token; - p->size = strtoll(size, NULL, 10); -+ const auto finalDateSize = snprintf(tbuf, sizeof(tbuf), "%s %2s %5s", month, day, year); -+ assert(finalDateSize >= 0); - p->date = xstrdup(tbuf); - -+ // point after tokens[i+2] : -+ copyFrom = buf + tokens[i + 2].pos + strlen(tokens[i + 2].token); - if (flags.skip_whitespace) { -- copyFrom += strlen(tbuf); -- - while (strchr(w_space, *copyFrom)) - ++copyFrom; - } else { -@@ -627,7 +634,6 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) - * Assuming a single space between date and filename - * suggested by: Nathan.Bailey@cc.monash.edu.au and - * Mike Battersby */ -- copyFrom += strlen(tbuf); - if (strchr(w_space, *copyFrom)) - ++copyFrom; - } -@@ -647,45 +653,36 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags) - - /* try it as a DOS listing, 04-05-70 09:33PM ... 
*/ - if (n_tokens > 3 && -- regexec(&scan_ftp_dosdate, tokens[0], 0, NULL, 0) == 0 && -- regexec(&scan_ftp_dostime, tokens[1], 0, NULL, 0) == 0) { -- if (!strcasecmp(tokens[2], "")) { -+ regexec(&scan_ftp_dosdate, tokens[0].token, 0, NULL, 0) == 0 && -+ regexec(&scan_ftp_dostime, tokens[1].token, 0, NULL, 0) == 0) { -+ if (!strcasecmp(tokens[2].token, "")) { - p->type = 'd'; - } else { - p->type = '-'; -- p->size = strtoll(tokens[2], NULL, 10); -+ p->size = strtoll(tokens[2].token, NULL, 10); - } - -- snprintf(tbuf, 128, "%s %s", tokens[0], tokens[1]); -+ snprintf(tbuf, sizeof(tbuf), "%s %s", tokens[0].token, tokens[1].token); - p->date = xstrdup(tbuf); - - if (p->type == 'd') { -- /* Directory.. name begins with first printable after */ -- ct = strstr(buf, tokens[2]); -- ct += strlen(tokens[2]); -- -- while (xisspace(*ct)) -- ++ct; -- -- if (!*ct) -- ct = NULL; -+ // Directory.. name begins with first printable after -+ // Because of the "n_tokens > 3", the next printable after -+ // is stored at token[3]. No need for more checks here. - } else { -- /* A file. Name begins after size, with a space in between */ -- snprintf(tbuf, 128, " %s %s", tokens[2], tokens[3]); -- ct = strstr(buf, tbuf); -- -- if (ct) { -- ct += strlen(tokens[2]) + 2; -- } -+ // A file. Name begins after size, with a space in between. -+ // Also a space should exist before size. -+ // But there is not needed to be very strict with spaces. -+ // The name is stored at token[3], take it from here. - } - -- p->name = xstrdup(ct ? 
ct : tokens[3]); -+ p->name = xstrdup(tokens[3].token); - goto found; - } - - /* Try EPLF format; carson@lehman.com */ - if (buf[0] == '+') { -- ct = buf + 1; -+ const char *ct = buf + 1; - p->type = 0; - - while (ct && *ct) { -@@ -756,7 +753,7 @@ blank: - found: - - for (i = 0; i < n_tokens; ++i) -- xfree(tokens[i]); -+ xfree(tokens[i].token); - - if (!p->name) - ftpListPartsFree(&p); /* cleanup */ --- -1.8.3.1 - diff --git a/CVE-2020-11945.patch b/CVE-2020-11945.patch deleted file mode 100644 index 649b3fc..0000000 --- a/CVE-2020-11945.patch +++ /dev/null @@ -1,63 +0,0 @@ -commit eeebf0f37a72a2de08348e85ae34b02c34e9a811 -Author: desbma-s1n <62935004+desbma-s1n@users.noreply.github.com> -Date: 2020-04-02 11:16:45 +0000 - - Fix auth digest refcount integer overflow (#585) - - This fixes a possible overflow of the nonce reference counter in the - digest authentication scheme, found by security researchers - @synacktiv. - - It changes `references` to be an 64 bits unsigned integer. This makes - overflowing the counter impossible in practice. 
- -diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc -index fdef7df..9deb184 100644 ---- a/src/auth/digest/Config.cc -+++ b/src/auth/digest/Config.cc -@@ -94,9 +94,6 @@ static void authenticateDigestNonceDelete(digest_nonce_h * nonce); - static void authenticateDigestNonceSetup(void); - static void authDigestNonceEncode(digest_nonce_h * nonce); - static void authDigestNonceLink(digest_nonce_h * nonce); --#if NOT_USED --static int authDigestNonceLinks(digest_nonce_h * nonce); --#endif - static void authDigestNonceUserUnlink(digest_nonce_h * nonce); - - static void -@@ -289,21 +286,10 @@ authDigestNonceLink(digest_nonce_h * nonce) - { - assert(nonce != NULL); - ++nonce->references; -+ assert(nonce->references != 0); // no overflows - debugs(29, 9, "nonce '" << nonce << "' now at '" << nonce->references << "'."); - } - --#if NOT_USED --static int --authDigestNonceLinks(digest_nonce_h * nonce) --{ -- if (!nonce) -- return -1; -- -- return nonce->references; --} -- --#endif -- - void - authDigestNonceUnlink(digest_nonce_h * nonce) - { -diff --git a/src/auth/digest/Config.h b/src/auth/digest/Config.h -index 56ccaa9..7fb7673 100644 ---- a/src/auth/digest/Config.h -+++ b/src/auth/digest/Config.h -@@ -42,7 +42,7 @@ struct _digest_nonce_h : public hash_link { - /* number of uses we've seen of this nonce */ - unsigned long nc; - /* reference count */ -- short references; -+ uint64_t references; - /* the auth_user this nonce has been tied to */ - Auth::Digest::User *user; - /* has this nonce been invalidated ? */ diff --git a/CVE-2020-14058.patch b/CVE-2020-14058.patch deleted file mode 100644 index 1567477..0000000 --- a/CVE-2020-14058.patch +++ /dev/null @@ -1,295 +0,0 @@ -commit 93f5fda134a2a010b84ffedbe833d670e63ba4be -Author: Christos Tsantilas -Date: 2020-05-15 04:54:54 +0000 - - Fix sending of unknown validation errors to cert. 
validator (#633) - - Squid may be compiled with an OpenSSL release introducing X509 - validation errors that Squid does not have the names for. Send their - integer codes. - - Also sync Squid certificate verification errors with OpenSSL v1.1.1g. - - This is a Measurement Factory project. - -diff --git a/src/format/Format.cc b/src/format/Format.cc -index 8c5574b..4b4ad42 100644 ---- a/src/format/Format.cc -+++ b/src/format/Format.cc -@@ -322,15 +322,6 @@ log_quoted_string(const char *str, char *out) - *p = '\0'; - } - --#if USE_OPENSSL --static char * --sslErrorName(Security::ErrorCode err, char *buf, size_t size) --{ -- snprintf(buf, size, "SSL_ERR=%d", err); -- return buf; --} --#endif -- - /// XXX: Misnamed. TODO: Split request && al->request->errType == ERR_SECURE_CONNECT_FAIL) { -- out = Ssl::GetErrorName(al->request->errDetail); -- if (!out) -- out = sslErrorName(al->request->errDetail, tmp, sizeof(tmp)); -+ out = Ssl::GetErrorName(al->request->errDetail, true); - } else - #endif - if (al->request && al->request->errDetail != ERR_DETAIL_NONE) { -@@ -1263,10 +1252,7 @@ Format::Format::assemble(MemBuf &mb, const AccessLogEntry::Pointer &al, int logS - for (const Security::CertErrors *sslError = srvBump->sslErrors(); sslError; sslError = sslError->next) { - if (!sb.isEmpty()) - sb.append(separator); -- if (const char *errorName = Ssl::GetErrorName(sslError->element.code)) -- sb.append(errorName); -- else -- sb.append(sslErrorName(sslError->element.code, tmp, sizeof(tmp))); -+ sb.append(Ssl::GetErrorName(sslError->element.code, true)); - if (sslError->element.depth >= 0) - sb.appendf("@depth=%d", sslError->element.depth); - } -diff --git a/src/ssl/ErrorDetail.cc b/src/ssl/ErrorDetail.cc -index ddd61fd..00eb0e2 100644 ---- a/src/ssl/ErrorDetail.cc -+++ b/src/ssl/ErrorDetail.cc -@@ -233,6 +233,9 @@ static SslErrorEntry TheSslErrorArray[] = { - "X509_V_ERR_SUBTREE_MINMAX" - }, - #endif -+ { X509_V_ERR_APPLICATION_VERIFICATION, //50 -+ 
"X509_V_ERR_APPLICATION_VERIFICATION" -+ }, - #if defined(X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE) - { - X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE, //51 -@@ -257,9 +260,132 @@ static SslErrorEntry TheSslErrorArray[] = { - "X509_V_ERR_CRL_PATH_VALIDATION_ERROR" - }, - #endif -- { X509_V_ERR_APPLICATION_VERIFICATION, -- "X509_V_ERR_APPLICATION_VERIFICATION" -+#if defined(X509_V_ERR_PATH_LOOP) -+ { -+ X509_V_ERR_PATH_LOOP, //55 -+ "X509_V_ERR_PATH_LOOP" -+ }, -+#endif -+#if defined(X509_V_ERR_SUITE_B_INVALID_VERSION) -+ { -+ X509_V_ERR_SUITE_B_INVALID_VERSION, //56 -+ "X509_V_ERR_SUITE_B_INVALID_VERSION" -+ }, -+#endif -+#if defined(X509_V_ERR_SUITE_B_INVALID_ALGORITHM) -+ { -+ X509_V_ERR_SUITE_B_INVALID_ALGORITHM, //57 -+ "X509_V_ERR_SUITE_B_INVALID_ALGORITHM" -+ }, -+#endif -+#if defined(X509_V_ERR_SUITE_B_INVALID_CURVE) -+ { -+ X509_V_ERR_SUITE_B_INVALID_CURVE, //58 -+ "X509_V_ERR_SUITE_B_INVALID_CURVE" -+ }, -+#endif -+#if defined(X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM) -+ { -+ X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM, //59 -+ "X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM" -+ }, -+#endif -+#if defined(X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED) -+ { -+ X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED, //60 -+ "X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED" -+ }, -+#endif -+#if defined(X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256) -+ { -+ X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256, //61 -+ "X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256" -+ }, -+#endif -+#if defined(X509_V_ERR_HOSTNAME_MISMATCH) -+ { -+ X509_V_ERR_HOSTNAME_MISMATCH, //62 -+ "X509_V_ERR_HOSTNAME_MISMATCH" -+ }, -+#endif -+#if defined(X509_V_ERR_EMAIL_MISMATCH) -+ { -+ X509_V_ERR_EMAIL_MISMATCH, //63 -+ "X509_V_ERR_EMAIL_MISMATCH" -+ }, -+#endif -+#if defined(X509_V_ERR_IP_ADDRESS_MISMATCH) -+ { -+ X509_V_ERR_IP_ADDRESS_MISMATCH, //64 -+ "X509_V_ERR_IP_ADDRESS_MISMATCH" -+ }, -+#endif -+#if defined(X509_V_ERR_DANE_NO_MATCH) -+ { -+ X509_V_ERR_DANE_NO_MATCH, //65 -+ "X509_V_ERR_DANE_NO_MATCH" - }, -+#endif -+#if 
defined(X509_V_ERR_EE_KEY_TOO_SMALL) -+ { -+ X509_V_ERR_EE_KEY_TOO_SMALL, //66 -+ "X509_V_ERR_EE_KEY_TOO_SMALL" -+ }, -+#endif -+#if defined(X509_V_ERR_CA_KEY_TOO_SMALL) -+ { -+ X509_V_ERR_CA_KEY_TOO_SMALL, //67 -+ "X509_V_ERR_CA_KEY_TOO_SMALL" -+ }, -+#endif -+#if defined(X509_V_ERR_CA_MD_TOO_WEAK) -+ { -+ X509_V_ERR_CA_MD_TOO_WEAK, //68 -+ "X509_V_ERR_CA_MD_TOO_WEAK" -+ }, -+#endif -+#if defined(X509_V_ERR_INVALID_CALL) -+ { -+ X509_V_ERR_INVALID_CALL, //69 -+ "X509_V_ERR_INVALID_CALL" -+ }, -+#endif -+#if defined(X509_V_ERR_STORE_LOOKUP) -+ { -+ X509_V_ERR_STORE_LOOKUP, //70 -+ "X509_V_ERR_STORE_LOOKUP" -+ }, -+#endif -+#if defined(X509_V_ERR_NO_VALID_SCTS) -+ { -+ X509_V_ERR_NO_VALID_SCTS, //71 -+ "X509_V_ERR_NO_VALID_SCTS" -+ }, -+#endif -+#if defined(X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION) -+ { -+ X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION, //72 -+ "X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION" -+ }, -+#endif -+#if defined(X509_V_ERR_OCSP_VERIFY_NEEDED) -+ { -+ X509_V_ERR_OCSP_VERIFY_NEEDED, //73 -+ "X509_V_ERR_OCSP_VERIFY_NEEDED" -+ }, -+#endif -+#if defined(X509_V_ERR_OCSP_VERIFY_FAILED) -+ { -+ X509_V_ERR_OCSP_VERIFY_FAILED, //74 -+ "X509_V_ERR_OCSP_VERIFY_FAILED" -+ }, -+#endif -+#if defined(X509_V_ERR_OCSP_CERT_UNKNOWN) -+ { -+ X509_V_ERR_OCSP_CERT_UNKNOWN, //75 -+ "X509_V_ERR_OCSP_CERT_UNKNOWN" -+ }, -+#endif - { SSL_ERROR_NONE, "SSL_ERROR_NONE"}, - {SSL_ERROR_NONE, NULL} - }; -@@ -286,6 +412,27 @@ static const char *OptionalSslErrors[] = { - "X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX", - "X509_V_ERR_UNSUPPORTED_NAME_SYNTAX", - "X509_V_ERR_CRL_PATH_VALIDATION_ERROR", -+ "X509_V_ERR_PATH_LOOP", -+ "X509_V_ERR_SUITE_B_INVALID_VERSION", -+ "X509_V_ERR_SUITE_B_INVALID_ALGORITHM", -+ "X509_V_ERR_SUITE_B_INVALID_CURVE", -+ "X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM", -+ "X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED", -+ "X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256", -+ "X509_V_ERR_HOSTNAME_MISMATCH", -+ "X509_V_ERR_EMAIL_MISMATCH", -+ "X509_V_ERR_IP_ADDRESS_MISMATCH", 
-+ "X509_V_ERR_DANE_NO_MATCH", -+ "X509_V_ERR_EE_KEY_TOO_SMALL", -+ "X509_V_ERR_CA_KEY_TOO_SMALL", -+ "X509_V_ERR_CA_MD_TOO_WEAK", -+ "X509_V_ERR_INVALID_CALL", -+ "X509_V_ERR_STORE_LOOKUP", -+ "X509_V_ERR_NO_VALID_SCTS", -+ "X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION", -+ "X509_V_ERR_OCSP_VERIFY_NEEDED", -+ "X509_V_ERR_OCSP_VERIFY_FAILED", -+ "X509_V_ERR_OCSP_CERT_UNKNOWN", - NULL - }; - -@@ -390,7 +537,7 @@ Ssl::ParseErrorString(const char *name, Security::Errors &errors) - return false; // not reached - } - --const char *Ssl::GetErrorName(Security::ErrorCode value) -+const char *Ssl::GetErrorName(Security::ErrorCode value, const bool prefixRawCode) - { - if (TheSslErrors.empty()) - loadSslErrorMap(); -@@ -399,7 +546,9 @@ const char *Ssl::GetErrorName(Security::ErrorCode value) - if (it != TheSslErrors.end()) - return it->second->name; - -- return NULL; -+ static char tmpBuffer[128]; -+ snprintf(tmpBuffer, sizeof(tmpBuffer), "%s%d", prefixRawCode ? "SSL_ERR=" : "", (int)value); -+ return tmpBuffer; - } - - bool -@@ -529,21 +678,14 @@ const char *Ssl::ErrorDetail::notafter() const - */ - const char *Ssl::ErrorDetail::err_code() const - { -- static char tmpBuffer[64]; - // We can use the GetErrorName but using the detailEntry is faster, - // so try it first. -- const char *err = detailEntry.name.termedBuf(); -+ if (const char *err = detailEntry.name.termedBuf()) -+ return err; - - // error details not loaded yet or not defined in error_details.txt, - // try the GetErrorName... -- if (!err) -- err = GetErrorName(error_no); -- -- if (!err) { -- snprintf(tmpBuffer, 64, "%d", (int)error_no); -- err = tmpBuffer; -- } -- return err; -+ return GetErrorName(error_no); - } - - /** -diff --git a/src/ssl/ErrorDetail.h b/src/ssl/ErrorDetail.h -index 48dc405..0eec0a9 100644 ---- a/src/ssl/ErrorDetail.h -+++ b/src/ssl/ErrorDetail.h -@@ -26,8 +26,9 @@ bool ParseErrorString(const char *name, Security::Errors &); - /// The Security::ErrorCode code of the error described by "name". 
- Security::ErrorCode GetErrorCode(const char *name); - --/// The string representation of the TLS error "value" --const char *GetErrorName(Security::ErrorCode value); -+/// \return string representation of a known TLS error (or a raw error code) -+/// \param prefixRawCode whether to prefix raw codes with "SSL_ERR=" -+const char *GetErrorName(Security::ErrorCode value, const bool prefixRawCode = false); - - /// A short description of the TLS error "value" - const char *GetErrorDescr(Security::ErrorCode value); diff --git a/CVE-2020-15049.patch b/CVE-2020-15049.patch deleted file mode 100644 index 5f7151d..0000000 --- a/CVE-2020-15049.patch +++ /dev/null @@ -1,105 +0,0 @@ -commit ea12a34d338b962707d5078d6d1fc7c6eb119a22 -Author: Alex Rousskov -Date: 2020-05-13 14:05:00 +0000 - - Validate Content-Length value prefix (#629) - - The new code detects all invalid Content-Length prefixes but the old - code was already rejecting most invalid prefixes using strtoll(). The - newly covered (and now rejected) invalid characters are - - * explicit "+" sign; - * explicit "-" sign in "-0" values; - * isspace(3) characters that are not (relaxed) OWS characters. - - In most deployment environments, the last set is probably empty because - the relaxed OWS set has all the POSIX/C isspace(3) characters but the - new line, and the new line is unlikely to sneak in past other checks. - - Thank you, Amit Klein , for elevating the - importance of this 2016 TODO (added in commit a1b9ec2). - -diff --git a/CONTRIBUTORS b/CONTRIBUTORS -index 36957f2..c10a221 100644 ---- a/CONTRIBUTORS -+++ b/CONTRIBUTORS -@@ -25,6 +25,7 @@ Thank you! 
- Alex Wu - Alin Nastac - Alter -+ Amit Klein - Amos Jeffries - Amos Jeffries - Amos Jeffries -diff --git a/src/http/ContentLengthInterpreter.cc b/src/http/ContentLengthInterpreter.cc -index 3fdf7de..a3741eb 100644 ---- a/src/http/ContentLengthInterpreter.cc -+++ b/src/http/ContentLengthInterpreter.cc -@@ -28,6 +28,24 @@ Http::ContentLengthInterpreter::ContentLengthInterpreter(const int aDebugLevel): - { - } - -+/// checks whether all characters before the Content-Length number are allowed -+/// \returns the start of the digit sequence (or nil on errors) -+const char * -+Http::ContentLengthInterpreter::findDigits(const char *prefix, const char * const valueEnd) const -+{ -+ // skip leading OWS in RFC 7230's `OWS field-value OWS` -+ const CharacterSet &whitespace = Http::One::Parser::WhitespaceCharacters(); -+ while (prefix < valueEnd) { -+ const auto ch = *prefix; -+ if (CharacterSet::DIGIT[ch]) -+ return prefix; // common case: a pre-trimmed field value -+ if (!whitespace[ch]) -+ return nullptr; // (trimmed) length does not start with a digit -+ ++prefix; -+ } -+ return nullptr; // empty or whitespace-only value -+} -+ - /// checks whether all characters after the Content-Length are allowed - bool - Http::ContentLengthInterpreter::goodSuffix(const char *suffix, const char * const end) const -@@ -52,10 +70,19 @@ Http::ContentLengthInterpreter::checkValue(const char *rawValue, const int value - { - Must(!sawBad); - -+ const auto valueEnd = rawValue + valueSize; -+ -+ const auto digits = findDigits(rawValue, valueEnd); -+ if (!digits) { -+ debugs(55, debugLevel, "WARNING: Leading garbage or empty value in" << Raw("Content-Length", rawValue, valueSize)); -+ sawBad = true; -+ return false; -+ } -+ - int64_t latestValue = -1; - char *suffix = nullptr; -- // TODO: Handle malformed values with leading signs (e.g., "-0" or "+1"). 
-- if (!httpHeaderParseOffset(rawValue, &latestValue, &suffix)) { -+ -+ if (!httpHeaderParseOffset(digits, &latestValue, &suffix)) { - debugs(55, DBG_IMPORTANT, "WARNING: Malformed" << Raw("Content-Length", rawValue, valueSize)); - sawBad = true; - return false; -@@ -68,7 +95,7 @@ Http::ContentLengthInterpreter::checkValue(const char *rawValue, const int value - } - - // check for garbage after the number -- if (!goodSuffix(suffix, rawValue + valueSize)) { -+ if (!goodSuffix(suffix, valueEnd)) { - debugs(55, debugLevel, "WARNING: Trailing garbage in" << Raw("Content-Length", rawValue, valueSize)); - sawBad = true; - return false; -diff --git a/src/http/ContentLengthInterpreter.h b/src/http/ContentLengthInterpreter.h -index ce36e22..f22de91 100644 ---- a/src/http/ContentLengthInterpreter.h -+++ b/src/http/ContentLengthInterpreter.h -@@ -46,6 +46,7 @@ public: - bool sawGood; - - protected: -+ const char *findDigits(const char *prefix, const char *valueEnd) const; - bool goodSuffix(const char *suffix, const char * const end) const; - bool checkValue(const char *start, const int size); - bool checkList(const String &list); diff --git a/CVE-2020-15810.patch b/CVE-2020-15810.patch deleted file mode 100644 index 5b676a7..0000000 --- a/CVE-2020-15810.patch +++ /dev/null @@ -1,63 +0,0 @@ -From 9c8e2a71aa1d3c159a319d9365c346c48dc783a5 Mon Sep 17 00:00:00 2001 -From: Amos Jeffries -Date: Tue, 4 Aug 2020 04:34:32 +0000 -Subject: [PATCH] Enforce token characters for field-name (#700) - -RFC 7230 defines field-name as a token. Request splitting and cache -poisoning attacks have used non-token characters to fool broken HTTP -agents behind or in front of Squid for years. This change should -significantly reduce that abuse. 
- -If we discover exceptional situations that need special treatment, the -relaxed parser can allow them on a case-by-case basis (while being extra -careful about framing-related header fields), just like we already -tolerate some header whitespace (e.g., between the response header -field-name and colon). ---- - src/HttpHeader.cc | 26 ++++++++++++++------------ - 1 file changed, 14 insertions(+), 12 deletions(-) - -diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc -index dc6e0ffd63..9e5e47fb34 100644 ---- a/src/HttpHeader.cc -+++ b/src/HttpHeader.cc -@@ -443,18 +443,6 @@ HttpHeader::parse(const char *header_start, size_t hdrLen) - return 0; - } - -- if (e->id == Http::HdrType::OTHER && stringHasWhitespace(e->name.termedBuf())) { -- debugs(55, warnOnError, "WARNING: found whitespace in HTTP header name {" << -- getStringPrefix(field_start, field_end-field_start) << "}"); -- -- if (!Config.onoff.relaxed_header_parser) { -- delete e; -- PROF_stop(HttpHeaderParse); -- clean(); -- return 0; -- } -- } -- - addEntry(e); - } - -@@ -1437,6 +1425,20 @@ HttpHeaderEntry::parse(const char *field_start, const char *field_end, const htt - } - } - -+ /* RFC 7230 section 3.2: -+ * -+ * header-field = field-name ":" OWS field-value OWS -+ * field-name = token -+ * token = 1*TCHAR -+ */ -+ for (const char *pos = field_start; pos < (field_start+name_len); ++pos) { -+ if (!CharacterSet::TCHAR[*pos]) { -+ debugs(55, 2, "found header with invalid characters in " << -+ Raw("field-name", field_start, min(name_len,100)) << "..."); -+ return nullptr; -+ } -+ } -+ - /* now we know we can parse it */ - - debugs(55, 9, "parsing HttpHeaderEntry: near '" << getStringPrefix(field_start, field_end-field_start) << "'"); diff --git a/CVE-2020-15811.patch b/CVE-2020-15811.patch deleted file mode 100644 index 42d802a..0000000 --- a/CVE-2020-15811.patch +++ /dev/null @@ -1,161 +0,0 @@ -From fd68382860633aca92065e6c343cfd1b12b126e7 Mon Sep 17 00:00:00 2001 -From: Amos Jeffries -Date: Sun, 16 Aug 2020 
02:21:22 +0000 -Subject: [PATCH] Improve Transfer-Encoding handling (#702) - -Reject messages containing Transfer-Encoding header with coding other -than chunked or identity. Squid does not support other codings. - -For simplicity and security sake, also reject messages where -Transfer-Encoding contains unnecessary complex values that are -technically equivalent to "chunked" or "identity" (e.g., ",,chunked" or -"identity, chunked"). - -RFC 7230 formally deprecated and removed identity coding, but it is -still used by some agents. ---- - src/HttpHeader.cc | 16 +++++++++++++++- - src/HttpHeader.h | 18 ++++++++++-------- - src/client_side.cc | 11 ++--------- - src/http.cc | 3 +++ - 4 files changed, 30 insertions(+), 18 deletions(-) - -diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc -index 80c23458eb..f30802eb79 100644 ---- a/src/HttpHeader.cc -+++ b/src/HttpHeader.cc -@@ -174,6 +174,7 @@ HttpHeader::operator =(const HttpHeader &other) - update(&other); // will update the mask as well - len = other.len; - conflictingContentLength_ = other.conflictingContentLength_; -+ teUnsupported_ = other.teUnsupported_; - } - return *this; - } -@@ -222,6 +223,7 @@ HttpHeader::clean() - httpHeaderMaskInit(&mask, 0); - len = 0; - conflictingContentLength_ = false; -+ teUnsupported_ = false; - PROF_stop(HttpHeaderClean); - } - -@@ -471,11 +473,23 @@ HttpHeader::parse(const char *header_start, size_t hdrLen) - Raw("header", header_start, hdrLen)); - } - -- if (chunked()) { -+ String rawTe; -+ if (getByIdIfPresent(Http::HdrType::TRANSFER_ENCODING, &rawTe)) { - // RFC 2616 section 4.4: ignore Content-Length with Transfer-Encoding - // RFC 7230 section 3.3.3 #3: Transfer-Encoding overwrites Content-Length - delById(Http::HdrType::CONTENT_LENGTH); - // and clen state becomes irrelevant -+ -+ if (rawTe == "chunked") { -+ ; // leave header present for chunked() method -+ } else if (rawTe == "identity") { // deprecated. 
no coding -+ delById(Http::HdrType::TRANSFER_ENCODING); -+ } else { -+ // This also rejects multiple encodings until we support them properly. -+ debugs(55, warnOnError, "WARNING: unsupported Transfer-Encoding used by client: " << rawTe); -+ teUnsupported_ = true; -+ } -+ - } else if (clen.sawBad) { - // ensure our callers do not accidentally see bad Content-Length values - delById(Http::HdrType::CONTENT_LENGTH); -diff --git a/src/HttpHeader.h b/src/HttpHeader.h -index e3553a4e4d..64f294a50a 100644 ---- a/src/HttpHeader.h -+++ b/src/HttpHeader.h -@@ -140,7 +140,13 @@ class HttpHeader - int hasListMember(Http::HdrType id, const char *member, const char separator) const; - int hasByNameListMember(const char *name, const char *member, const char separator) const; - void removeHopByHopEntries(); -- inline bool chunked() const; ///< whether message uses chunked Transfer-Encoding -+ -+ /// whether the message uses chunked Transfer-Encoding -+ /// optimized implementation relies on us rejecting/removing other codings -+ bool chunked() const { return has(Http::HdrType::TRANSFER_ENCODING); } -+ -+ /// whether message used an unsupported and/or invalid Transfer-Encoding -+ bool unsupportedTe() const { return teUnsupported_; } - - /* protected, do not use these, use interface functions instead */ - std::vector entries; /**< parsed fields in raw format */ -@@ -158,6 +164,9 @@ class HttpHeader - private: - HttpHeaderEntry *findLastEntry(Http::HdrType id) const; - bool conflictingContentLength_; ///< found different Content-Length fields -+ /// unsupported encoding, unnecessary syntax characters, and/or -+ /// invalid field-value found in Transfer-Encoding header -+ bool teUnsupported_ = false; - }; - - int httpHeaderParseQuotedString(const char *start, const int len, String *val); -@@ -167,13 +176,6 @@ SBuf httpHeaderQuoteString(const char *raw); - - void httpHeaderCalcMask(HttpHeaderMask * mask, Http::HdrType http_hdr_type_enums[], size_t count); - --inline bool 
--HttpHeader::chunked() const --{ -- return has(Http::HdrType::TRANSFER_ENCODING) && -- hasListMember(Http::HdrType::TRANSFER_ENCODING, "chunked", ','); --} -- - void httpHeaderInitModule(void); - - #endif /* SQUID_HTTPHEADER_H */ -diff --git a/src/client_side.cc b/src/client_side.cc -index f7038ba983..547b0ca723 100644 ---- a/src/client_side.cc -+++ b/src/client_side.cc -@@ -1600,9 +1600,7 @@ void - clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context) - { - ClientHttpRequest *http = context->http; -- bool chunked = false; - bool mustReplyToOptions = false; -- bool unsupportedTe = false; - bool expectBody = false; - - // We already have the request parsed and checked, so we -@@ -1659,13 +1657,7 @@ clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, - request->http_ver.minor = http_ver.minor; - } - -- if (request->header.chunked()) { -- chunked = true; -- } else if (request->header.has(Http::HdrType::TRANSFER_ENCODING)) { -- const String te = request->header.getList(Http::HdrType::TRANSFER_ENCODING); -- // HTTP/1.1 requires chunking to be the last encoding if there is one -- unsupportedTe = te.size() && te != "identity"; -- } // else implied identity coding -+ const auto unsupportedTe = request->header.unsupportedTe(); - - mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) && - (request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0); -@@ -1682,6 +1674,7 @@ clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, - return; - } - -+ const auto chunked = request->header.chunked(); - if (!chunked && !clientIsContentLengthValid(request.getRaw())) { - clientStreamNode *node = context->getClientReplyContext(); - clientReplyContext *repContext = dynamic_cast(node->data.getRaw()); -diff --git a/src/http.cc b/src/http.cc -index 53f428a4d2..79ab2cf226 100644 ---- a/src/http.cc -+++ b/src/http.cc -@@ -1292,6 +1292,9 @@ 
HttpStateData::continueAfterParsingHeader() - } else if (vrep->header.conflictingContentLength()) { - fwd->dontRetry(true); - error = ERR_INVALID_RESP; -+ } else if (vrep->header.unsupportedTe()) { -+ fwd->dontRetry(true); -+ error = ERR_INVALID_RESP; - } else { - return true; // done parsing, got reply, and no error - } diff --git a/CVE-2020-24606.patch b/CVE-2020-24606.patch deleted file mode 100644 index d277507..0000000 --- a/CVE-2020-24606.patch +++ /dev/null @@ -1,34 +0,0 @@ -commit b789e719affbb0a6ff9c22095f6ca8db6a5f4926 -Author: Eduard Bagdasaryan -Date: 2020-07-27 15:28:31 +0000 - - Fix livelocking in peerDigestHandleReply (#698) - - peerDigestHandleReply() was missing a premature EOF check. The existing - peerDigestFetchedEnough() cannot detect EOF because it does not have - access to receivedData.length used to indicate the EOF condition. We did - not adjust peerDigestFetchedEnough() because it is abused to check both - post-I/O state and the state after each digest processing step. The - latter invocations lack access to receivedData.length and should not - really bother with EOF anyway. - -diff --git a/src/peer_digest.cc b/src/peer_digest.cc -index d48340f97..265f16183 100644 ---- a/src/peer_digest.cc -+++ b/src/peer_digest.cc -@@ -483,6 +483,15 @@ peerDigestHandleReply(void *data, StoreIOBuffer receivedData) - - } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0); - -+ // Check for EOF here, thus giving the parser one extra run. We could avoid this overhead by -+ // checking at the beginning of this function. However, in this case, we would have to require -+ // that the parser does not regard EOF as a special condition (it is true now but may change -+ // in the future). 
-+ if (!receivedData.length) { // EOF -+ peerDigestFetchAbort(fetch, fetch->buf, "premature end of digest reply"); -+ return; -+ } -+ - /* Update the copy offset */ - fetch->offset += receivedData.length; - diff --git a/CVE-2020-8449_CVE-2020-8450.patch b/CVE-2020-8449_CVE-2020-8450.patch deleted file mode 100644 index 0f76598..0000000 --- a/CVE-2020-8449_CVE-2020-8450.patch +++ /dev/null @@ -1,54 +0,0 @@ -From f9fb256a80f966d7f7af7d2e04438366c74258c7 Mon Sep 17 00:00:00 2001 -From: Guido Vranken -Date: Thu, 12 Dec 2019 03:27:40 +0000 -Subject: [PATCH] Fix request URL generation in reverse proxy configurations - (#519) - ---- - src/client_side.cc | 24 ++++++++++-------------- - 1 file changed, 10 insertions(+), 14 deletions(-) - -diff --git a/src/client_side.cc b/src/client_side.cc -index 538bd5e..671f6c6 100644 ---- a/src/client_side.cc -+++ b/src/client_side.cc -@@ -1141,26 +1141,22 @@ prepareAcceleratedURL(ConnStateData * conn, const Http1::RequestParserPointer &h - if (vport < 0) - vport = conn->clientConnection->local.port(); - -- char *host = NULL; -- if (vhost && (host = hp->getHostHeaderField())) { -+ char *receivedHost = nullptr; -+ if (vhost && (receivedHost = hp->getHostHeaderField())) { -+ SBuf host(receivedHost); - debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport); -- char thost[256]; - if (vport > 0) { -- thost[0] = '\0'; -- char *t = NULL; -- if (host[strlen(host) - 1] != ']' && (t = strrchr(host,':')) != nullptr) { -- strncpy(thost, host, (t-host)); -- snprintf(thost+(t-host), sizeof(thost)-(t-host), ":%d", vport); -- host = thost; -- } else if (!t) { -- snprintf(thost, sizeof(thost), "%s:%d",host, vport); -- host = thost; -+ // remove existing :port (if any), cope with IPv6+ without port -+ const auto lastColonPos = host.rfind(':'); -+ if (lastColonPos != SBuf::npos && *host.rbegin() != ']') { -+ host.chop(0, lastColonPos); // truncate until the last colon - } -+ host.appendf(":%d", vport); - } // else nothing to alter 
port-wise. - const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image(); -- const int url_sz = scheme.length() + strlen(host) + url.length() + 32; -+ const auto url_sz = scheme.length() + host.length() + url.length() + 32; - char *uri = static_cast(xcalloc(url_sz, 1)); -- snprintf(uri, url_sz, SQUIDSBUFPH "://%s" SQUIDSBUFPH, SQUIDSBUFPRINT(scheme), host, SQUIDSBUFPRINT(url)); -+ snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH SQUIDSBUFPH, SQUIDSBUFPRINT(scheme), SQUIDSBUFPRINT(host), SQUIDSBUFPRINT(url)); - debugs(33, 5, "ACCEL VHOST REWRITE: " << uri); - return uri; - } else if (conn->port->defaultsite /* && !vhost */) { --- -1.8.3.1 - diff --git a/CVE-2020-8517.patch b/CVE-2020-8517.patch deleted file mode 100644 index 16811d7..0000000 --- a/CVE-2020-8517.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 71d6f8af3458d3462371d544c5d144abe4c9ee55 Mon Sep 17 00:00:00 2001 -From: aaron-costello <56684862+aaron-costello@users.noreply.github.com> -Date: Fri, 22 Nov 2019 02:44:29 +0000 -Subject: [PATCH] ext_lm_group_acl: Improved username handling (#512) - ---- - src/acl/external/LM_group/ext_lm_group_acl.cc | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/src/acl/external/LM_group/ext_lm_group_acl.cc b/src/acl/external/LM_group/ext_lm_group_acl.cc -index dee369c..a0fb6ad 100644 ---- a/src/acl/external/LM_group/ext_lm_group_acl.cc -+++ b/src/acl/external/LM_group/ext_lm_group_acl.cc -@@ -343,10 +343,10 @@ Valid_Global_Groups(char *UserName, const char **Groups) - break; - } - if (domain_qualify == NULL) { -- strcpy(User, NTDomain); -- strcpy(NTDomain, DefaultDomain); -+ xstrncpy(User, NTDomain, sizeof(User)); -+ xstrncpy(NTDomain, DefaultDomain, sizeof(NTDomain)); - } else { -- strcpy(User, domain_qualify + 1); -+ xstrncpy(User, domain_qualify + 1, sizeof(User)); - domain_qualify[0] = '\0'; - strlwr(NTDomain); - } --- -1.8.3.1 - diff --git a/backport-0001-CVE-2021-28652.patch b/backport-0001-CVE-2021-28652.patch deleted file 
mode 100644 index 6a52d06..0000000 --- a/backport-0001-CVE-2021-28652.patch +++ /dev/null @@ -1,195 +0,0 @@ -From 417da4006cf5c97d44e74431b816fc58fec9e270 Mon Sep 17 00:00:00 2001 -From: Eduard Bagdasaryan -Date: Mon, 18 Mar 2019 17:48:21 +0000 -Subject: [PATCH] Fix incremental parsing of chunked quoted extensions (#310) - -Before this change, incremental parsing of quoted chunked extensions -was broken for two reasons: - -* Http::One::Parser::skipLineTerminator() unexpectedly threw after - partially received quoted chunk extension value. - -* When Http::One::Tokenizer was unable to parse a quoted extension, - it incorrectly restored the input buffer to the beginning of the - extension value (instead of the extension itself), thus making - further incremental parsing iterations impossible. - -IMO, the reason for this problem was that Http::One::Tokenizer::qdText() -could not distinguish two cases (returning false in both): - -* the end of the quoted string not yet reached - -* an input error, e.g., wrong/unexpected character - -A possible approach could be to improve Http::One::Tokenizer, making it -aware about "needs more data" state. However, to be acceptable, -these improvements should be done in the base Parser::Tokenizer -class instead. These changes seem to be non-trivial and could be -done separately and later. - -Another approach, used here, is to simplify the complex and error-prone -chunked extensions parsing algorithm, fixing incremental parsing bugs -and still parse incrementally in almost all cases. The performance -regression could be expected only in relatively rare cases of partially -received or malformed extensions. 
- -Also: -* fixed parsing of partial use-original-body extension values -* do not treat an invalid use-original-body as an unknown extension -* optimization: parse use-original-body extension only in ICAP context - (i.e., where it is expected) -* improvement: added a new API to TeChunkedParser to specify known - chunked extensions list ---- - src/Debug.h | 4 ++++ - src/parser/Makefile.am | 1 + - src/parser/Tokenizer.cc | 40 ++++++++++++++++++++++++++++++++++++++++ - src/parser/Tokenizer.h | 13 +++++++++++++ - src/parser/forward.h | 22 ++++++++++++++++++++++ - 5 files changed, 80 insertions(+) - create mode 100644 src/parser/forward.h - -diff --git a/src/Debug.h b/src/Debug.h -index 7fb1ed5..8c24175 100644 ---- a/src/Debug.h -+++ b/src/Debug.h -@@ -99,6 +99,10 @@ public: - - /// configures the active debugging context to write syslog ALERT - static void ForceAlert(); -+ -+ /// prefixes each grouped debugs() line after the first one in the group -+ static std::ostream& Extra(std::ostream &os) { return os << "\n "; } -+ - private: - static Context *Current; ///< deepest active context; nil outside debugs() - }; -diff --git a/src/parser/Makefile.am b/src/parser/Makefile.am -index aef3235..c08d1d5 100644 ---- a/src/parser/Makefile.am -+++ b/src/parser/Makefile.am -@@ -13,6 +13,7 @@ noinst_LTLIBRARIES = libparser.la - libparser_la_SOURCES = \ - BinaryTokenizer.h \ - BinaryTokenizer.cc \ -+ forward.h \ - Tokenizer.h \ - Tokenizer.cc - -diff --git a/src/parser/Tokenizer.cc b/src/parser/Tokenizer.cc -index 99f8eb3..0b44e40 100644 ---- a/src/parser/Tokenizer.cc -+++ b/src/parser/Tokenizer.cc -@@ -10,7 +10,9 @@ - - #include "squid.h" - #include "Debug.h" -+#include "parser/forward.h" - #include "parser/Tokenizer.h" -+#include "sbuf/Stream.h" - - #include - #if HAVE_CTYPE_H -@@ -96,6 +98,23 @@ Parser::Tokenizer::prefix(SBuf &returnedToken, const CharacterSet &tokenChars, c - return true; - } - -+SBuf -+Parser::Tokenizer::prefix(const char *description, const CharacterSet 
&tokenChars, const SBuf::size_type limit) -+{ -+ if (atEnd()) -+ throw InsufficientInput(); -+ -+ SBuf result; -+ -+ if (!prefix(result, tokenChars, limit)) -+ throw TexcHere(ToSBuf("cannot parse ", description)); -+ -+ if (atEnd()) -+ throw InsufficientInput(); -+ -+ return result; -+} -+ - bool - Parser::Tokenizer::suffix(SBuf &returnedToken, const CharacterSet &tokenChars, const SBuf::size_type limit) - { -@@ -283,3 +302,24 @@ Parser::Tokenizer::int64(int64_t & result, int base, bool allowSign, const SBuf: - return success(s - range.rawContent()); - } - -+int64_t -+Parser::Tokenizer::udec64(const char *description, const SBuf::size_type limit) -+{ -+ if (atEnd()) -+ throw InsufficientInput(); -+ -+ int64_t result = 0; -+ -+ // Since we only support unsigned decimals, a parsing failure with a -+ // non-empty input always implies invalid/malformed input (or a buggy -+ // limit=0 caller). TODO: Support signed and non-decimal integers by -+ // refactoring int64() to detect insufficient input. -+ if (!int64(result, 10, false, limit)) -+ throw TexcHere(ToSBuf("cannot parse ", description)); -+ -+ if (atEnd()) -+ throw InsufficientInput(); // more digits may be coming -+ -+ return result; -+} -+ -diff --git a/src/parser/Tokenizer.h b/src/parser/Tokenizer.h -index f04fd3e..6ae8162 100644 ---- a/src/parser/Tokenizer.h -+++ b/src/parser/Tokenizer.h -@@ -143,6 +143,19 @@ public: - */ - bool int64(int64_t &result, int base = 0, bool allowSign = true, SBuf::size_type limit = SBuf::npos); - -+ /* -+ * The methods below mimic their counterparts documented above, but they -+ * throw on errors, including InsufficientInput. The field description -+ * parameter is used for error reporting and debugging. -+ */ -+ -+ /// prefix() wrapper but throws InsufficientInput if input contains -+ /// nothing but the prefix (i.e. 
if the prefix is not "terminated") -+ SBuf prefix(const char *description, const CharacterSet &tokenChars, SBuf::size_type limit = SBuf::npos); -+ -+ /// int64() wrapper but limited to unsigned decimal integers (for now) -+ int64_t udec64(const char *description, SBuf::size_type limit = SBuf::npos); -+ - protected: - SBuf consume(const SBuf::size_type n); - SBuf::size_type success(const SBuf::size_type n); -diff --git a/src/parser/forward.h b/src/parser/forward.h -new file mode 100644 -index 0000000..5a95b7a ---- /dev/null -+++ b/src/parser/forward.h -@@ -0,0 +1,22 @@ -+/* -+ * Copyright (C) 1996-2019 The Squid Software Foundation and contributors -+ * -+ * Squid software is distributed under GPLv2+ license and includes -+ * contributions from numerous individuals and organizations. -+ * Please see the COPYING and CONTRIBUTORS files for details. -+ */ -+ -+#ifndef SQUID_PARSER_FORWARD_H -+#define SQUID_PARSER_FORWARD_H -+ -+namespace Parser { -+class Tokenizer; -+class BinaryTokenizer; -+ -+// TODO: Move this declaration (to parser/Elements.h) if we need more like it. -+/// thrown by modern "incremental" parsers when they need more data -+class InsufficientInput {}; -+} // namespace Parser -+ -+#endif /* SQUID_PARSER_FORWARD_H */ -+ --- -2.23.0 diff --git a/backport-0002-CVE-2021-28652.patch b/backport-0002-CVE-2021-28652.patch deleted file mode 100644 index 7c61c69..0000000 --- a/backport-0002-CVE-2021-28652.patch +++ /dev/null @@ -1,631 +0,0 @@ -commit 0003e3518dc95e4b5ab46b5140af79b22253048e -Author: Amos Jeffries -Date: 2021-04-30 05:15:44 +0000 - - Bug 5106: Broken cache manager URL parsing (#788) - - Use already parsed request-target URL in cache manager and - update CacheManager to Tokanizer based URL parse - - Removing use of sscan() and regex string processing which have - proven to be problematic on many levels. Most particularly with - regards to tolerance of normally harmless garbage syntax in URLs - received. 
- - Support for generic URI schemes is added possibly resolving some - issues reported with ftp:// URL and manager access via ftp_port - sockets. - - Truly generic support for /squid-internal-mgr/ path prefix is - added, fixing some user confusion about its use on cache_object: - scheme URLs. - - TODO: support for single-name parameters and URL #fragments - are left to future updates. As is refactoring the QueryParams - data storage to avoid SBuf data copying. - -diff --git a/src/CacheManager.h b/src/CacheManager.h -index 78a69f799..74705c58a 100644 ---- a/src/CacheManager.h -+++ b/src/CacheManager.h -@@ -9,6 +9,7 @@ - #ifndef SQUID_CACHEMANAGER_H - #define SQUID_CACHEMANAGER_H - -+#include "anyp/forward.h" - #include "comm/forward.h" - #include "mgr/Action.h" - #include "mgr/ActionProfile.h" -@@ -50,7 +51,7 @@ public: - protected: - CacheManager() {} ///< use Instance() instead - -- Mgr::CommandPointer ParseUrl(const char *url); -+ Mgr::CommandPointer ParseUrl(const AnyP::Uri &); - void ParseHeaders(const HttpRequest * request, Mgr::ActionParams ¶ms); - int CheckPassword(const Mgr::Command &cmd); - char *PasswdGet(Mgr::ActionPasswordList *, const char *); -diff --git a/src/cache_manager.cc b/src/cache_manager.cc -index 9fe9bbb89..8055ece6b 100644 ---- a/src/cache_manager.cc -+++ b/src/cache_manager.cc -@@ -26,7 +26,9 @@ - #include "mgr/Forwarder.h" - #include "mgr/FunAction.h" - #include "mgr/QueryParams.h" -+#include "parser/Tokenizer.h" - #include "protos.h" -+#include "sbuf/Stream.h" - #include "sbuf/StringConvert.h" - #include "SquidConfig.h" - #include "SquidTime.h" -@@ -147,82 +149,87 @@ CacheManager::createRequestedAction(const Mgr::ActionParams ¶ms) - return cmd->profile->creator->create(cmd); - } - -+static const CharacterSet & -+MgrFieldChars(const AnyP::ProtocolType &protocol) -+{ -+ // Deprecated cache_object:// scheme used '@' to delimit passwords -+ if (protocol == AnyP::PROTO_CACHE_OBJECT) { -+ static const CharacterSet fieldChars = 
CharacterSet("cache-object-field", "@?#").complement(); -+ return fieldChars; -+ } -+ -+ static const CharacterSet actionChars = CharacterSet("mgr-field", "?#").complement(); -+ return actionChars; -+} -+ - /** -- \ingroup CacheManagerInternal - * define whether the URL is a cache-manager URL and parse the action - * requested by the user. Checks via CacheManager::ActionProtection() that the - * item is accessible by the user. -- \retval CacheManager::cachemgrStateData state object for the following handling -- \retval NULL if the action can't be found or can't be accessed by the user -+ * -+ * Syntax: -+ * -+ * scheme "://" authority [ '/squid-internal-mgr' ] path-absolute [ '@' unreserved ] '?' query-string -+ * -+ * see RFC 3986 for definitions of scheme, authority, path-absolute, query-string -+ * -+ * \returns Mgr::Command object with action to perform and parameters it might use - */ - Mgr::Command::Pointer --CacheManager::ParseUrl(const char *url) -+CacheManager::ParseUrl(const AnyP::Uri &uri) - { -- int t; -- LOCAL_ARRAY(char, host, MAX_URL); -- LOCAL_ARRAY(char, request, MAX_URL); -- LOCAL_ARRAY(char, password, MAX_URL); -- LOCAL_ARRAY(char, params, MAX_URL); -- host[0] = 0; -- request[0] = 0; -- password[0] = 0; -- params[0] = 0; -- int pos = -1; -- int len = strlen(url); -- Must(len > 0); -- t = sscanf(url, "cache_object://%[^/]/%[^@?]%n@%[^?]?%s", host, request, &pos, password, params); -- if (t < 3) { -- t = sscanf(url, "cache_object://%[^/]/%[^?]%n?%s", host, request, &pos, params); -- } -- if (t < 1) { -- t = sscanf(url, "http://%[^/]/squid-internal-mgr/%[^?]%n?%s", host, request, &pos, params); -- } -- if (t < 1) { -- t = sscanf(url, "https://%[^/]/squid-internal-mgr/%[^?]%n?%s", host, request, &pos, params); -- } -- if (t < 2) { -- if (strncmp("cache_object://",url,15)==0) -- xstrncpy(request, "menu", MAX_URL); -- else -- xstrncpy(request, "index", MAX_URL); -- } -+ Parser::Tokenizer tok(uri.path()); - --#if _SQUID_OS2_ -- if (t == 2 && request[0] 
== '\0') { -- /* -- * emx's sscanf insists of returning 2 because it sets request -- * to null -- */ -- if (strncmp("cache_object://",url,15)==0) -- xstrncpy(request, "menu", MAX_URL); -- else -- xstrncpy(request, "index", MAX_URL); -- } --#endif -+ static const SBuf internalMagicPrefix("/squid-internal-mgr/"); -+ if (!tok.skip(internalMagicPrefix) && !tok.skip('/')) -+ throw TextException("invalid URL path", Here()); - -- debugs(16, 3, HERE << "MGR request: t=" << t << ", host='" << host << "', request='" << request << "', pos=" << pos << -- ", password='" << password << "', params='" << params << "'"); -+ Mgr::Command::Pointer cmd = new Mgr::Command(); -+ cmd->params.httpUri = SBufToString(uri.absolute()); - -- Mgr::ActionProfile::Pointer profile = findAction(request); -- if (!profile) { -- debugs(16, DBG_IMPORTANT, "CacheManager::ParseUrl: action '" << request << "' not found"); -- return NULL; -+ const auto &fieldChars = MgrFieldChars(uri.getScheme()); -+ -+ SBuf action; -+ if (!tok.prefix(action, fieldChars)) { -+ if (uri.getScheme() == AnyP::PROTO_CACHE_OBJECT) { -+ static const SBuf menuReport("menu"); -+ action = menuReport; -+ } else { -+ static const SBuf indexReport("index"); -+ action = indexReport; -+ } - } -+ cmd->params.actionName = SBufToString(action); -+ -+ const auto profile = findAction(action.c_str()); -+ if (!profile) -+ throw TextException(ToSBuf("action '", action, "' not found"), Here()); - - const char *prot = ActionProtection(profile); -- if (!strcmp(prot, "disabled") || !strcmp(prot, "hidden")) { -- debugs(16, DBG_IMPORTANT, "CacheManager::ParseUrl: action '" << request << "' is " << prot); -- return NULL; -+ if (!strcmp(prot, "disabled") || !strcmp(prot, "hidden")) -+ throw TextException(ToSBuf("action '", action, "' is ", prot), Here()); -+ cmd->profile = profile; -+ -+ SBuf passwd; -+ if (uri.getScheme() == AnyP::PROTO_CACHE_OBJECT && tok.skip('@')) { -+ (void)tok.prefix(passwd, fieldChars); -+ cmd->params.password = 
SBufToString(passwd); - } - -- Mgr::Command::Pointer cmd = new Mgr::Command; -- if (!Mgr::QueryParams::Parse(params, cmd->params.queryParams)) -- return NULL; -- cmd->profile = profile; -- cmd->params.httpUri = url; -- cmd->params.userName = String(); -- cmd->params.password = password; -- cmd->params.actionName = request; -+ // TODO: fix when AnyP::Uri::parse() separates path?query#fragment -+ SBuf params; -+ if (tok.skip('?')) { -+ params = tok.remaining(); -+ Mgr::QueryParams::Parse(tok, cmd->params.queryParams); -+ } -+ -+ if (!tok.skip('#') && !tok.atEnd()) -+ throw TextException("invalid characters in URL", Here()); -+ // else ignore #fragment (if any) -+ -+ debugs(16, 3, "MGR request: host=" << uri.host() << ", action=" << action << -+ ", password=" << passwd << ", params=" << params); -+ - return cmd; - } - -@@ -305,11 +312,15 @@ CacheManager::CheckPassword(const Mgr::Command &cmd) - void - CacheManager::Start(const Comm::ConnectionPointer &client, HttpRequest * request, StoreEntry * entry) - { -- debugs(16, 3, "CacheManager::Start: '" << entry->url() << "'" ); -+ debugs(16, 3, "request-url= '" << request->url << "', entry-url='" << entry->url() << "'"); - -- Mgr::Command::Pointer cmd = ParseUrl(entry->url()); -- if (!cmd) { -- ErrorState *err = new ErrorState(ERR_INVALID_URL, Http::scNotFound, request); -+ Mgr::Command::Pointer cmd; -+ try { -+ cmd = ParseUrl(request->url); -+ -+ } catch (...) 
{ -+ debugs(16, 2, "request URL error: " << CurrentException); -+ const auto err = new ErrorState(ERR_INVALID_URL, Http::scNotFound, request); - err->url = xstrdup(entry->url()); - errorAppendEntry(entry, err); - entry->expires = squid_curtime; -@@ -473,4 +484,3 @@ CacheManager::GetInstance() - } - return instance; - } -- -diff --git a/src/mgr/QueryParams.cc b/src/mgr/QueryParams.cc -index 831694245..a53dee1c7 100644 ---- a/src/mgr/QueryParams.cc -+++ b/src/mgr/QueryParams.cc -@@ -14,6 +14,10 @@ - #include "mgr/IntParam.h" - #include "mgr/QueryParams.h" - #include "mgr/StringParam.h" -+#include "parser/Tokenizer.h" -+#include "sbuf/StringConvert.h" -+ -+#include - - Mgr::QueryParam::Pointer - Mgr::QueryParams::get(const String& name) const -@@ -65,61 +69,76 @@ Mgr::QueryParams::find(const String& name) const - return iter; - } - --bool --Mgr::QueryParams::ParseParam(const String& paramStr, Param& param) -+/** -+ * Parses the value part of a "param=value" URL section. -+ * Value can be a comma-separated list of integers or an opaque string. 
-+ * -+ * value = *pchar | ( 1*DIGIT *( ',' 1*DIGIT ) ) -+ * -+ * \note opaque string may be a list with a non-integer (e.g., "1,2,3,z") -+ */ -+Mgr::QueryParam::Pointer -+ParseParamValue(const SBuf &rawValue) - { -- bool parsed = false; -- regmatch_t pmatch[3]; -- regex_t intExpr; -- regcomp(&intExpr, "^([a-z][a-z0-9_]*)=([0-9]+((,[0-9]+))*)$", REG_EXTENDED | REG_ICASE); -- regex_t stringExpr; -- regcomp(&stringExpr, "^([a-z][a-z0-9_]*)=([^&= ]+)$", REG_EXTENDED | REG_ICASE); -- if (regexec(&intExpr, paramStr.termedBuf(), 3, pmatch, 0) == 0) { -- param.first = paramStr.substr(pmatch[1].rm_so, pmatch[1].rm_eo); -- std::vector array; -- int n = pmatch[2].rm_so; -- for (int i = n; i < pmatch[2].rm_eo; ++i) { -- if (paramStr[i] == ',') { -- array.push_back(atoi(paramStr.substr(n, i).termedBuf())); -- n = i + 1; -- } -- } -- if (n < pmatch[2].rm_eo) -- array.push_back(atoi(paramStr.substr(n, pmatch[2].rm_eo).termedBuf())); -- param.second = new IntParam(array); -- parsed = true; -- } else if (regexec(&stringExpr, paramStr.termedBuf(), 3, pmatch, 0) == 0) { -- param.first = paramStr.substr(pmatch[1].rm_so, pmatch[1].rm_eo); -- param.second = new StringParam(paramStr.substr(pmatch[2].rm_so, pmatch[2].rm_eo)); -- parsed = true; -+ static const CharacterSet comma("comma", ","); -+ -+ Parser::Tokenizer tok(rawValue); -+ std::vector array; -+ int64_t intVal = 0; -+ while (tok.int64(intVal, 10, false)) { -+ Must(intVal >= std::numeric_limits::min()); -+ Must(intVal <= std::numeric_limits::max()); -+ array.emplace_back(intVal); -+ // integer list has comma between values. 
-+ // Require at least one potential DIGIT after the skipped ',' -+ if (tok.remaining().length() > 1) -+ (void)tok.skipOne(comma); - } -- regfree(&stringExpr); -- regfree(&intExpr); -- return parsed; -+ -+ if (tok.atEnd()) -+ return new Mgr::IntParam(array); -+ else -+ return new Mgr::StringParam(SBufToString(rawValue)); - } - --bool --Mgr::QueryParams::Parse(const String& aParamsStr, QueryParams& aParams) -+/** -+ * Syntax: -+ * query = [ param *( '&' param ) ] -+ * param = name '=' value -+ * name = [a-zA-Z0-9]+ -+ * value = *pchar | ( 1*DIGIT *( ',' 1*DIGIT ) ) -+ */ -+void -+Mgr::QueryParams::Parse(Parser::Tokenizer &tok, QueryParams &aParams) - { -- if (aParamsStr.size() != 0) { -- Param param; -- size_t n = 0; -- size_t len = aParamsStr.size(); -- for (size_t i = n; i < len; ++i) { -- if (aParamsStr[i] == '&') { -- if (!ParseParam(aParamsStr.substr(n, i), param)) -- return false; -- aParams.params.push_back(param); -- n = i + 1; -- } -- } -- if (n < len) { -- if (!ParseParam(aParamsStr.substr(n, len), param)) -- return false; -- aParams.params.push_back(param); -- } -+ static const CharacterSet nameChars = CharacterSet("param-name", "_") + CharacterSet::ALPHA + CharacterSet::DIGIT; -+ static const CharacterSet valueChars = CharacterSet("param-value", "&= #").complement(); -+ static const CharacterSet delimChars("param-delim", "&"); -+ -+ while (!tok.atEnd()) { -+ -+ // TODO: remove '#' processing when AnyP::Uri splits 'query#fragment' properly -+ // #fragment handled by caller. Do not throw. 
-+ if (tok.remaining()[0] == '#') -+ return; -+ -+ if (tok.skipAll(delimChars)) -+ continue; -+ -+ SBuf nameStr; -+ if (!tok.prefix(nameStr, nameChars)) -+ throw TextException("invalid query parameter name", Here()); -+ if (!tok.skip('=')) -+ throw TextException("missing parameter value", Here()); -+ -+ SBuf valueStr; -+ if (!tok.prefix(valueStr, valueChars)) -+ throw TextException("missing or malformed parameter value", Here()); -+ -+ const auto name = SBufToString(nameStr); -+ const auto value = ParseParamValue(valueStr); -+ aParams.params.emplace_back(name, value); - } -- return true; - } - - Mgr::QueryParam::Pointer -@@ -138,4 +157,3 @@ Mgr::QueryParams::CreateParam(QueryParam::Type aType) - } - return NULL; - } -- -diff --git a/src/mgr/QueryParams.h b/src/mgr/QueryParams.h -index bb8f40308..450c20f86 100644 ---- a/src/mgr/QueryParams.h -+++ b/src/mgr/QueryParams.h -@@ -13,9 +13,11 @@ - - #include "ipc/forward.h" - #include "mgr/QueryParam.h" -+#include "parser/Tokenizer.h" - #include "SquidString.h" --#include -+ - #include -+#include - - namespace Mgr - { -@@ -32,15 +34,13 @@ public: - void pack(Ipc::TypedMsgHdr& msg) const; ///< store params into msg - void unpack(const Ipc::TypedMsgHdr& msg); ///< load params from msg - /// parses the query string parameters -- static bool Parse(const String& aParamsStr, QueryParams& aParams); -+ static void Parse(Parser::Tokenizer &, QueryParams &); - - private: - /// find query parameter by name - Params::const_iterator find(const String& name) const; - /// creates a parameter of the specified type - static QueryParam::Pointer CreateParam(QueryParam::Type aType); -- /// parses string like "param=value"; returns true if success -- static bool ParseParam(const String& paramStr, Param& param); - - private: - Params params; -diff --git a/src/tests/stub_libmgr.cc b/src/tests/stub_libmgr.cc -index f8be88a58..cd3ffc2de 100644 ---- a/src/tests/stub_libmgr.cc -+++ b/src/tests/stub_libmgr.cc -@@ -174,11 +174,10 @@ void 
Mgr::IoAction::dump(StoreEntry* entry) STUB - Mgr::QueryParam::Pointer Mgr::QueryParams::get(const String& name) const STUB_RETVAL(Mgr::QueryParam::Pointer(NULL)) - void Mgr::QueryParams::pack(Ipc::TypedMsgHdr& msg) const STUB - void Mgr::QueryParams::unpack(const Ipc::TypedMsgHdr& msg) STUB --bool Mgr::QueryParams::Parse(const String& aParamsStr, QueryParams& aParams) STUB_RETVAL(false) -+void Mgr::QueryParams::Parse(Parser::Tokenizer &, QueryParams &) STUB - //private: - //Params::const_iterator Mgr::QueryParams::find(const String& name) const STUB_RETVAL(new Mgr::Params::const_iterator(*this)) - Mgr::QueryParam::Pointer Mgr::QueryParams::CreateParam(QueryParam::Type aType) STUB_RETVAL(Mgr::QueryParam::Pointer(NULL)) --bool Mgr::QueryParams::ParseParam(const String& paramStr, Param& param) STUB_RETVAL(false) - - #include "mgr/Registration.h" - //void Mgr::RegisterAction(char const * action, char const * desc, OBJH * handler, int pw_req_flag, int atomic); -diff --git a/src/tests/testCacheManager.cc b/src/tests/testCacheManager.cc -index f02396176..7d6631aae 100644 ---- a/src/tests/testCacheManager.cc -+++ b/src/tests/testCacheManager.cc -@@ -7,6 +7,7 @@ - */ - - #include "squid.h" -+#include "anyp/Uri.h" - #include "CacheManager.h" - #include "mgr/Action.h" - #include "Store.h" -@@ -17,11 +18,19 @@ - - CPPUNIT_TEST_SUITE_REGISTRATION( testCacheManager ); - -+/// Provides test code access to CacheManager internal symbols -+class CacheManagerInternals : public CacheManager -+{ -+public: -+ void ParseUrl(const AnyP::Uri &u) { CacheManager::ParseUrl(u); } -+}; -+ - /* init memory pools */ - - void testCacheManager::setUp() - { - Mem::Init(); -+ AnyP::UriScheme::Init(); - } - - /* -@@ -66,3 +75,146 @@ testCacheManager::testRegister() - CPPUNIT_ASSERT_EQUAL(1,(int)sentry->flags); - } - -+void -+testCacheManager::testParseUrl() -+{ -+ auto *mgr = static_cast(CacheManager::GetInstance()); -+ CPPUNIT_ASSERT(mgr != nullptr); -+ -+ std::vector validSchemes = { -+ 
AnyP::PROTO_CACHE_OBJECT, -+ AnyP::PROTO_HTTP, -+ AnyP::PROTO_HTTPS, -+ AnyP::PROTO_FTP -+ }; -+ -+ AnyP::Uri mgrUrl; -+ mgrUrl.host("localhost"); -+ mgrUrl.port(3128); -+ -+ const std::vector magicPrefixes = { -+ "/", -+ "/squid-internal-mgr/" -+ }; -+ -+ const std::vector validActions = { -+ "", -+ "menu" -+ }; -+ -+ const std::vector invalidActions = { -+ "INVALID" // any unregistered name -+ }; -+ -+ const std::vector validParams = { -+ "", -+ "?", -+ "?&", -+ "?&&&&&&&&&&&&", -+ "?foo=bar", -+ "?0123456789=bar", -+ "?foo=bar&", -+ "?foo=bar&&&&", -+ "?&foo=bar", -+ "?&&&&foo=bar", -+ "?&foo=bar&", -+ "?&&&&foo=bar&&&&", -+ "?foo=?_weird?~`:[]stuff&bar=okay&&&&&&", -+ "?intlist=1", -+ "?intlist=1,2,3,4,5", -+ "?string=1a", -+ "?string=1,2,3,4,z", -+ "?string=1,2,3,4,[0]", -+ "?intlist=1,2,3,4,5&string=1,2,3,4,y" -+ }; -+ -+ const std::vector invalidParams = { -+ "?/", -+ "?foo", -+ "?/foo", -+ "?foo/", -+ "?foo=", -+ "?foo=&", -+ "?=foo", -+ "? foo=bar", -+ "? &", -+ "?& ", -+ "?=&", -+ "?&=", -+ "? &&&", -+ "?& &&", -+ "?&& &", -+ "?=&&&", -+ "?&=&&", -+ "?&&=&" -+ }; -+ -+ const std::vector validFragments = { -+ "", -+ "#", -+ "##", -+ "#?a=b", -+ "#fragment" -+ }; -+ -+ for (const auto &scheme : validSchemes) { -+ mgrUrl.setScheme(scheme); -+ -+ for (const auto *magic : magicPrefixes) { -+ -+ // all schemes except cache_object require magic path prefix bytes -+ if (scheme != AnyP::PROTO_CACHE_OBJECT && strlen(magic) <= 2) -+ continue; -+ -+ /* Check the parser accepts all the valid cases */ -+ -+ for (const auto *action : validActions) { -+ for (const auto *param : validParams) { -+ for (const auto *frag : validFragments) { -+ try { -+ SBuf bits; -+ bits.append(magic); -+ bits.append(action); -+ bits.append(param); -+ bits.append(frag); -+ mgrUrl.path(bits); -+ -+ (void)mgr->ParseUrl(mgrUrl); -+ } catch (...) 
{ -+ std::cerr << std::endl -+ << "FAIL: " << mgrUrl -+ << Debug::Extra << "error: " << CurrentException << std::endl; -+ CPPUNIT_FAIL("rejected a valid URL"); -+ } -+ } -+ } -+ } -+ -+ /* Check that invalid parameters are rejected */ -+ -+ for (const auto *action : validActions) { -+ for (const auto *param : invalidParams) { -+ for (const auto *frag : validFragments) { -+ try { -+ SBuf bits; -+ bits.append(magic); -+ bits.append(action); -+ bits.append(param); -+ bits.append(frag); -+ mgrUrl.path(bits); -+ -+ (void)mgr->ParseUrl(mgrUrl); -+ -+ std::cerr << std::endl -+ << "FAIL: " << mgrUrl -+ << Debug::Extra << "error: should be rejected due to '" << param << "'" << std::endl; -+ } catch (const TextException &e) { -+ continue; // success. caught bad input -+ } -+ CPPUNIT_FAIL("failed to reject an invalid URL"); -+ } -+ } -+ } -+ } -+ } -+} -diff --git a/src/tests/testCacheManager.h b/src/tests/testCacheManager.h -index 6d32d69e5..fee15846a 100644 ---- a/src/tests/testCacheManager.h -+++ b/src/tests/testCacheManager.h -@@ -20,6 +20,7 @@ class testCacheManager : public CPPUNIT_NS::TestFixture - CPPUNIT_TEST_SUITE( testCacheManager ); - CPPUNIT_TEST( testCreate ); - CPPUNIT_TEST( testRegister ); -+ CPPUNIT_TEST( testParseUrl ); - CPPUNIT_TEST_SUITE_END(); - - public: -@@ -28,6 +29,7 @@ public: - protected: - void testCreate(); - void testRegister(); -+ void testParseUrl(); - }; - - #endif diff --git a/backport-CVE-2020-25097.patch b/backport-CVE-2020-25097.patch deleted file mode 100644 index 5ba30db..0000000 --- a/backport-CVE-2020-25097.patch +++ /dev/null @@ -1,60 +0,0 @@ -From dfd818595b54942cb1adc45f6aed95c9b706e3a8 Mon Sep 17 00:00:00 2001 -From: Amos Jeffries -Date: Fri, 4 Sep 2020 17:38:30 +1200 -Subject: [PATCH] Merge pull request from GHSA-jvf6-h9gj-pmj6 - -* Add slash prefix to path-rootless or path-noscheme URLs - -* Update src/anyp/Uri.cc - -Co-authored-by: Alex Rousskov - -* restore file trailer GH auto-removes - -* Remove redundant path-empty check - 
-* Removed stale comment left behind by b2ab59a - -Many things imply a leading `/` in a URI. Their enumeration is likely to -(and did) become stale, misleading the reader. - -* fixup: Remind that the `src` iterator may be at its end - -We are dereferencing `src` without comparing it to `\0`. -To many readers that (incorrectly) implies that we are not done iterating yet. - -Also fixed branch-added comment indentation. - -Co-authored-by: Alex Rousskov ---- - src/anyp/Uri.cc | 10 +++------- - 1 file changed, 3 insertions(+), 7 deletions(-) - -diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc -index 72dddb64aa..88fd83ee2d 100644 ---- a/src/anyp/Uri.cc -+++ b/src/anyp/Uri.cc -@@ -343,8 +343,9 @@ AnyP::Uri::parse(const HttpRequestMethod& method, const SBuf &rawUrl) - return false; - *dst = '\0'; - -- // bug 3074: received 'path' starting with '?', '#', or '\0' implies '/' -- if (*src == '?' || *src == '#' || *src == '\0') { -+ // We are looking at path-abempty. -+ if (*src != '/') { -+ // path-empty, including the end of the `src` c-string cases - urlpath[0] = '/'; - dst = &urlpath[1]; - } else { -@@ -358,11 +359,6 @@ AnyP::Uri::parse(const HttpRequestMethod& method, const SBuf &rawUrl) - /* We -could- be at the end of the buffer here */ - if (i > l) - return false; -- /* If the URL path is empty we set it to be "/" */ -- if (dst == urlpath) { -- *dst = '/'; -- ++dst; -- } - *dst = '\0'; - - foundPort = scheme.defaultPort(); // may be reset later diff --git a/backport-CVE-2021-28116.patch b/backport-CVE-2021-28116.patch deleted file mode 100644 index 116a520..0000000 --- a/backport-CVE-2021-28116.patch +++ /dev/null @@ -1,424 +0,0 @@ -commit b003a0da7865caa25b5d1e70c79329b32409b02a (HEAD -> refs/heads/v4, refs/remotes/origin/v4) -Author: Amos Jeffries -Date: 2021-09-24 21:53:11 +0000 - - WCCP: Validate packets better (#899) - - Update WCCP to support exception based error handling for - parsing and processing we are moving Squid to for protocol - handling. 
- - Update the main WCCPv2 parsing checks to throw meaningful - exceptions when detected. - -diff --git a/src/wccp2.cc b/src/wccp2.cc -index ee592449c..6ef469e91 100644 ---- a/src/wccp2.cc -+++ b/src/wccp2.cc -@@ -1108,6 +1108,59 @@ wccp2ConnectionClose(void) - * Functions for handling the requests. - */ - -+/// Checks that the given area section ends inside the given (whole) area. -+/// \param error the message to throw when the section does not fit -+static void -+CheckSectionLength(const void *sectionStart, const size_t sectionLength, const void *wholeStart, const size_t wholeSize, const char *error) -+{ -+ assert(sectionStart); -+ assert(wholeStart); -+ -+ const auto wholeEnd = static_cast(wholeStart) + wholeSize; -+ assert(sectionStart >= wholeStart && "we never go backwards"); -+ assert(sectionStart <= wholeEnd && "we never go beyond our whole (but zero-sized fields are OK)"); -+ static_assert(sizeof(wccp2_i_see_you_t) <= PTRDIFF_MAX, "paranoid: no UB when subtracting in-whole pointers"); -+ // subtraction safe due to the three assertions above -+ const auto remainderDiff = wholeEnd - static_cast(sectionStart); -+ -+ // casting safe due to the assertions above (and size_t definition) -+ assert(remainderDiff >= 0); -+ const auto remainderSize = static_cast(remainderDiff); -+ -+ if (sectionLength <= remainderSize) -+ return; -+ -+ throw TextException(error, Here()); -+} -+ -+/// Checks that the area contains at least dataLength bytes after the header. -+/// The size of the field header itself is not included in dataLength. 
-+/// \returns the total field size -- the field header and field data combined -+template -+static size_t -+CheckFieldDataLength(const FieldHeader *header, const size_t dataLength, const void *areaStart, const size_t areaSize, const char *error) -+{ -+ assert(header); -+ const auto dataStart = reinterpret_cast(header) + sizeof(header); -+ CheckSectionLength(dataStart, dataLength, areaStart, areaSize, error); -+ return sizeof(header) + dataLength; // no overflow after CheckSectionLength() -+} -+ -+/// Positions the given field at a given start within a given packet area. -+/// The Field type determines the correct field size (used for bounds checking). -+/// \param field the field pointer the function should set -+/// \param areaStart the start of a packet (sub)structure containing the field -+/// \param areaSize the size of the packet (sub)structure starting at areaStart -+/// \param fieldStart the start of a field within the given area -+/// \param error the message to throw when the field does not fit the area -+template -+static void -+SetField(Field *&field, const void *fieldStart, const void *areaStart, const size_t areaSize, const char *error) -+{ -+ CheckSectionLength(fieldStart, sizeof(Field), areaStart, areaSize, error); -+ field = static_cast(const_cast(fieldStart)); -+} -+ - /* - * Accept the UDP packet - */ -@@ -1124,8 +1177,6 @@ wccp2HandleUdp(int sock, void *) - - /* These structs form the parts of the packet */ - -- struct wccp2_item_header_t *header = NULL; -- - struct wccp2_security_none_t *security_info = NULL; - - struct wccp2_service_info_t *service_info = NULL; -@@ -1141,14 +1192,13 @@ wccp2HandleUdp(int sock, void *) - struct wccp2_cache_identity_info_t *cache_identity = NULL; - - struct wccp2_capability_info_header_t *router_capability_header = NULL; -+ char *router_capability_data_start = nullptr; - - struct wccp2_capability_element_t *router_capability_element; - - struct sockaddr_in from; - - struct in_addr cache_address; -- int len, 
found; -- short int data_length, offset; - uint32_t tmp; - char *ptr; - int num_caches; -@@ -1161,20 +1211,18 @@ wccp2HandleUdp(int sock, void *) - Ip::Address from_tmp; - from_tmp.setIPv4(); - -- len = comm_udp_recvfrom(sock, -- &wccp2_i_see_you, -- WCCP_RESPONSE_SIZE, -- 0, -- from_tmp); -+ const auto lenOrError = comm_udp_recvfrom(sock, &wccp2_i_see_you, WCCP_RESPONSE_SIZE, 0, from_tmp); - -- if (len < 0) -+ if (lenOrError < 0) - return; -+ const auto len = static_cast(lenOrError); - -- if (ntohs(wccp2_i_see_you.version) != WCCP2_VERSION) -- return; -- -- if (ntohl(wccp2_i_see_you.type) != WCCP2_I_SEE_YOU) -- return; -+ try { -+ // TODO: Remove wccp2_i_see_you.data and use a buffer to read messages. -+ const auto message_header_size = sizeof(wccp2_i_see_you) - sizeof(wccp2_i_see_you.data); -+ Must2(len >= message_header_size, "incomplete WCCP message header"); -+ Must2(ntohs(wccp2_i_see_you.version) == WCCP2_VERSION, "WCCP version unsupported"); -+ Must2(ntohl(wccp2_i_see_you.type) == WCCP2_I_SEE_YOU, "WCCP packet type unsupported"); - - /* FIXME INET6 : drop conversion boundary */ - from_tmp.getSockAddr(from); -@@ -1182,73 +1230,60 @@ wccp2HandleUdp(int sock, void *) - debugs(80, 3, "Incoming WCCPv2 I_SEE_YOU length " << ntohs(wccp2_i_see_you.length) << "."); - - /* Record the total data length */ -- data_length = ntohs(wccp2_i_see_you.length); -+ const auto data_length = ntohs(wccp2_i_see_you.length); -+ Must2(data_length <= len - message_header_size, -+ "malformed packet claiming it's bigger than received data"); - -- offset = 0; -- -- if (data_length > len) { -- debugs(80, DBG_IMPORTANT, "ERROR: Malformed WCCPv2 packet claiming it's bigger than received data"); -- return; -- } -+ size_t offset = 0; - - /* Go through the data structure */ -- while (data_length > offset) { -+ while (offset + sizeof(struct wccp2_item_header_t) <= data_length) { - - char *data = wccp2_i_see_you.data; - -- header = (struct wccp2_item_header_t *) &data[offset]; -+ const auto 
itemHeader = reinterpret_cast(&data[offset]); -+ const auto itemSize = CheckFieldDataLength(itemHeader, ntohs(itemHeader->length), -+ data, data_length, "truncated record"); -+ // XXX: Check "The specified length must be a multiple of 4 octets" -+ // requirement to avoid unaligned memory reads after the first item. - -- switch (ntohs(header->type)) { -+ switch (ntohs(itemHeader->type)) { - - case WCCP2_SECURITY_INFO: -- -- if (security_info != NULL) { -- debugs(80, DBG_IMPORTANT, "Duplicate security definition"); -- return; -- } -- -- security_info = (struct wccp2_security_none_t *) &wccp2_i_see_you.data[offset]; -+ Must2(!security_info, "duplicate security definition"); -+ SetField(security_info, itemHeader, itemHeader, itemSize, -+ "security definition truncated"); - break; - - case WCCP2_SERVICE_INFO: -- -- if (service_info != NULL) { -- debugs(80, DBG_IMPORTANT, "Duplicate service_info definition"); -- return; -- } -- -- service_info = (struct wccp2_service_info_t *) &wccp2_i_see_you.data[offset]; -+ Must2(!service_info, "duplicate service_info definition"); -+ SetField(service_info, itemHeader, itemHeader, itemSize, -+ "service_info definition truncated"); - break; - - case WCCP2_ROUTER_ID_INFO: -- -- if (router_identity_info != NULL) { -- debugs(80, DBG_IMPORTANT, "Duplicate router_identity_info definition"); -- return; -- } -- -- router_identity_info = (struct router_identity_info_t *) &wccp2_i_see_you.data[offset]; -+ Must2(!router_identity_info, "duplicate router_identity_info definition"); -+ SetField(router_identity_info, itemHeader, itemHeader, itemSize, -+ "router_identity_info definition truncated"); - break; - - case WCCP2_RTR_VIEW_INFO: -- -- if (router_view_header != NULL) { -- debugs(80, DBG_IMPORTANT, "Duplicate router_view definition"); -- return; -- } -- -- router_view_header = (struct router_view_t *) &wccp2_i_see_you.data[offset]; -+ Must2(!router_view_header, "duplicate router_view definition"); -+ SetField(router_view_header, itemHeader, 
itemHeader, itemSize, -+ "router_view definition truncated"); - break; - -- case WCCP2_CAPABILITY_INFO: -- -- if (router_capability_header != NULL) { -- debugs(80, DBG_IMPORTANT, "Duplicate router_capability definition"); -- return; -- } -+ case WCCP2_CAPABILITY_INFO: { -+ Must2(!router_capability_header, "duplicate router_capability definition"); -+ SetField(router_capability_header, itemHeader, itemHeader, itemSize, -+ "router_capability definition truncated"); - -- router_capability_header = (struct wccp2_capability_info_header_t *) &wccp2_i_see_you.data[offset]; -+ CheckFieldDataLength(router_capability_header, ntohs(router_capability_header->capability_info_length), -+ itemHeader, itemSize, "capability info truncated"); -+ router_capability_data_start = reinterpret_cast(router_capability_header) + -+ sizeof(*router_capability_header); - break; -+ } - - /* Nothing to do for the types below */ - -@@ -1257,22 +1292,17 @@ wccp2HandleUdp(int sock, void *) - break; - - default: -- debugs(80, DBG_IMPORTANT, "Unknown record type in WCCPv2 Packet (" << ntohs(header->type) << ")."); -+ debugs(80, DBG_IMPORTANT, "Unknown record type in WCCPv2 Packet (" << ntohs(itemHeader->type) << ")."); - } - -- offset += sizeof(struct wccp2_item_header_t); -- offset += ntohs(header->length); -- -- if (offset > data_length) { -- debugs(80, DBG_IMPORTANT, "Error: WCCPv2 packet tried to tell us there is data beyond the end of the packet"); -- return; -- } -+ offset += itemSize; -+ assert(offset <= data_length && "CheckFieldDataLength(itemHeader...) 
established that"); - } - -- if ((security_info == NULL) || (service_info == NULL) || (router_identity_info == NULL) || (router_view_header == NULL)) { -- debugs(80, DBG_IMPORTANT, "Incomplete WCCPv2 Packet"); -- return; -- } -+ Must2(security_info, "packet missing security definition"); -+ Must2(service_info, "packet missing service_info definition"); -+ Must2(router_identity_info, "packet missing router_identity_info definition"); -+ Must2(router_view_header, "packet missing router_view definition"); - - debugs(80, 5, "Complete packet received"); - -@@ -1308,10 +1338,7 @@ wccp2HandleUdp(int sock, void *) - break; - } - -- if (router_list_ptr->next == NULL) { -- debugs(80, DBG_IMPORTANT, "WCCPv2 Packet received from unknown router"); -- return; -- } -+ Must2(router_list_ptr->next, "packet received from unknown router"); - - /* Set the router id */ - router_list_ptr->info->router_address = router_identity_info->router_id_element.router_address; -@@ -1331,11 +1358,20 @@ wccp2HandleUdp(int sock, void *) - } - } else { - -- char *end = ((char *) router_capability_header) + sizeof(*router_capability_header) + ntohs(router_capability_header->capability_info_length) - sizeof(struct wccp2_capability_info_header_t); -- -- router_capability_element = (struct wccp2_capability_element_t *) (((char *) router_capability_header) + sizeof(*router_capability_header)); -- -- while ((char *) router_capability_element <= end) { -+ const auto router_capability_data_length = ntohs(router_capability_header->capability_info_length); -+ assert(router_capability_data_start); -+ const auto router_capability_data_end = router_capability_data_start + -+ router_capability_data_length; -+ for (auto router_capability_data_current = router_capability_data_start; -+ router_capability_data_current < router_capability_data_end;) { -+ -+ SetField(router_capability_element, router_capability_data_current, -+ router_capability_data_start, router_capability_data_length, -+ "capability element header 
truncated"); -+ const auto elementSize = CheckFieldDataLength( -+ router_capability_element, ntohs(router_capability_element->capability_length), -+ router_capability_data_start, router_capability_data_length, -+ "capability element truncated"); - - switch (ntohs(router_capability_element->capability_type)) { - -@@ -1377,7 +1413,7 @@ wccp2HandleUdp(int sock, void *) - debugs(80, DBG_IMPORTANT, "Unknown capability type in WCCPv2 Packet (" << ntohs(router_capability_element->capability_type) << ")."); - } - -- router_capability_element = (struct wccp2_capability_element_t *) (((char *) router_capability_element) + sizeof(struct wccp2_item_header_t) + ntohs(router_capability_element->capability_length)); -+ router_capability_data_current += elementSize; - } - } - -@@ -1396,23 +1432,34 @@ wccp2HandleUdp(int sock, void *) - num_caches = 0; - - /* Check to see if we're the master cache and update the cache list */ -- found = 0; -+ bool found = false; - service_list_ptr->lowest_ip = 1; - cache_list_ptr = &router_list_ptr->cache_list_head; - - /* to find the list of caches, we start at the end of the router view header */ - - ptr = (char *) (router_view_header) + sizeof(struct router_view_t); -+ const auto router_view_size = sizeof(struct router_view_t) + -+ ntohs(router_view_header->header.length); - - /* Then we read the number of routers */ -- memcpy(&tmp, ptr, sizeof(tmp)); -+ const uint32_t *routerCountRaw = nullptr; -+ SetField(routerCountRaw, ptr, router_view_header, router_view_size, -+ "malformed packet (truncated router view info w/o number of routers)"); - - /* skip the number plus all the ip's */ -- -- ptr += sizeof(tmp) + (ntohl(tmp) * sizeof(struct in_addr)); -+ ptr += sizeof(*routerCountRaw); -+ const auto ipCount = ntohl(*routerCountRaw); -+ const auto ipsSize = ipCount * sizeof(struct in_addr); // we check for unsigned overflow below -+ Must2(ipsSize / sizeof(struct in_addr) != ipCount, "huge IP address count"); -+ CheckSectionLength(ptr, ipsSize, 
router_view_header, router_view_size, "invalid IP address count"); -+ ptr += ipsSize; - - /* Then read the number of caches */ -- memcpy(&tmp, ptr, sizeof(tmp)); -+ const uint32_t *cacheCountRaw = nullptr; -+ SetField(cacheCountRaw, ptr, router_view_header, router_view_size, -+ "malformed packet (truncated router view info w/o cache count)"); -+ memcpy(&tmp, cacheCountRaw, sizeof(tmp)); // TODO: Replace tmp with cacheCount - ptr += sizeof(tmp); - - if (ntohl(tmp) != 0) { -@@ -1426,7 +1473,8 @@ wccp2HandleUdp(int sock, void *) - - case WCCP2_ASSIGNMENT_METHOD_HASH: - -- cache_identity = (struct wccp2_cache_identity_info_t *) ptr; -+ SetField(cache_identity, ptr, router_view_header, router_view_size, -+ "malformed packet (truncated router view info cache w/o assignment hash)"); - - ptr += sizeof(struct wccp2_cache_identity_info_t); - -@@ -1437,13 +1485,15 @@ wccp2HandleUdp(int sock, void *) - - case WCCP2_ASSIGNMENT_METHOD_MASK: - -- cache_mask_info = (struct cache_mask_info_t *) ptr; -+ SetField(cache_mask_info, ptr, router_view_header, router_view_size, -+ "malformed packet (truncated router view info cache w/o assignment mask)"); - - /* The mask assignment has an undocumented variable length entry here */ - - if (ntohl(cache_mask_info->num1) == 3) { - -- cache_mask_identity = (struct wccp2_cache_mask_identity_info_t *) ptr; -+ SetField(cache_mask_identity, ptr, router_view_header, router_view_size, -+ "malformed packet (truncated router view info cache w/o assignment mask identity)"); - - ptr += sizeof(struct wccp2_cache_mask_identity_info_t); - -@@ -1474,10 +1524,7 @@ wccp2HandleUdp(int sock, void *) - debugs (80, 5, "checking cache list: (" << std::hex << cache_address.s_addr << ":" << router_list_ptr->local_ip.s_addr << ")"); - - /* Check to see if it's the master, or us */ -- -- if (cache_address.s_addr == router_list_ptr->local_ip.s_addr) { -- found = 1; -- } -+ found = found || (cache_address.s_addr == router_list_ptr->local_ip.s_addr); - - if 
(cache_address.s_addr < router_list_ptr->local_ip.s_addr) { - service_list_ptr->lowest_ip = 0; -@@ -1494,7 +1541,7 @@ wccp2HandleUdp(int sock, void *) - cache_list_ptr->next = NULL; - - service_list_ptr->lowest_ip = 1; -- found = 1; -+ found = true; - num_caches = 1; - } - -@@ -1502,7 +1549,7 @@ wccp2HandleUdp(int sock, void *) - - router_list_ptr->num_caches = htonl(num_caches); - -- if ((found == 1) && (service_list_ptr->lowest_ip == 1)) { -+ if (found && (service_list_ptr->lowest_ip == 1)) { - if (ntohl(router_view_header->change_number) != router_list_ptr->member_change) { - debugs(80, 4, "Change detected - queueing up new assignment"); - router_list_ptr->member_change = ntohl(router_view_header->change_number); -@@ -1515,6 +1562,10 @@ wccp2HandleUdp(int sock, void *) - eventDelete(wccp2AssignBuckets, NULL); - debugs(80, 5, "I am not the lowest ip cache - not assigning buckets"); - } -+ -+ } catch (...) { -+ debugs(80, DBG_IMPORTANT, "ERROR: Ignoring WCCPv2 message: " << CurrentException); -+ } - } - - static void diff --git a/backport-CVE-2021-28651.patch b/backport-CVE-2021-28651.patch deleted file mode 100644 index 36b620b..0000000 --- a/backport-CVE-2021-28651.patch +++ /dev/null @@ -1,23 +0,0 @@ -From 47a085ff06598b64817875769022b8707a0af7db Mon Sep 17 00:00:00 2001 -From: Amos Jeffries -Date: Wed, 24 Feb 2021 00:53:21 +0000 -Subject: [PATCH] Bug 5104: Memory leak in RFC 2169 response parsing (#778) - -A temporary parsing buffer was not being released when -parsing completed. 
---- - src/urn.cc | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/src/urn.cc b/src/urn.cc -index 69c29b75f4e..72ab801a906 100644 ---- a/src/urn.cc -+++ b/src/urn.cc -@@ -425,6 +425,7 @@ urnParseReply(const char *inbuf, const HttpRequestMethod& m) - } - - debugs(52, 3, "urnParseReply: Found " << i << " URLs"); -+ xfree(buf); - return list; - } - diff --git a/backport-CVE-2021-28662.patch b/backport-CVE-2021-28662.patch deleted file mode 100644 index ec5d806..0000000 --- a/backport-CVE-2021-28662.patch +++ /dev/null @@ -1,22 +0,0 @@ -From 051824924c709bd6162a378f746fb859454c674e Mon Sep 17 00:00:00 2001 -From: Alex Rousskov -Date: Tue, 16 Mar 2021 11:45:11 -0400 -Subject: [PATCH] Merge pull request from GHSA-jjq6-mh2h-g39h - ---- - src/http/RegisteredHeaders.cc | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/http/RegisteredHeaders.cc b/src/http/RegisteredHeaders.cc -index a4f96db2b78..84f177af2d8 100644 ---- a/src/http/RegisteredHeaders.cc -+++ b/src/http/RegisteredHeaders.cc -@@ -37,7 +37,7 @@ HeaderTableRecord::HeaderTableRecord(const char *n, HdrType theId, HdrFieldType - const HeaderTableRecord& - HeaderLookupTable_t::lookup (const char *buf, const std::size_t len) const { - const HeaderTableRecord *r = HttpHeaderHashTable::lookup(buf, len); -- if (!r) -+ if (!r || r->id == Http::HdrType::OTHER) - return BadHdr; - return *r; - } diff --git a/backport-CVE-2021-31806-CVE-2021-31808.patch b/backport-CVE-2021-31806-CVE-2021-31808.patch deleted file mode 100644 index 596c20c..0000000 --- a/backport-CVE-2021-31806-CVE-2021-31808.patch +++ /dev/null @@ -1,235 +0,0 @@ -From 7024fb734a59409889e53df2257b3fc817809fb4 Mon Sep 17 00:00:00 2001 -From: Alex Rousskov -Date: Wed, 31 Mar 2021 02:44:42 +0000 -Subject: [PATCH] Handle more Range requests (#790) - -Also removed some effectively unused code. 
---- - src/HttpHdrRange.cc | 17 ------------- - src/HttpHeaderRange.h | 5 ++-- - src/client_side.cc | 4 ++-- - src/client_side_request.cc | 27 ++++++++++++++++++--- - src/client_side_request.h | 7 +++++- - src/http/Stream.cc | 49 ++++++-------------------------------- - 6 files changed, 41 insertions(+), 68 deletions(-) - -diff --git a/src/HttpHdrRange.cc b/src/HttpHdrRange.cc -index e7179dc..5849144 100644 ---- a/src/HttpHdrRange.cc -+++ b/src/HttpHdrRange.cc -@@ -526,23 +526,6 @@ HttpHdrRange::offsetLimitExceeded(const int64_t limit) const - return true; - } - --bool --HttpHdrRange::contains(const HttpHdrRangeSpec& r) const --{ -- assert(r.length >= 0); -- HttpHdrRangeSpec::HttpRange rrange(r.offset, r.offset + r.length); -- -- for (const_iterator i = begin(); i != end(); ++i) { -- HttpHdrRangeSpec::HttpRange irange((*i)->offset, (*i)->offset + (*i)->length); -- HttpHdrRangeSpec::HttpRange intersection = rrange.intersection(irange); -- -- if (intersection.start == irange.start && intersection.size() == irange.size()) -- return true; -- } -- -- return false; --} -- - const HttpHdrRangeSpec * - HttpHdrRangeIter::currentSpec() const - { -diff --git a/src/HttpHeaderRange.h b/src/HttpHeaderRange.h -index 2103b64..352e749 100644 ---- a/src/HttpHeaderRange.h -+++ b/src/HttpHeaderRange.h -@@ -78,7 +78,6 @@ public: - int64_t firstOffset() const; - int64_t lowestOffset(int64_t) const; - bool offsetLimitExceeded(const int64_t limit) const; -- bool contains(const HttpHdrRangeSpec& r) const; - std::vector specs; - - private: -@@ -100,9 +99,9 @@ public: - void updateSpec(); - int64_t debt() const; - void debt(int64_t); -- int64_t debt_size; /* bytes left to send from the current spec */ -+ int64_t debt_size = 0; /* bytes left to send from the current spec */ - String boundary; /* boundary for multipart responses */ -- bool valid; -+ bool valid = false; - }; - - #endif /* SQUID_HTTPHEADERRANGE_H */ -diff --git a/src/client_side.cc b/src/client_side.cc -index 120c39c..9516166 
100644 ---- a/src/client_side.cc -+++ b/src/client_side.cc -@@ -724,8 +724,8 @@ clientPackRangeHdr(const HttpReply * rep, const HttpHdrRangeSpec * spec, String - * warning: assumes that HTTP headers for individual ranges at the - * time of the actuall assembly will be exactly the same as - * the headers when clientMRangeCLen() is called */ --int --ClientHttpRequest::mRangeCLen() -+int64_t -+ClientHttpRequest::mRangeCLen() const - { - int64_t clen = 0; - MemBuf mb; -diff --git a/src/client_side_request.cc b/src/client_side_request.cc -index a37f8d4..3c83e9e 100644 ---- a/src/client_side_request.cc -+++ b/src/client_side_request.cc -@@ -1094,9 +1094,6 @@ clientInterpretRequestHeaders(ClientHttpRequest * http) - * iter up at this point. - */ - node->readBuffer.offset = request->range->lowestOffset(0); -- http->range_iter.pos = request->range->begin(); -- http->range_iter.end = request->range->end(); -- http->range_iter.valid = true; - } - } - -@@ -1954,6 +1951,30 @@ ClientHttpRequest::setErrorUri(const char *aUri) - #include "client_side_request.cci" - #endif - -+// XXX: This should not be a _request_ method. Move range_iter elsewhere. -+int64_t -+ClientHttpRequest::prepPartialResponseGeneration() -+{ -+ assert(request); -+ assert(request->range); -+ -+ range_iter.pos = request->range->begin(); -+ range_iter.end = request->range->end(); -+ range_iter.debt_size = 0; -+ const auto multipart = request->range->specs.size() > 1; -+ if (multipart) -+ range_iter.boundary = rangeBoundaryStr(); -+ range_iter.valid = true; // TODO: Remove. -+ range_iter.updateSpec(); // TODO: Refactor to initialize rather than update. -+ -+ assert(range_iter.pos != range_iter.end); -+ const auto &firstRange = *range_iter.pos; -+ assert(firstRange); -+ out.offset = firstRange->offset; -+ -+ return multipart ? mRangeCLen() : firstRange->length; -+} -+ - #if USE_ADAPTATION - /// Initiate an asynchronous adaptation transaction which will call us back. 
- void -diff --git a/src/client_side_request.h b/src/client_side_request.h -index 704802b..7ab3262 100644 ---- a/src/client_side_request.h -+++ b/src/client_side_request.h -@@ -131,7 +131,7 @@ public: - - dlink_node active; - dlink_list client_stream; -- int mRangeCLen(); -+ int64_t mRangeCLen() const; - - ClientRequestContext *calloutContext; - void doCallouts(); -@@ -148,6 +148,11 @@ public: - /// neither the current request nor the parsed request URI are known - void setErrorUri(const char *errorUri); - -+ /// Prepares to satisfy a Range request with a generated HTTP 206 response. -+ /// Initializes range_iter state to allow raw range_iter access. -+ /// \returns Content-Length value for the future response; never negative -+ int64_t prepPartialResponseGeneration(); -+ - /// Build an error reply. For use with the callouts. - void calloutsError(const err_type error, const int errDetail); - -diff --git a/src/http/Stream.cc b/src/http/Stream.cc -index 1370862..ff44496 100644 ---- a/src/http/Stream.cc -+++ b/src/http/Stream.cc -@@ -444,59 +444,27 @@ Http::Stream::buildRangeHeader(HttpReply *rep) - } else { - /* XXX: TODO: Review, this unconditional set may be wrong. */ - rep->sline.set(rep->sline.version, Http::scPartialContent); -- // web server responded with a valid, but unexpected range. -- // will (try-to) forward as-is. -- //TODO: we should cope with multirange request/responses -- // TODO: review, since rep->content_range is always nil here. -- bool replyMatchRequest = contentRange != nullptr ? 
-- request->range->contains(contentRange->spec) : -- true; -+ -+ // before range_iter accesses -+ const auto actual_clen = http->prepPartialResponseGeneration(); -+ - const int spec_count = http->request->range->specs.size(); -- int64_t actual_clen = -1; - - debugs(33, 3, "range spec count: " << spec_count << - " virgin clen: " << rep->content_length); - assert(spec_count > 0); - /* append appropriate header(s) */ - if (spec_count == 1) { -- if (!replyMatchRequest) { -- hdr->putContRange(contentRange); -- actual_clen = rep->content_length; -- //http->range_iter.pos = rep->content_range->spec.begin(); -- (*http->range_iter.pos)->offset = contentRange->spec.offset; -- (*http->range_iter.pos)->length = contentRange->spec.length; -- -- } else { -- HttpHdrRange::iterator pos = http->request->range->begin(); -- assert(*pos); -- /* append Content-Range */ -- -- if (!contentRange) { -- /* No content range, so this was a full object we are -- * sending parts of. -- */ -- httpHeaderAddContRange(hdr, **pos, rep->content_length); -- } -- -- /* set new Content-Length to the actual number of bytes -- * transmitted in the message-body */ -- actual_clen = (*pos)->length; -- } -+ const auto singleSpec = *http->request->range->begin(); -+ assert(singleSpec); -+ httpHeaderAddContRange(hdr, *singleSpec, rep->content_length); - } else { - /* multipart! 
*/ -- /* generate boundary string */ -- http->range_iter.boundary = http->rangeBoundaryStr(); - /* delete old Content-Type, add ours */ - hdr->delById(Http::HdrType::CONTENT_TYPE); - httpHeaderPutStrf(hdr, Http::HdrType::CONTENT_TYPE, - "multipart/byteranges; boundary=\"" SQUIDSTRINGPH "\"", - SQUIDSTRINGPRINT(http->range_iter.boundary)); -- /* Content-Length is not required in multipart responses -- * but it is always nice to have one */ -- actual_clen = http->mRangeCLen(); -- -- /* http->out needs to start where we want data at */ -- http->out.offset = http->range_iter.currentSpec()->offset; - } - - /* replace Content-Length header */ -@@ -504,9 +472,6 @@ Http::Stream::buildRangeHeader(HttpReply *rep) - hdr->delById(Http::HdrType::CONTENT_LENGTH); - hdr->putInt64(Http::HdrType::CONTENT_LENGTH, actual_clen); - debugs(33, 3, "actual content length: " << actual_clen); -- -- /* And start the range iter off */ -- http->range_iter.updateSpec(); - } - } - --- -2.23.0 diff --git a/backport-CVE-2021-33620.patch b/backport-CVE-2021-33620.patch deleted file mode 100644 index 9bfc82f..0000000 --- a/backport-CVE-2021-33620.patch +++ /dev/null @@ -1,111 +0,0 @@ -From 6c9c44d0e9cf7b72bb233360c5308aa063af3d69 Mon Sep 17 00:00:00 2001 -From: Alex Rousskov -Date: Fri, 2 Apr 2021 07:46:20 +0000 -Subject: [PATCH] Handle more partial responses (#791) - ---- - src/HttpHdrContRange.cc | 14 ++++++++++++-- - src/HttpHeaderRange.h | 7 +++++-- - src/clients/Client.cc | 7 +++++-- - src/http/Stream.cc | 9 +++++++-- - 4 files changed, 29 insertions(+), 8 deletions(-) - -diff --git a/src/HttpHdrContRange.cc b/src/HttpHdrContRange.cc -index 8270e0f11aa..79a507fb84c 100644 ---- a/src/HttpHdrContRange.cc -+++ b/src/HttpHdrContRange.cc -@@ -161,9 +161,13 @@ httpHdrContRangeParseInit(HttpHdrContRange * range, const char *str) - - ++p; - -- if (*p == '*') -+ if (*p == '*') { -+ if (!known_spec(range->spec.offset)) { -+ debugs(68, 2, "invalid (*/*) content-range-spec near: '" << str << "'"); -+ 
return 0; -+ } - range->elength = range_spec_unknown; -- else if (!httpHeaderParseOffset(p, &range->elength)) -+ } else if (!httpHeaderParseOffset(p, &range->elength)) - return 0; - else if (range->elength <= 0) { - /* Additional paranoidal check for BUG2155 - entity-length MUST be > 0 */ -@@ -174,6 +178,12 @@ httpHdrContRangeParseInit(HttpHdrContRange * range, const char *str) - return 0; - } - -+ // reject unsatisfied-range and such; we only use well-defined ranges today -+ if (!known_spec(range->spec.offset) || !known_spec(range->spec.length)) { -+ debugs(68, 2, "unwanted content-range-spec near: '" << str << "'"); -+ return 0; -+ } -+ - debugs(68, 8, "parsed content-range field: " << - (long int) range->spec.offset << "-" << - (long int) range->spec.offset + range->spec.length - 1 << " / " << -diff --git a/src/HttpHeaderRange.h b/src/HttpHeaderRange.h -index 6d93e72b2b8..bf54c8562ba 100644 ---- a/src/HttpHeaderRange.h -+++ b/src/HttpHeaderRange.h -@@ -18,8 +18,11 @@ - class HttpReply; - class Packable; - --/* http byte-range-spec */ -- -+// TODO: Refactor to disambiguate and provide message-specific APIs. -+/// either byte-range-spec (in a request Range header) -+/// or suffix-byte-range-spec (in a request Range header) -+/// or byte-range part of byte-range-resp (in a response Content-Range header) -+/// or "*" part of unsatisfied-range (in a response Content-Range header) - class HttpHdrRangeSpec - { - MEMPROXY_CLASS(HttpHdrRangeSpec); -diff --git a/src/clients/Client.cc b/src/clients/Client.cc -index dfb13d053c3..2a2f0e8f878 100644 ---- a/src/clients/Client.cc -+++ b/src/clients/Client.cc -@@ -520,8 +520,11 @@ Client::haveParsedReplyHeaders() - maybePurgeOthers(); - - // adaptation may overwrite old offset computed using the virgin response -- const bool partial = theFinalReply->contentRange(); -- currentOffset = partial ? 
theFinalReply->contentRange()->spec.offset : 0; -+ currentOffset = 0; -+ if (const auto cr = theFinalReply->contentRange()) { -+ if (cr->spec.offset != HttpHdrRangeSpec::UnknownPosition) -+ currentOffset = cr->spec.offset; -+ } - } - - /// whether to prevent caching of an otherwise cachable response -diff --git a/src/http/Stream.cc b/src/http/Stream.cc -index 9e346b9d99d..d685a22306e 100644 ---- a/src/http/Stream.cc -+++ b/src/http/Stream.cc -@@ -171,12 +171,13 @@ Http::Stream::getNextRangeOffset() const - return start; - } - -- } else if (reply && reply->contentRange()) { -+ } else if (const auto cr = reply ? reply->contentRange() : nullptr) { - /* request does not have ranges, but reply does */ - /** \todo FIXME: should use range_iter_pos on reply, as soon as reply->content_range - * becomes HttpHdrRange rather than HttpHdrRangeSpec. - */ -- return http->out.offset + reply->contentRange()->spec.offset; -+ if (cr->spec.offset != HttpHdrRangeSpec::UnknownPosition) -+ return http->out.offset + cr->spec.offset; - } - - return http->out.offset; -@@ -240,6 +241,10 @@ Http::Stream::socketState() - - // did we get at least what we expected, based on range specs? 
- -+ // this Content-Range does not tell us how many bytes to expect -+ if (bytesExpected == HttpHdrRangeSpec::UnknownPosition) -+ return STREAM_NONE; -+ - if (bytesSent == bytesExpected) // got everything - return STREAM_COMPLETE; diff --git a/backport-CVE-2021-46784.patch b/backport-CVE-2021-46784.patch deleted file mode 100644 index c2630d3..0000000 --- a/backport-CVE-2021-46784.patch +++ /dev/null @@ -1,129 +0,0 @@ -From 780c4ea1b4c9d2fb41f6962aa6ed73ae57f74b2b Mon Sep 17 00:00:00 2001 -From: Joshua Rogers -Date: Mon, 18 Apr 2022 13:42:36 +0000 -Subject: [PATCH] Improve handling of Gopher responses (#1022) - ---- - src/gopher.cc | 45 ++++++++++++++++++++------------------------- - 1 file changed, 20 insertions(+), 25 deletions(-) - -diff --git a/src/gopher.cc b/src/gopher.cc -index 169b0e18299..6187da18bcd 100644 ---- a/src/gopher.cc -+++ b/src/gopher.cc -@@ -371,7 +371,6 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) - char *lpos = NULL; - char *tline = NULL; - LOCAL_ARRAY(char, line, TEMP_BUF_SIZE); -- LOCAL_ARRAY(char, tmpbuf, TEMP_BUF_SIZE); - char *name = NULL; - char *selector = NULL; - char *host = NULL; -@@ -381,7 +380,6 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) - char gtype; - StoreEntry *entry = NULL; - -- memset(tmpbuf, '\0', TEMP_BUF_SIZE); - memset(line, '\0', TEMP_BUF_SIZE); - - entry = gopherState->entry; -@@ -416,7 +414,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) - return; - } - -- String outbuf; -+ SBuf outbuf; - - if (!gopherState->HTML_header_added) { - if (gopherState->conversion == GopherStateData::HTML_CSO_RESULT) -@@ -583,34 +581,34 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) - break; - } - -- memset(tmpbuf, '\0', TEMP_BUF_SIZE); -- - if ((gtype == GOPHER_TELNET) || (gtype == GOPHER_3270)) { - if (strlen(escaped_selector) != 0) -- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", -- icon_url, escaped_selector, rfc1738_escape_part(host), -- *port ? 
":" : "", port, html_quote(name)); -+ outbuf.appendf(" %s\n", -+ icon_url, escaped_selector, rfc1738_escape_part(host), -+ *port ? ":" : "", port, html_quote(name)); - else -- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", -- icon_url, rfc1738_escape_part(host), *port ? ":" : "", -- port, html_quote(name)); -+ outbuf.appendf(" %s\n", -+ icon_url, rfc1738_escape_part(host), *port ? ":" : "", -+ port, html_quote(name)); - - } else if (gtype == GOPHER_INFO) { -- snprintf(tmpbuf, TEMP_BUF_SIZE, "\t%s\n", html_quote(name)); -+ outbuf.appendf("\t%s\n", html_quote(name)); - } else { - if (strncmp(selector, "GET /", 5) == 0) { - /* WWW link */ -- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", -- icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name)); -+ outbuf.appendf(" %s\n", -+ icon_url, host, rfc1738_escape_unescaped(selector + 5), html_quote(name)); -+ } else if (gtype == GOPHER_WWW) { -+ outbuf.appendf(" %s\n", -+ icon_url, rfc1738_escape_unescaped(selector), html_quote(name)); - } else { - /* Standard link */ -- snprintf(tmpbuf, TEMP_BUF_SIZE, " %s\n", -- icon_url, host, gtype, escaped_selector, html_quote(name)); -+ outbuf.appendf(" %s\n", -+ icon_url, host, gtype, escaped_selector, html_quote(name)); - } - } - - safe_free(escaped_selector); -- outbuf.append(tmpbuf); - } else { - memset(line, '\0', TEMP_BUF_SIZE); - continue; -@@ -643,13 +641,12 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len) - break; - - if (gopherState->cso_recno != recno) { -- snprintf(tmpbuf, TEMP_BUF_SIZE, "

Record# %d
%s

\n
", recno, html_quote(result));
-+                    outbuf.appendf("

Record# %d
%s

\n
", recno, html_quote(result));
-                     gopherState->cso_recno = recno;
-                 } else {
--                    snprintf(tmpbuf, TEMP_BUF_SIZE, "%s\n", html_quote(result));
-+                    outbuf.appendf("%s\n", html_quote(result));
-                 }
- 
--                outbuf.append(tmpbuf);
-                 break;
-             } else {
-                 int code;
-@@ -677,8 +674,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
- 
-                 case 502: { /* Too Many Matches */
-                     /* Print the message the server returns */
--                    snprintf(tmpbuf, TEMP_BUF_SIZE, "

%s

\n
", html_quote(result));
--                    outbuf.append(tmpbuf);
-+                    outbuf.appendf("

%s

\n
", html_quote(result));
-                     break;
-                 }
- 
-@@ -694,13 +690,12 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
- 
-     }               /* while loop */
- 
--    if (outbuf.size() > 0) {
--        entry->append(outbuf.rawBuf(), outbuf.size());
-+    if (outbuf.length() > 0) {
-+        entry->append(outbuf.rawContent(), outbuf.length());
-         /* now let start sending stuff to client */
-         entry->flush();
-     }
- 
--    outbuf.clean();
-     return;
- }
- 
diff --git a/backport-CVE-2022-41317.patch b/backport-CVE-2022-41317.patch
deleted file mode 100644
index 1c4b351..0000000
--- a/backport-CVE-2022-41317.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-From 2c5d2de9bdcd25d1127987f8f76c986ab5bfb6da Mon Sep 17 00:00:00 2001
-From: Amos Jeffries 
-Date: Wed, 17 Aug 2022 23:32:43 +0000
-Subject: [PATCH] Fix typo in manager ACL (#1113)
-
----
- src/cf.data.pre | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/src/cf.data.pre b/src/cf.data.pre
-index 4aef432cad1..f15d56b13d7 100644
---- a/src/cf.data.pre
-+++ b/src/cf.data.pre
-@@ -1001,7 +1001,7 @@ DEFAULT: ssl::certUntrusted ssl_error X509_V_ERR_INVALID_CA X509_V_ERR_SELF_SIGN
- DEFAULT: ssl::certSelfSigned ssl_error X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT
- ENDIF
- DEFAULT: all src all
--DEFAULT: manager url_regex -i ^cache_object:// +i ^https?://[^/]+/squid-internal-mgr/
-+DEFAULT: manager url_regex -i ^cache_object:// +i ^[^:]+://[^/]+/squid-internal-mgr/
- DEFAULT: localhost src 127.0.0.1/32 ::1
- DEFAULT: to_localhost dst 127.0.0.0/8 0.0.0.0/32 ::1/128 ::/128
- DEFAULT_DOC: ACLs all, manager, localhost, and to_localhost are predefined.
diff --git a/backport-CVE-2022-41318.patch b/backport-CVE-2022-41318.patch
deleted file mode 100644
index facc001..0000000
--- a/backport-CVE-2022-41318.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From 4031c6c2b004190fdffbc19dab7cd0305a2025b7 Mon Sep 17 00:00:00 2001
-From: Amos Jeffries 
-Date: Tue, 9 Aug 2022 23:34:54 +0000
-Subject: [PATCH] Bug 3193 pt2: NTLM decoder truncating strings (#1114)
-
-The initial bug fix overlooked large 'offset' causing integer
-wrap to extract a too-short length string.
-
-Improve debugs and checks sequence to clarify cases and ensure
-that all are handled correctly.
----
- lib/ntlmauth/ntlmauth.cc | 13 +++++++++++--
- 1 file changed, 11 insertions(+), 2 deletions(-)
-
-diff --git a/lib/ntlmauth/ntlmauth.cc b/lib/ntlmauth/ntlmauth.cc
-index 5d96372906d..f00fd51f83f 100644
---- a/lib/ntlmauth/ntlmauth.cc
-+++ b/lib/ntlmauth/ntlmauth.cc
-@@ -107,10 +107,19 @@ ntlm_fetch_string(const ntlmhdr *packet, const int32_t packet_size, const strhdr
-     int32_t o = le32toh(str->offset);
-     // debug("ntlm_fetch_string(plength=%d,l=%d,o=%d)\n",packet_size,l,o);
- 
--    if (l < 0 || l > NTLM_MAX_FIELD_LENGTH || o + l > packet_size || o == 0) {
--        debug("ntlm_fetch_string: insane data (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
-+    if (l < 0 || l > NTLM_MAX_FIELD_LENGTH) {
-+        debug("ntlm_fetch_string: insane string length (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
-         return rv;
-     }
-+    else if (o <= 0 || o > packet_size) {
-+        debug("ntlm_fetch_string: insane string offset (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
-+        return rv;
-+    }
-+    else if (l > packet_size - o) {
-+        debug("ntlm_fetch_string: truncated string data (pkt-sz: %d, fetch len: %d, offset: %d)\n", packet_size,l,o);
-+        return rv;
-+    }
-+
-     rv.str = (char *)packet + o;
-     rv.l = 0;
-     if ((flags & NTLM_NEGOTIATE_ASCII) == 0) {
diff --git a/cache_swap.sh b/cache_swap.sh
index 5e94072..77d06ac 100644
--- a/cache_swap.sh
+++ b/cache_swap.sh
@@ -5,12 +5,17 @@ fi
 
 SQUID_CONF=${SQUID_CONF:-"/etc/squid/squid.conf"}
 
-CACHE_SWAP=`sed -e 's/#.*//g' $SQUID_CONF | \
-	grep cache_dir | awk '{ print $3 }'`
+CACHE_SWAP=`awk '/^[[:blank:]]*cache_dir/ { print $3 }' "$SQUID_CONF"`
 
+init_cache_dirs=0
 for adir in $CACHE_SWAP; do
 	if [ ! -d $adir/00 ]; then
 		echo -n "init_cache_dir $adir... "
-		squid -N -z -F -f $SQUID_CONF >> /var/log/squid/squid.out 2>&1
+		init_cache_dirs=1
 	fi
 done
+
+if [ $init_cache_dirs -ne 0 ]; then
+	echo ""
+	squid --foreground -z -f "$SQUID_CONF" >> /var/log/squid/squid.out 2>&1
+fi
diff --git a/fix-build-error-with-gcc-10.patch b/fix-build-error-with-gcc-10.patch
deleted file mode 100644
index 21316d3..0000000
--- a/fix-build-error-with-gcc-10.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 486660deabfa33375df2c8f053d84d6128a1235f Mon Sep 17 00:00:00 2001
-From: Francesco Chemolli 
-Date: Sat, 21 Mar 2020 22:18:43 +0000
-Subject: [PATCH] FtpGateway.cc: fix build on gcc-10 [-Werror=class-memaccess]
- (#573)
-
-Since a1c06c7, tokens initialization is done by FtpLineToken
-constructor, and g++-10 complains about memsetting a nontrivial object:
-
-    clearing an object of non-trivial type [-Werror=class-memaccess]
----
- src/clients/FtpGateway.cc | 2 --
- 1 file changed, 2 deletions(-)
-
-diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
-index 79de14d7889..c0350116727 100644
---- a/src/clients/FtpGateway.cc
-+++ b/src/clients/FtpGateway.cc
-@@ -563,8 +563,6 @@ ftpListParseParts(const char *buf, struct Ftp::GatewayFlags flags)
- 
-     n_tokens = 0;
- 
--    memset(tokens, 0, sizeof(tokens));
--
-     xbuf = xstrdup(buf);
- 
-     if (flags.tried_nlst) {
-
diff --git a/squid-3.0.STABLE1-perlpath.patch b/squid-3.0.STABLE1-perlpath.patch
index 6d244a4..d927e43 100644
--- a/squid-3.0.STABLE1-perlpath.patch
+++ b/squid-3.0.STABLE1-perlpath.patch
@@ -1,10 +1,10 @@
 diff --git a/contrib/url-normalizer.pl b/contrib/url-normalizer.pl
-index 90ac6a4..8dbed90 100755
+index 4cb0480..4b89910 100755
 --- a/contrib/url-normalizer.pl
 +++ b/contrib/url-normalizer.pl
 @@ -1,4 +1,4 @@
 -#!/usr/local/bin/perl -Tw
 +#!/usr/bin/perl -Tw
  #
- # * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
+ # * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
  # *
diff --git a/squid-4.0.21-large-acl.patch b/squid-4.0.21-large-acl.patch
deleted file mode 100644
index 8aacf38..0000000
--- a/squid-4.0.21-large-acl.patch
+++ /dev/null
@@ -1,178 +0,0 @@
-diff --git a/src/acl/RegexData.cc b/src/acl/RegexData.cc
-index 01a4c12..b5c1679 100644
---- a/src/acl/RegexData.cc
-+++ b/src/acl/RegexData.cc
-@@ -22,6 +22,7 @@
- #include "ConfigParser.h"
- #include "Debug.h"
- #include "sbuf/List.h"
-+#include "sbuf/Algorithms.h"
- 
- ACLRegexData::~ACLRegexData()
- {
-@@ -129,6 +130,18 @@ compileRE(std::list &curlist, const char * RE, int flags)
-     return true;
- }
- 
-+static bool
-+compileRE(std::list &curlist, const SBufList &RE, int flags)
-+{
-+	if (RE.empty())
-+		return curlist.empty(); // XXX: old code did this. It looks wrong.
-+	SBuf regexp;
-+	static const SBuf openparen("("), closeparen(")"), separator(")|(");
-+	JoinContainerIntoSBuf(regexp, RE.begin(), RE.end(), separator, openparen,
-+			closeparen);
-+	return compileRE(curlist, regexp.c_str(), flags);
-+}
-+
- /** Compose and compile one large RE from a set of (small) REs.
-  * The ultimate goal is to have only one RE per ACL so that match() is
-  * called only once per ACL.
-@@ -137,16 +150,11 @@ static int
- compileOptimisedREs(std::list &curlist, const SBufList &sl)
- {
-     std::list newlist;
--    int numREs = 0;
-+    SBufList accumulatedRE;
-+    int numREs = 0, reSize = 0;
-     int flags = REG_EXTENDED | REG_NOSUB;
--    int largeREindex = 0;
--    char largeRE[BUFSIZ];
--    *largeRE = 0;
- 
-     for (const SBuf & configurationLineWord : sl) {
--        int RElen;
--        RElen = configurationLineWord.length();
--
-         static const SBuf minus_i("-i");
-         static const SBuf plus_i("+i");
-         if (configurationLineWord == minus_i) {
-@@ -155,10 +163,11 @@ compileOptimisedREs(std::list &curlist, const SBufList &sl)
-                 debugs(28, 2, "optimisation of -i ... -i" );
-             } else {
-                 debugs(28, 2, "-i" );
--                if (!compileRE(newlist, largeRE, flags))
-+                if (!compileRE(newlist, accumulatedRE, flags))
-                     return 0;
-                 flags |= REG_ICASE;
--                largeRE[largeREindex=0] = '\0';
-+                accumulatedRE.clear();
-+                reSize = 0;
-             }
-         } else if (configurationLineWord == plus_i) {
-             if ((flags & REG_ICASE) == 0) {
-@@ -166,37 +175,34 @@ compileOptimisedREs(std::list &curlist, const SBufList &sl)
-                 debugs(28, 2, "optimisation of +i ... +i");
-             } else {
-                 debugs(28, 2, "+i");
--                if (!compileRE(newlist, largeRE, flags))
-+                if (!compileRE(newlist, accumulatedRE, flags))
-                     return 0;
-                 flags &= ~REG_ICASE;
--                largeRE[largeREindex=0] = '\0';
-+                accumulatedRE.clear();
-+                reSize = 0;
-             }
--        } else if (RElen + largeREindex + 3 < BUFSIZ-1) {
-+        } else if (reSize < 1024) {
-             debugs(28, 2, "adding RE '" << configurationLineWord << "'");
--            if (largeREindex > 0) {
--                largeRE[largeREindex] = '|';
--                ++largeREindex;
--            }
--            largeRE[largeREindex] = '(';
--            ++largeREindex;
--            configurationLineWord.copy(largeRE+largeREindex, BUFSIZ-largeREindex);
--            largeREindex += configurationLineWord.length();
--            largeRE[largeREindex] = ')';
--            ++largeREindex;
--            largeRE[largeREindex] = '\0';
-+            accumulatedRE.push_back(configurationLineWord);
-             ++numREs;
-+            reSize += configurationLineWord.length();
-         } else {
-             debugs(28, 2, "buffer full, generating new optimised RE..." );
--            if (!compileRE(newlist, largeRE, flags))
-+            accumulatedRE.push_back(configurationLineWord);
-+            if (!compileRE(newlist, accumulatedRE, flags))
-                 return 0;
--            largeRE[largeREindex=0] = '\0';
-+            accumulatedRE.clear();
-+            reSize = 0;
-             continue;    /* do the loop again to add the RE to largeRE */
-         }
-     }
- 
--    if (!compileRE(newlist, largeRE, flags))
-+    if (!compileRE(newlist, accumulatedRE, flags))
-         return 0;
- 
-+    accumulatedRE.clear();
-+    reSize = 0;
-+
-     /* all was successful, so put the new list at the tail */
-     curlist.splice(curlist.end(), newlist);
- 
-diff --git a/src/sbuf/Algorithms.h b/src/sbuf/Algorithms.h
-index 21ee889..338e9c0 100644
---- a/src/sbuf/Algorithms.h
-+++ b/src/sbuf/Algorithms.h
-@@ -81,6 +81,57 @@ SBufContainerJoin(const Container &items, const SBuf& separator)
-     return rv;
- }
- 
-+/** Join container of SBufs and append to supplied target
-+ *
-+ * append to the target SBuf all elements in the [begin,end) range from
-+ * an iterable container, prefixed by prefix, separated by separator and
-+ * followed by suffix. Prefix and suffix are added also in case of empty
-+ * iterable
-+ *
-+ * \return the modified dest
-+ */
-+template 
-+SBuf&
-+JoinContainerIntoSBuf(SBuf &dest, const ContainerIterator &begin,
-+                      const ContainerIterator &end, const SBuf& separator,
-+                      const SBuf& prefix = SBuf(), const SBuf& suffix = SBuf())
-+{
-+    if (begin == end) {
-+        dest.append(prefix).append(suffix);
-+        return dest;
-+    }
-+
-+    // optimization: pre-calculate needed storage
-+    const SBuf::size_type totalContainerSize =
-+        std::accumulate(begin, end, 0, SBufAddLength(separator)) +
-+        dest.length() + prefix.length() + suffix.length();
-+    SBufReservationRequirements req;
-+    req.minSpace = totalContainerSize;
-+    dest.reserve(req);
-+
-+    auto i = begin;
-+    dest.append(prefix);
-+    dest.append(*i);
-+    ++i;
-+    for (; i != end; ++i)
-+        dest.append(separator).append(*i);
-+    dest.append(suffix);
-+    return dest;
-+}
-+
-+
-+/// convenience wrapper of JoinContainerIntoSBuf with no caller-supplied SBuf
-+template 
-+SBuf
-+JoinContainerToSBuf(const ContainerIterator &begin,
-+                    const ContainerIterator &end, const SBuf& separator,
-+                    const SBuf& prefix = SBuf(), const SBuf& suffix = SBuf())
-+{
-+    SBuf rv;
-+    return JoinContainerIntoSBuf(rv, begin, end, separator, prefix, suffix);
-+}
-+
-+
- namespace std {
- /// default hash functor to support std::unordered_map
- template <>
diff --git a/squid-4.9.tar.xz b/squid-4.9.tar.xz
deleted file mode 100644
index d372a76..0000000
Binary files a/squid-4.9.tar.xz and /dev/null differ
diff --git a/squid-4.9.tar.xz.asc b/squid-4.9.tar.xz.asc
deleted file mode 100644
index 824d12d..0000000
--- a/squid-4.9.tar.xz.asc
+++ /dev/null
@@ -1,25 +0,0 @@
-File: squid-4.9.tar.xz
-Date: Wed Nov  6 04:57:57 UTC 2019
-Size: 2444664
-MD5 : 5c2e335dd1e8ced9dda6e0e11894b344
-SHA1: 43c90a1a2eb4d1613f1bfc603ad08e8a835be319
-Key : CD6DBF8EF3B17D3E 
-            B068 84ED B779 C89B 044E  64E3 CD6D BF8E F3B1 7D3E
-      keyring = http://www.squid-cache.org/pgp.asc
-      keyserver = pool.sks-keyservers.net
------BEGIN PGP SIGNATURE-----
-
-iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAl3CUt8ACgkQzW2/jvOx
-fT4Wnw/+Osf9VTnDFj5g/eXgb6vhzDaSLVfPNKLI6mF46a6twTvlMcM1+sX+b2Of
-KXznDkUHvhIHijXGVbscSWx6Rn2tuPGDRRtDucqK98bYUo7mhEpdGtkVE7t8U3iz
-wIKm7Hbr8qar4nJDLoZiZSCswI+UTcYncUuAqZ0O8LGIK0m6aYYDSS4bRq04yiS2
-1JD0UEWW35X35hoVuhGlRRgvLzKn8F4KFeDde0gg+cqvkM0LR2+xkUqz6DcyE34m
-8uK6hlABu32Zj+9oRBvNNcDOr2bfNYsbS4tAy635thFTyGUF7jjrOEXhl2SYrDY5
-gVRzXHq/WBQ5rjTdmwvfn3wcwA1BQK/Oru6OaTFGaSrRlmJJM3JUFQWSsYWm8ARV
-BJEGy8iQ9R41Yom2Ct8SOhwg7f3fBlFnK+BB8En+8s+fEa8z5rVmmjh1Es8qm6Tj
-C/xGTZ23C4lUveKznDhc8MR2M4jjsH77Y7K/PvJUjZ/yYNpwsOwhv7fs51v70S5Q
-4wC+ykpsmwckmOajrkOnupUN9Un2FzfyOctTt6PQkmwlq++09Jwxwg36O+KLDX08
-f48F/qCCJ4bubuhFjM/A+cwVev0nAp0haSV0jpbemAHwzog21O51l70B8qUe18jp
-XKYpbp3zCJ5cNmrAummsEVaj2ZCsH5ZHxTUIwvJDIS5b0OFn/lo=
-=LNc9
------END PGP SIGNATURE-----
diff --git a/squid-5.7.tar.xz b/squid-5.7.tar.xz
new file mode 100644
index 0000000..5e57bb4
Binary files /dev/null and b/squid-5.7.tar.xz differ
diff --git a/squid-5.7.tar.xz.asc b/squid-5.7.tar.xz.asc
new file mode 100644
index 0000000..90630fe
--- /dev/null
+++ b/squid-5.7.tar.xz.asc
@@ -0,0 +1,25 @@
+File: squid-5.7.tar.xz
+Date: Mon 05 Sep 2022 16:01:50 UTC
+Size: 2566560
+MD5 : 7a3764a3c5833631a779d7827901cda7
+SHA1: 141e8007d6b1cfee34654127a9ca025125b37b58
+Key : CD6DBF8EF3B17D3E 
+            B068 84ED B779 C89B 044E  64E3 CD6D BF8E F3B1 7D3E
+      keyring = http://www.squid-cache.org/pgp.asc
+      keyserver = pool.sks-keyservers.net
+-----BEGIN PGP SIGNATURE-----
+
+iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAmMWHXYACgkQzW2/jvOx
+fT42RQ/8CMrgBaIcdZpH9LkNffk5duDYOn2qrYmOdqkHtzN4eYRNr7Z9W03TlLum
+bRbcGtunFIyHl2ASdDX9yM1pzu7gFa5PLUeT3bqaA8F+SbryYb4jju/uIDFUB6Sl
+lqiXR7d4a4enAvrOL0xaQ0v88Z9OSVYe/NjQl28Z4EdChTkErQLitNah7GuoybyK
+ZA7aGdf7eHDNHNnE9lmYkVHc2w4iDJ9UJTNhVLvmc9x68D3nP8QwuA31mSyUimRl
+eO5jqpIbEbwTs21z/oBj7NhpNJEMcisLKUNlYZqbhBCTU4Oa28d/SxVEiAvGH4o7
+PygujqtgJU3TBoCmvFI408LQirw5p/ZfGEpn7JwkRxvDI6ppX7E5+WrZidL+Vf/8
+xMIPFAwqs/wxbsCOs3A614SwzDDbPYjEDP5ufz5n+MEz6gVU07G8WUxo5MZ7Bjvd
+AA+NI1eLA+2gmTB0aINwCNhawQwdoWyiP2i4S2qpso5AYS2n+ndOoPd1N56Jqd9H
+9NvLINLx1IyRkzmsO/3a/pAOOQPzhNz2nmgHxvM/O6trfHsUHpFXICy+mrY+Lk1Y
+MskFlh9UHZ1hyBuwkx6/SqMGmzvj4lzN3cfDzwramA6Z/BU8ner7aKQMD8ToxwME
+Kup27VTLo8yNIGB95ZOG82Q/hv5pGFbxJigQH1/ls3QvHyH0doo=
+=YG5Z
+-----END PGP SIGNATURE-----
diff --git a/squid-add-TrivialDB-support-223.patch b/squid-add-TrivialDB-support-223.patch
deleted file mode 100644
index 470c872..0000000
--- a/squid-add-TrivialDB-support-223.patch
+++ /dev/null
@@ -1,555 +0,0 @@
-From acd207af1bf340df75a64e8bc4d1e93caa13bbb2 Mon Sep 17 00:00:00 2001
-From: Amos Jeffries 
-Date: Tue, 17 Jul 2018 23:36:31 +0000
-Subject: [PATCH] TrivialDB support (#223)
-
-Allow use of Samba TrivialDB instead of outdated BerkleyDB in
-the session helper.
-
-Require TrivialDB support for use of the time_quota helper.
-libdb v1.85 is no longer supported by distributors and
-upgrading to v5 only to deprecate use does not seem to be
-worthwhile.
----
- acinclude/tdb.m4                                  |  49 ++++++++
- configure.ac                                      |   1 +
- src/acl/external/session/Makefile.am              |   3 +-
- src/acl/external/session/ext_session_acl.cc       | 143 ++++++++++++++++++----
- src/acl/external/session/required.m4              |  26 ++--
- src/acl/external/time_quota/Makefile.am           |   2 +-
- src/acl/external/time_quota/ext_time_quota_acl.cc |  93 +++++++-------
- src/acl/external/time_quota/required.m4           |  13 +-
- test-suite/buildtests/layer-01-minimal.opts       |   1 +
- test-suite/buildtests/layer-02-maximus.opts       |   1 +
- 11 files changed, 253 insertions(+), 86 deletions(-)
- create mode 100644 acinclude/tdb.m4
-
-diff --git a/acinclude/tdb.m4 b/acinclude/tdb.m4
-new file mode 100644
-index 0000000..20b2b2a
---- /dev/null
-+++ b/acinclude/tdb.m4
-@@ -0,0 +1,49 @@
-+## Copyright (C) 1996-2018 The Squid Software Foundation and contributors
-+##
-+## Squid software is distributed under GPLv2+ license and includes
-+## contributions from numerous individuals and organizations.
-+## Please see the COPYING and CONTRIBUTORS files for details.
-+##
-+
-+dnl check for --with-tdb option
-+AC_DEFUN([SQUID_CHECK_LIBTDB],[
-+AC_ARG_WITH(tdb,
-+  AS_HELP_STRING([--without-tdb],
-+                 [Do not use Samba TrivialDB. Default: auto-detect]), [
-+case "$with_tdb" in
-+  yes|no|auto)
-+    : # Nothing special to do here
-+    ;;
-+  *)
-+    AS_IF([test ! -d "$withval"],
-+      AC_MSG_ERROR([--with-tdb path ($with_tdb) does not point to a directory])
-+    )
-+    LIBTDB_PATH="-L$withval/lib"
-+    CPPFLAGS="-I$withval/include $CPPFLAGS"
-+  ;;
-+esac
-+])
-+AH_TEMPLATE(USE_TRIVIALDB,[Samba TrivialDB support is available])
-+AS_IF([test "x$with_tdb" != "xno"],[
-+  SQUID_STATE_SAVE(squid_libtdb_state)
-+  LIBS="$LIBS $LIBTDB_PATH"
-+  PKG_CHECK_MODULES([LIBTDB],[tdb],[CPPFLAGS="$CPPFLAGS $LIBTDB_CFLAGS"],[:])
-+  AC_CHECK_HEADERS([sys/stat.h tdb.h],,,[
-+#if HAVE_SYS_STAT_H
-+#include 
-+#endif
-+  ])
-+  SQUID_STATE_ROLLBACK(squid_libtdb_state) #de-pollute LIBS
-+
-+  AS_IF([test "x$with_tdb" = "xyes" -a "x$LIBTDB_LIBS" = "x"],
-+    AC_MSG_ERROR([Required TrivialDB library not found])
-+  )
-+  AS_IF([test "x$LIBTDB_LIBS" != "x"],[
-+    CXXFLAGS="$LIBTDB_CFLAGS $CXXFLAGS"
-+    LIBTDB_LIBS="$LIBTDB_PATH $LIBTDB_LIBS"
-+    AC_DEFINE_UNQUOTED(USE_TRIVIALDB, HAVE_TDB_H, [Samba TrivialDB support is available])
-+  ],[with_tdb=no])
-+])
-+AC_MSG_NOTICE([Samba TrivialDB library support: ${with_tdb:=auto} ${LIBTDB_PATH} ${LIBTDB_LIBS}])
-+AC_SUBST(LIBTDB_LIBS)
-+])
-diff --git a/configure.ac b/configure.ac
-index 1ec245a..c8cd996 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -23,6 +23,7 @@ m4_include([acinclude/os-deps.m4])
- m4_include([acinclude/krb5.m4])
- m4_include([acinclude/pam.m4])
- m4_include([acinclude/pkg.m4])
-+m4_include([acinclude/tdb.m4])
- m4_include([acinclude/lib-checks.m4])
- m4_include([acinclude/ax_cxx_compile_stdcxx_11.m4])
- m4_include([acinclude/ax_cxx_0x_types.m4])
-diff --git a/src/acl/external/session/Makefile.am b/src/acl/external/session/Makefile.am
-index e54f22b..7509848 100644
---- a/src/acl/external/session/Makefile.am
-+++ b/src/acl/external/session/Makefile.am
-@@ -14,6 +14,7 @@ ext_session_acl_SOURCES= \
- 	ext_session_acl.cc
- ext_session_acl_LDADD = \
- 	$(COMPAT_LIB) \
--	-ldb
-+	$(LIBBDB_LIBS) \
-+	$(LIBTDB_LIBS)
- 
- EXTRA_DIST= ext_session_acl.8 required.m4
-diff --git a/src/acl/external/session/ext_session_acl.cc b/src/acl/external/session/ext_session_acl.cc
-index 09a35ca..64ffb60 100644
---- a/src/acl/external/session/ext_session_acl.cc
-+++ b/src/acl/external/session/ext_session_acl.cc
-@@ -43,6 +43,9 @@
- #endif
- #include 
- #include 
-+#if HAVE_TDB_H
-+#include 
-+#endif
- #if HAVE_UNISTD_H
- #include 
- #endif
-@@ -60,8 +63,36 @@ static int fixed_timeout = 0;
- char *db_path = NULL;
- const char *program_name;
- 
-+#if USE_BERKLEYDB
- DB *db = NULL;
- DB_ENV *db_env = NULL;
-+typedef DBT DB_ENTRY;
-+
-+#elif USE_TRIVIALDB
-+TDB_CONTEXT *db = nullptr;
-+typedef TDB_DATA DB_ENTRY;
-+
-+#endif
-+
-+static void
-+shutdown_db()
-+{
-+    if (db) {
-+#if USE_BERKLEYDB
-+        db->close(db, 0);
-+    }
-+    if (db_env) {
-+        db_env->close(db_env, 0);
-+
-+#elif USE_TRIVIALDB
-+        if (tdb_close(db) != 0) {
-+            fprintf(stderr, "%s| WARNING: error closing session db '%s'\n", program_name, db_path);
-+            exit(1);
-+        }
-+#endif
-+    }
-+    xfree(db_path);
-+}
- 
- static void init_db(void)
- {
-@@ -70,6 +101,7 @@ static void init_db(void)
-     if (db_path) {
-         if (!stat(db_path, &st_buf)) {
-             if (S_ISDIR (st_buf.st_mode)) {
-+#if USE_BERKLEYDB
-                 /* If directory then open database environment. This prevents sync problems
-                     between different processes. Otherwise fallback to single file */
-                 db_env_create(&db_env, 0);
-@@ -79,10 +111,16 @@ static void init_db(void)
-                     exit(1);
-                 }
-                 db_create(&db, db_env, 0);
-+#elif USE_TRIVIALDB
-+                std::string newPath(db_path);
-+                newPath.append("session", 7);
-+                db_path = xstrdup(newPath.c_str());
-+#endif
-             }
-         }
-     }
- 
-+#if USE_BERKLEYDB
-     if (db_env) {
-         if (db->open(db, NULL, "session", NULL, DB_BTREE, DB_CREATE, 0666)) {
-             fprintf(stderr, "FATAL: %s: Failed to open db file '%s' in dir '%s'\n",
-@@ -93,60 +131,121 @@ static void init_db(void)
-     } else {
-         db_create(&db, NULL, 0);
-         if (db->open(db, NULL, db_path, NULL, DB_BTREE, DB_CREATE, 0666)) {
--            fprintf(stderr, "FATAL: %s: Failed to open session db '%s'\n", program_name, db_path);
--            exit(1);
-+            db = nullptr;
-         }
-     }
-+#elif USE_TRIVIALDB
-+    db = tdb_open(db_path, 0, TDB_CLEAR_IF_FIRST, O_CREAT|O_DSYNC, 0666);
-+#endif
-+    if (!db) {
-+        fprintf(stderr, "FATAL: %s: Failed to open session db '%s'\n", program_name, db_path);
-+        shutdown_db();
-+        exit(1);
-+    }
- }
- 
--static void shutdown_db(void)
-+int session_is_active = 0;
-+
-+static size_t
-+dataSize(DB_ENTRY *data)
- {
--    db->close(db, 0);
--    if (db_env) {
--        db_env->close(db_env, 0);
--    }
-+#if USE_BERKLEYDB
-+    return data->size;
-+#elif USE_TRIVIALDB
-+    return data->dsize;
-+#endif
- }
- 
--int session_is_active = 0;
-+static bool
-+fetchKey(/*const*/ DB_ENTRY &key, DB_ENTRY *data)
-+{
-+#if USE_BERKLEYDB
-+    return (db->get(db, nullptr, &key, data, 0) == 0);
-+#elif USE_TRIVIALDB
-+    // NP: API says returns NULL on errors, but return is a struct type WTF??
-+    *data = tdb_fetch(db, key);
-+    return (data->dptr != nullptr);
-+#endif
-+}
-+
-+static void
-+deleteEntry(/*const*/ DB_ENTRY &key)
-+{
-+#if USE_BERKLEYDB
-+    db->del(db, nullptr, &key, 0);
-+#elif USE_TRIVIALDB
-+    tdb_delete(db, key);
-+#endif
-+}
-+
-+static void
-+copyValue(void *dst, const DB_ENTRY *src, size_t sz)
-+{
-+#if USE_BERKLEYDB
-+    memcpy(dst, src->data, sz);
-+#elif USE_TRIVIALDB
-+    memcpy(dst, src->dptr, sz);
-+#endif
-+}
- 
- static int session_active(const char *details, size_t len)
- {
-+#if USE_BERKLEYDB
-     DBT key = {0};
-     DBT data = {0};
-     key.data = (void *)details;
-     key.size = len;
--    if (db->get(db, NULL, &key, &data, 0) == 0) {
-+#elif USE_TRIVIALDB
-+    TDB_DATA key;
-+    TDB_DATA data;
-+#endif
-+    if (fetchKey(key, &data)) {
-         time_t timestamp;
--        if (data.size != sizeof(timestamp)) {
-+        if (dataSize(&data) != sizeof(timestamp)) {
-             fprintf(stderr, "ERROR: %s: CORRUPTED DATABASE (%s)\n", program_name, details);
--            db->del(db, NULL, &key, 0);
-+            deleteEntry(key);
-             return 0;
-         }
--        memcpy(&timestamp, data.data, sizeof(timestamp));
-+        copyValue(&timestamp, &data, sizeof(timestamp));
-         if (timestamp + session_ttl >= time(NULL))
-             return 1;
-     }
-     return 0;
- }
- 
--static void session_login(const char *details, size_t len)
-+static void
-+session_login(/*const*/ char *details, size_t len)
- {
--    DBT key = {0};
--    DBT data = {0};
--    key.data = (void *)details;
-+    DB_ENTRY key = {0};
-+    DB_ENTRY data = {0};
-+    time_t now = time(0);
-+#if USE_BERKLEYDB
-+    key.data = static_cast<decltype(key.data)>(details);
-     key.size = len;
--    time_t now = time(NULL);
-     data.data = &now;
-     data.size = sizeof(now);
-     db->put(db, NULL, &key, &data, 0);
-+#elif USE_TRIVIALDB
-+    key.dptr = reinterpret_cast<decltype(key.dptr)>(details);
-+    key.dsize = len;
-+    data.dptr = reinterpret_cast<decltype(data.dptr)>(&now);
-+    data.dsize = sizeof(now);
-+    tdb_store(db, key, data, 0);
-+#endif
- }
- 
--static void session_logout(const char *details, size_t len)
-+static void
-+session_logout(/*const*/ char *details, size_t len)
- {
--    DBT key = {0};
--    key.data = (void *)details;
-+    DB_ENTRY key = {0};
-+#if USE_BERKLEYDB
-+    key.data = static_cast<decltype(key.data)>(details);
-     key.size = len;
--    db->del(db, NULL, &key, 0);
-+#elif USE_TRIVIALDB
-+    key.dptr = reinterpret_cast<decltype(key.dptr)>(details);
-+    key.dsize = len;
-+#endif
-+    deleteEntry(key);
- }
- 
- static void usage(void)
-@@ -173,7 +272,7 @@ int main(int argc, char **argv)
-             session_ttl = strtol(optarg, NULL, 0);
-             break;
-         case 'b':
--            db_path = optarg;
-+            db_path = xstrdup(optarg);
-             break;
-         case 'a':
-             default_action = 0;
-diff --git a/src/acl/external/session/required.m4 b/src/acl/external/session/required.m4
-index 229774b..1fe8a0e 100755
---- a/src/acl/external/session/required.m4
-+++ b/src/acl/external/session/required.m4
-@@ -5,11 +5,23 @@
- ## Please see the COPYING and CONTRIBUTORS files for details.
- ##
- 
--AC_CHECK_HEADERS(db.h,[
--  AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <db.h>]],[[
--    DB_ENV *db_env = nullptr;
--    db_env_create(&db_env, 0);
--  ]])],[
-+SQUID_CHECK_LIBTDB
-+if test "$with_tdb" != "no"; then
-     BUILD_HELPER="session"
--  ],[])
--])
-+fi
-+
-+LIBBDB_LIBS=
-+AH_TEMPLATE(USE_BERKLEYDB,[BerkleyDB support is available])
-+if test "x$with_tdb" = "xno"; then
-+  AC_CHECK_HEADERS(db.h,[
-+    AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <db.h>]],[[
-+      DB_ENV *db_env = nullptr;
-+      db_env_create(&db_env, 0);
-+    ]])],[
-+      AC_DEFINE_UNQUOTED(USE_BERKLEYDB, HAVE_DB_H, [BerkleyDB support is available])
-+      BUILD_HELPER="session"
-+      LIBBDB_LIBS="-ldb"
-+    ],[])
-+  ])
-+fi
-+AC_SUBST(LIBBDB_LIBS)
-diff --git a/src/acl/external/time_quota/Makefile.am b/src/acl/external/time_quota/Makefile.am
-index 4684637..20eba22 100644
---- a/src/acl/external/time_quota/Makefile.am
-+++ b/src/acl/external/time_quota/Makefile.am
-@@ -16,6 +16,6 @@ ext_time_quota_acl_SOURCES= \
- 	ext_time_quota_acl.cc
- ext_time_quota_acl_LDADD = \
- 	$(COMPAT_LIB) \
--	-ldb
-+	$(LIBTDB_LIBS)
- 
- EXTRA_DIST= ext_time_quota_acl.8 required.m4
-diff --git a/src/acl/external/time_quota/ext_time_quota_acl.cc b/src/acl/external/time_quota/ext_time_quota_acl.cc
-index 764fa86..8f1bbef 100644
---- a/src/acl/external/time_quota/ext_time_quota_acl.cc
-+++ b/src/acl/external/time_quota/ext_time_quota_acl.cc
-@@ -41,19 +41,8 @@
- #if HAVE_GETOPT_H
- #include <getopt.h>
- #endif
--
--/* At this point all Bit Types are already defined, so we must
--   protect from multiple type definition on platform where
--   __BIT_TYPES_DEFINED__ is not defined.
-- */
--#ifndef        __BIT_TYPES_DEFINED__
--#define        __BIT_TYPES_DEFINED__
--#endif
--
--#if HAVE_DB_185_H
--#include <db_185.h>
--#elif HAVE_DB_H
--#include <db.h>
-+#if HAVE_TDB_H
-+#include <tdb.h>
- #endif
- 
- #ifndef DEFAULT_QUOTA_DB
-@@ -63,7 +52,7 @@
- const char *db_path = DEFAULT_QUOTA_DB;
- const char *program_name;
- 
--DB *db = NULL;
-+TDB_CONTEXT *db = nullptr;
- 
- #define KEY_LAST_ACTIVITY            "last-activity"
- #define KEY_PERIOD_START             "period-start"
-@@ -147,7 +136,7 @@ static void log_fatal(const char *format, ...)
- static void init_db(void)
- {
-     log_info("opening time quota database \"%s\".\n", db_path);
--    db = dbopen(db_path, O_CREAT | O_RDWR, 0666, DB_BTREE, NULL);
-+    db = tdb_open(db_path, 0, TDB_CLEAR_IF_FIRST, O_CREAT | O_RDWR, 0666);
-     if (!db) {
-         log_fatal("Failed to open time_quota db '%s'\n", db_path);
-         exit(1);
-@@ -156,52 +145,68 @@ static void init_db(void)
- 
- static void shutdown_db(void)
- {
--    db->close(db);
-+    tdb_close(db);
- }
- 
--static void writeTime(const char *user_key, const char *sub_key, time_t t)
-+static char *KeyString(int &len, const char *user_key, const char *sub_key)
- {
--    char keybuffer[TQ_BUFFERSIZE];
--    DBT key, data;
-+    static char keybuffer[TQ_BUFFERSIZE];
-+    *keybuffer = 0;
-+
-+    len = snprintf(keybuffer, sizeof(keybuffer), "%s-%s", user_key, sub_key);
-+    if (len < 0) {
-+        log_error("Cannot add entry: %s-%s", user_key, sub_key);
-+        len = 0;
- 
--    if ( strlen(user_key) + strlen(sub_key) + 1 + 1 > sizeof(keybuffer) ) {
-+    } else if (static_cast<size_t>(len) >= sizeof(keybuffer)) {
-         log_error("key too long (%s,%s)\n", user_key, sub_key);
--    } else {
--        snprintf(keybuffer, sizeof(keybuffer), "%s-%s", user_key, sub_key);
-+        len = 0;
-+    }
-+
-+    return keybuffer;
-+}
-+
-+static void writeTime(const char *user_key, const char *sub_key, time_t t)
-+{
-+    int len = 0;
-+    if (/* const */ char *keybuffer = KeyString(len, user_key, sub_key)) {
-+
-+        TDB_DATA key, data;
-+
-+        key.dptr = reinterpret_cast<decltype(key.dptr)>(keybuffer);
-+        key.dsize = len;
- 
--        key.data = (void *)keybuffer;
--        key.size = strlen(keybuffer);
--        data.data = &t;
--        data.size = sizeof(t);
--        db->put(db, &key, &data, 0);
-+        data.dptr = reinterpret_cast<decltype(data.dptr)>(&t);
-+        data.dsize = sizeof(t);
-+
-+        tdb_store(db, key, data, TDB_REPLACE);
-         log_debug("writeTime(\"%s\", %d)\n", keybuffer, t);
-     }
- }
- 
- static time_t readTime(const char *user_key, const char *sub_key)
- {
--    char keybuffer[TQ_BUFFERSIZE];
--    DBT key, data;
--    time_t t = 0;
-+    int len = 0;
-+    if (/* const */ char *keybuffer = KeyString(len, user_key, sub_key)) {
- 
--    if ( strlen(user_key) + 1 + strlen(sub_key) + 1 > sizeof(keybuffer) ) {
--        log_error("key too long (%s,%s)\n", user_key, sub_key);
--    } else {
--        snprintf(keybuffer, sizeof(keybuffer), "%s-%s", user_key, sub_key);
-+        TDB_DATA key;
-+        key.dptr = reinterpret_cast<decltype(key.dptr)>(keybuffer);
-+        key.dsize = len;
- 
--        key.data = (void *)keybuffer;
--        key.size = strlen(keybuffer);
--        if (db->get(db, &key, &data, 0) == 0) {
--            if (data.size != sizeof(t)) {
--                log_error("CORRUPTED DATABASE (%s)\n", keybuffer);
--            } else {
--                memcpy(&t, data.data, sizeof(t));
--            }
-+        auto data = tdb_fetch(db, key);
-+
-+        time_t t = 0;
-+        if (data.dsize != sizeof(t)) {
-+            log_error("CORRUPTED DATABASE (%s)\n", keybuffer);
-+        } else {
-+            memcpy(&t, data.dptr, sizeof(t));
-         }
-+
-         log_debug("readTime(\"%s\")=%d\n", keybuffer, t);
-+        return t;
-     }
- 
--    return t;
-+    return 0;
- }
- 
- static void parseTime(const char *s, time_t *secs, time_t *start)
-@@ -388,8 +393,6 @@ static void processActivity(const char *user_key)
-         log_debug("ERR %s\n", message);
-         SEND_ERR("Time budget exceeded.");
-     }
--
--    db->sync(db, 0);
- }
- 
- static void usage(void)
-diff --git a/src/acl/external/time_quota/required.m4 b/src/acl/external/time_quota/required.m4
-index a54daae..c9e52bf 100644
---- a/src/acl/external/time_quota/required.m4
-+++ b/src/acl/external/time_quota/required.m4
-@@ -5,12 +5,7 @@
- ## Please see the COPYING and CONTRIBUTORS files for details.
- ##
- 
--AC_CHECK_HEADERS(db_185.h,[BUILD_HELPER="time_quota"],[
--  AC_CHECK_HEADERS(db.h,[
--    AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]],[[
--      DB *db = dbopen("/tmp", O_CREAT | O_RDWR, 0666, DB_BTREE, NULL);
--    ]])],[
--      BUILD_HELPER="time_quota"
--    ],[])
--  ])
--])
-+SQUID_CHECK_LIBTDB
-+if test "$with_tdb" != "no"; then
-+    BUILD_HELPER="time_quota"
-+fi
--- 
-1.8.3.1
-
diff --git a/squid-fix-detection-of-sys-sysctl.h-detection-511.patch b/squid-fix-detection-of-sys-sysctl.h-detection-511.patch
deleted file mode 100644
index 5e37cb1..0000000
--- a/squid-fix-detection-of-sys-sysctl.h-detection-511.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From 195ec8477565885e8f0975865a32bb716ea56272 Mon Sep 17 00:00:00 2001
-From: Amos Jeffries 
-Date: Tue, 19 Nov 2019 01:06:56 +1300
-Subject: [PATCH] Fix detection of sys/sysctl.h detection (#511)
-
-Make sure we test the EUI specific headers using same flags
-chosen for final build operations. This should make the
-test detect the header as unavailable if the user options
-would make the compiler #warning be a fatal error later.
----
- configure.ac | 5 +++++
- 1 file changed, 5 insertions(+)
-
-diff --git a/configure.ac b/configure.ac
-index a1f2823..7cc0dfd 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -1109,6 +1109,10 @@ AC_ARG_ENABLE(eui,
- SQUID_YESNO([$enableval],[--disable-eui expects no arguments])
- ])
- if test "x${enable_eui:=yes}" = "xyes" ; then
-+  SQUID_STATE_SAVE(LIBEUI)
-+  # GLIBC 2.30 deprecates sysctl.h. Test with the same flags that (may) break includes later.
-+  CFLAGS=$SQUID_CFLAGS
-+  CXXFLAGS=$SQUID_CXXFLAGS
-   case "$squid_host_os" in
-     linux|solaris|freebsd|openbsd|netbsd|cygwin)
-       ${TRUE}
-@@ -1148,6 +1152,7 @@ include 
- #include 
- #endif
-   ]])
-+  SQUID_STATE_ROLLBACK(LIBEUI)
- fi
- AC_SUBST(EUILIB)
- AC_MSG_NOTICE([EUI (MAC address) controls enabled: $enable_eui])
--- 
-1.8.3.1
-
diff --git a/squid.logrotate b/squid.logrotate
index 4a0406f..c88da04 100644
--- a/squid.logrotate
+++ b/squid.logrotate
@@ -2,6 +2,7 @@
     weekly
     rotate 5
     compress
+    delaycompress
     notifempty
     missingok
     nocreate
@@ -10,7 +11,5 @@
       # Asks squid to reopen its logs. (logfile_rotate 0 is set in squid.conf)
       # errors redirected to make it silent if squid is not running
       /usr/sbin/squid -k rotate 2>/dev/null
-      # Wait a little to allow Squid to catch up before the logs is compressed
-      sleep 1
     endscript
 }
diff --git a/squid.nm b/squid.nm
index 5e40f76..1f317da 100644
--- a/squid.nm
+++ b/squid.nm
@@ -2,6 +2,6 @@
 
 case "$2" in
         up|down|vpn-up|vpn-down)
-                /bin/systemctl -q reload squid.service || :
+                /usr/bin/systemctl -q reload squid.service || :
                 ;;
 esac
diff --git a/squid.service b/squid.service
index da1c0ea..e36fbfe 100644
--- a/squid.service
+++ b/squid.service
@@ -1,16 +1,18 @@
 [Unit]
 Description=Squid caching proxy
-After=network.target nss-lookup.target
+Documentation=man:squid(8)
+After=network.target network-online.target nss-lookup.target
 
 [Service]
-Type=forking
+Type=notify
 LimitNOFILE=16384
+PIDFile=/run/squid.pid
 EnvironmentFile=/etc/sysconfig/squid
 ExecStartPre=/usr/libexec/squid/cache_swap.sh
-ExecStart=/usr/sbin/squid $SQUID_OPTS -f $SQUID_CONF
-ExecReload=/usr/sbin/squid $SQUID_OPTS -k reconfigure -f $SQUID_CONF
-ExecStop=/usr/sbin/squid -k shutdown -f $SQUID_CONF
-TimeoutSec=0
+ExecStart=/usr/sbin/squid --foreground $SQUID_OPTS -f ${SQUID_CONF}
+ExecReload=/usr/bin/kill -HUP $MAINPID
+KillMode=mixed
+NotifyAccess=all
 
 [Install]
 WantedBy=multi-user.target
diff --git a/squid.spec b/squid.spec
index 6f3f8c4..e1b4ca7 100644
--- a/squid.spec
+++ b/squid.spec
@@ -1,64 +1,37 @@
 %define __perl_requires %{SOURCE8}
 
 Name:     squid
-Version:  4.9
-Release:  17
+Version:  5.7
+Release:  1
 Summary:  The Squid proxy caching server
 Epoch:    7
 License:  GPLv2+ and (LGPLv2+ and MIT and BSD and Public Domain)
 URL:      http://www.squid-cache.org
-Source0:  http://www.squid-cache.org/Versions/v4/squid-4.9.tar.xz
-Source1:  http://www.squid-cache.org/Versions/v4/squid-4.9.tar.xz.asc
+Source0:  http://www.squid-cache.org/Versions/v5/squid-%{version}.tar.xz
+Source1:  http://www.squid-cache.org/Versions/v5/squid-%{version}.tar.xz.asc
 Source2:  squid.logrotate
 Source3:  squid.sysconfig
 Source4:  squid.pam
 Source5:  squid.nm
 Source6:  squid.service
 Source7:  cache_swap.sh
-Source8: perl-requires-squid.sh
+Source8:  perl-requires-squid.sh
 
-Patch0: squid-4.0.11-config.patch
-Patch1: squid-3.1.0.9-location.patch
-Patch2: squid-3.0.STABLE1-perlpath.patch
-Patch3: squid-3.5.9-include-guards.patch
-Patch4: squid-4.0.21-large-acl.patch
-Patch5: CVE-2019-12528.patch
-Patch6: CVE-2020-8517.patch
-Patch7: CVE-2020-8449_CVE-2020-8450.patch
-Patch8: squid-fix-detection-of-sys-sysctl.h-detection-511.patch
-Patch9: CVE-2019-12519.patch
-Patch10:CVE-2020-11945.patch
-Patch11:CVE-2020-14058.patch
-Patch12:CVE-2020-15049.patch
-Patch13:CVE-2020-15810.patch
-Patch14:CVE-2020-15811.patch
-Patch15:CVE-2020-24606.patch
-Patch16:backport-CVE-2020-25097.patch
-Patch17:backport-CVE-2021-28651.patch
-Patch18:backport-0001-CVE-2021-28652.patch
-Patch19:backport-0002-CVE-2021-28652.patch
-Patch20:backport-CVE-2021-28662.patch
-Patch21:backport-CVE-2021-31806-CVE-2021-31808.patch
-Patch22:backport-CVE-2021-33620.patch
-Patch23:fix-build-error-with-gcc-10.patch
-Patch24:squid-add-TrivialDB-support-223.patch
-Patch25:backport-CVE-2021-28116.patch
-Patch26:backport-CVE-2021-46784.patch
-Patch27:backport-CVE-2022-41317.patch
-Patch28:backport-CVE-2022-41318.patch
+Patch0:   squid-4.0.11-config.patch
+Patch1:   squid-3.1.0.9-location.patch
+Patch2:   squid-3.0.STABLE1-perlpath.patch
+Patch3:   squid-3.5.9-include-guards.patch
 
-Buildroot: %{_tmppath}/squid-4.9-1-root-%(%{__id_u} -n)
-Requires: bash >= 2.0
-Requires(pre): shadow-utils
-Requires(post): /sbin/chkconfig
-Requires(preun): /sbin/chkconfig
-Requires(post): systemd
-Requires(preun): systemd
-Requires(postun): systemd
+Requires: bash
+Requires: httpd-filesystem
 BuildRequires: openldap-devel pam-devel openssl-devel krb5-devel libtdb-devel expat-devel
 BuildRequires: libxml2-devel libcap-devel libecap-devel gcc-c++ libtool libtool-ltdl-devel
-BuildRequires: perl-generators pkgconfig(cppunit) autoconf
-BuildRequires: chrpath
+BuildRequires: perl-generators pkgconfig(cppunit)
+BuildRequires: chrpath systemd-devel
+
+%systemd_requires
+
+Conflicts: NetworkManager < 1.20
 
 %description
 Squid is a high-performance proxy caching server. It handles all requests in a single,
@@ -67,18 +40,13 @@ non-blocking, I/O-driven process and keeps meta data and implements negative cac
 %prep
 %autosetup -p1
 
-%build
-autoreconf -fi
-automake
-CXXFLAGS="$RPM_OPT_FLAGS -fPIC"
-CFLAGS="$RPM_OPT_FLAGS -fPIC"
-LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro -Wl,-z,now -Wl,--warn-shared-textrel"
+sed -i 's|@SYSCONFDIR@/squid.conf.documented|%{_pkgdocdir}/squid.conf.documented|' src/squid.8.in
 
+%build
 %configure \
-   --exec_prefix=%{_prefix} --libexecdir=%{_libdir}/squid \
-   --localstatedir=%{_localstatedir} --datadir=%{_datadir}/squid \
+   --libexecdir=%{_libdir}/squid --datadir=%{_datadir}/squid \
    --sysconfdir=%{_sysconfdir}/squid  --with-logdir='%{_localstatedir}/log/squid' \
-   --with-pidfile='%{_localstatedir}/run/squid.pid' \
+   --with-pidfile='/run/squid.pid' \
    --disable-dependency-tracking --enable-eui \
    --enable-follow-x-forwarded-for --enable-auth \
    --enable-auth-basic="DB,fake,getpwnam,LDAP,NCSA,PAM,POP3,RADIUS,SASL,SMB,SMB_LM" \
@@ -96,10 +64,15 @@ LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro -Wl,-z,now -Wl,--warn-shared-textrel"
    --enable-storeio="aufs,diskd,ufs,rock" --enable-diskio --enable-wccpv2 \
    --enable-esi --enable-ecap --with-aio --with-default-user="squid" \
    --with-dl --with-openssl --with-pthreads --disable-arch-native \
-   --with-pic --disable-security-cert-validators \
-   --with-tdb
+   --disable-security-cert-validators \
+   --with-tdb --disable-strict-error-checking \
+   --with-swapdir=%{_localstatedir}/spool/squid
 
-make DEFAULT_SWAP_DIR=%{_localstatedir}/spool/squid %{?_smp_mflags}
+mkdir -p src/icmp/tests
+mkdir -p tools/squidclient/tests
+mkdir -p tools/tests
+
+%make_build
 
 %check
 if ! getent passwd squid >/dev/null 2>&1 && [ `id -u` -eq 0 ];then
@@ -111,8 +84,7 @@ else
 fi
 
 %install
-rm -rf $RPM_BUILD_ROOT
-make DESTDIR=$RPM_BUILD_ROOT install
+%make_install
 echo "
 #
 # This is %{_sysconfdir}/httpd/conf.d/squid.conf
@@ -129,20 +101,18 @@ ScriptAlias /Squid/cgi-bin/cachemgr.cgi %{_libdir}/squid/cachemgr.cgi
 
 mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig \
          $RPM_BUILD_ROOT%{_sysconfdir}/pam.d $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/ \
-         $RPM_BUILD_ROOT%{_sysconfdir}/NetworkManager/dispatcher.d $RPM_BUILD_ROOT%{_unitdir} \
-         $RPM_BUILD_ROOT%{_libexecdir}/squid $RPM_BUILD_ROOT%{_sysconfdir}/rc.d/init.d
+         $RPM_BUILD_ROOT%{_prefix}/lib/NetworkManager/dispatcher.d $RPM_BUILD_ROOT%{_unitdir} \
+         $RPM_BUILD_ROOT%{_libexecdir}/squid
 install -m 644 %{SOURCE2} $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/squid
 install -m 644 %{SOURCE3} $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig/squid
 install -m 644 %{SOURCE4} $RPM_BUILD_ROOT%{_sysconfdir}/pam.d/squid
 install -m 644 %{SOURCE6} $RPM_BUILD_ROOT%{_unitdir}
 install -m 755 %{SOURCE7} $RPM_BUILD_ROOT%{_libexecdir}/squid
 install -m 644 $RPM_BUILD_ROOT/squid.httpd.tmp $RPM_BUILD_ROOT%{_sysconfdir}/httpd/conf.d/squid.conf
-install -m 644 %{SOURCE5} $RPM_BUILD_ROOT%{_sysconfdir}/NetworkManager/dispatcher.d/20-squid
+install -m 755 %{SOURCE5} $RPM_BUILD_ROOT%{_prefix}/lib/NetworkManager/dispatcher.d/20-squid
 mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/log/squid $RPM_BUILD_ROOT%{_localstatedir}/spool/squid \
-         $RPM_BUILD_ROOT%{_localstatedir}/run/squid
+         $RPM_BUILD_ROOT/run/squid
 chmod 644 contrib/url-normalizer.pl contrib/user-agents.pl
-iconv -f ISO88591 -t UTF8 ChangeLog -o ChangeLog.tmp
-mv -f ChangeLog.tmp ChangeLog
 
 mkdir -p ${RPM_BUILD_ROOT}%{_tmpfilesdir}
 cat > ${RPM_BUILD_ROOT}%{_tmpfilesdir}/squid.conf < %{buildroot}/etc/ld.so.conf.d/%{name}-%{_arch}.conf
 %attr(755,root,root) %dir %{_libdir}/squid
 %attr(770,squid,root) %dir %{_localstatedir}/log/squid
 %attr(750,squid,squid) %dir %{_localstatedir}/spool/squid
-%attr(755,squid,squid) %dir %{_localstatedir}/run/squid
+%attr(755,squid,squid) %dir /run/squid
 
 %config(noreplace) %attr(644,root,root) %{_sysconfdir}/httpd/conf.d/squid.conf
 %config(noreplace) %attr(640,root,squid) %{_sysconfdir}/squid/squid.conf
@@ -188,7 +158,7 @@ echo "%{_libdir}" > %{buildroot}/etc/ld.so.conf.d/%{name}-%{_arch}.conf
 
 %dir %{_datadir}/squid
 %attr(-,root,root) %{_datadir}/squid/errors
-%attr(755,root,root) %{_sysconfdir}/NetworkManager/dispatcher.d/20-squid
+%{_prefix}/lib/NetworkManager
 %{_datadir}/squid/icons
 %{_sbindir}/squid
 %{_bindir}/squidclient
@@ -221,6 +191,37 @@ done
 
 exit 0
 
+%pretrans -p <lua>
+-- temporarilly commented until https://bugzilla.redhat.com/show_bug.cgi?id=1936422 is resolved
+-- previously /usr/share/squid/errors/es-mx was symlink, now it is directory since squid v5
+-- see https://docs.fedoraproject.org/en-US/packaging-guidelines/Directory_Replacement/
+-- Define the path to the symlink being replaced below.
+--
+-- path = "/usr/share/squid/errors/es-mx"
+-- st = posix.stat(path)
+-- if st and st.type == "link" then
+--   os.remove(path)
+-- end
+
+-- Due to a bug #447156
+paths = {"/usr/share/squid/errors/zh-cn", "/usr/share/squid/errors/zh-tw"}
+for key,path in ipairs(paths)
+do
+  st = posix.stat(path)
+  if st and st.type == "directory" then
+    status = os.rename(path, path .. ".rpmmoved")
+    if not status then
+      suffix = 0
+      while not status do
+        suffix = suffix + 1
+        status = os.rename(path .. ".rpmmoved", path .. ".rpmmoved." .. suffix)
+      end
+      os.rename(path, path .. ".rpmmoved")
+    end
+  end
+end
+
+
 %post
 %systemd_post squid.service
 /sbin/ldconfig
@@ -240,6 +241,12 @@ fi
     chgrp squid /var/cache/samba/winbindd_privileged >/dev/null 2>&1 || :
 
 %changelog
+* Mon Nov 14 2022 xinghe  - 7:5.7-1
+- Type:requirements
+- ID:NA
+- SUG:NA
+- DESC:upgrade to 5.7
+
 * Fri Nov 11 2022 xinghe  - 7:4.9-17
 - Type:bugfix
 - ID:NA
diff --git a/squid.sysconfig b/squid.sysconfig
index 3864bd8..f01b6e3 100644
--- a/squid.sysconfig
+++ b/squid.sysconfig
@@ -1,9 +1,5 @@
 # default squid options
 SQUID_OPTS=""
 
-# Time to wait for Squid to shut down when asked. Should not be necessary
-# most of the time.
-SQUID_SHUTDOWN_TIMEOUT=100
-
 # default squid conf file
 SQUID_CONF="/etc/squid/squid.conf"