Package upgrade

zhuchengliang 2020-01-14 22:16:19 +08:00
parent 02fd90ff52
commit acfc96739e
17 changed files with 37 additions and 1993 deletions

@@ -1,959 +0,0 @@
commit fbbdf75efd7a5cc244b4886a9d42ea458c5a3a73
Author: Amos Jeffries <yadij@users.noreply.github.com>
Date: 2019-09-10 09:32:43 +0000
Update URI parser to use SBuf parsing APIs (#275)
Initial replacement of the URI/URL parse method internals with an
SBuf- and Tokenizer-based parse.
For now this parsing only handles the scheme section of the
URL. With this we add the missing check that the first character
of an unknown scheme name is a letter, and prohibit URLs
without any scheme (previously accepted).
Also polishes the documentation and the URN and asterisk-form
URI parsing.
Also adds validation of the URN NID portion characters to
ensure valid authority host names are generated for
THTTP lookup URLs.
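
For reference, the scheme rule being enforced is RFC 3986 section 3.1:
scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ). A minimal standalone
sketch of that check (an illustrative helper only, not the CharacterSet and
Parser::Tokenizer code added by the patch below):

    #include <cctype>
    #include <string>

    // RFC 3986 3.1: first character must be a letter; the rest may be
    // letters, digits, '+', '-' or '.'.
    static bool isValidScheme(const std::string &scheme)
    {
        if (scheme.empty() || !std::isalpha(static_cast<unsigned char>(scheme[0])))
            return false;                   // no scheme, or non-alpha first char
        for (const char c : scheme) {
            const auto uc = static_cast<unsigned char>(c);
            if (!std::isalnum(uc) && c != '+' && c != '-' && c != '.')
                return false;               // any other character is invalid
        }
        return true;
    }
    // isValidScheme("https") == true, isValidScheme("3ttp") == false,
    // isValidScheme("") == false (a URL without any scheme is rejected).

The patch implements the same rule below, with an extra USE_HTTP_VIOLATIONS
exception for the underscore in "cache_object://".
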
diff --git a/src/Downloader.cc b/src/Downloader.cc
index 7f7a8d6..fb102a8 100644
--- a/src/Downloader.cc
+++ b/src/Downloader.cc
@@ -129,7 +129,7 @@ Downloader::buildRequest()
const HttpRequestMethod method = Http::METHOD_GET;
const MasterXaction::Pointer mx = new MasterXaction(initiator_);
- HttpRequest *const request = HttpRequest::FromUrl(url_.c_str(), mx, method);
+ auto * const request = HttpRequest::FromUrl(url_, mx, method);
if (!request) {
debugs(33, 5, "Invalid URI: " << url_);
return false; //earlyError(...)
diff --git a/src/HttpRequest.cc b/src/HttpRequest.cc
index 3132a76..dc48d15 100644
--- a/src/HttpRequest.cc
+++ b/src/HttpRequest.cc
@@ -327,15 +327,7 @@ HttpRequest::parseFirstLine(const char *start, const char *end)
if (end < start) // missing URI
return false;
- char save = *end;
-
- * (char *) end = '\0'; // temp terminate URI, XXX dangerous?
-
- const bool ret = url.parse(method, start);
-
- * (char *) end = save;
-
- return ret;
+ return url.parse(method, SBuf(start, size_t(end-start)));
}
/* swaps out request using httpRequestPack */
@@ -519,7 +511,7 @@ HttpRequest::expectingBody(const HttpRequestMethod &, int64_t &theSize) const
* If the request cannot be created cleanly, NULL is returned
*/
HttpRequest *
-HttpRequest::FromUrl(const char * url, const MasterXaction::Pointer &mx, const HttpRequestMethod& method)
+HttpRequest::FromUrl(const SBuf &url, const MasterXaction::Pointer &mx, const HttpRequestMethod& method)
{
std::unique_ptr<HttpRequest> req(new HttpRequest(mx));
if (req->url.parse(method, url)) {
@@ -529,6 +521,12 @@ HttpRequest::FromUrl(const char * url, const MasterXaction::Pointer &mx, const H
return nullptr;
}
+HttpRequest *
+HttpRequest::FromUrlXXX(const char * url, const MasterXaction::Pointer &mx, const HttpRequestMethod& method)
+{
+ return FromUrl(SBuf(url), mx, method);
+}
+
/**
* Are responses to this request possible cacheable ?
* If false then no matter what the response must not be cached.
diff --git a/src/HttpRequest.h b/src/HttpRequest.h
index fdd13ce..62740c3 100644
--- a/src/HttpRequest.h
+++ b/src/HttpRequest.h
@@ -205,7 +205,10 @@ public:
static void httpRequestPack(void *obj, Packable *p);
- static HttpRequest * FromUrl(const char * url, const MasterXaction::Pointer &, const HttpRequestMethod &method = Http::METHOD_GET);
+ static HttpRequest * FromUrl(const SBuf &url, const MasterXaction::Pointer &, const HttpRequestMethod &method = Http::METHOD_GET);
+
+ /// \deprecated use SBuf variant instead
+ static HttpRequest * FromUrlXXX(const char * url, const MasterXaction::Pointer &, const HttpRequestMethod &method = Http::METHOD_GET);
ConnStateData *pinnedConnection();
diff --git a/src/Makefile.am b/src/Makefile.am
index 55b68fc..8605313 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -1136,6 +1136,7 @@ tests_testACLMaxUserIP_LDADD= \
acl/libstate.la \
acl/libapi.la \
anyp/libanyp.la \
+ parser/libparser.la \
base/libbase.la \
ip/libip.la \
ipc/libipc.la \
diff --git a/src/acl/Asn.cc b/src/acl/Asn.cc
index 63d4ee0..f9a822a 100644
--- a/src/acl/Asn.cc
+++ b/src/acl/Asn.cc
@@ -243,7 +243,7 @@ asnCacheStart(int as)
snprintf(asres, 4096, "whois://%s/!gAS%d", Config.as_whois_server, as);
asState->as_number = as;
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initAsn);
- asState->request = HttpRequest::FromUrl(asres, mx);
+ asState->request = HttpRequest::FromUrlXXX(asres, mx);
assert(asState->request != NULL);
if ((e = storeGetPublic(asres, Http::METHOD_GET)) == NULL) {
diff --git a/src/adaptation/ecap/MessageRep.cc b/src/adaptation/ecap/MessageRep.cc
index 039f9e1..202194d 100644
--- a/src/adaptation/ecap/MessageRep.cc
+++ b/src/adaptation/ecap/MessageRep.cc
@@ -200,7 +200,7 @@ Adaptation::Ecap::RequestLineRep::uri(const Area &aUri)
{
// TODO: if method is not set, AnyP::Uri::parse will assume it is not connect;
// Can we change AnyP::Uri::parse API to remove the method parameter?
- const auto ok = theMessage.url.parse(theMessage.method, aUri.toString().c_str());
+ const auto ok = theMessage.url.parse(theMessage.method, SBuf(aUri.toString()));
Must(ok);
}
diff --git a/src/anyp/ProtocolType.h b/src/anyp/ProtocolType.h
index 6ac8706..5aa7358 100644
--- a/src/anyp/ProtocolType.h
+++ b/src/anyp/ProtocolType.h
@@ -14,6 +14,7 @@
namespace AnyP
{
+// TODO order by current protocol popularity (eg HTTPS before FTP)
/**
* List of all protocols known and supported.
* This is a combined list. It is used as type-codes where needed and
diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
index 065aea5..e42f7c6 100644
--- a/src/anyp/Uri.cc
+++ b/src/anyp/Uri.cc
@@ -12,6 +12,7 @@
#include "anyp/Uri.h"
#include "globals.h"
#include "HttpRequest.h"
+#include "parser/Tokenizer.h"
#include "rfc1738.h"
#include "SquidConfig.h"
#include "SquidString.h"
@@ -126,55 +127,40 @@ urlInitialize(void)
}
/**
- * Parse the scheme name from string b, into protocol type.
- * The string must be 0-terminated.
+ * Extract the URI scheme and ':' delimiter from the given input buffer.
+ *
+ * Schemes up to 16 characters are accepted.
+ *
+ * Governed by RFC 3986 section 3.1
*/
-AnyP::ProtocolType
-urlParseProtocol(const char *b)
+static AnyP::UriScheme
+uriParseScheme(Parser::Tokenizer &tok)
{
- // make e point to the ':' character
- const char *e = b + strcspn(b, ":");
- int len = e - b;
-
- /* test common stuff first */
-
- if (strncasecmp(b, "http", len) == 0)
- return AnyP::PROTO_HTTP;
-
- if (strncasecmp(b, "ftp", len) == 0)
- return AnyP::PROTO_FTP;
-
- if (strncasecmp(b, "https", len) == 0)
- return AnyP::PROTO_HTTPS;
-
- if (strncasecmp(b, "file", len) == 0)
- return AnyP::PROTO_FTP;
-
- if (strncasecmp(b, "coap", len) == 0)
- return AnyP::PROTO_COAP;
-
- if (strncasecmp(b, "coaps", len) == 0)
- return AnyP::PROTO_COAPS;
-
- if (strncasecmp(b, "gopher", len) == 0)
- return AnyP::PROTO_GOPHER;
-
- if (strncasecmp(b, "wais", len) == 0)
- return AnyP::PROTO_WAIS;
-
- if (strncasecmp(b, "cache_object", len) == 0)
- return AnyP::PROTO_CACHE_OBJECT;
-
- if (strncasecmp(b, "urn", len) == 0)
- return AnyP::PROTO_URN;
-
- if (strncasecmp(b, "whois", len) == 0)
- return AnyP::PROTO_WHOIS;
-
- if (len > 0)
- return AnyP::PROTO_UNKNOWN;
+ /*
+ * RFC 3986 section 3.1 paragraph 2:
+ *
+ * Scheme names consist of a sequence of characters beginning with a
+ * letter and followed by any combination of letters, digits, plus
+ * ("+"), period ("."), or hyphen ("-").
+ *
+ * The underscore ("_") required to match "cache_object://" squid
+ * special URI scheme.
+ */
+ static const auto schemeChars =
+#if USE_HTTP_VIOLATIONS
+ CharacterSet("special", "_") +
+#endif
+ CharacterSet("scheme", "+.-") + CharacterSet::ALPHA + CharacterSet::DIGIT;
+
+ SBuf str;
+ if (tok.prefix(str, schemeChars, 16) && tok.skip(':') && CharacterSet::ALPHA[str.at(0)]) {
+ const auto protocol = AnyP::UriScheme::FindProtocolType(str);
+ if (protocol == AnyP::PROTO_UNKNOWN)
+ return AnyP::UriScheme(protocol, str.c_str());
+ return AnyP::UriScheme(protocol, nullptr);
+ }
- return AnyP::PROTO_NONE;
+ throw TextException("invalid URI scheme", Here());
}
/**
@@ -204,44 +190,49 @@ urlAppendDomain(char *host)
/*
* Parse a URI/URL.
*
- * Stores parsed values in the `request` argument.
- *
- * This abuses HttpRequest as a way of representing the parsed url
- * and its components.
- * method is used to switch parsers and to init the HttpRequest.
- * If method is Http::METHOD_CONNECT, then rather than a URL a hostname:port is
- * looked for.
- * The url is non const so that if its too long we can NULL-terminate it in place.
- */
-
-/*
- * This routine parses a URL. Its assumed that the URL is complete -
+ * It is assumed that the URL is complete -
* ie, the end of the string is the end of the URL. Don't pass a partial
* URL here as this routine doesn't have any way of knowing whether
- * its partial or not (ie, it handles the case of no trailing slash as
+ * it is partial or not (ie, it handles the case of no trailing slash as
* being "end of host with implied path of /".
+ *
+ * method is used to switch parsers. If method is Http::METHOD_CONNECT,
+ * then rather than a URL a hostname:port is looked for.
*/
bool
-AnyP::Uri::parse(const HttpRequestMethod& method, const char *url)
+AnyP::Uri::parse(const HttpRequestMethod& method, const SBuf &rawUrl)
{
- LOCAL_ARRAY(char, proto, MAX_URL);
+ try {
+
LOCAL_ARRAY(char, login, MAX_URL);
LOCAL_ARRAY(char, foundHost, MAX_URL);
LOCAL_ARRAY(char, urlpath, MAX_URL);
char *t = NULL;
char *q = NULL;
int foundPort;
- AnyP::ProtocolType protocol = AnyP::PROTO_NONE;
int l;
int i;
const char *src;
char *dst;
- proto[0] = foundHost[0] = urlpath[0] = login[0] = '\0';
+ foundHost[0] = urlpath[0] = login[0] = '\0';
- if ((l = strlen(url)) + Config.appendDomainLen > (MAX_URL - 1)) {
+ if ((l = rawUrl.length()) + Config.appendDomainLen > (MAX_URL - 1)) {
debugs(23, DBG_IMPORTANT, MYNAME << "URL too large (" << l << " bytes)");
return false;
}
+
+ if ((method == Http::METHOD_OPTIONS || method == Http::METHOD_TRACE) &&
+ Asterisk().cmp(rawUrl) == 0) {
+ // XXX: these methods might also occur in HTTPS traffic. Handle this better.
+ setScheme(AnyP::PROTO_HTTP, nullptr);
+ port(getScheme().defaultPort());
+ path(Asterisk());
+ return true;
+ }
+
+ Parser::Tokenizer tok(rawUrl);
+ AnyP::UriScheme scheme;
+
if (method == Http::METHOD_CONNECT) {
/*
* RFC 7230 section 5.3.3: authority-form = authority
@@ -253,37 +244,37 @@ AnyP::Uri::parse(const HttpRequestMethod& method, const char *url)
*/
foundPort = 443;
+ // XXX: use tokenizer
+ auto B = tok.buf();
+ const char *url = B.c_str();
+
if (sscanf(url, "[%[^]]]:%d", foundHost, &foundPort) < 1)
if (sscanf(url, "%[^:]:%d", foundHost, &foundPort) < 1)
return false;
- } else if ((method == Http::METHOD_OPTIONS || method == Http::METHOD_TRACE) &&
- AnyP::Uri::Asterisk().cmp(url) == 0) {
- parseFinish(AnyP::PROTO_HTTP, nullptr, url, foundHost, SBuf(), 80 /* HTTP default port */);
- return true;
- } else if (strncmp(url, "urn:", 4) == 0) {
- debugs(23, 3, "Split URI '" << url << "' into proto='urn', path='" << (url+4) << "'");
- debugs(50, 5, "urn=" << (url+4));
- setScheme(AnyP::PROTO_URN, nullptr);
- path(url + 4);
- return true;
} else {
- /* Parse the URL: */
- src = url;
- i = 0;
- /* Find first : - everything before is protocol */
- for (i = 0, dst = proto; i < l && *src != ':'; ++i, ++src, ++dst) {
- *dst = *src;
+
+ scheme = uriParseScheme(tok);
+
+ if (scheme == AnyP::PROTO_NONE)
+ return false; // invalid scheme
+
+ if (scheme == AnyP::PROTO_URN) {
+ parseUrn(tok); // throws on any error
+ return true;
}
- if (i >= l)
- return false;
- *dst = '\0';
- /* Then its :// */
- if ((i+3) > l || *src != ':' || *(src + 1) != '/' || *(src + 2) != '/')
+ // URLs then have "//"
+ static const SBuf doubleSlash("//");
+ if (!tok.skip(doubleSlash))
return false;
- i += 3;
- src += 3;
+
+ auto B = tok.remaining();
+ const char *url = B.c_str();
+
+ /* Parse the URL: */
+ src = url;
+ i = 0;
/* Then everything until first /; thats host (and port; which we'll look for here later) */
// bug 1881: If we don't get a "/" then we imply it was there
@@ -324,8 +315,7 @@ AnyP::Uri::parse(const HttpRequestMethod& method, const char *url)
}
*dst = '\0';
- protocol = urlParseProtocol(proto);
- foundPort = AnyP::UriScheme(protocol).defaultPort();
+ foundPort = scheme.defaultPort(); // may be reset later
/* Is there any login information? (we should eventually parse it above) */
t = strrchr(foundHost, '@');
@@ -373,7 +363,7 @@ AnyP::Uri::parse(const HttpRequestMethod& method, const char *url)
}
// Bug 3183 sanity check: If scheme is present, host must be too.
- if (protocol != AnyP::PROTO_NONE && foundHost[0] == '\0') {
+ if (scheme != AnyP::PROTO_NONE && foundHost[0] == '\0') {
debugs(23, DBG_IMPORTANT, "SECURITY ALERT: Missing hostname in URL '" << url << "'. see access.log for details.");
return false;
}
@@ -402,7 +392,7 @@ AnyP::Uri::parse(const HttpRequestMethod& method, const char *url)
}
}
- debugs(23, 3, "Split URL '" << url << "' into proto='" << proto << "', host='" << foundHost << "', port='" << foundPort << "', path='" << urlpath << "'");
+ debugs(23, 3, "Split URL '" << rawUrl << "' into proto='" << scheme.image() << "', host='" << foundHost << "', port='" << foundPort << "', path='" << urlpath << "'");
if (Config.onoff.check_hostnames &&
strspn(foundHost, Config.onoff.allow_underscore ? valid_hostname_chars_u : valid_hostname_chars) != strlen(foundHost)) {
@@ -438,7 +428,7 @@ AnyP::Uri::parse(const HttpRequestMethod& method, const char *url)
#endif
if (stringHasWhitespace(urlpath)) {
- debugs(23, 2, "URI has whitespace: {" << url << "}");
+ debugs(23, 2, "URI has whitespace: {" << rawUrl << "}");
switch (Config.uri_whitespace) {
@@ -471,24 +461,59 @@ AnyP::Uri::parse(const HttpRequestMethod& method, const char *url)
}
}
- parseFinish(protocol, proto, urlpath, foundHost, SBuf(login), foundPort);
+ setScheme(scheme);
+ path(urlpath);
+ host(foundHost);
+ userInfo(SBuf(login));
+ port(foundPort);
return true;
+
+ } catch (...) {
+ debugs(23, 2, "error: " << CurrentException << " " << Raw("rawUrl", rawUrl.rawContent(), rawUrl.length()));
+ return false;
+ }
}
-/// Update the URL object with parsed URI data.
+/**
+ * Governed by RFC 8141 section 2:
+ *
+ * assigned-name = "urn" ":" NID ":" NSS
+ * NID = (alphanum) 0*30(ldh) (alphanum)
+ * ldh = alphanum / "-"
+ * NSS = pchar *(pchar / "/")
+ *
+ * RFC 3986 Appendix D.2 defines (as deprecated):
+ *
+ * alphanum = ALPHA / DIGIT
+ *
+ * Notice that NID is exactly 2-32 characters in length.
+ */
void
-AnyP::Uri::parseFinish(const AnyP::ProtocolType protocol,
- const char *const protoStr, // for unknown protocols
- const char *const aUrlPath,
- const char *const aHost,
- const SBuf &aLogin,
- const int aPort)
+AnyP::Uri::parseUrn(Parser::Tokenizer &tok)
{
- setScheme(protocol, protoStr);
- path(aUrlPath);
- host(aHost);
- userInfo(aLogin);
- port(aPort);
+ static const auto nidChars = CharacterSet("NID","-") + CharacterSet::ALPHA + CharacterSet::DIGIT;
+ static const auto alphanum = (CharacterSet::ALPHA + CharacterSet::DIGIT).rename("alphanum");
+ SBuf nid;
+ if (!tok.prefix(nid, nidChars, 32))
+ throw TextException("NID not found", Here());
+
+ if (!tok.skip(':'))
+ throw TextException("NID too long or missing ':' delimiter", Here());
+
+ if (nid.length() < 2)
+ throw TextException("NID too short", Here());
+
+ if (!alphanum[*nid.begin()])
+ throw TextException("NID prefix is not alphanumeric", Here());
+
+ if (!alphanum[*nid.rbegin()])
+ throw TextException("NID suffix is not alphanumeric", Here());
+
+ setScheme(AnyP::PROTO_URN, nullptr);
+ host(nid.c_str());
+ // TODO validate path characters
+ path(tok.remaining());
+ debugs(23, 3, "Split URI into proto=urn, nid=" << nid << ", " << Raw("path",path().rawContent(),path().length()));
}
void
@@ -536,6 +561,9 @@ AnyP::Uri::absolute() const
absolute_.append("@", 1);
}
absolute_.append(authority());
+ } else {
+ absolute_.append(host());
+ absolute_.append(":", 1);
}
absolute_.append(path());
}
diff --git a/src/anyp/Uri.h b/src/anyp/Uri.h
index 5a167cb..71287d0 100644
--- a/src/anyp/Uri.h
+++ b/src/anyp/Uri.h
@@ -11,6 +11,7 @@
#include "anyp/UriScheme.h"
#include "ip/Address.h"
+#include "parser/Tokenizer.h"
#include "rfc2181.h"
#include "sbuf/SBuf.h"
@@ -59,7 +60,7 @@ public:
}
void touch(); ///< clear the cached URI display forms
- bool parse(const HttpRequestMethod &, const char *url);
+ bool parse(const HttpRequestMethod &, const SBuf &url);
/// \return a new URI that honors uri_whitespace
static char *cleanup(const char *uri);
@@ -71,6 +72,10 @@ public:
scheme_ = AnyP::UriScheme(p, str);
touch();
}
+ void setScheme(const AnyP::UriScheme &s) {
+ scheme_ = s;
+ touch();
+ }
void userInfo(const SBuf &s) {userInfo_=s; touch();}
const SBuf &userInfo() const {return userInfo_;}
@@ -120,7 +125,7 @@ public:
SBuf &absolute() const;
private:
- void parseFinish(const AnyP::ProtocolType, const char *const, const char *const, const char *const, const SBuf &, const int);
+ void parseUrn(Parser::Tokenizer&);
/**
\par
diff --git a/src/anyp/UriScheme.cc b/src/anyp/UriScheme.cc
index b0b293d..0f4d531 100644
--- a/src/anyp/UriScheme.cc
+++ b/src/anyp/UriScheme.cc
@@ -48,6 +48,25 @@ AnyP::UriScheme::Init()
}
}
+const AnyP::ProtocolType
+AnyP::UriScheme::FindProtocolType(const SBuf &scheme)
+{
+ if (scheme.isEmpty())
+ return AnyP::PROTO_NONE;
+
+ Init();
+
+ auto img = scheme;
+ img.toLower();
+ // TODO: use base/EnumIterator.h if possible
+ for (int i = AnyP::PROTO_NONE + 1; i < AnyP::PROTO_UNKNOWN; ++i) {
+ if (LowercaseSchemeNames_.at(i) == img)
+ return AnyP::ProtocolType(i);
+ }
+
+ return AnyP::PROTO_UNKNOWN;
+}
+
unsigned short
AnyP::UriScheme::defaultPort() const
{
diff --git a/src/anyp/UriScheme.h b/src/anyp/UriScheme.h
index 7deb442..6ddedd2 100644
--- a/src/anyp/UriScheme.h
+++ b/src/anyp/UriScheme.h
@@ -54,6 +54,9 @@ public:
/// initializes down-cased protocol scheme names array
static void Init();
+ /// \returns ProtocolType for the given scheme name or PROTO_UNKNOWN
+ static const AnyP::ProtocolType FindProtocolType(const SBuf &);
+
private:
/// optimization: stores down-cased protocol scheme names, copied from
/// AnyP::ProtocolType_str
diff --git a/src/client_side_request.cc b/src/client_side_request.cc
index 8d1b000..a37f8d4 100644
--- a/src/client_side_request.cc
+++ b/src/client_side_request.cc
@@ -346,7 +346,8 @@ clientBeginRequest(const HttpRequestMethod& method, char const *url, CSCB * stre
http->uri = (char *)xcalloc(url_sz, 1);
strcpy(http->uri, url); // XXX: polluting http->uri before parser validation
- if ((request = HttpRequest::FromUrl(http->uri, mx, method)) == NULL) {
+ request = HttpRequest::FromUrlXXX(http->uri, mx, method);
+ if (!request) {
debugs(85, 5, "Invalid URL: " << http->uri);
return -1;
}
@@ -1262,7 +1263,7 @@ ClientRequestContext::clientRedirectDone(const Helper::Reply &reply)
// prevent broken helpers causing too much damage. If old URL == new URL skip the re-write.
if (urlNote != NULL && strcmp(urlNote, http->uri)) {
AnyP::Uri tmpUrl;
- if (tmpUrl.parse(old_request->method, urlNote)) {
+ if (tmpUrl.parse(old_request->method, SBuf(urlNote))) {
HttpRequest *new_request = old_request->clone();
new_request->url = tmpUrl;
debugs(61, 2, "URL-rewriter diverts URL from " << old_request->effectiveRequestUri() << " to " << new_request->effectiveRequestUri());
diff --git a/src/htcp.cc b/src/htcp.cc
index 5ff6026..5649c9f 100644
--- a/src/htcp.cc
+++ b/src/htcp.cc
@@ -674,7 +674,7 @@ htcpUnpackSpecifier(char *buf, int sz)
method.HttpRequestMethodXXX(s->method);
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initHtcp);
- s->request = HttpRequest::FromUrl(s->uri, mx, method == Http::METHOD_NONE ? HttpRequestMethod(Http::METHOD_GET) : method);
+ s->request = HttpRequest::FromUrlXXX(s->uri, mx, method == Http::METHOD_NONE ? HttpRequestMethod(Http::METHOD_GET) : method);
if (!s->request) {
debugs(31, 3, "failed to create request. Invalid URI?");
return nil;
diff --git a/src/icmp/net_db.cc b/src/icmp/net_db.cc
index a97656d..bc7443a 100644
--- a/src/icmp/net_db.cc
+++ b/src/icmp/net_db.cc
@@ -1285,7 +1285,7 @@ netdbExchangeStart(void *data)
char *uri = internalRemoteUri(p->secure.encryptTransport, p->host, p->http_port, "/squid-internal-dynamic/", netDB);
debugs(38, 3, "Requesting '" << uri << "'");
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initIcmp);
- HttpRequest *req = HttpRequest::FromUrl(uri, mx);
+ auto req = HttpRequest::FromUrlXXX(uri, mx);
if (!req) {
debugs(38, DBG_IMPORTANT, MYNAME << ": Bad URI " << uri);
diff --git a/src/icp_v2.cc b/src/icp_v2.cc
index 9bb36c6..7434929 100644
--- a/src/icp_v2.cc
+++ b/src/icp_v2.cc
@@ -440,9 +440,9 @@ icpGetRequest(char *url, int reqnum, int fd, Ip::Address &from)
return NULL;
}
- HttpRequest *result;
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initIcp);
- if ((result = HttpRequest::FromUrl(url, mx)) == NULL)
+ auto *result = HttpRequest::FromUrlXXX(url, mx);
+ if (!result)
icpCreateAndSend(ICP_ERR, 0, url, reqnum, 0, fd, from);
return result;
diff --git a/src/mgr/Inquirer.cc b/src/mgr/Inquirer.cc
index 71706a4..86bacc5 100644
--- a/src/mgr/Inquirer.cc
+++ b/src/mgr/Inquirer.cc
@@ -76,7 +76,7 @@ Mgr::Inquirer::start()
if (strands.empty()) {
const char *url = aggrAction->command().params.httpUri.termedBuf();
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initIpc);
- HttpRequest *req = HttpRequest::FromUrl(url, mx);
+ auto *req = HttpRequest::FromUrlXXX(url, mx);
ErrorState err(ERR_INVALID_URL, Http::scNotFound, req);
std::unique_ptr<HttpReply> reply(err.BuildHttpReply());
replyBuf.reset(reply->pack());
diff --git a/src/mime.cc b/src/mime.cc
index 34a0253..ab2ad45 100644
--- a/src/mime.cc
+++ b/src/mime.cc
@@ -402,7 +402,7 @@ MimeIcon::created(StoreEntry *newEntry)
/* fill `e` with a canned 2xx response object */
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initIcon);
- HttpRequest *r = HttpRequest::FromUrl(url_, mx);
+ auto r = HttpRequest::FromUrlXXX(url_, mx);
if (!r)
fatalf("mimeLoadIcon: cannot parse internal URL: %s", url_);
diff --git a/src/neighbors.cc b/src/neighbors.cc
index 67b2c5c..55b73f2 100644
--- a/src/neighbors.cc
+++ b/src/neighbors.cc
@@ -1373,7 +1373,7 @@ peerCountMcastPeersStart(void *data)
p->in_addr.toUrl(url+7, MAX_URL -8 );
strcat(url, "/");
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initPeerMcast);
- HttpRequest *req = HttpRequest::FromUrl(url, mx);
+ auto *req = HttpRequest::FromUrlXXX(url, mx);
assert(req != nullptr);
StoreEntry *fake = storeCreateEntry(url, url, RequestFlags(), Http::METHOD_GET);
psstate = new ps_state;
diff --git a/src/peer_digest.cc b/src/peer_digest.cc
index 548a151..fb8fb3d 100644
--- a/src/peer_digest.cc
+++ b/src/peer_digest.cc
@@ -327,7 +327,7 @@ peerDigestRequest(PeerDigest * pd)
debugs(72, 2, url);
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initCacheDigest);
- req = HttpRequest::FromUrl(url, mx);
+ req = HttpRequest::FromUrlXXX(url, mx);
assert(req);
diff --git a/src/servers/FtpServer.cc b/src/servers/FtpServer.cc
index d63dc84..5171893 100644
--- a/src/servers/FtpServer.cc
+++ b/src/servers/FtpServer.cc
@@ -726,7 +726,7 @@ Ftp::Server::parseOneRequest()
calcUri(path);
MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
mx->tcpClient = clientConnection;
- HttpRequest *const request = HttpRequest::FromUrl(uri.c_str(), mx, method);
+ auto * const request = HttpRequest::FromUrl(uri, mx, method);
if (!request) {
debugs(33, 5, "Invalid FTP URL: " << uri);
uri.clear();
diff --git a/src/servers/Http1Server.cc b/src/servers/Http1Server.cc
index 66722f7..fccbf72 100644
--- a/src/servers/Http1Server.cc
+++ b/src/servers/Http1Server.cc
@@ -139,7 +139,8 @@ Http::One::Server::buildHttpRequest(Http::StreamPointer &context)
// TODO: move URL parse into Http Parser and INVALID_URL into the above parse error handling
MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
mx->tcpClient = clientConnection;
- if ((request = HttpRequest::FromUrl(http->uri, mx, parser_->method())) == NULL) {
+ request = HttpRequest::FromUrlXXX(http->uri, mx, parser_->method());
+ if (!request) {
debugs(33, 5, "Invalid URL: " << http->uri);
// setReplyToError() requires log_uri
http->setLogUriToRawUri(http->uri, parser_->method());
diff --git a/src/store_digest.cc b/src/store_digest.cc
index 6cfdeec..358bdcf 100644
--- a/src/store_digest.cc
+++ b/src/store_digest.cc
@@ -414,7 +414,7 @@ storeDigestRewriteStart(void *datanotused)
const char *url = internalLocalUri("/squid-internal-periodic/", SBuf(StoreDigestFileName));
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initCacheDigest);
- auto req = HttpRequest::FromUrl(url, mx);
+ auto req = HttpRequest::FromUrlXXX(url, mx);
RequestFlags flags;
flags.cachable = true;
diff --git a/src/tests/stub_HttpRequest.cc b/src/tests/stub_HttpRequest.cc
index 8ec6eea..7a77387 100644
--- a/src/tests/stub_HttpRequest.cc
+++ b/src/tests/stub_HttpRequest.cc
@@ -47,7 +47,8 @@ int HttpRequest::prefixLen() const STUB_RETVAL(0)
void HttpRequest::swapOut(StoreEntry *) STUB
void HttpRequest::pack(Packable *) const STUB
void HttpRequest::httpRequestPack(void *, Packable *) STUB
-HttpRequest * HttpRequest::FromUrl(const char *, const MasterXaction::Pointer &, const HttpRequestMethod &) STUB_RETVAL(NULL)
+HttpRequest * HttpRequest::FromUrl(const SBuf &, const MasterXaction::Pointer &, const HttpRequestMethod &) STUB_RETVAL(nullptr)
+HttpRequest * HttpRequest::FromUrlXXX(const char *, const MasterXaction::Pointer &, const HttpRequestMethod &) STUB_RETVAL(nullptr)
ConnStateData *HttpRequest::pinnedConnection() STUB_RETVAL(NULL)
const SBuf HttpRequest::storeId() STUB_RETVAL(SBuf("."))
void HttpRequest::ignoreRange(const char *) STUB
diff --git a/src/tests/stub_libanyp.cc b/src/tests/stub_libanyp.cc
index 2eeff19..47dfb49 100644
--- a/src/tests/stub_libanyp.cc
+++ b/src/tests/stub_libanyp.cc
@@ -14,7 +14,7 @@
#include "anyp/Uri.h"
AnyP::Uri::Uri(AnyP::UriScheme const &) {STUB}
void AnyP::Uri::touch() STUB
-bool AnyP::Uri::parse(const HttpRequestMethod&, const char *) STUB_RETVAL(true)
+bool AnyP::Uri::parse(const HttpRequestMethod&, const SBuf &) STUB_RETVAL(true)
void AnyP::Uri::host(const char *) STUB
static SBuf nil;
const SBuf &AnyP::Uri::path() const STUB_RETVAL(nil)
diff --git a/src/tests/testHttpRequest.cc b/src/tests/testHttpRequest.cc
index c4d743e..3d7d968 100644
--- a/src/tests/testHttpRequest.cc
+++ b/src/tests/testHttpRequest.cc
@@ -45,60 +45,55 @@ testHttpRequest::testCreateFromUrl()
{
/* vanilla url, implict method */
unsigned short expected_port;
- char * url = xstrdup("http://foo:90/bar");
+ SBuf url("http://foo:90/bar");
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
HttpRequest *aRequest = HttpRequest::FromUrl(url, mx);
expected_port = 90;
+ CPPUNIT_ASSERT(aRequest != nullptr);
CPPUNIT_ASSERT_EQUAL(expected_port, aRequest->url.port());
CPPUNIT_ASSERT(aRequest->method == Http::METHOD_GET);
CPPUNIT_ASSERT_EQUAL(String("foo"), String(aRequest->url.host()));
CPPUNIT_ASSERT_EQUAL(SBuf("/bar"), aRequest->url.path());
CPPUNIT_ASSERT_EQUAL(AnyP::PROTO_HTTP, static_cast<AnyP::ProtocolType>(aRequest->url.getScheme()));
- CPPUNIT_ASSERT_EQUAL(String("http://foo:90/bar"), String(url));
- xfree(url);
/* vanilla url */
- url = xstrdup("http://foo:90/bar");
+ url = "http://foo:90/bar";
aRequest = HttpRequest::FromUrl(url, mx, Http::METHOD_GET);
expected_port = 90;
+ CPPUNIT_ASSERT(aRequest != nullptr);
CPPUNIT_ASSERT_EQUAL(expected_port, aRequest->url.port());
CPPUNIT_ASSERT(aRequest->method == Http::METHOD_GET);
CPPUNIT_ASSERT_EQUAL(String("foo"), String(aRequest->url.host()));
CPPUNIT_ASSERT_EQUAL(SBuf("/bar"), aRequest->url.path());
CPPUNIT_ASSERT_EQUAL(AnyP::PROTO_HTTP, static_cast<AnyP::ProtocolType>(aRequest->url.getScheme()));
- CPPUNIT_ASSERT_EQUAL(String("http://foo:90/bar"), String(url));
- xfree(url);
/* vanilla url, different method */
- url = xstrdup("http://foo/bar");
+ url = "http://foo/bar";
aRequest = HttpRequest::FromUrl(url, mx, Http::METHOD_PUT);
expected_port = 80;
+ CPPUNIT_ASSERT(aRequest != nullptr);
CPPUNIT_ASSERT_EQUAL(expected_port, aRequest->url.port());
CPPUNIT_ASSERT(aRequest->method == Http::METHOD_PUT);
CPPUNIT_ASSERT_EQUAL(String("foo"), String(aRequest->url.host()));
CPPUNIT_ASSERT_EQUAL(SBuf("/bar"), aRequest->url.path());
CPPUNIT_ASSERT_EQUAL(AnyP::PROTO_HTTP, static_cast<AnyP::ProtocolType>(aRequest->url.getScheme()));
- CPPUNIT_ASSERT_EQUAL(String("http://foo/bar"), String(url));
- xfree(url);
/* a connect url with non-CONNECT data */
HttpRequest *nullRequest = nullptr;
- url = xstrdup(":foo/bar");
+ url = ":foo/bar";
aRequest = HttpRequest::FromUrl(url, mx, Http::METHOD_CONNECT);
- xfree(url);
CPPUNIT_ASSERT_EQUAL(nullRequest, aRequest);
/* a CONNECT url with CONNECT data */
- url = xstrdup("foo:45");
+ url = "foo:45";
aRequest = HttpRequest::FromUrl(url, mx, Http::METHOD_CONNECT);
expected_port = 45;
+ CPPUNIT_ASSERT(aRequest != nullptr);
CPPUNIT_ASSERT_EQUAL(expected_port, aRequest->url.port());
CPPUNIT_ASSERT(aRequest->method == Http::METHOD_CONNECT);
CPPUNIT_ASSERT_EQUAL(String("foo"), String(aRequest->url.host()));
CPPUNIT_ASSERT_EQUAL(SBuf(), aRequest->url.path());
CPPUNIT_ASSERT_EQUAL(AnyP::PROTO_NONE, static_cast<AnyP::ProtocolType>(aRequest->url.getScheme()));
- CPPUNIT_ASSERT_EQUAL(String("foo:45"), String(url));
- xfree(url);
// XXX: check METHOD_NONE input handling
}
@@ -110,11 +105,10 @@ void
testHttpRequest::testIPv6HostColonBug()
{
unsigned short expected_port;
- char * url = NULL;
HttpRequest *aRequest = NULL;
/* valid IPv6 address without port */
- url = xstrdup("http://[2000:800::45]/foo");
+ SBuf url("http://[2000:800::45]/foo");
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
aRequest = HttpRequest::FromUrl(url, mx, Http::METHOD_GET);
expected_port = 80;
@@ -123,11 +117,9 @@ testHttpRequest::testIPv6HostColonBug()
CPPUNIT_ASSERT_EQUAL(String("[2000:800::45]"), String(aRequest->url.host()));
CPPUNIT_ASSERT_EQUAL(SBuf("/foo"), aRequest->url.path());
CPPUNIT_ASSERT_EQUAL(AnyP::PROTO_HTTP, static_cast<AnyP::ProtocolType>(aRequest->url.getScheme()));
- CPPUNIT_ASSERT_EQUAL(String("http://[2000:800::45]/foo"), String(url));
- xfree(url);
/* valid IPv6 address with port */
- url = xstrdup("http://[2000:800::45]:90/foo");
+ url = "http://[2000:800::45]:90/foo";
aRequest = HttpRequest::FromUrl(url, mx, Http::METHOD_GET);
expected_port = 90;
CPPUNIT_ASSERT_EQUAL(expected_port, aRequest->url.port());
@@ -135,11 +127,9 @@ testHttpRequest::testIPv6HostColonBug()
CPPUNIT_ASSERT_EQUAL(String("[2000:800::45]"), String(aRequest->url.host()));
CPPUNIT_ASSERT_EQUAL(SBuf("/foo"), aRequest->url.path());
CPPUNIT_ASSERT_EQUAL(AnyP::PROTO_HTTP, static_cast<AnyP::ProtocolType>(aRequest->url.getScheme()));
- CPPUNIT_ASSERT_EQUAL(String("http://[2000:800::45]:90/foo"), String(url));
- xfree(url);
/* IPv6 address as invalid (bug trigger) */
- url = xstrdup("http://2000:800::45/foo");
+ url = "http://2000:800::45/foo";
aRequest = HttpRequest::FromUrl(url, mx, Http::METHOD_GET);
expected_port = 80;
CPPUNIT_ASSERT_EQUAL(expected_port, aRequest->url.port());
@@ -147,8 +137,6 @@ testHttpRequest::testIPv6HostColonBug()
CPPUNIT_ASSERT_EQUAL(String("[2000:800::45]"), String(aRequest->url.host()));
CPPUNIT_ASSERT_EQUAL(SBuf("/foo"), aRequest->url.path());
CPPUNIT_ASSERT_EQUAL(AnyP::PROTO_HTTP, static_cast<AnyP::ProtocolType>(aRequest->url.getScheme()));
- CPPUNIT_ASSERT_EQUAL(String("http://2000:800::45/foo"), String(url));
- xfree(url);
}
void
diff --git a/src/urn.cc b/src/urn.cc
index 26ca180..e64da1f 100644
--- a/src/urn.cc
+++ b/src/urn.cc
@@ -34,7 +34,6 @@ class UrnState : public StoreClient
public:
void created (StoreEntry *newEntry);
void start (HttpRequest *, StoreEntry *);
- char *getHost(const SBuf &urlpath);
void setUriResFromRequest(HttpRequest *);
virtual ~UrnState();
@@ -45,11 +44,8 @@ public:
HttpRequest::Pointer request;
HttpRequest::Pointer urlres_r;
- struct {
- bool force_menu;
- } flags;
- char reqbuf[URN_REQBUF_SZ];
- int reqofs;
+ char reqbuf[URN_REQBUF_SZ] = { '\0' };
+ int reqofs = 0;
private:
char *urlres;
@@ -122,35 +118,16 @@ urnFindMinRtt(url_entry * urls, const HttpRequestMethod &, int *rtt_ret)
return min_u;
}
-char *
-UrnState::getHost(const SBuf &urlpath)
-{
- /** FIXME: this appears to be parsing the URL. *very* badly. */
- /* a proper encapsulated URI/URL type needs to clear this up. */
- size_t p;
- if ((p = urlpath.find(':')) != SBuf::npos)
- return SBufToCstring(urlpath.substr(0, p-1));
-
- return SBufToCstring(urlpath);
-}
-
void
UrnState::setUriResFromRequest(HttpRequest *r)
{
- static const SBuf menu(".menu");
- if (r->url.path().startsWith(menu)) {
- r->url.path(r->url.path().substr(5)); // strip prefix "menu."
- flags.force_menu = true;
- }
-
- SBuf uri = r->url.path();
+ const auto &query = r->url.absolute();
+ const auto host = r->url.host();
// TODO: use class AnyP::Uri instead of generating a string and re-parsing
LOCAL_ARRAY(char, local_urlres, 4096);
- char *host = getHost(uri);
- snprintf(local_urlres, 4096, "http://%s/uri-res/N2L?urn:" SQUIDSBUFPH, host, SQUIDSBUFPRINT(uri));
- safe_free(host);
+ snprintf(local_urlres, 4096, "http://%s/uri-res/N2L?" SQUIDSBUFPH, host, SQUIDSBUFPRINT(query));
safe_free(urlres);
- urlres_r = HttpRequest::FromUrl(local_urlres, r->masterXaction);
+ urlres_r = HttpRequest::FromUrlXXX(local_urlres, r->masterXaction);
if (!urlres_r) {
debugs(52, 3, "Bad uri-res URL " << local_urlres);
@@ -366,9 +343,7 @@ urnHandleReply(void *data, StoreIOBuffer result)
rep = new HttpReply;
rep->setHeaders(Http::scFound, NULL, "text/html", mb->contentSize(), 0, squid_curtime);
- if (urnState->flags.force_menu) {
- debugs(51, 3, "urnHandleReply: forcing menu");
- } else if (min_u) {
+ if (min_u) {
rep->header.putStr(Http::HdrType::LOCATION, min_u->url);
}

@@ -1,32 +0,0 @@
From 409956536647b3a05ee1e367424a24ae6b8f13fd Mon Sep 17 00:00:00 2001
From: Amos Jeffries <yadij@users.noreply.github.com>
Date: Sat, 8 Jun 2019 21:09:23 +0000
Subject: [PATCH] Fix Digest auth parameter parsing (#415)
Only remove quoting if the domain=, uri= or qop= parameter
value is surrounded by double-quotes.
---
src/auth/digest/Config.cc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
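
The guard being added is simply "only de-quote when there is something to
de-quote": a value shorter than two characters cannot carry both an opening
and a closing double-quote. A minimal sketch of the idea (a hypothetical
helper, not the Config.cc parser itself):

    #include <string>

    // Strip surrounding double-quotes only when the value is long enough to
    // actually have both; a lone '"' or an unquoted value is left untouched.
    static std::string unquoteIfQuoted(const std::string &v)
    {
        if (v.size() > 1 && v.front() == '"' && v.back() == '"')
            return v.substr(1, v.size() - 2);
        return v;
    }
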
diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
index a8a07cd4db..b547bf83d3 100644
--- a/src/auth/digest/Config.cc
+++ b/src/auth/digest/Config.cc
@@ -787,14 +787,14 @@ Auth::Digest::Config::decode(char const *proxy_auth, const char *aRequestRealm)
if (keyName == SBuf("domain",6) || keyName == SBuf("uri",3)) {
// domain is Special. Not a quoted-string, must not be de-quoted. But is wrapped in '"'
// BUG 3077: uri= can also be sent to us in a mangled (invalid!) form like domain
- if (*p == '"' && *(p + vlen -1) == '"') {
+ if (vlen > 1 && *p == '"' && *(p + vlen -1) == '"') {
value.limitInit(p+1, vlen-2);
}
} else if (keyName == SBuf("qop",3)) {
// qop is more special.
// On request this must not be quoted-string de-quoted. But is several values wrapped in '"'
// On response this is a single un-quoted token.
- if (*p == '"' && *(p + vlen -1) == '"') {
+ if (vlen > 1 && *p == '"' && *(p + vlen -1) == '"') {
value.limitInit(p+1, vlen-2);
} else {
value.limitInit(p, vlen);

@@ -1,139 +0,0 @@
commit 7aa0184a720fd216191474e079f4fe87de7c4f5a (refs/remotes/origin/v4)
Author: Eduard Bagdasaryan <eduard.bagdasaryan@measurement-factory.com>
Date: 2019-09-15 15:32:30 +0000
Fix URN response handling (#472)
urnHandleReply() may be called several times while copying the entry
from the store. Each time it must use the buffer length that is left
(from the previous call).
Also, do not abandon a URN entry that still has clients attached.
Also allow urnHandleReply() to produce a reply if it receives a
zero-sized buffer. This may happen after the entry has been fully
stored.
CID 1453857: Error handling issues (UNCAUGHT_EXCEPT)
Due to various Store deficiencies, storeUnregister() might call swapout
code which Broadcast()s and throws Ipc::OneToOneUniQueue::ItemTooLarge.
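
The buffer-accounting part of the fix is the key point: each callback may only
be offered the space still free in the fixed request buffer. A standalone
sketch of that accounting (names here are illustrative; in urn.cc the buffer is
reqbuf of URN_REQBUF_SZ bytes and the offset is reqofs):

    #include <cstddef>

    // Fixed-size buffer filled incrementally across several copy callbacks.
    static const size_t kReqBufSize = 4096;     // stand-in for URN_REQBUF_SZ

    struct UrnCopyState {
        char reqbuf[kReqBufSize];
        size_t reqofs = 0;      // bytes already received by earlier callbacks
    };

    // Space the next storeClientCopy()-style read may fill; zero means "full".
    static size_t nextCopyLength(const UrnCopyState &s)
    {
        return s.reqofs < kReqBufSize ? kReqBufSize - s.reqofs : 0;
    }
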
diff --git a/src/urn.cc b/src/urn.cc
index e64da1f..a65ab94 100644
--- a/src/urn.cc
+++ b/src/urn.cc
@@ -9,6 +9,7 @@
/* DEBUG: section 52 URN Parsing */
#include "squid.h"
+#include "base/TextException.h"
#include "cbdata.h"
#include "errorpage.h"
#include "FwdState.h"
@@ -69,7 +70,18 @@ CBDATA_CLASS_INIT(UrnState);
UrnState::~UrnState()
{
- safe_free(urlres);
+ SWALLOW_EXCEPTIONS({
+ if (urlres_e) {
+ if (sc)
+ storeUnregister(sc, urlres_e, this);
+ urlres_e->unlock("~UrnState+res");
+ }
+
+ if (entry)
+ entry->unlock("~UrnState+prime");
+
+ safe_free(urlres);
+ });
}
static url_entry *
@@ -205,14 +217,6 @@ url_entry_sort(const void *A, const void *B)
return u1->rtt - u2->rtt;
}
-static void
-urnHandleReplyError(UrnState *urnState, StoreEntry *urlres_e)
-{
- urlres_e->unlock("urnHandleReplyError+res");
- urnState->entry->unlock("urnHandleReplyError+prime");
- delete urnState;
-}
-
/* TODO: use the clientStream support for this */
static void
urnHandleReply(void *data, StoreIOBuffer result)
@@ -235,8 +239,8 @@ urnHandleReply(void *data, StoreIOBuffer result)
debugs(52, 3, "urnHandleReply: Called with size=" << result.length << ".");
- if (EBIT_TEST(urlres_e->flags, ENTRY_ABORTED) || result.length == 0 || result.flags.error) {
- urnHandleReplyError(urnState, urlres_e);
+ if (EBIT_TEST(urlres_e->flags, ENTRY_ABORTED) || result.flags.error) {
+ delete urnState;
return;
}
@@ -245,15 +249,15 @@ urnHandleReply(void *data, StoreIOBuffer result)
/* Handle reqofs being bigger than normal */
if (urnState->reqofs >= URN_REQBUF_SZ) {
- urnHandleReplyError(urnState, urlres_e);
+ delete urnState;
return;
}
/* If we haven't received the entire object (urn), copy more */
- if (urlres_e->store_status == STORE_PENDING &&
- urnState->reqofs < URN_REQBUF_SZ) {
+ if (urlres_e->store_status == STORE_PENDING) {
+ Must(result.length > 0); // zero length ought to imply STORE_OK
tempBuffer.offset = urnState->reqofs;
- tempBuffer.length = URN_REQBUF_SZ;
+ tempBuffer.length = URN_REQBUF_SZ - urnState->reqofs;
tempBuffer.data = urnState->reqbuf + urnState->reqofs;
storeClientCopy(urnState->sc, urlres_e,
tempBuffer,
@@ -267,7 +271,7 @@ urnHandleReply(void *data, StoreIOBuffer result)
if (0 == k) {
debugs(52, DBG_IMPORTANT, "urnHandleReply: didn't find end-of-headers for " << e->url() );
- urnHandleReplyError(urnState, urlres_e);
+ delete urnState;
return;
}
@@ -283,7 +287,7 @@ urnHandleReply(void *data, StoreIOBuffer result)
err->url = xstrdup(e->url());
errorAppendEntry(e, err);
delete rep;
- urnHandleReplyError(urnState, urlres_e);
+ delete urnState;
return;
}
@@ -299,7 +303,7 @@ urnHandleReply(void *data, StoreIOBuffer result)
err = new ErrorState(ERR_URN_RESOLVE, Http::scNotFound, urnState->request.getRaw());
err->url = xstrdup(e->url());
errorAppendEntry(e, err);
- urnHandleReplyError(urnState, urlres_e);
+ delete urnState;
return;
}
@@ -358,10 +362,7 @@ urnHandleReply(void *data, StoreIOBuffer result)
}
safe_free(urls);
- /* mb was absorbed in httpBodySet call, so we must not clean it */
- storeUnregister(urnState->sc, urlres_e, urnState);
-
- urnHandleReplyError(urnState, urlres_e);
+ delete urnState;
}
static url_entry *

@@ -1,144 +0,0 @@
From 7f73e9c5d17664b882ed32590e6af310c247f320 Mon Sep 17 00:00:00 2001
From: Amos Jeffries <yadij@users.noreply.github.com>
Date: Wed, 19 Jun 2019 05:58:36 +0000
Subject: [PATCH] Update HttpHeader::getAuth to SBuf (#416)
Replace the fixed-size buffer for decoding base64 tokens with an
SBuf to avoid decoder issues on large inputs.
Update callers to SBuf API operations for more efficient memory
management.
---
src/HttpHeader.cc | 25 ++++++++++++++-----------
src/HttpHeader.h | 2 +-
src/cache_manager.cc | 13 +++++++------
src/clients/FtpGateway.cc | 2 +-
4 files changed, 23 insertions(+), 19 deletions(-)
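
Besides the decoder change, the cache_manager.cc hunk below switches the
"user:password" split to position-based SBuf operations. A rough equivalent
using std::string (illustrative only, not the actual Squid code):

    #include <string>

    // Split a decoded "user:password" cookie at the first ':'.
    static bool splitBasicCookie(const std::string &cookie,
                                 std::string &user, std::string &password)
    {
        const auto colonPos = cookie.find(':');
        if (colonPos == std::string::npos)
            return false;                   // unknown basic_cookie format
        user = cookie.substr(0, colonPos);
        password = cookie.substr(colonPos + 1);
        return true;
    }
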
diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
index 9f747f9958..dd320d5629 100644
--- a/src/HttpHeader.cc
+++ b/src/HttpHeader.cc
@@ -1268,43 +1268,46 @@ HttpHeader::getContRange() const
return cr;
}
-const char *
-HttpHeader::getAuth(Http::HdrType id, const char *auth_scheme) const
+SBuf
+HttpHeader::getAuthToken(Http::HdrType id, const char *auth_scheme) const
{
const char *field;
int l;
assert(auth_scheme);
field = getStr(id);
+ static const SBuf nil;
if (!field) /* no authorization field */
- return NULL;
+ return nil;
l = strlen(auth_scheme);
if (!l || strncasecmp(field, auth_scheme, l)) /* wrong scheme */
- return NULL;
+ return nil;
field += l;
if (!xisspace(*field)) /* wrong scheme */
- return NULL;
+ return nil;
/* skip white space */
for (; field && xisspace(*field); ++field);
if (!*field) /* no authorization cookie */
- return NULL;
+ return nil;
- static char decodedAuthToken[8192];
+ const auto fieldLen = strlen(field);
+ SBuf result;
+ char *decodedAuthToken = result.rawAppendStart(BASE64_DECODE_LENGTH(fieldLen));
struct base64_decode_ctx ctx;
base64_decode_init(&ctx);
size_t decodedLen = 0;
- if (!base64_decode_update(&ctx, &decodedLen, reinterpret_cast<uint8_t*>(decodedAuthToken), strlen(field), field) ||
+ if (!base64_decode_update(&ctx, &decodedLen, reinterpret_cast<uint8_t*>(decodedAuthToken), fieldLen, field) ||
!base64_decode_final(&ctx)) {
- return NULL;
+ return nil;
}
- decodedAuthToken[decodedLen] = '\0';
- return decodedAuthToken;
+ result.rawAppendFinish(decodedAuthToken, decodedLen);
+ return result;
}
ETag
diff --git a/src/HttpHeader.h b/src/HttpHeader.h
index 64fd2781e4..35a941058c 100644
--- a/src/HttpHeader.h
+++ b/src/HttpHeader.h
@@ -134,7 +134,7 @@ class HttpHeader
HttpHdrRange *getRange() const;
HttpHdrSc *getSc() const;
HttpHdrContRange *getContRange() const;
- const char *getAuth(Http::HdrType id, const char *auth_scheme) const;
+ SBuf getAuthToken(Http::HdrType id, const char *auth_scheme) const;
ETag getETag(Http::HdrType id) const;
TimeOrTag getTimeOrTag(Http::HdrType id) const;
int hasListMember(Http::HdrType id, const char *member, const char separator) const;
diff --git a/src/cache_manager.cc b/src/cache_manager.cc
index f88cd1c46b..3556a44618 100644
--- a/src/cache_manager.cc
+++ b/src/cache_manager.cc
@@ -27,6 +27,7 @@
#include "mgr/FunAction.h"
#include "mgr/QueryParams.h"
#include "protos.h"
+#include "sbuf/StringConvert.h"
#include "SquidConfig.h"
#include "SquidTime.h"
#include "Store.h"
@@ -243,20 +244,20 @@ CacheManager::ParseHeaders(const HttpRequest * request, Mgr::ActionParams &param
// TODO: use the authentication system decode to retrieve these details properly.
/* base 64 _decoded_ user:passwd pair */
- const char *basic_cookie = request->header.getAuth(Http::HdrType::AUTHORIZATION, "Basic");
+ const auto basic_cookie(request->header.getAuthToken(Http::HdrType::AUTHORIZATION, "Basic"));
- if (!basic_cookie)
+ if (basic_cookie.isEmpty())
return;
- const char *passwd_del;
- if (!(passwd_del = strchr(basic_cookie, ':'))) {
+ const auto colonPos = basic_cookie.find(':');
+ if (colonPos == SBuf::npos) {
debugs(16, DBG_IMPORTANT, "CacheManager::ParseHeaders: unknown basic_cookie format '" << basic_cookie << "'");
return;
}
/* found user:password pair, reset old values */
- params.userName.limitInit(basic_cookie, passwd_del - basic_cookie);
- params.password = passwd_del + 1;
+ params.userName = SBufToString(basic_cookie.substr(0, colonPos));
+ params.password = SBufToString(basic_cookie.substr(colonPos+1));
/* warning: this prints decoded password which maybe not be what you want to do @?@ @?@ */
debugs(16, 9, "CacheManager::ParseHeaders: got user: '" <<
diff --git a/src/clients/FtpGateway.cc b/src/clients/FtpGateway.cc
index 9afe3781cd..140c441394 100644
--- a/src/clients/FtpGateway.cc
+++ b/src/clients/FtpGateway.cc
@@ -1039,7 +1039,7 @@ Ftp::Gateway::checkAuth(const HttpHeader * req_hdr)
#if HAVE_AUTH_MODULE_BASIC
/* Check HTTP Authorization: headers (better than defaults, but less than URL) */
- const SBuf auth(req_hdr->getAuth(Http::HdrType::AUTHORIZATION, "Basic"));
+ const auto auth(req_hdr->getAuthToken(Http::HdrType::AUTHORIZATION, "Basic"));
if (!auth.isEmpty()) {
flags.authenticated = 1;
loginParser(auth, false);

@@ -1,225 +0,0 @@
Backport of:
From dd46b5417809647f561d8a5e0e74c3aacd235258 Mon Sep 17 00:00:00 2001
From: Amos Jeffries <yadij@users.noreply.github.com>
Date: Tue, 21 May 2019 21:31:31 +0000
Subject: [PATCH] Replace uudecode with libnettle base64 decoder (#406)
Since RFC 7235 updated the HTTP Authentication credentials token
to the token68 character set, it is possible to receive characters
that uudecode cannot cope with.
The Nettle decoder better handles characters which are valid
token68 but are not expected in a Basic auth token.
---
include/uudecode.h | 21 ------------
lib/Makefile.am | 3 +-
lib/uudecode.c | 73 ----------------------------------------
src/auth/basic/Config.cc | 20 ++++++++---
4 files changed, 17 insertions(+), 100 deletions(-)
delete mode 100644 include/uudecode.h
delete mode 100644 lib/uudecode.c
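
For reference, the credentials form in question is RFC 7235 token68:
1*( ALPHA / DIGIT / "-" / "." / "_" / "~" / "+" / "/" ) *"=". A small
standalone validator sketch of that grammar (illustrative only, not part of
the patch):

    #include <cctype>
    #include <string>

    static bool isToken68(const std::string &s)
    {
        size_t i = 0;
        for (; i < s.size(); ++i) {
            const unsigned char c = s[i];
            if (std::isalnum(c) || c == '-' || c == '.' || c == '_' ||
                c == '~' || c == '+' || c == '/')
                continue;
            break;
        }
        if (i == 0)
            return false;       // must start with at least one token68 char
        for (; i < s.size(); ++i)
            if (s[i] != '=')
                return false;   // only trailing '=' padding is allowed
        return true;
    }
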
Index: squid-4.4/lib/Makefile.am
===================================================================
--- squid-4.4.orig/lib/Makefile.am 2019-07-16 12:03:04.428684097 -0400
+++ squid-4.4/lib/Makefile.am 2019-07-16 12:03:04.424684101 -0400
@@ -61,8 +61,7 @@ libmiscencoding_la_SOURCES = \
html_quote.c \
md5.c \
rfc1738.c \
- rfc2617.c \
- uudecode.c
+ rfc2617.c
libmisccontainers_la_SOURCES = \
hash.cc
Index: squid-4.4/src/auth/basic/Config.cc
===================================================================
--- squid-4.4.orig/src/auth/basic/Config.cc 2019-07-16 12:03:04.428684097 -0400
+++ squid-4.4/src/auth/basic/Config.cc 2019-07-16 12:03:04.424684101 -0400
@@ -20,6 +20,7 @@
#include "auth/CredentialsCache.h"
#include "auth/Gadgets.h"
#include "auth/State.h"
+#include "base64.h"
#include "cache_cf.h"
#include "charset.h"
#include "helper.h"
@@ -30,7 +31,6 @@
#include "SquidTime.h"
#include "Store.h"
#include "util.h"
-#include "uudecode.h"
#include "wordlist.h"
/* Basic Scheme */
@@ -169,10 +169,17 @@ Auth::Basic::Config::decodeCleartext(con
// XXX: really? is the \n actually still there? does the header parse not drop it?
char *eek = xstrdup(proxy_auth);
strtok(eek, "\n");
- char *cleartext = uudecode(eek);
- safe_free(eek);
- if (cleartext) {
+ const size_t srcLen = strlen(eek);
+ char *cleartext = static_cast<char*>(xmalloc(BASE64_DECODE_LENGTH(srcLen)+1));
+
+ struct base64_decode_ctx ctx;
+ base64_decode_init(&ctx);
+
+ size_t dstLen = 0;
+ if (base64_decode_update(&ctx, &dstLen, reinterpret_cast<uint8_t*>(cleartext), srcLen, eek) && base64_decode_final(&ctx)) {
+ cleartext[dstLen] = '\0';
+
/*
* Don't allow NL or CR in the credentials.
* Oezguer Kesim <oec@codeblau.de>
@@ -183,7 +190,12 @@ Auth::Basic::Config::decodeCleartext(con
debugs(29, DBG_IMPORTANT, "WARNING: Bad characters in authorization header '" << httpAuthHeader << "'");
safe_free(cleartext);
}
+ } else {
+ debugs(29, 2, "WARNING: Invalid Base64 character in authorization header '" << httpAuthHeader << "'");
+ safe_free(cleartext);
}
+
+ safe_free(eek);
return cleartext;
}
Index: squid-4.4/include/uudecode.h
===================================================================
--- squid-4.4.orig/include/uudecode.h 2019-07-16 12:03:04.428684097 -0400
+++ /dev/null 1970-01-01 00:00:00.000000000 +0000
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
- *
- * Squid software is distributed under GPLv2+ license and includes
- * contributions from numerous individuals and organizations.
- * Please see the COPYING and CONTRIBUTORS files for details.
- */
-
-#ifndef _SQUID_UUDECODE_H
-#define _SQUID_UUDECODE_H
-
-#ifdef __cplusplus
-extern "C"
-#else
-extern
-#endif
-
-char *uudecode(const char *);
-
-#endif /* _SQUID_UUDECODE_H */
-
Index: squid-4.4/lib/uudecode.c
===================================================================
--- squid-4.4.orig/lib/uudecode.c 2019-07-16 12:03:04.428684097 -0400
+++ /dev/null 1970-01-01 00:00:00.000000000 +0000
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
- *
- * Squid software is distributed under GPLv2+ license and includes
- * contributions from numerous individuals and organizations.
- * Please see the COPYING and CONTRIBUTORS files for details.
- */
-
-#include "squid.h"
-#include "uudecode.h"
-
-/* aaaack but it's fast and const should make it shared text page. */
-const int pr2six[256] = {
- 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
- 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63,
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 64, 64, 64, 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
- 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64, 64, 26, 27,
- 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
- 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
- 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
- 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
- 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
- 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
- 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
-};
-
-char *
-uudecode(const char *bufcoded)
-{
- int nbytesdecoded;
- const unsigned char *bufin;
- char *bufplain;
- unsigned char *bufout;
- int nprbytes;
-
- /* Strip leading whitespace. */
-
- while (*bufcoded == ' ' || *bufcoded == '\t')
- bufcoded++;
-
- /* Figure out how many characters are in the input buffer.
- * Allocate this many from the per-transaction pool for the result.
- */
- bufin = (const unsigned char *) bufcoded;
- while (pr2six[*(bufin++)] <= 63);
- nprbytes = (const char *) bufin - bufcoded - 1;
- nbytesdecoded = ((nprbytes + 3) / 4) * 3;
-
- bufplain = xmalloc(nbytesdecoded + 1);
- bufout = (unsigned char *) bufplain;
- bufin = (const unsigned char *) bufcoded;
-
- while (nprbytes > 0) {
- *(bufout++) =
- (unsigned char) (pr2six[*bufin] << 2 | pr2six[bufin[1]] >> 4);
- *(bufout++) =
- (unsigned char) (pr2six[bufin[1]] << 4 | pr2six[bufin[2]] >> 2);
- *(bufout++) =
- (unsigned char) (pr2six[bufin[2]] << 6 | pr2six[bufin[3]]);
- bufin += 4;
- nprbytes -= 4;
- }
-
- if (nprbytes & 03) {
- if (pr2six[bufin[-2]] > 63)
- nbytesdecoded -= 2;
- else
- nbytesdecoded -= 1;
- }
- bufplain[nbytesdecoded] = '\0';
- return bufplain;
-}
-
Index: squid-4.4/lib/Makefile.in
===================================================================
--- squid-4.4.orig/lib/Makefile.in 2018-10-27 21:50:06.000000000 -0400
+++ squid-4.4/lib/Makefile.in 2019-07-16 12:03:48.588632154 -0400
@@ -185,7 +185,7 @@ am__v_lt_0 = --silent
am__v_lt_1 =
libmiscencoding_la_LIBADD =
am_libmiscencoding_la_OBJECTS = base64.lo charset.lo html_quote.lo \
- md5.lo rfc1738.lo rfc2617.lo uudecode.lo
+ md5.lo rfc1738.lo rfc2617.lo
libmiscencoding_la_OBJECTS = $(am_libmiscencoding_la_OBJECTS)
libmiscutil_la_LIBADD =
am_libmiscutil_la_OBJECTS = getfullhostname.lo heap.lo iso3307.lo \
@@ -836,8 +835,7 @@ libmiscencoding_la_SOURCES = \
html_quote.c \
md5.c \
rfc1738.c \
- rfc2617.c \
- uudecode.c
+ rfc2617.c
libmisccontainers_la_SOURCES = \
hash.cc
@@ -970,7 +968,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sspwin32.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/stub_memaccount.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/util.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/uudecode.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xusleep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@tests/$(DEPDIR)/testRFC1738.Po@am__quote@

@@ -1,43 +0,0 @@
commit 2981a957716c61ff7e21eee1d7d6eb5a237e466d
Author: Amos Jeffries <yadij@users.noreply.github.com>
Date: 2019-05-18 17:02:33 +0000
Bug 4937: cachemgr.cgi: unallocated memory access (#407)
... after base64_decode_update
Ensure that a terminator exists for the decoded string before
using str*() syscalls.
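
The core of the fix is to reserve one extra byte and terminate the decoded
data before any str*() call sees it. A standalone sketch of that pattern using
the same libnettle calls (function and variable names are hypothetical, not
cachemgr.cc's):

    #include <nettle/base64.h>
    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    static char *decodeNulTerminated(const char *encoded)
    {
        const size_t srcLen = std::strlen(encoded);
        // +1 so there is room for an explicit terminator after decoding.
        char *buf = static_cast<char*>(std::malloc(BASE64_DECODE_LENGTH(srcLen) + 1));
        struct base64_decode_ctx ctx;
        base64_decode_init(&ctx);
        size_t decodedLen = 0;
        if (!buf || !base64_decode_update(&ctx, &decodedLen,
                                          reinterpret_cast<uint8_t*>(buf),
                                          srcLen, encoded) ||
            !base64_decode_final(&ctx)) {
            std::free(buf);
            return nullptr;                 // not valid base64 (or no memory)
        }
        buf[decodedLen] = '\0';             // the decoder does not terminate for us
        return buf;                         // caller frees
    }
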
diff --git a/tools/cachemgr.cc b/tools/cachemgr.cc
index 0e5d4f1..1a05cb4 100644
--- a/tools/cachemgr.cc
+++ b/tools/cachemgr.cc
@@ -1091,7 +1091,6 @@ make_pub_auth(cachemgr_request * req)
static void
decode_pub_auth(cachemgr_request * req)
{
- char *buf;
const char *host_name;
const char *time_str;
const char *user_name;
@@ -1103,16 +1102,17 @@ decode_pub_auth(cachemgr_request * req)
if (!req->pub_auth || strlen(req->pub_auth) < 4 + strlen(safe_str(req->hostname)))
return;
- size_t decodedLen = BASE64_DECODE_LENGTH(strlen(req->pub_auth));
- buf = (char*)xmalloc(decodedLen);
+ char *buf = static_cast<char*>(xmalloc(BASE64_DECODE_LENGTH(strlen(req->pub_auth))+1));
struct base64_decode_ctx ctx;
base64_decode_init(&ctx);
+ size_t decodedLen = 0;
if (!base64_decode_update(&ctx, &decodedLen, reinterpret_cast<uint8_t*>(buf), strlen(req->pub_auth), req->pub_auth) ||
!base64_decode_final(&ctx)) {
debug("cmgr: base64 decode failure. Incomplete auth token string.\n");
xfree(buf);
return;
}
+ buf[decodedLen] = '\0';
debug("cmgr: length ok\n");

@@ -1,75 +0,0 @@
From be1dc8614e7514103ba84d4067ed6fd15ab8f82e Mon Sep 17 00:00:00 2001
From: Amos Jeffries <yadij@users.noreply.github.com>
Date: Fri, 5 Jul 2019 03:17:26 +0000
Subject: [PATCH] Bug 4957: Multiple XSS issues in cachemgr.cgi (#429)
The cachemgr.cgi web module of the Squid proxy is vulnerable
to an XSS issue. The vulnerable parameters "user_name" and "auth"
have insufficient sanitization in place.
---
tools/cachemgr.cc | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
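
The fix routes the untrusted values through rfc1738_escape() before they are
emitted into generated HTML and URLs. A minimal percent-encoder sketch showing
the same defensive idea (illustrative only, not Squid's rfc1738_escape):

    #include <cctype>
    #include <cstdio>
    #include <string>

    static std::string percentEncode(const std::string &in)
    {
        std::string out;
        for (const unsigned char c : in) {
            if (std::isalnum(c) || c == '-' || c == '_' || c == '.' || c == '~') {
                out += static_cast<char>(c);    // unreserved: pass through
            } else {
                char hex[4];
                std::snprintf(hex, sizeof(hex), "%%%02X", c);
                out += hex;                     // everything else: %XX
            }
        }
        return out;
    }
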
Index: squid-4.6/tools/cachemgr.cc
===================================================================
--- squid-4.6.orig/tools/cachemgr.cc 2019-07-11 13:05:23.027988071 -0400
+++ squid-4.6/tools/cachemgr.cc 2019-07-11 13:05:23.027988071 -0400
@@ -355,7 +355,7 @@ auth_html(const char *host, int port, co
printf("<TR><TH ALIGN=\"left\">Manager name:</TH><TD><INPUT NAME=\"user_name\" ");
- printf("size=\"30\" VALUE=\"%s\"></TD></TR>\n", user_name);
+ printf("size=\"30\" VALUE=\"%s\"></TD></TR>\n", rfc1738_escape(user_name));
printf("<TR><TH ALIGN=\"left\">Password:</TH><TD><INPUT TYPE=\"password\" NAME=\"passwd\" ");
@@ -419,7 +419,7 @@ menu_url(cachemgr_request * req, const c
script_name,
req->hostname,
req->port,
- safe_str(req->user_name),
+ rfc1738_escape(safe_str(req->user_name)),
action,
safe_str(req->pub_auth));
return url;
@@ -1074,8 +1074,8 @@ make_pub_auth(cachemgr_request * req)
const int bufLen = snprintf(buf, sizeof(buf), "%s|%d|%s|%s",
req->hostname,
(int) now,
- req->user_name ? req->user_name : "",
- req->passwd);
+ rfc1738_escape(safe_str(req->user_name)),
+ rfc1738_escape(req->passwd));
debug("cmgr: pre-encoded for pub: %s\n", buf);
const int encodedLen = base64_encode_len(bufLen);
@@ -1094,7 +1094,5 @@ decode_pub_auth(cachemgr_request * req)
const char *host_name;
const char *time_str;
- const char *user_name;
- const char *passwd;
debug("cmgr: decoding pub: '%s'\n", safe_str(req->pub_auth));
safe_free(req->passwd);
@@ -1131,17 +1129,21 @@ decode_pub_auth(cachemgr_request * req)
debug("cmgr: decoded time: '%s' (now: %d)\n", time_str, (int) now);
+ char *user_name;
if ((user_name = strtok(NULL, "|")) == NULL) {
xfree(buf);
return;
}
+ rfc1738_unescape(user_name);
debug("cmgr: decoded uname: '%s'\n", user_name);
+ char *passwd;
if ((passwd = strtok(NULL, "|")) == NULL) {
xfree(buf);
return;
}
+ rfc1738_unescape(passwd);
debug("cmgr: decoded passwd: '%s'\n", passwd);

@@ -1,90 +0,0 @@
commit 36492033ea4097821a4f7ff3ddcb971fbd1e8ba0
Author: Amos Jeffries <yadij@users.noreply.github.com>
Date: 2019-07-12 03:08:00 +0000
Prevent truncation for large origin-relative domains (#427)
The domain in a URL must obey hostname length restrictions after
append_domain is applied, not just MAX_URL for the normalized
absolute-URL.
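
A standalone sketch of the length check that urlAppendDomain() introduces
(constants and names are illustrative; the real code uses SQUIDHOSTNAMELEN and
Config.appendDomain, and assumes the host buffer is that large):

    #include <cstddef>
    #include <cstring>

    static const size_t kHostNameLen = 256;     // stand-in for SQUIDHOSTNAMELEN

    // `host` must point to a writable buffer of at least kHostNameLen bytes.
    static bool appendDomainChecked(char *host, const char *domain)
    {
        // Only dotless names get a domain: '.' covers FQDN/IPv4, ':' covers IPv6.
        if (!domain || strchr(host, '.') || strchr(host, ':'))
            return true;
        const size_t have = strlen(host);
        if (have + strlen(domain) > kHostNameLen - 1)
            return false;   // appending would truncate the hostname: reject
        strncat(host, domain, kHostNameLen - have - 1);
        return true;
    }
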
diff --git a/src/anyp/Uri.cc b/src/anyp/Uri.cc
index 6ce8d9b..36d3fe2 100644
--- a/src/anyp/Uri.cc
+++ b/src/anyp/Uri.cc
@@ -167,6 +167,30 @@ urlParseProtocol(const char *b)
return AnyP::PROTO_NONE;
}
+/**
+ * Appends configured append_domain to hostname, assuming
+ * the given buffer is at least SQUIDHOSTNAMELEN bytes long,
+ * and that the host FQDN is not a 'dotless' TLD.
+ *
+ * \returns false if and only if there is not enough space to append
+ */
+bool
+urlAppendDomain(char *host)
+{
+ /* For IPv4 addresses check for a dot */
+ /* For IPv6 addresses also check for a colon */
+ if (Config.appendDomain && !strchr(host, '.') && !strchr(host, ':')) {
+ const uint64_t dlen = strlen(host);
+ const uint64_t want = dlen + Config.appendDomainLen;
+ if (want > SQUIDHOSTNAMELEN - 1) {
+ debugs(23, 2, "URL domain too large (" << dlen << " bytes)");
+ return false;
+ }
+ strncat(host, Config.appendDomain, SQUIDHOSTNAMELEN - dlen - 1);
+ }
+ return true;
+}
+
/*
* Parse a URI/URL.
*
@@ -376,9 +400,8 @@ AnyP::Uri::parse(const HttpRequestMethod& method, const char *url)
return false;
}
- /* For IPV6 addresses also check for a colon */
- if (Config.appendDomain && !strchr(foundHost, '.') && !strchr(foundHost, ':'))
- strncat(foundHost, Config.appendDomain, SQUIDHOSTNAMELEN - strlen(foundHost) - 1);
+ if (!urlAppendDomain(foundHost))
+ return false;
/* remove trailing dots from hostnames */
while ((l = strlen(foundHost)) > 0 && foundHost[--l] == '.')
diff --git a/src/anyp/Uri.h b/src/anyp/Uri.h
index 1a8f057..c0b8416 100644
--- a/src/anyp/Uri.h
+++ b/src/anyp/Uri.h
@@ -191,6 +191,7 @@ bool urlIsRelative(const char *);
char *urlMakeAbsolute(const HttpRequest *, const char *);
char *urlRInternal(const char *host, unsigned short port, const char *dir, const char *name);
char *urlInternal(const char *dir, const char *name);
+bool urlAppendDomain(char *host); ///< apply append_domain config to the given hostname
enum MatchDomainNameFlags {
mdnNone = 0,
diff --git a/src/internal.cc b/src/internal.cc
index 11fae9b..facc3d9 100644
--- a/src/internal.cc
+++ b/src/internal.cc
@@ -98,13 +98,9 @@ internalRemoteUri(bool encrypt, const char *host, unsigned short port, const cha
/*
* append the domain in order to mirror the requests with appended
- * domains
+ * domains. If that fails, just use the hostname anyway.
*/
-
- /* For IPv6 addresses also check for a colon */
- if (Config.appendDomain && !strchr(lc_host, '.') && !strchr(lc_host, ':'))
- strncat(lc_host, Config.appendDomain, SQUIDHOSTNAMELEN -
- strlen(lc_host) - 1);
+ (void)urlAppendDomain(lc_host);
/* build URI */
AnyP::Uri tmp(AnyP::PROTO_HTTP);
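
The core of the change is a length check before append_domain is applied: the hostname plus the configured domain must still fit inside the fixed SQUIDHOSTNAMELEN buffer, otherwise the URL is rejected rather than silently truncated. Below is a minimal sketch of that bounded append; kHostNameLen and the ".example.com" literal are made-up stand-ins for SQUIDHOSTNAMELEN and the configured append_domain value.

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    constexpr std::size_t kHostNameLen = 256;   // stand-in for SQUIDHOSTNAMELEN

    static bool appendDomainBounded(char *host, const char *domain)
    {
        // Only dotless names get a domain appended; a ':' marks an IPv6 literal.
        if (!std::strchr(host, '.') && !std::strchr(host, ':')) {
            const std::size_t have = std::strlen(host);
            if (have + std::strlen(domain) > kHostNameLen - 1)
                return false;                       // would truncate: reject the URL
            std::strncat(host, domain, kHostNameLen - have - 1);
        }
        return true;
    }

    int main()
    {
        char host[kHostNameLen] = "intranet";
        if (appendDomainBounded(host, ".example.com"))
            std::printf("host after append_domain: %s\n", host);
        return 0;
    }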

@ -1,121 +0,0 @@
commit 671ba97abe929156dc4c717ee52ad22fba0f7443
Author: Amos Jeffries <yadij@users.noreply.github.com>
Date: 2019-09-11 02:52:52 +0000
RFC 7230: server MUST reject messages with BWS after field-name (#445)
Obey the RFC requirement to reject HTTP requests with whitespace
between field-name and the colon delimiter. Rejection is
critical in the presence of broken HTTP agents that mishandle
malformed messages.
Also obey requirement to always strip such whitespace from HTTP
response messages. The relaxed parser is no longer necessary for
this response change.
For now non-HTTP protocols retain the old behaviour of removal
only when using the relaxed parser.
diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
index dd320d5..a36ad85 100644
--- a/src/HttpHeader.cc
+++ b/src/HttpHeader.cc
@@ -421,15 +421,12 @@ HttpHeader::parse(const char *header_start, size_t hdrLen)
break; /* terminating blank line */
}
- HttpHeaderEntry *e;
- if ((e = HttpHeaderEntry::parse(field_start, field_end)) == NULL) {
+ const auto e = HttpHeaderEntry::parse(field_start, field_end, owner);
+ if (!e) {
debugs(55, warnOnError, "WARNING: unparseable HTTP header field {" <<
getStringPrefix(field_start, field_end-field_start) << "}");
debugs(55, warnOnError, " in {" << getStringPrefix(header_start, hdrLen) << "}");
- if (Config.onoff.relaxed_header_parser)
- continue;
-
PROF_stop(HttpHeaderParse);
clean();
return 0;
@@ -1386,7 +1383,7 @@ HttpHeaderEntry::~HttpHeaderEntry()
/* parses and inits header entry, returns true/false */
HttpHeaderEntry *
-HttpHeaderEntry::parse(const char *field_start, const char *field_end)
+HttpHeaderEntry::parse(const char *field_start, const char *field_end, const http_hdr_owner_type msgType)
{
/* note: name_start == field_start */
const char *name_end = (const char *)memchr(field_start, ':', field_end - field_start);
@@ -1403,19 +1400,41 @@ HttpHeaderEntry::parse(const char *field_start, const char *field_end)
if (name_len > 65534) {
/* String must be LESS THAN 64K and it adds a terminating NULL */
- debugs(55, DBG_IMPORTANT, "WARNING: ignoring header name of " << name_len << " bytes");
+ // TODO: update this to show proper name_len in Raw markup, but not print all that
+ debugs(55, 2, "ignoring huge header field (" << Raw("field_start", field_start, 100) << "...)");
return NULL;
}
- if (Config.onoff.relaxed_header_parser && xisspace(field_start[name_len - 1])) {
+ /*
+ * RFC 7230 section 3.2.4:
+ * "No whitespace is allowed between the header field-name and colon.
+ * ...
+ * A server MUST reject any received request message that contains
+ * whitespace between a header field-name and colon with a response code
+ * of 400 (Bad Request). A proxy MUST remove any such whitespace from a
+ * response message before forwarding the message downstream."
+ */
+ if (xisspace(field_start[name_len - 1])) {
+
+ if (msgType == hoRequest)
+ return nullptr;
+
+ // for now, also let relaxed parser remove this BWS from any non-HTTP messages
+ const bool stripWhitespace = (msgType == hoReply) ||
+ Config.onoff.relaxed_header_parser;
+ if (!stripWhitespace)
+ return nullptr; // reject if we cannot strip
+
debugs(55, Config.onoff.relaxed_header_parser <= 0 ? 1 : 2,
"NOTICE: Whitespace after header name in '" << getStringPrefix(field_start, field_end-field_start) << "'");
while (name_len > 0 && xisspace(field_start[name_len - 1]))
--name_len;
- if (!name_len)
+ if (!name_len) {
+ debugs(55, 2, "found header with only whitespace for name");
return NULL;
+ }
}
/* now we know we can parse it */
@@ -1448,11 +1467,7 @@ HttpHeaderEntry::parse(const char *field_start, const char *field_end)
if (field_end - value_start > 65534) {
/* String must be LESS THAN 64K and it adds a terminating NULL */
- debugs(55, DBG_IMPORTANT, "WARNING: ignoring '" << name << "' header of " << (field_end - value_start) << " bytes");
-
- if (id == Http::HdrType::OTHER)
- name.clean();
-
+ debugs(55, 2, "WARNING: found '" << name << "' header of " << (field_end - value_start) << " bytes");
return NULL;
}
diff --git a/src/HttpHeader.h b/src/HttpHeader.h
index 35a9410..be175b7 100644
--- a/src/HttpHeader.h
+++ b/src/HttpHeader.h
@@ -54,7 +54,7 @@ class HttpHeaderEntry
public:
HttpHeaderEntry(Http::HdrType id, const char *name, const char *value);
~HttpHeaderEntry();
- static HttpHeaderEntry *parse(const char *field_start, const char *field_end);
+ static HttpHeaderEntry *parse(const char *field_start, const char *field_end, const http_hdr_owner_type msgType);
HttpHeaderEntry *clone() const;
void packInto(Packable *p) const;
int getInt() const;
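
RFC 7230 section 3.2.4 is the rule being enforced here: no whitespace may separate a field-name from the colon, servers must reject requests that contain it, and proxies must strip it from responses before forwarding. A hedged sketch of that decision follows, written independently of Squid's HttpHeaderEntry class; the names MsgKind and parseFieldName are illustrative only.

    #include <cctype>
    #include <optional>
    #include <string>

    enum class MsgKind { Request, Reply };

    // Returns the trimmed field-name, or std::nullopt when the field must be rejected.
    static std::optional<std::string> parseFieldName(const std::string &line, MsgKind kind)
    {
        const auto colon = line.find(':');
        if (colon == std::string::npos || colon == 0)
            return std::nullopt;                    // no field-name or no colon at all
        std::size_t end = colon;
        if (std::isspace(static_cast<unsigned char>(line[end - 1])) && kind == MsgKind::Request)
            return std::nullopt;                    // server MUST reject -> 400 Bad Request
        while (end > 0 && std::isspace(static_cast<unsigned char>(line[end - 1])))
            --end;                                  // proxy MUST strip it from responses
        if (end == 0)
            return std::nullopt;                    // name was only whitespace
        return line.substr(0, end);
    }

    int main()
    {
        const auto ok = parseFieldName("Host : example.com", MsgKind::Request);
        return ok ? 1 : 0;   // returns 0: the request field is rejected
    }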

@ -1,97 +0,0 @@
From bc54d7a6f7ec510a25966f2f800d3ea874657546 Mon Sep 17 00:00:00 2001
From: chi-mf <43963496+chi-mf@users.noreply.github.com>
Date: Tue, 30 Oct 2018 04:48:40 +0000
Subject: [PATCH] Fix netdb exchange with a TLS cache_peer (#307)
Squid uses http-scheme URLs when sending netdb exchange (and possibly
other) requests to a cache_peer. If a DIRECT path is selected for that
cache_peer URL, then Squid sends a clear text HTTP request to that
cache_peer. If that cache_peer expects a TLS connection, it will reject
that request (with, e.g., error:transaction-end-before-headers),
resulting in an HTTP 503 or 504 netdb fetch error.
Work around this by adding an internalRemoteUri() parameter to indicate
whether https or http URL scheme should be used. Netdb fetches from
CachePeer::secure peers now get an https scheme and, hence, a TLS
connection.
---
src/icmp/net_db.cc | 2 +-
src/internal.cc | 9 ++++++---
src/internal.h | 2 +-
src/peer_digest.cc | 2 +-
4 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/src/icmp/net_db.cc b/src/icmp/net_db.cc
index 0f488de2b2..526093f86e 100644
--- a/src/icmp/net_db.cc
+++ b/src/icmp/net_db.cc
@@ -1282,7 +1282,7 @@ netdbExchangeStart(void *data)
#if USE_ICMP
CachePeer *p = (CachePeer *)data;
static const SBuf netDB("netdb");
- char *uri = internalRemoteUri(p->host, p->http_port, "/squid-internal-dynamic/", netDB);
+ char *uri = internalRemoteUri(p->secure.encryptTransport, p->host, p->http_port, "/squid-internal-dynamic/", netDB);
debugs(38, 3, "Requesting '" << uri << "'");
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initIcmp);
HttpRequest *req = HttpRequest::FromUrl(uri, mx);
diff --git a/src/internal.cc b/src/internal.cc
index 6ebc7a6793..ff7b4d635f 100644
--- a/src/internal.cc
+++ b/src/internal.cc
@@ -82,7 +82,7 @@ internalStaticCheck(const SBuf &urlPath)
* makes internal url with a given host and port (remote internal url)
*/
char *
-internalRemoteUri(const char *host, unsigned short port, const char *dir, const SBuf &name)
+internalRemoteUri(bool encrypt, const char *host, unsigned short port, const char *dir, const SBuf &name)
{
static char lc_host[SQUIDHOSTNAMELEN];
assert(host && !name.isEmpty());
@@ -115,7 +115,7 @@ internalRemoteUri(const char *host, unsigned short port, const char *dir, const
static MemBuf mb;
mb.reset();
- mb.appendf("http://" SQUIDSBUFPH, SQUIDSBUFPRINT(tmp.authority()));
+ mb.appendf("%s://" SQUIDSBUFPH, encrypt ? "https" : "http", SQUIDSBUFPRINT(tmp.authority()));
if (dir)
mb.append(dir, strlen(dir));
@@ -132,7 +132,10 @@ internalRemoteUri(const char *host, unsigned short port, const char *dir, const
char *
internalLocalUri(const char *dir, const SBuf &name)
{
- return internalRemoteUri(getMyHostname(),
+ // XXX: getMy*() may return https_port info, but we force http URIs
+ // because we have not checked whether the callers can handle https.
+ const bool secure = false;
+ return internalRemoteUri(secure, getMyHostname(),
getMyPort(), dir, name);
}
diff --git a/src/internal.h b/src/internal.h
index c91f9acabc..13a43a63f5 100644
--- a/src/internal.h
+++ b/src/internal.h
@@ -24,7 +24,7 @@ void internalStart(const Comm::ConnectionPointer &clientConn, HttpRequest *, Sto
bool internalCheck(const SBuf &urlPath);
bool internalStaticCheck(const SBuf &urlPath);
char *internalLocalUri(const char *dir, const SBuf &name);
-char *internalRemoteUri(const char *, unsigned short, const char *, const SBuf &);
+char *internalRemoteUri(bool, const char *, unsigned short, const char *, const SBuf &);
const char *internalHostname(void);
int internalHostnameIs(const char *);
diff --git a/src/peer_digest.cc b/src/peer_digest.cc
index 36a8705ec0..f515aaa0ee 100644
--- a/src/peer_digest.cc
+++ b/src/peer_digest.cc
@@ -323,7 +323,7 @@ peerDigestRequest(PeerDigest * pd)
if (p->digest_url)
url = xstrdup(p->digest_url);
else
- url = xstrdup(internalRemoteUri(p->host, p->http_port, "/squid-internal-periodic/", SBuf(StoreDigestFileName)));
+ url = xstrdup(internalRemoteUri(p->secure.encryptTransport, p->host, p->http_port, "/squid-internal-periodic/", SBuf(StoreDigestFileName)));
debugs(72, 2, url);
const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initCacheDigest);
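
The patch threads a single boolean from the cache_peer's TLS setting (p->secure.encryptTransport) down to the URL builder so that the scheme matches the transport the peer expects. A toy illustration of that scheme selection; remoteUri() here is a stand-in, not Squid's internalRemoteUri(), which additionally normalizes the host and applies append_domain before building the authority.

    #include <cstdio>
    #include <string>

    static std::string remoteUri(bool encrypt, const std::string &authority, const std::string &path)
    {
        return std::string(encrypt ? "https" : "http") + "://" + authority + path;
    }

    int main()
    {
        // A TLS cache_peer now yields an https netdb URL instead of clear-text http:
        std::printf("%s\n", remoteUri(true, "peer.example.net:3130",
                                      "/squid-internal-dynamic/netdb").c_str());
        return 0;
    }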

@ -1,25 +0,0 @@
From e10887e67fe84f52fa28d8c9b9e3d91e118fee3c Mon Sep 17 00:00:00 2001
From: Vadim Salavatov <xremmargorpx@gmail.com>
Date: Tue, 6 Aug 2019 23:11:36 +0000
Subject: [PATCH] Bug 4978: eCAP crash after using MyHost().newRequest() (#449)
Since commit 8babada, Squid was using a c_str() result after its
std::string toString() source went out of scope.
---
src/adaptation/ecap/MessageRep.cc | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/src/adaptation/ecap/MessageRep.cc b/src/adaptation/ecap/MessageRep.cc
index 96af88c89b..039f9e1ef4 100644
--- a/src/adaptation/ecap/MessageRep.cc
+++ b/src/adaptation/ecap/MessageRep.cc
@@ -200,8 +200,7 @@ Adaptation::Ecap::RequestLineRep::uri(const Area &aUri)
{
// TODO: if method is not set, AnyP::Uri::parse will assume it is not connect;
// Can we change AnyP::Uri::parse API to remove the method parameter?
- const char *buf = aUri.toString().c_str();
- const bool ok = theMessage.url.parse(theMessage.method, buf);
+ const auto ok = theMessage.url.parse(theMessage.method, aUri.toString().c_str());
Must(ok);
}
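
The underlying defect is a common C++ lifetime bug: aUri.toString() returns a temporary std::string, so the c_str() pointer saved from it dangles as soon as the declaration statement ends, and the later parse() call reads freed memory. Keeping the call inside one full-expression, as the one-line fix does, keeps the buffer alive long enough. A self-contained illustration follows; makeUri() and parse() are stand-ins for aUri.toString() and theMessage.url.parse().

    #include <cstring>
    #include <string>

    static std::string makeUri() { return "http://example.com/"; }
    static bool parse(const char *uri) { return uri && std::strlen(uri) > 0; }

    int main()
    {
        // Buggy pattern removed by the patch: the std::string temporary dies at
        // the end of the declaration, so `buf` dangles before it is used.
        //   const char *buf = makeUri().c_str();
        //   parse(buf);                       // use-after-free
        //
        // Fixed pattern: the temporary lives for the whole full-expression.
        const bool ok = parse(makeUri().c_str());
        return ok ? 0 : 1;
    }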

@ -6,5 +6,5 @@ index 90ac6a4..8dbed90 100755
-#!/usr/local/bin/perl -Tw
+#!/usr/bin/perl -Tw
#
-# * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
+# * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
# *

squid-4.2.tar.xz (deleted binary file; contents not shown)
@ -1,25 +0,0 @@
File: squid-4.2.tar.xz
Date: Sun Aug 5 15:04:50 UTC 2018
Size: 2426856
MD5 : 2cf3f5f183d04322d798f98ea5ead43f
SHA1: 77fd6f06e6028780faf85ba7e7e9aef0922e9dc0
Key : CD6DBF8EF3B17D3E <squid3@treenet.co.nz>
B068 84ED B779 C89B 044E 64E3 CD6D BF8E F3B1 7D3E
keyring = http://www.squid-cache.org/pgp.asc
keyserver = pool.sks-keyservers.net
-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAltnEhsACgkQzW2/jvOx
fT5elRAAvdmTD1kxfE4CZEmp2SJ1KgfMaCXehFFtFYRbdi8oNYoN5u75mwgo2VO1
WmBedYJ3c9UKyaqu5AIc60WRIE+boFBy0vvaJO5T+wl3Ay1SKkuS5253dllxg6RT
r+1Kh9j72rchyBTerWClAydH5PRc9r5fNOzufJGTV8s/SCoAD+dxZHFZT6kad9U1
n0+vcOM1I8dJCVNDakTXqY0zou/gURLfl7EJW/PYkHatJitXazoRVPROF0G1U4Xh
+/wDGj+1+bUEj7K4YIJ2/LfftnVY+c85UiG7URFZ32uNJx8aM7zevfSePdgN6U7c
DUBvtIScI/b2NOgp3scNKuDFs61aLIx8qOjXa603xQs2xbsufCaTv8vFnEz/oO0F
+924pcT3Fbh3vYe3iWP9MejaFf2dDF+1OAGt2sY/LX10VO68bEGpF0fAWGbyDEOC
HGk5cNdfUIK3TISbTOGzG7X3ysZ+5/UxtX4Q1y7x9vBVXGfdZrixfWG9BZkcYyxz
SvykpFe9WvNw1zoIUED/8Sf1FgK0f+WGrpyCnDSDnUrJ26NZOtTkXkJgf239yNBH
KUNgaTxH20Ix8mPGDohT5QgmvpULz0r5ZecHkeoYzyxvF8c6EFOdVt6Iq4jdOT3v
QpcZhfxQhg8yklE+cQjbD0Qywc0QIu19Uzh5URDu4UrarsVVpKM=
=I3Em
-----END PGP SIGNATURE-----

squid-4.9.tar.xz (new binary file; contents not shown)

squid-4.9.tar.xz.asc (new file, 25 lines)
@ -0,0 +1,25 @@
File: squid-4.9.tar.xz
Date: Wed Nov 6 04:57:57 UTC 2019
Size: 2444664
MD5 : 5c2e335dd1e8ced9dda6e0e11894b344
SHA1: 43c90a1a2eb4d1613f1bfc603ad08e8a835be319
Key : CD6DBF8EF3B17D3E <squid3@treenet.co.nz>
B068 84ED B779 C89B 044E 64E3 CD6D BF8E F3B1 7D3E
keyring = http://www.squid-cache.org/pgp.asc
keyserver = pool.sks-keyservers.net
-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEEsGiE7bd5yJsETmTjzW2/jvOxfT4FAl3CUt8ACgkQzW2/jvOx
fT4Wnw/+Osf9VTnDFj5g/eXgb6vhzDaSLVfPNKLI6mF46a6twTvlMcM1+sX+b2Of
KXznDkUHvhIHijXGVbscSWx6Rn2tuPGDRRtDucqK98bYUo7mhEpdGtkVE7t8U3iz
wIKm7Hbr8qar4nJDLoZiZSCswI+UTcYncUuAqZ0O8LGIK0m6aYYDSS4bRq04yiS2
1JD0UEWW35X35hoVuhGlRRgvLzKn8F4KFeDde0gg+cqvkM0LR2+xkUqz6DcyE34m
8uK6hlABu32Zj+9oRBvNNcDOr2bfNYsbS4tAy635thFTyGUF7jjrOEXhl2SYrDY5
gVRzXHq/WBQ5rjTdmwvfn3wcwA1BQK/Oru6OaTFGaSrRlmJJM3JUFQWSsYWm8ARV
BJEGy8iQ9R41Yom2Ct8SOhwg7f3fBlFnK+BB8En+8s+fEa8z5rVmmjh1Es8qm6Tj
C/xGTZ23C4lUveKznDhc8MR2M4jjsH77Y7K/PvJUjZ/yYNpwsOwhv7fs51v70S5Q
4wC+ykpsmwckmOajrkOnupUN9Un2FzfyOctTt6PQkmwlq++09Jwxwg36O+KLDX08
f48F/qCCJ4bubuhFjM/A+cwVev0nAp0haSV0jpbemAHwzog21O51l70B8qUe18jp
XKYpbp3zCJ5cNmrAummsEVaj2ZCsH5ZHxTUIwvJDIS5b0OFn/lo=
=LNc9
-----END PGP SIGNATURE-----

@ -1,14 +1,14 @@
%define __perl_requires %{SOURCE8}
Name: squid
-Version: 4.2
+Version: 4.9
-Release: 4
+Release: 1
Summary: The Squid proxy caching server
Epoch: 7
License: GPLv2+ and (LGPLv2+ and MIT and BSD and Public Domain)
URL: http://www.squid-cache.org
-Source0: http://www.squid-cache.org/Versions/v4/squid-4.2.tar.xz
+Source0: http://www.squid-cache.org/Versions/v4/squid-4.9.tar.xz
-Source1: http://www.squid-cache.org/Versions/v4/squid-4.2.tar.xz.asc
+Source1: http://www.squid-cache.org/Versions/v4/squid-4.9.tar.xz.asc
Source2: squid.logrotate
Source3: squid.sysconfig
Source4: squid.pam
@ -23,19 +23,7 @@ Patch2: squid-3.0.STABLE1-perlpath.patch
Patch3: squid-3.5.9-include-guards.patch
Patch4: squid-4.0.21-large-acl.patch
-Patch6000: CVE-2019-12525.patch
-Patch6001: CVE-2019-12527.patch
-Patch6002: CVE-2019-12529.patch
-Patch6003: CVE-2019-12854.patch
-Patch6004: CVE-2019-13345.patch
-Patch6005: CVE-2019-18677.patch
-Patch6006: eCAP-crash-after-using-MyHost.newRequest.patch
-Patch6007: Fix-netdb-exchange-with-a-TLS-cache_peer.patch
-Patch6008: CVE-2019-12523_CVE-2019-18676.patch
-Patch6009: CVE-2019-12526.patch
-Patch6010: CVE-2019-18678_CVE-2019-18679.patch
-Buildroot: %{_tmppath}/squid-4.2-2-root-%(%{__id_u} -n)
+Buildroot: %{_tmppath}/squid-4.9-1-root-%(%{__id_u} -n)
Requires: bash >= 2.0
Requires(pre): shadow-utils
Requires(post): /sbin/chkconfig
@ -212,6 +200,12 @@ fi
chgrp squid /var/cache/samba/winbindd_privileged >/dev/null 2>&1 || :
%changelog
+* Tue Jan 14 2020 openEuler Buildteam <buildteam@openeuler.org> - 4.9-1
+- Type:NA
+- ID:NA
+- SUG:NA
+- DESC:Package upgrade
* Fri Dec 20 2019 openEuler Buildteam <buildteam@openeuler.org>- 4.2-4
- Type:bugfix
- ID: