diff --git a/backport-0001-CVE-2023-46219.patch b/backport-0001-CVE-2023-46219.patch
index 556d00f..2e2ae77 100644
--- a/backport-0001-CVE-2023-46219.patch
+++ b/backport-0001-CVE-2023-46219.patch
@@ -10,7 +10,7 @@
 Reported-by: Maksymilian Arciemowicz
 Closes #12388
 
-Conflict:Curl_rand_alnum -> Curl_rand_hex
+Conflict:NA
 Reference:https://github.com/curl/curl/commit/73b65e94f3531179de45c6f3c836a610e3d0a846
 ---
  lib/fopen.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++-----
@@ -99,8 +99,8 @@ index 75b8a7aa5..a73ac068e 100644
      fclose(*fh);
      *fh = NULL;
 
--  result = Curl_rand_hex(data, randsuffix, sizeof(randsuffix));
-+  result = Curl_rand_hex(data, randbuf, sizeof(randbuf));
+-  result = Curl_rand_alnum(data, randsuffix, sizeof(randsuffix));
++  result = Curl_rand_alnum(data, randbuf, sizeof(randbuf));
    if(result)
      goto fail;
diff --git a/backport-CVE-2023-32001.patch b/backport-CVE-2023-32001.patch
deleted file mode 100644
index 8827596..0000000
--- a/backport-CVE-2023-32001.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 0c667188e0c6cda615a036b8a2b4125f2c404dde Mon Sep 17 00:00:00 2001
-From: SaltyMilk
-Date: Mon, 10 Jul 2023 21:43:28 +0200
-Subject: [PATCH] fopen: optimize
-
-Closes #11419
----
- lib/fopen.c | 12 ++++++------
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
-diff --git a/lib/fopen.c b/lib/fopen.c
-index c9c9e3d6e..b6e3caddd 100644
---- a/lib/fopen.c
-+++ b/lib/fopen.c
-@@ -56,13 +56,13 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
-   int fd = -1;
-   *tempname = NULL;
-
--  if(stat(filename, &sb) == -1 || !S_ISREG(sb.st_mode)) {
--    /* a non-regular file, fallback to direct fopen() */
--    *fh = fopen(filename, FOPEN_WRITETEXT);
--    if(*fh)
--      return CURLE_OK;
-+  *fh = fopen(filename, FOPEN_WRITETEXT);
-+  if(!*fh)
-     goto fail;
--  }
-+  if(fstat(fileno(*fh), &sb) == -1 || !S_ISREG(sb.st_mode))
-+    return CURLE_OK;
-+  fclose(*fh);
-+  *fh = NULL;
-
-   result = Curl_rand_hex(data, randsuffix, sizeof(randsuffix));
-   if(result)
---
-2.33.0
-
diff --git a/backport-CVE-2023-38039.patch b/backport-CVE-2023-38039.patch
deleted file mode 100644
index 03a879b..0000000
--- a/backport-CVE-2023-38039.patch
+++ /dev/null
@@ -1,212 +0,0 @@
-From 3ee79c1674fd6f99e8efca52cd7510e08b766770 Mon Sep 17 00:00:00 2001
-From: Daniel Stenberg
-Date: Wed, 2 Aug 2023 23:34:48 +0200
-Subject: [PATCH] http: return error when receiving too large header set
-
-To avoid abuse. The limit is set to 300 KB for the accumulated size of
-all received HTTP headers for a single response. Incomplete research
-suggests that Chrome uses a 256-300 KB limit, while Firefox allows up to
-1MB.
-
-Closes #11582
----
- lib/c-hyper.c | 12 +++++++-----
- lib/cf-h1-proxy.c | 4 +++-
- lib/http.c | 34 ++++++++++++++++++++++++++++++----
- lib/http.h | 9 +++++++++
- lib/pingpong.c | 4 +++-
- lib/urldata.h | 17 ++++++++---------
- 6 files changed, 60 insertions(+), 20 deletions(-)
-
-diff --git a/lib/c-hyper.c b/lib/c-hyper.c
-index c29983c0b24a6..0b9d9ab478e67 100644
---- a/lib/c-hyper.c
-+++ b/lib/c-hyper.c
-@@ -182,8 +182,11 @@ static int hyper_each_header(void *userdata,
-     }
-   }
-
--  data->info.header_size += (curl_off_t)len;
--  data->req.headerbytecount += (curl_off_t)len;
-+  result = Curl_bump_headersize(data, len, FALSE);
-+  if(result) {
-+    data->state.hresult = result;
-+    return HYPER_ITER_BREAK;
-+  }
-   return HYPER_ITER_CONTINUE;
- }
-
-@@ -313,9 +316,8 @@ static CURLcode status_line(struct Curl_easy *data,
-     if(result)
-       return result;
-   }
--  data->info.header_size += (curl_off_t)len;
--  data->req.headerbytecount += (curl_off_t)len;
--  return CURLE_OK;
-+  result = Curl_bump_headersize(data, len, FALSE);
-+  return result;
- }
-
- /*
-diff --git a/lib/cf-h1-proxy.c b/lib/cf-h1-proxy.c
-index c9b157c9bccc7..b1d8cb618b7d1 100644
---- a/lib/cf-h1-proxy.c
-+++ b/lib/cf-h1-proxy.c
-@@ -587,7 +587,9 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
-       return result;
-     }
-
--    data->info.header_size += (long)perline;
-+    result = Curl_bump_headersize(data, perline, TRUE);
-+    if(result)
-+      return result;
-
-     /* Newlines are CRLF, so the CR is ignored as the line isn't
-        really terminated until the LF comes. Treat a following CR
-diff --git a/lib/http.c b/lib/http.c
-index f7c71afd7d847..bc78ff97435c4 100644
---- a/lib/http.c
-+++ b/lib/http.c
-@@ -3920,6 +3920,29 @@ static CURLcode verify_header(struct Curl_easy *data)
-   return CURLE_OK;
- }
-
-+CURLcode Curl_bump_headersize(struct Curl_easy *data,
-+                              size_t delta,
-+                              bool connect_only)
-+{
-+  size_t bad = 0;
-+  if(delta < MAX_HTTP_RESP_HEADER_SIZE) {
-+    if(!connect_only)
-+      data->req.headerbytecount += (unsigned int)delta;
-+    data->info.header_size += (unsigned int)delta;
-+    if(data->info.header_size > MAX_HTTP_RESP_HEADER_SIZE)
-+      bad = data->info.header_size;
-+  }
-+  else
-+    bad = data->info.header_size + delta;
-+  if(bad) {
-+    failf(data, "Too large response headers: %zu > %zu",
-+          bad, MAX_HTTP_RESP_HEADER_SIZE);
-+    return CURLE_RECV_ERROR;
-+  }
-+  return CURLE_OK;
-+}
-+
-+
- /*
-  * Read any HTTP header lines from the server and pass them to the client app.
-  */
-@@ -4173,8 +4196,9 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
-       if(result)
-         return result;
-
--      data->info.header_size += (long)headerlen;
--      data->req.headerbytecount += (long)headerlen;
-+      result = Curl_bump_headersize(data, headerlen, FALSE);
-+      if(result)
-+        return result;
-
-       /*
-        * When all the headers have been parsed, see if we should give
-@@ -4496,8 +4520,10 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
-     if(result)
-       return result;
-
--    data->info.header_size += Curl_dyn_len(&data->state.headerb);
--    data->req.headerbytecount += Curl_dyn_len(&data->state.headerb);
-+    result = Curl_bump_headersize(data, Curl_dyn_len(&data->state.headerb),
-+                                  FALSE);
-+    if(result)
-+      return result;
-
-     Curl_dyn_reset(&data->state.headerb);
-   }
-diff --git a/lib/http.h b/lib/http.h
-index df3b4e38b8a88..4aeabc345938c 100644
---- a/lib/http.h
-+++ b/lib/http.h
-@@ -64,6 +64,10 @@ extern const struct Curl_handler Curl_handler_wss;
-
- struct dynhds;
-
-+CURLcode Curl_bump_headersize(struct Curl_easy *data,
-+                              size_t delta,
-+                              bool connect_only);
-+
- /* Header specific functions */
- bool Curl_compareheader(const char *headerline, /* line to check */
-                         const char *header, /* header keyword _with_ colon */
-@@ -183,6 +187,11 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data);
- #define EXPECT_100_THRESHOLD (1024*1024)
- #endif
-
-+/* MAX_HTTP_RESP_HEADER_SIZE is the maximum size of all response headers
-+   combined that libcurl allows for a single HTTP response, any HTTP
-+   version. This count includes CONNECT response headers. */
-+#define MAX_HTTP_RESP_HEADER_SIZE (300*1024)
-+
- #endif /* CURL_DISABLE_HTTP */
-
- /****************************************************************************
-diff --git a/lib/pingpong.c b/lib/pingpong.c
-index f3f7cb93cb9b7..523bbec189fe6 100644
---- a/lib/pingpong.c
-+++ b/lib/pingpong.c
-@@ -341,7 +341,9 @@ CURLcode Curl_pp_readresp(struct Curl_easy *data,
-   ssize_t clipamount = 0;
-   bool restart = FALSE;
-
--  data->req.headerbytecount += (long)gotbytes;
-+  result = Curl_bump_headersize(data, gotbytes, FALSE);
-+  if(result)
-+    return result;
-
-   pp->nread_resp += gotbytes;
-   for(i = 0; i < gotbytes; ptr++, i++) {
-diff --git a/lib/urldata.h b/lib/urldata.h
-index e5446b6840f63..d21aa415dc94b 100644
---- a/lib/urldata.h
-+++ b/lib/urldata.h
-@@ -629,17 +629,16 @@ struct SingleRequest {
-   curl_off_t bytecount; /* total number of bytes read */
-   curl_off_t writebytecount; /* number of bytes written */
-
--  curl_off_t headerbytecount; /* only count received headers */
--  curl_off_t deductheadercount; /* this amount of bytes doesn't count when we
--                                   check if anything has been transferred at
--                                   the end of a connection. We use this
--                                   counter to make only a 100 reply (without a
--                                   following second response code) result in a
--                                   CURLE_GOT_NOTHING error code */
--
-   curl_off_t pendingheader; /* this many bytes left to send is actually
-                                header and not body */
-   struct curltime start; /* transfer started at this time */
-+  unsigned int headerbytecount; /* only count received headers */
-+  unsigned int deductheadercount; /* this amount of bytes doesn't count when
-+                                     we check if anything has been transferred
-+                                     at the end of a connection. We use this
-+                                     counter to make only a 100 reply (without
-+                                     a following second response code) result
-+                                     in a CURLE_GOT_NOTHING error code */
-   enum {
-     HEADER_NORMAL, /* no bad header at all */
-     HEADER_PARTHEADER, /* part of the chunk is a bad header, the rest
-@@ -1089,7 +1088,6 @@ struct PureInfo {
-   int httpversion; /* the http version number X.Y = X*10+Y */
-   time_t filetime; /* If requested, this is might get set. Set to -1 if the
-                       time was unretrievable. */
--  curl_off_t header_size; /* size of read header(s) in bytes */
-   curl_off_t request_size; /* the amount of bytes sent in the request(s) */
-   unsigned long proxyauthavail; /* what proxy auth types were announced */
-   unsigned long httpauthavail; /* what host auth types were announced */
-@@ -1097,6 +1095,7 @@ struct PureInfo {
-   char *contenttype; /* the content type of the object */
-   char *wouldredirect; /* URL this would've been redirected to if asked to */
-   curl_off_t retry_after; /* info from Retry-After: header */
-+  unsigned int header_size; /* size of read header(s) in bytes */
-
-   /* PureInfo members 'conn_primary_ip', 'conn_primary_port', 'conn_local_ip'
-      and, 'conn_local_port' are copied over from the connectdata struct in
-
diff --git a/backport-CVE-2023-38545.patch b/backport-CVE-2023-38545.patch
deleted file mode 100644
index c15c273..0000000
--- a/backport-CVE-2023-38545.patch
+++ /dev/null
@@ -1,134 +0,0 @@
-From 92fd36dd54de9ac845549944692eb33c5aee7343 Mon Sep 17 00:00:00 2001
-From: Jay Satiro
-Date: Mon, 9 Oct 2023 17:15:44 -0400
-Subject: [PATCH] socks: return error if hostname too long for remote resolve
-
-Prior to this change the state machine attempted to change the remote
-resolve to a local resolve if the hostname was longer than 255
-characters. Unfortunately that did not work as intended and caused a
-security issue.
-
-This patch applies to curl versions 7.87.0 - 8.1.2. Other versions
-that are affected take a different patch. Refer to the CVE advisory
-for more information.
-
-Bug: https://curl.se/docs/CVE-2023-38545.html
----
- lib/socks.c | 8 ++++----
- tests/data/Makefile.inc | 2 +-
- tests/data/test728 | 64 +++++++++++++++++++++++++++++++++++++++++++++++++
- 3 files changed, 69 insertions(+), 5 deletions(-)
- create mode 100644 tests/data/test728
-
-diff --git a/lib/socks.c b/lib/socks.c
-index d491e08..e7da5b4 100644
---- a/lib/socks.c
-+++ b/lib/socks.c
-@@ -539,9 +539,9 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf,
-
-   /* RFC1928 chapter 5 specifies max 255 chars for domain name in packet */
-   if(!socks5_resolve_local && hostname_len > 255) {
--    infof(data, "SOCKS5: server resolving disabled for hostnames of "
--          "length > 255 [actual len=%zu]", hostname_len);
--    socks5_resolve_local = TRUE;
-+    failf(data, "SOCKS5: the destination hostname is too long to be "
-+          "resolved remotely by the proxy.");
-+    return CURLPX_LONG_HOSTNAME;
-   }
-
-   if(auth & ~(CURLAUTH_BASIC | CURLAUTH_GSSAPI))
-@@ -882,7 +882,7 @@ static CURLproxycode do_SOCKS5(struct Curl_cfilter *cf,
-     }
-     else {
-       socksreq[len++] = 3;
--      socksreq[len++] = (char) hostname_len; /* one byte address length */
-+      socksreq[len++] = (unsigned char) hostname_len; /* one byte length */
-       memcpy(&socksreq[len], sx->hostname, hostname_len); /* w/o NULL */
-       len += hostname_len;
-     }
-diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
-index 3e0221a..64b11de 100644
---- a/tests/data/Makefile.inc
-+++ b/tests/data/Makefile.inc
-@@ -99,7 +99,7 @@ test679 test680 test681 test682 test683 test684 test685 \
- \
- test700 test701 test702 test703 test704 test705 test706 test707 test708 \
- test709 test710 test711 test712 test713 test714 test715 test716 test717 \
--test718 test719 test720 test721 \
-+test718 test719 test720 test721 test728 \
- \
- test800 test801 test802 test803 test804 test805 test806 test807 test808 \
- test809 test810 test811 test812 test813 test814 test815 test816 test817 \
-diff --git a/tests/data/test728 b/tests/data/test728
-new file mode 100644
-index 0000000..05bcf28
---- /dev/null
-+++ b/tests/data/test728
-@@ -0,0 +1,64 @@
-+<testcase>
-+<info>
-+<keywords>
-+HTTP
-+HTTP GET
-+SOCKS5
-+SOCKS5h
-+followlocation
-+</keywords>
-+</info>
-+
-+#
-+# Server-side
-+<reply>
-+# The hostname in this redirect is 256 characters and too long (> 255) for
-+# SOCKS5 remote resolve. curl must return error CURLE_PROXY in this case.
-+<data>
-+HTTP/1.1 301 Moved Permanently
-+Location: http://AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/
-+Content-Length: 0
-+Connection: close
-+
-+</data>
-+</reply>
-+
-+#
-+# Client-side
-+<client>
-+<features>
-+proxy
-+</features>
-+<server>
-+http
-+socks5
-+</server>
-+<name>
-+SOCKS5h with HTTP redirect to hostname too long
-+</name>
-+<command>
-+--no-progress-meter --location --proxy socks5h://%HOSTIP:%SOCKSPORT http://%HOSTIP:%HTTPPORT/%TESTNUMBER
-+</command>
-+</client>
-+
-+#
-+# Verify data after the test has been "shot"
-+<verify>
-+<protocol>
-+GET /%TESTNUMBER HTTP/1.1
-+Host: %HOSTIP:%HTTPPORT
-+User-Agent: curl/%VERSION
-+Accept: */*
-+
-+</protocol>
-+<errorcode>
-+97
-+</errorcode>
-+# the error message is verified because error code CURLE_PROXY (97) may be
-+# returned for any number of reasons and we need to make sure it is
-+# specifically for the reason below so that we know the check is working.
-+<stderr>
-+curl: (97) SOCKS5: the destination hostname is too long to be resolved remotely by the proxy.
-+</stderr>
-+</verify>
-+</testcase>
---
-2.7.4
-
diff --git a/backport-CVE-2023-38546.patch b/backport-CVE-2023-38546.patch
deleted file mode 100644
index 8159462..0000000
--- a/backport-CVE-2023-38546.patch
+++ /dev/null
@@ -1,128 +0,0 @@
-From 61275672b46d9abb3285740467b882e22ed75da8 Mon Sep 17 00:00:00 2001
-From: Daniel Stenberg
-Date: Thu, 14 Sep 2023 23:28:32 +0200
-Subject: [PATCH] cookie: remove unnecessary struct fields
-
-Plus: reduce the hash table size from 256 to 63. It seems unlikely to
-make much of a speed difference for most use cases but saves 1.5KB of
-data per instance.
-
-Closes #11862
----
- lib/cookie.c | 13 +------------
- lib/cookie.h | 13 ++++---------
- lib/easy.c | 4 +---
- 3 files changed, 6 insertions(+), 24 deletions(-)
-
-diff --git a/lib/cookie.c b/lib/cookie.c
-index 4345a84c6fd9d2..e39c89a94a960d 100644
---- a/lib/cookie.c
-+++ b/lib/cookie.c
-@@ -119,7 +119,6 @@ static void freecookie(struct Cookie *co)
-   free(co->name);
-   free(co->value);
-   free(co->maxage);
--  free(co->version);
-   free(co);
- }
-
-@@ -718,11 +717,7 @@ Curl_cookie_add(struct Curl_easy *data,
-       }
-     }
-     else if((nlen == 7) && strncasecompare("version", namep, 7)) {
--      strstore(&co->version, valuep, vlen);
--      if(!co->version) {
--        badcookie = TRUE;
--        break;
--      }
-+      /* just ignore */
-     }
-     else if((nlen == 7) && strncasecompare("max-age", namep, 7)) {
-       /*
-@@ -1160,7 +1155,6 @@ Curl_cookie_add(struct Curl_easy *data,
-         free(clist->path);
-         free(clist->spath);
-         free(clist->expirestr);
--        free(clist->version);
-         free(clist->maxage);
-
-         *clist = *co; /* then store all the new data */
-@@ -1224,9 +1218,6 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
-     c = calloc(1, sizeof(struct CookieInfo));
-     if(!c)
-       return NULL; /* failed to get memory */
--    c->filename = strdup(file?file:"none"); /* copy the name just in case */
--    if(!c->filename)
--      goto fail; /* failed to get memory */
-     /*
-      * Initialize the next_expiration time to signal that we don't have enough
-      * information yet.
-@@ -1378,7 +1369,6 @@ static struct Cookie *dup_cookie(struct Cookie *src)
-     CLONE(name);
-     CLONE(value);
-     CLONE(maxage);
--    CLONE(version);
-     d->expires = src->expires;
-     d->tailmatch = src->tailmatch;
-     d->secure = src->secure;
-@@ -1595,7 +1585,6 @@ void Curl_cookie_cleanup(struct CookieInfo *c)
- {
-   if(c) {
-     unsigned int i;
--    free(c->filename);
-     for(i = 0; i < COOKIE_HASH_SIZE; i++)
-       Curl_cookie_freelist(c->cookies[i]);
-     free(c); /* free the base struct as well */
-diff --git a/lib/cookie.h b/lib/cookie.h
-index b3c0063b2cfb25..41e9e7a6914e0a 100644
---- a/lib/cookie.h
-+++ b/lib/cookie.h
-@@ -36,11 +36,7 @@ struct Cookie {
-   char *domain;      /* domain = <this> */
-   curl_off_t expires;  /* expires = <this> */
-   char *expirestr;   /* the plain text version */
--
--  /* RFC 2109 keywords. Version=1 means 2109-compliant cookie sending */
--  char *version;     /* Version = <value> */
-   char *maxage;      /* Max-Age = <value> */
--
-   bool tailmatch;    /* whether we do tail-matching of the domain name */
-   bool secure;       /* whether the 'secure' keyword was used */
-   bool livecookie;   /* updated from a server, not a stored file */
-@@ -56,17 +52,16 @@ struct Cookie {
- #define COOKIE_PREFIX__SECURE (1<<0)
- #define COOKIE_PREFIX__HOST (1<<1)
-
--#define COOKIE_HASH_SIZE 256
-+#define COOKIE_HASH_SIZE 63
-
- struct CookieInfo {
-   /* linked list of cookies we know of */
-   struct Cookie *cookies[COOKIE_HASH_SIZE];
--  char *filename;  /* file we read from/write to */
--  long numcookies; /* number of cookies in the "jar" */
-+  curl_off_t next_expiration; /* the next time at which expiration happens */
-+  int numcookies;  /* number of cookies in the "jar" */
-+  int lastct;      /* last creation-time used in the jar */
-   bool running;    /* state info, for cookie adding information */
-   bool newsession; /* new session, discard session cookies on load */
--  int lastct;      /* last creation-time used in the jar */
--  curl_off_t next_expiration; /* the next time at which expiration happens */
- };
-
- /* The maximum sizes we accept for cookies. RFC 6265 section 6.1 says
-diff --git a/lib/easy.c b/lib/easy.c
-index 16bbd35251d408..03195481f9780a 100644
---- a/lib/easy.c
-+++ b/lib/easy.c
-@@ -925,9 +925,7 @@ struct Curl_easy *curl_easy_duphandle(struct Curl_easy *data)
-   if(data->cookies) {
-     /* If cookies are enabled in the parent handle, we enable them
-        in the clone as well! */
--    outcurl->cookies = Curl_cookie_init(data,
--                                        data->cookies->filename,
--                                        outcurl->cookies,
-+    outcurl->cookies = Curl_cookie_init(data, NULL, outcurl->cookies,
-                                         data->set.cookiesession);
-     if(!outcurl->cookies)
-       goto fail;
diff --git a/backport-transfer-also-stop-the-sending-on-closed-connection.patch b/backport-transfer-also-stop-the-sending-on-closed-connection.patch
deleted file mode 100644
index 473b724..0000000
--- a/backport-transfer-also-stop-the-sending-on-closed-connection.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From 95a865b462195d9d847f7f2676f0c789179e2073 Mon Sep 17 00:00:00 2001
-From: Daniel Stenberg
-Date: Mon, 4 Sep 2023 14:14:32 +0200
-Subject: [PATCH] transfer: also stop the sending on closed connection
-
-Previously this cleared the receiving bit only but in some cases it is
-also still sending (like a request-body) when disconnected and neither
-direction can continue then.
-
-Fixes #11769
-Reported-by: Oleg Jukovec
-Closes #11795
-
-Conflict: NA
-Reference: https://github.com/curl/curl/commit/95a865b462195d9d847f7f2676f0c789179e2073
----
- lib/transfer.c | 7 ++++---
- 1 file changed, 4 insertions(+), 3 deletions(-)
-
-diff --git a/lib/transfer.c b/lib/transfer.c
-index fb0a6a45d..d0602b875 100644
---- a/lib/transfer.c
-+++ b/lib/transfer.c
-@@ -492,15 +492,16 @@ static CURLcode readwrite_data(struct Curl_easy *data,
-     if(0 < nread || is_empty_data) {
-       buf[nread] = 0;
-     }
--    else {
-+    if(!nread) {
-       /* if we receive 0 or less here, either the data transfer is done or the
-          server closed the connection and we bail out from this! */
-       if(data_eof_handled)
-         DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
-       else
-         DEBUGF(infof(data, "nread <= 0, server closed connection, bailing"));
--      k->keepon &= ~KEEP_RECV;
--      break;
-+      k->keepon = 0; /* stop sending as well */
-+      if(!is_empty_data)
-+        break;
-     }
-
-     /* Default buffer to use when we write the buffer, it may be changed
---
-2.33.0
-
diff --git a/backport-urlapi-make-sure-zoneid-is-also-duplicated-in-curl_u.patch b/backport-urlapi-make-sure-zoneid-is-also-duplicated-in-curl_u.patch
deleted file mode 100644
index 129e9ce..0000000
--- a/backport-urlapi-make-sure-zoneid-is-also-duplicated-in-curl_u.patch
+++ /dev/null
@@ -1,112 +0,0 @@
-From 49e244318672c688097c1bf601a110005cd9a6a8 Mon Sep 17 00:00:00 2001
-From: Daniel Stenberg
-Date: Mon, 31 Jul 2023 10:07:35 +0200
-Subject: [PATCH] urlapi: make sure zoneid is also duplicated in curl_url_dup
-
-Add several curl_url_dup() tests to the general lib1560 test.
-
-Reported-by: Rutger Broekhoff
-Bug: https://curl.se/mail/lib-2023-07/0047.html
-Closes #11549
-
-Conflict: tests/libtest/lib1560.c for context adapt
-Reference: https://github.com/curl/curl/commit/49e244318672c688097c1bf601a110005cd9a6a8
----
- lib/urlapi.c | 1 +
- tests/libtest/lib1560.c | 67 +++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 68 insertions(+)
-
-diff --git a/lib/urlapi.c b/lib/urlapi.c
-index cd423c335d88f..b1a126d548213 100644
---- a/lib/urlapi.c
-+++ b/lib/urlapi.c
-@@ -1385,6 +1385,7 @@ CURLU *curl_url_dup(const CURLU *in)
-     DUP(u, in, path);
-     DUP(u, in, query);
-     DUP(u, in, fragment);
-+    DUP(u, in, zoneid);
-     u->portnum = in->portnum;
-   }
-   return u;
-diff --git a/tests/libtest/lib1560.c b/tests/libtest/lib1560.c
-index 0eca0fda72d0b..ff03bec9391a4 100644
---- a/tests/libtest/lib1560.c
-+++ b/tests/libtest/lib1560.c
-@@ -1672,10 +1672,77 @@ static int huge(void)
-   return error;
- }
-
-+static int urldup(void)
-+{
-+  const char *url[] = {
-+    "http://"
-+    "user:pwd@"
-+    "[2a04:4e42:e00::347%25eth0]"
-+    ":80"
-+    "/path"
-+    "?query"
-+    "#fraggie",
-+    "https://example.com",
-+    "https://user@example.com",
-+    "https://user.pwd@example.com",
-+    "https://user.pwd@example.com:1234",
-+    "https://example.com:1234",
-+    "example.com:1234",
-+    "https://user.pwd@example.com:1234/path?query#frag",
-+    NULL
-+  };
-+  CURLU *copy = NULL;
-+  char *h_str = NULL, *copy_str = NULL;
-+  CURLU *h = curl_url();
-+  int i;
-+
-+  if(!h)
-+    goto err;
-+
-+  for(i = 0; url[i]; i++) {
-+    CURLUcode rc = curl_url_set(h, CURLUPART_URL, url[i],
-+                                CURLU_GUESS_SCHEME);
-+    if(rc)
-+      goto err;
-+    copy = curl_url_dup(h);
-+
-+    rc = curl_url_get(h, CURLUPART_URL, &h_str, 0);
-+    if(rc)
-+      goto err;
-+
-+    rc = curl_url_get(copy, CURLUPART_URL, &copy_str, 0);
-+    if(rc)
-+      goto err;
-+
-+    if(strcmp(h_str, copy_str)) {
-+      printf("Original: %s\nParsed: %s\nCopy: %s\n",
-+             url[i], h_str, copy_str);
-+      goto err;
-+    }
-+    curl_free(copy_str);
-+    curl_free(h_str);
-+    curl_url_cleanup(copy);
-+    copy_str = NULL;
-+    h_str = NULL;
-+    copy = NULL;
-+  }
-+  curl_url_cleanup(h);
-+  return 0;
-+err:
-+  curl_free(copy_str);
-+  curl_free(h_str);
-+  curl_url_cleanup(copy);
-+  curl_url_cleanup(h);
-+  return 1;
-+}
-+
- int test(char *URL)
- {
-   (void)URL; /* not used */
-
-+  if(urldup())
-+    return 11;
-+
-   if(get_url())
-     return 3;
-
diff --git a/backport-vtls-avoid-memory-leak-if-sha256-call-fails.patch b/backport-vtls-avoid-memory-leak-if-sha256-call-fails.patch
deleted file mode 100644
index bf475bc..0000000
--- a/backport-vtls-avoid-memory-leak-if-sha256-call-fails.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From a4a5e438ae533c9af5e97457ae424c9189545105 Mon Sep 17 00:00:00 2001
-From: Daniel Stenberg
-Date: Mon, 12 Jun 2023 14:10:37 +0200
-Subject: [PATCH] vtls: avoid memory leak if sha256 call fails
-
-... in the pinned public key handling function.
-
-Reported-by: lizhuang0630 on github
-Fixes #11306
-Closes #11307
-
-Conflict: NA
-Reference: https://github.com/curl/curl/commit/a4a5e438ae533c9af5e97457ae424c9189545105
----
- lib/vtls/vtls.c | 12 +++++-------
- 1 file changed, 5 insertions(+), 7 deletions(-)
-
-diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c
-index a4ff7d61a6193..cdd3a4fdc1c14 100644
---- a/lib/vtls/vtls.c
-+++ b/lib/vtls/vtls.c
-@@ -907,14 +907,12 @@ CURLcode Curl_pin_peer_pubkey(struct Curl_easy *data,
-     if(!sha256sumdigest)
-       return CURLE_OUT_OF_MEMORY;
-     encode = Curl_ssl->sha256sum(pubkey, pubkeylen,
--                                sha256sumdigest, CURL_SHA256_DIGEST_LENGTH);
-+                                 sha256sumdigest, CURL_SHA256_DIGEST_LENGTH);
-
--    if(encode != CURLE_OK)
--      return encode;
--
--    encode = Curl_base64_encode((char *)sha256sumdigest,
--                                CURL_SHA256_DIGEST_LENGTH, &encoded,
--                                &encodedlen);
-+    if(!encode)
-+      encode = Curl_base64_encode((char *)sha256sumdigest,
-+                                  CURL_SHA256_DIGEST_LENGTH, &encoded,
-+                                  &encodedlen);
-     Curl_safefree(sha256sumdigest);
-
-     if(encode)
diff --git a/curl-8.1.2.tar.xz b/curl-8.1.2.tar.xz
deleted file mode 100644
index df364de..0000000
Binary files a/curl-8.1.2.tar.xz and /dev/null differ
diff --git a/curl-8.4.0.tar.xz b/curl-8.4.0.tar.xz
new file mode 100644
index 0000000..5f55505
Binary files /dev/null and b/curl-8.4.0.tar.xz differ
diff --git a/curl.spec b/curl.spec
index 71daee6..9c3d9e4 100644
--- a/curl.spec
+++ b/curl.spec
@@ -5,8 +5,8 @@
 %global _configure ../configure
 
 Name:    curl
-Version: 8.1.2
-Release: 7
+Version: 8.4.0
+Release: 1
 Summary: Curl is used in command lines or scripts to transfer data
 License: curl
 URL:     https://curl.se/
@@ -15,16 +15,9 @@ Source: https://curl.se/download/curl-%{version}.tar.xz
 Patch1: backport-0101-curl-7.32.0-multilib.patch
 Patch2: backport-curl-7.84.0-test3026.patch
 Patch4: backport-curl-7.88.0-tests-warnings.patch
-Patch5: backport-CVE-2023-32001.patch
-Patch6: backport-vtls-avoid-memory-leak-if-sha256-call-fails.patch
-Patch7: backport-urlapi-make-sure-zoneid-is-also-duplicated-in-curl_u.patch
-Patch8: backport-CVE-2023-38039.patch
-Patch9: backport-CVE-2023-38545.patch
-Patch10: backport-CVE-2023-38546.patch
 Patch11: backport-CVE-2023-46218.patch
 Patch12: backport-0001-CVE-2023-46219.patch
 Patch13: backport-0002-CVE-2023-46219.patch
-Patch14: backport-transfer-also-stop-the-sending-on-closed-connection.patch
 Patch15: backport-openssl-avoid-BN_num_bits-NULL-pointer-derefs.patch
 
 BuildRequires: automake brotli-devel coreutils gcc groff krb5-devel
@@ -210,6 +203,12 @@ rm -rf ${RPM_BUILD_ROOT}%{_libdir}/libcurl.la
 %{_mandir}/man3/*
 
 %changelog
+* Tue Jan 09 2024 zhouyihang - 8.4.0-1
+- Type:requirement
+- CVE:NA
+- SUG:NA
+- DESC:update curl to 8.4.0
+
 * Thu Dec 28 2023 zhouyihang - 8.1.2-7
 - Type:bugfix
 - CVE:NA