containerd: update vendor net/http package to fix CVE-2023-39325

zhongjiawei 2023-11-08 10:47:20 +08:00
parent 8aba0d5156
commit a90d673203
4 changed files with 345 additions and 2 deletions


@@ -2,7 +2,7 @@
 %global debug_package %{nil}
 Version: 1.6.22
 Name: containerd
-Release: 4
+Release: 5
 Summary: An industry-standard container runtime
 License: ASL 2.0
 URL: https://containerd.io
@@ -67,6 +67,12 @@ install -D -p -m 0644 %{S:7} %{buildroot}%{_sysconfdir}/containerd/config.toml
 %exclude %{_bindir}/containerd-stress
 %changelog
+* Wed Nov 08 2023 zhongjiawei<zhongjiawei1@huawei.com> - 1.6.22-5
+- Type:bugfix
+- ID:NA
+- SUG:NA
+- DESC:update vendor net/http package to fix CVE-2023-39325
+
 * Thu Oct 19 2023 zhongjiawei<zhongjiawei1@huawei.com> - 1.6.22-4
 - Type:bugfix
 - ID:NA


@@ -1 +1 @@
-d7a5b33a59e6736009eabe49861eaf58ccf70fe2
+5a7da853ca9b6dad3085882bcc32d7455c3db63a


@@ -0,0 +1,336 @@
From 96e6b8f40551e44f3d82d5e03cb9bd6d72d3191b Mon Sep 17 00:00:00 2001
From: Sebastiaan van Stijn <github@gone.nl>
Date: Mon, 16 Oct 2023 21:50:24 +0200
Subject: [PATCH] vendor: golang.org/x/net v0.17.0

full diff: https://github.com/golang/net/compare/v0.13.0...v0.17.0

This fixes the same CVE as go1.21.3 and go1.20.10:

- net/http: rapid stream resets can cause excessive work

  A malicious HTTP/2 client which rapidly creates requests and
  immediately resets them can cause excessive server resource consumption.
  While the total number of requests is bounded by the
  http2.Server.MaxConcurrentStreams setting, resetting an in-progress
  request allows the attacker to create a new request while the existing
  one is still executing.

  HTTP/2 servers now bound the number of simultaneously executing
  handler goroutines to the stream concurrency limit. New requests
  arriving when at the limit (which can only happen after the client
  has reset an existing, in-flight request) will be queued until a
  handler exits. If the request queue grows too large, the server
  will terminate the connection.

  This issue is also fixed in golang.org/x/net/http2 v0.17.0,
  for users manually configuring HTTP/2.

  The default stream concurrency limit is 250 streams (requests)
  per HTTP/2 connection. This value may be adjusted using the
  golang.org/x/net/http2 package; see the Server.MaxConcurrentStreams
  setting and the ConfigureServer function.

This is CVE-2023-39325 and Go issue https://go.dev/issue/63417.
This is also tracked by CVE-2023-44487.
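
For users manually configuring HTTP/2 via golang.org/x/net/http2, a
minimal sketch of lowering the stream concurrency limit on a standard
net/http server might look like the following (the address, certificate
and key paths, and the value 100 are placeholder assumptions):

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
        // Advertise a lower SETTINGS_MAX_CONCURRENT_STREAMS than the
        // default of 250; with this fix the server also bounds the
        // number of concurrently running handler goroutines to it.
        if err := http2.ConfigureServer(srv, &http2.Server{MaxConcurrentStreams: 100}); err != nil {
            log.Fatal(err)
        }
        log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
    }
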
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
---
vendor/golang.org/x/net/http2/Dockerfile | 51 -------------
vendor/golang.org/x/net/http2/Makefile | 3 -
vendor/golang.org/x/net/http2/server.go | 86 ++++++++++++++++++----
vendor/golang.org/x/net/http2/transport.go | 33 +++++++--
4 files changed, 97 insertions(+), 76 deletions(-)
delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile
delete mode 100644 vendor/golang.org/x/net/http2/Makefile
diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile
deleted file mode 100644
index 851224595..000000000
--- a/vendor/golang.org/x/net/http2/Dockerfile
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# This Dockerfile builds a recent curl with HTTP/2 client support, using
-# a recent nghttp2 build.
-#
-# See the Makefile for how to tag it. If Docker and that image is found, the
-# Go tests use this curl binary for integration tests.
-#
-
-FROM ubuntu:trusty
-
-RUN apt-get update && \
- apt-get upgrade -y && \
- apt-get install -y git-core build-essential wget
-
-RUN apt-get install -y --no-install-recommends \
- autotools-dev libtool pkg-config zlib1g-dev \
- libcunit1-dev libssl-dev libxml2-dev libevent-dev \
- automake autoconf
-
-# The list of packages nghttp2 recommends for h2load:
-RUN apt-get install -y --no-install-recommends make binutils \
- autoconf automake autotools-dev \
- libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
- libev-dev libevent-dev libjansson-dev libjemalloc-dev \
- cython python3.4-dev python-setuptools
-
-# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
-ENV NGHTTP2_VER 895da9a
-RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
-
-WORKDIR /root/nghttp2
-RUN git reset --hard $NGHTTP2_VER
-RUN autoreconf -i
-RUN automake
-RUN autoconf
-RUN ./configure
-RUN make
-RUN make install
-
-WORKDIR /root
-RUN wget https://curl.se/download/curl-7.45.0.tar.gz
-RUN tar -zxvf curl-7.45.0.tar.gz
-WORKDIR /root/curl-7.45.0
-RUN ./configure --with-ssl --with-nghttp2=/usr/local
-RUN make
-RUN make install
-RUN ldconfig
-
-CMD ["-h"]
-ENTRYPOINT ["/usr/local/bin/curl"]
-
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
deleted file mode 100644
index 55fd826f7..000000000
--- a/vendor/golang.org/x/net/http2/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-curlimage:
- docker build -t gohttp2/curl .
-
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 8cb14f3c9..a4ba54faf 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -581,9 +581,11 @@ type serverConn struct {
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
curClientStreams uint32 // number of open streams initiated by the client
curPushedStreams uint32 // number of open streams initiated by server push
+ curHandlers uint32 // number of running handler goroutines
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*stream
+ unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
@@ -981,6 +983,8 @@ func (sc *serverConn) serve() {
return
case gracefulShutdownMsg:
sc.startGracefulShutdownInternal()
+ case handlerDoneMsg:
+ sc.handlerDone()
default:
panic("unknown timer")
}
@@ -1012,14 +1016,6 @@ func (sc *serverConn) serve() {
}
}
-func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
- select {
- case <-sc.doneServing:
- case <-sharedCh:
- close(privateCh)
- }
-}
-
type serverMessage int
// Message values sent to serveMsgCh.
@@ -1028,6 +1024,7 @@ var (
idleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
+ handlerDoneMsg = new(serverMessage)
)
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
@@ -1897,9 +1894,11 @@ func (st *stream) copyTrailersToHandlerRequest() {
// onReadTimeout is run on its own goroutine (from time.AfterFunc)
// when the stream's ReadTimeout has fired.
func (st *stream) onReadTimeout() {
- // Wrap the ErrDeadlineExceeded to avoid callers depending on us
- // returning the bare error.
- st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+ if st.body != nil {
+ // Wrap the ErrDeadlineExceeded to avoid callers depending on us
+ // returning the bare error.
+ st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+ }
}
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
@@ -2017,13 +2016,10 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout != 0 {
sc.conn.SetReadDeadline(time.Time{})
- if st.body != nil {
- st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
- }
+ st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
- go sc.runHandler(rw, req, handler)
- return nil
+ return sc.scheduleHandler(id, rw, req, handler)
}
func (sc *serverConn) upgradeRequest(req *http.Request) {
@@ -2043,6 +2039,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
sc.conn.SetReadDeadline(time.Time{})
}
+ // This is the first request on the connection,
+ // so start the handler directly rather than going
+ // through scheduleHandler.
+ sc.curHandlers++
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
}
@@ -2283,8 +2283,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response
return &responseWriter{rws: rws}
}
+type unstartedHandler struct {
+ streamID uint32
+ rw *responseWriter
+ req *http.Request
+ handler func(http.ResponseWriter, *http.Request)
+}
+
+// scheduleHandler starts a handler goroutine,
+// or schedules one to start as soon as an existing handler finishes.
+func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
+ sc.serveG.check()
+ maxHandlers := sc.advMaxStreams
+ if sc.curHandlers < maxHandlers {
+ sc.curHandlers++
+ go sc.runHandler(rw, req, handler)
+ return nil
+ }
+ if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
+ return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
+ }
+ sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
+ streamID: streamID,
+ rw: rw,
+ req: req,
+ handler: handler,
+ })
+ return nil
+}
+
+func (sc *serverConn) handlerDone() {
+ sc.serveG.check()
+ sc.curHandlers--
+ i := 0
+ maxHandlers := sc.advMaxStreams
+ for ; i < len(sc.unstartedHandlers); i++ {
+ u := sc.unstartedHandlers[i]
+ if sc.streams[u.streamID] == nil {
+ // This stream was reset before its goroutine had a chance to start.
+ continue
+ }
+ if sc.curHandlers >= maxHandlers {
+ break
+ }
+ sc.curHandlers++
+ go sc.runHandler(u.rw, u.req, u.handler)
+ sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
+ }
+ sc.unstartedHandlers = sc.unstartedHandlers[i:]
+ if len(sc.unstartedHandlers) == 0 {
+ sc.unstartedHandlers = nil
+ }
+}
+
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+ defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
rw.rws.stream.cancelCtx()
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 05ba23d3d..e909040d9 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -19,6 +19,7 @@ import (
"io/fs"
"log"
"math"
+ "math/bits"
mathrand "math/rand"
"net"
"net/http"
@@ -290,8 +291,7 @@ func (t *Transport) initConnPool() {
// HTTP/2 server.
type ClientConn struct {
t *Transport
- tconn net.Conn // usually *tls.Conn, except specialized impls
- tconnClosed bool
+ tconn net.Conn // usually *tls.Conn, except specialized impls
tlsState *tls.ConnectionState // nil only for specialized impls
reused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request
@@ -1653,7 +1653,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
return int(n) // doesn't truncate; max is 512K
}
-var bufPool sync.Pool // of *[]byte
+// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
+// streaming requests using small frame sizes occupy large buffers initially allocated for prior
+// requests needing big buffers. The size ranges are as follows:
+// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB],
+// {256 KB, 512 KB], {512 KB, infinity}
+// In practice, the maximum scratch buffer size should not exceed 512 KB due to
+// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
+// It exists mainly as a safety measure, for potential future increases in max buffer size.
+var bufPools [7]sync.Pool // of *[]byte
+func bufPoolIndex(size int) int {
+ if size <= 16384 {
+ return 0
+ }
+ size -= 1
+ bits := bits.Len(uint(size))
+ index := bits - 14
+ if index >= len(bufPools) {
+ return len(bufPools) - 1
+ }
+ return index
+}
func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
cc := cs.cc
@@ -1671,12 +1691,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
// Scratch buffer for reading into & writing from.
scratchLen := cs.frameScratchBufferLen(maxFrameSize)
var buf []byte
- if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
- defer bufPool.Put(bp)
+ index := bufPoolIndex(scratchLen)
+ if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
+ defer bufPools[index].Put(bp)
buf = *bp
} else {
buf = make([]byte, scratchLen)
- defer bufPool.Put(&buf)
+ defer bufPools[index].Put(&buf)
}
var sawEOF bool
--
2.33.0
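
The bufPoolIndex helper added in the transport.go hunk above selects one
of the seven pools by rounding the scratch-buffer length up to a
power-of-two size class: bits.Len(size-1) yields the exponent of the
next power of two, and subtracting 14 rebases it so that lengths of
16 KB (2^14) and below land in pool 0. A self-contained sketch of the
same arithmetic with a few spot checks (numPools is a stand-in for the
length of the patch's bufPools array):

    package main

    import (
        "fmt"
        "math/bits"
    )

    const numPools = 7

    // Mirrors the vendored bufPoolIndex: lengths up to 16 KB share
    // pool 0, each doubling moves one pool up, and anything beyond
    // 512 KB falls into the final "infinity" pool.
    func bufPoolIndex(size int) int {
        if size <= 16384 {
            return 0
        }
        size -= 1
        bits := bits.Len(uint(size))
        index := bits - 14
        if index >= numPools {
            return numPools - 1
        }
        return index
    }

    func main() {
        for _, n := range []int{16384, 16385, 65536, 524288, 1 << 20} {
            fmt.Printf("len %7d -> pool %d\n", n, bufPoolIndex(n))
        }
        // len   16384 -> pool 0
        // len   16385 -> pool 1
        // len   65536 -> pool 2
        // len  524288 -> pool 5
        // len 1048576 -> pool 6
    }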


@@ -29,3 +29,4 @@ patch/0028-containerd-bugfix-add-nil-pointer-check-for-cgroup-v1-mem-usage.patch
 patch/0029-containerd-fix-unable-to-checkpoint-the-container-more-than-onc.patch
 patch/0030-containerd-fix-cio.Cancel-should-close-the-pipes.patch
 patch/0031-containerd-fix-some-containerd-bug.patch
+patch/0032-containerd-vendor-golang.org-x-net-v0.17.0.patch