sync from openEuler-22.03-LTS

parent e6424e05ea
commit e676158bba
@@ -1 +1 @@
-18.09.0.121
+18.09.0.301
docker.spec (34 lines changed)
@@ -1,6 +1,6 @@
 Name: docker-engine
 Version: 18.09.0
-Release: 121
+Release: 301
 Summary: The open-source application container engine
 Group: Tools/Docker
@@ -155,7 +155,7 @@ install -p -m 644 components/engine/contrib/syntax/nano/Dockerfile.nanorc $RPM_B
 /%{_bindir}/containerd
 /%{_bindir}/docker-proxy
 /%{_bindir}/containerd-shim
-%{_bindir}/runc
+/%{_bindir}/runc
 /%{_sysconfdir}/udev/rules.d/80-docker.rules
 %if 0%{?is_systemd}
 /%{_unitdir}/docker.service
@@ -212,17 +212,29 @@ fi
 %endif
 
 %changelog
-* Thu Jun 09 2022 duyiwei <duyiwei@kylinos.cn> - 18.09.0-121
+* Thu Jun 16 2022 duyiwei <duyiwei@kylinos.cn> - 18.09.0-301
 - Type:bugfix
 - CVE:CVE-2022-24769
 - SUG:NA
 - DESC:fix CVE-2022-24769
 
-* Mon Apr 11 2022 fushanqing <fushanqing@kylinos.cn> - 18.09.0-120
-- Integrated runc
+* Tue Mar 22 2022 chenjiankun<chenjiankun1@huawei.com> - 18.09.0-300
+- Type:bugfix
+- CVE:NA
+- SUG:NA
+- DESC:sync from internal
 
-* Thu Feb 10 2022 fushanqing <fushanqing@kylinos.cn> - 18.09.0-119
-- remove install runc
+* Wed Mar 02 2022 chenjiankun<chenjiankun1@huawei.com> - 18.09.0-120
+- Type:bugfix
+- CVE:NA
+- SUG:NA
+- DESC:Use original process spec for execs
+
+* Tue Dec 28 2021 chenjiankun<chenjiankun1@huawei.com> - 18.09.0-119
+- Type:bugfix
+- CVE:NA
+- SUG:NA
+- DESC:disable go module build
 
 * Sun Sep 26 2021 xiadanni<xiadanni1@huawei.com> - 18.09.0-118
 - Type:bugfix
@@ -230,17 +242,17 @@ fi
 - SUG:NA
 - DESC:update seccomp whitelist to Linux 5.10 syscall list
 
-* Tue Aug 31 2021 WangFengTu<wangfengtu@huawei.com> - 18.09.0-117
+* Wed Sep 08 2021 xiadanni<xiadanni1@huawei.com> - 18.09.0-117
 - Type:bugfix
 - CVE:NA
 - SUG:NA
-- DESC:fix rpmbuild failed
+- DESC:add clone3 to seccomp whitelist to fix curl failed in X86
 
-* Mon Aug 30 2021 wangfengtu<wangfengtu@huawei.com> - 18.09.0-116
+* Fri Sep 03 2021 chenjiankun<chenjiankun1@huawei.com> - 18.09.0-116
 - Type:bugfix
 - CVE:NA
 - SUG:NA
-- DESC:fix dangling unpigz
+- DESC:enable debuginfo
 
 * Thu Apr 01 2021 wangfengtu<wangfengtu@huawei.com> - 18.09.0-115
 - Type:bugfix
@@ -1 +1 @@
-83b0845432ba7ae940cf3276334608b30e43b05a
+aa1eee89dbf55f1be74beab946d39bd5308554f6
@@ -0,0 +1,27 @@
From 9bc663c3332937cdb55aa5e31957678fe605b168 Mon Sep 17 00:00:00 2001
From: xiangrenzhi <xiangrenzhi@huawei.com>
Date: Thu, 25 Feb 2021 09:27:42 +0800
Subject: [PATCH] docker: fix images filter when use multi reference filter

Signed-off-by: xiangrenzhi <xiangrenzhi@huawei.com>
---
 components/engine/daemon/images/images.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/components/engine/daemon/images/images.go b/components/engine/daemon/images/images.go
index 49212341c..94e0c1eb8 100644
--- a/components/engine/daemon/images/images.go
+++ b/components/engine/daemon/images/images.go
@@ -152,6 +152,9 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr
 if matchErr != nil {
 return nil, matchErr
 }
+ if found {
+ break
+ }
 }
 if !found {
 continue
--
2.19.1
patch/0194-docker-fix-docker-rmi-stucking.patch (new file, 26 lines)
@@ -0,0 +1,26 @@
From ac36676aac3f2dfca8e1ac31115417919b9e0160 Mon Sep 17 00:00:00 2001
From: xiangrenzhi <xiangrenzhi@huawei.com>
Date: Thu, 25 Feb 2021 09:37:29 +0800
Subject: [PATCH] docker: fix docker rmi stucking

Signed-off-by: xiangrenzhi <xiangrenzhi@huawei.com>
---
 components/engine/daemon/images/image_delete.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/components/engine/daemon/images/image_delete.go b/components/engine/daemon/images/image_delete.go
index 94d6f872d..fbd6c16b7 100644
--- a/components/engine/daemon/images/image_delete.go
+++ b/components/engine/daemon/images/image_delete.go
@@ -369,7 +369,7 @@ func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictTyp
 if mask&conflictRunningContainer != 0 {
 // Check if any running container is using the image.
 running := func(c *container.Container) bool {
- return c.IsRunning() && c.ImageID == imgID
+ return c.ImageID == imgID && c.IsRunning()
 }
 if container := i.containers.First(running); container != nil {
 return &imageDeleteConflict{
--
2.19.1
@@ -0,0 +1,35 @@
From a0a85fc867a59c1ae7b6f4a36b624224dfdedeea Mon Sep 17 00:00:00 2001
From: xiangrenzhi <xiangrenzhi@huawei.com>
Date: Thu, 25 Feb 2021 09:42:04 +0800
Subject: [PATCH] docker: fix network sandbox not cleaned up on failure

Signed-off-by: xiangrenzhi <xiangrenzhi@huawei.com>
---
 components/engine/daemon/container_operations.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/components/engine/daemon/container_operations.go b/components/engine/daemon/container_operations.go
index 909c7ccb2..39b52b037 100644
--- a/components/engine/daemon/container_operations.go
+++ b/components/engine/daemon/container_operations.go
@@ -498,7 +498,7 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
 }
 }

-func (daemon *Daemon) allocateNetwork(container *container.Container) error {
+func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr error) {
 start := time.Now()
 controller := daemon.netController

@@ -566,7 +566,7 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) error {
 }
 updateSandboxNetworkSettings(container, sb)
 defer func() {
- if err != nil {
+ if retErr != nil {
 sb.Delete()
 }
 }()
--
2.19.1
@@ -0,0 +1,88 @@
From 8034f96d1500dac8af17449b9dba01b07b956a04 Mon Sep 17 00:00:00 2001
From: xiadanni <xiadanni1@huawei.com>
Date: Tue, 2 Mar 2021 09:31:44 +0800
Subject: [PATCH] docker: fix container status not consistent with its shim
 process status

1. fix containerd-shim residual when kill containerd during start container
If containerd is killed after shim and container init process started,
new containerd process will not clean them during load-task.
But both of t.Start and t.Delete in docker failed because it cannot
connect to containerd. In the meanwhile, docker have not received container
start event yet, so it will not set container status to running.
All of above caused shim and container init process residual but
container status from docker is Created. Even after container is
deleted, shim and init process still exist.
So we add runc delete --force if t.Start failed, which do not need to
send signal through containerd to kill container process.

2. fix shim killed but container status is running
In the similar scene with 1, shim and container init process started,
and start event is sent to dockerd. But containerd is killed and new
containerd process is started before t.Delete, shim will be killed but
container init process is still working, dockerd will not receive
process exit event. So dockerd shows container is running but actually
shim is killed.
So we add runc delete --force if t.Start failed to kill container init
process.

Signed-off-by: xiadanni <xiadanni1@huawei.com>
---
 components/engine/libcontainerd/client_daemon.go | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/components/engine/libcontainerd/client_daemon.go b/components/engine/libcontainerd/client_daemon.go
index 502796b..9c65e54 100755
--- a/components/engine/libcontainerd/client_daemon.go
+++ b/components/engine/libcontainerd/client_daemon.go
@@ -8,6 +8,7 @@ import (
 "fmt"
 "io"
 "os"
+ "os/exec"
 "path/filepath"
 "reflect"
 "runtime"
@@ -317,10 +318,9 @@ func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin
 close(stdinCloseSync)

 if err := t.Start(ctx); err != nil {
- if _, err := t.Delete(ctx); err != nil {
- c.logger.WithError(err).WithField("container", id).
- Error("failed to delete task after fail start")
- }
+ exec.Command("runc", "--root", "/var/run/docker/runtime-runc/moby", "delete", "--force", id).Run()
+ _, errD := t.Delete(ctx)
+ logrus.Warnf("container %v start failed, delete task, delete err: %v", id, errD)
 ctr.setTask(nil)
 return -1, wrapError(err)
 }
@@ -916,10 +916,7 @@ func (c *client) processEventStream(ctx context.Context, ns string) {
 c.logger.WithField("container", ei.ContainerID).Warn("unknown container")
 if et == EventExit && ei.ProcessID == ei.ContainerID && c.backend.IsContainerRunning(ei.ContainerID) {
 c.logger.WithField("container", ei.ContainerID).Warn("handle exit event force ...")
- c.eventQ.append(ei.ContainerID, func() {
- c.logger.WithField("container", ei.ContainerID).Warnf("handle exit event force: error=%v",
- c.backend.ProcessEvent(ei.ContainerID, et, ei))
- })
+ c.processOrphanEvent(ctr, et, ei)
 }
 continue
 }
@@ -935,6 +932,13 @@ func (c *client) processEventStream(ctx context.Context, ns string) {
 }
 }

+func (c *client) processOrphanEvent(ctr *container, et EventType, ei EventInfo) {
+ c.eventQ.append(ei.ContainerID, func() {
+ c.logger.WithField("container", ei.ContainerID).Warnf("handle exit event force: error=%v",
+ c.backend.ProcessEvent(ei.ContainerID, et, ei))
+ })
+}
+
 func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
 writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
 if err != nil {
--
1.8.3.1
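For reference, a minimal standalone Go sketch of the fallback the patch above relies on when the containerd task cannot be started or cleaned up: kill and remove the container's init process directly through the runc CLI instead of going through containerd. The helper name and the example container ID are illustrative assumptions, not part of the patch; only the runc root path is taken from it.

package main

import (
	"fmt"
	"os/exec"
)

// runcRoot is the runc state directory dockerd uses for the "moby" namespace
// (taken from the patch above; adjust for other setups).
const runcRoot = "/var/run/docker/runtime-runc/moby"

// forceDelete asks runc to kill and remove a container directly, bypassing
// containerd. This is the last-resort cleanup used when t.Start/t.Delete fail.
func forceDelete(containerID string) error {
	out, err := exec.Command("runc", "--root", runcRoot, "delete", "--force", containerID).CombinedOutput()
	if err != nil {
		return fmt.Errorf("runc delete --force %s: %v: %s", containerID, err, out)
	}
	return nil
}

func main() {
	// hypothetical container ID, for illustration only
	if err := forceDelete("example-container-id"); err != nil {
		fmt.Println(err)
	}
}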
patch/0197-docker-fix-hijack-hang.patch (new file, 51 lines)
@@ -0,0 +1,51 @@
From 06e9b3151585573818df8d890c0be1dc576500e6 Mon Sep 17 00:00:00 2001
From: jingrui <jingrui@huawei.com>
Date: Mon, 1 Feb 2021 16:56:40 +0800
Subject: [PATCH] docker: fix hijack hang

Change-Id: Ica0fe7806227114acfe028b44dfeed70a5dd4577
Signed-off-by: jingrui <jingrui@huawei.com>
---
 .../docker/docker/client/container_exec.go | 18 ++++++++-
 .../dockerd/hack/malformed_host_override.go | 37 +++++++++++--------
 2 files changed, 38 insertions(+), 17 deletions(-)

diff --git a/components/cli/vendor/github.com/docker/docker/client/container_exec.go b/components/cli/vendor/github.com/docker/docker/client/container_exec.go
index 535536b1e0..ac458e9c30 100644
--- a/components/cli/vendor/github.com/docker/docker/client/container_exec.go
+++ b/components/cli/vendor/github.com/docker/docker/client/container_exec.go
@@ -3,6 +3,8 @@ package client // import "github.com/docker/docker/client"
 import (
 "context"
 "encoding/json"
+ "fmt"
+ "time"

 "github.com/docker/docker/api/types"
 )
@@ -36,8 +38,20 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config
 // and the a reader to get output. It's up to the called to close
 // the hijacked connection by calling types.HijackedResponse.Close.
 func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) {
- headers := map[string][]string{"Content-Type": {"application/json"}}
- return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+ done := make(chan struct{})
+ var resp types.HijackedResponse
+ var err error
+ go func() {
+ headers := map[string][]string{"Content-Type": {"application/json"}}
+ resp, err = cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+ close(done)
+ }()
+ select {
+ case <-done:
+ return resp, err
+ case <-time.After(5 * time.Minute):
+ return resp, fmt.Errorf("post exec hijacked timeout")
+ }
 }

 // ContainerExecInspect returns information about a specific exec process on the docker host.
--
2.17.1
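The patch above races the hijacked POST against a timer instead of blocking forever. A minimal sketch of that pattern, with a placeholder blocking call standing in for cli.postHijacked (names here are assumptions for illustration):

package main

import (
	"errors"
	"fmt"
	"time"
)

// callWithTimeout runs a blocking call in a goroutine and gives up after d.
// The goroutine itself is not cancelled, only abandoned, which is the same
// trade-off the patch accepts for the hijacked exec request.
func callWithTimeout(d time.Duration, call func() (string, error)) (string, error) {
	done := make(chan struct{})
	var resp string
	var err error
	go func() {
		resp, err = call() // stands in for cli.postHijacked(...)
		close(done)
	}()
	select {
	case <-done:
		return resp, err
	case <-time.After(d):
		return "", errors.New("post exec hijacked timeout")
	}
}

func main() {
	resp, err := callWithTimeout(100*time.Millisecond, func() (string, error) {
		time.Sleep(time.Second) // simulated hang
		return "ok", nil
	})
	fmt.Println(resp, err)
}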
patch/0198-docker-fix-docker-kill-command-block.patch (new file, 81 lines)
@@ -0,0 +1,81 @@
From 74bd1d0c00c53f96696663e45507e332684dac7a Mon Sep 17 00:00:00 2001
From: xiadanni <xiadanni1@huawei.com>
Date: Wed, 3 Mar 2021 16:46:50 +0800
Subject: [PATCH] docker: fix docker kill command block

reason:When docker kill command execute with start/restart command
concurrently, kill command may block at <-container.Wait.
As s.waitStop is variable, so there is case that waitStop in Wait
function get a new s.waitStop(the old one is already closed before).
So kill command blocked to wait the new s.waitStop close.

Signed-off-by: xiadanni <xiadanni1@huawei.com>
---
 components/engine/container/state.go | 13 +++++++++++--
 components/engine/daemon/kill.go | 4 +++-
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/components/engine/container/state.go b/components/engine/container/state.go
index 91ea30a..e9666ed 100644
--- a/components/engine/container/state.go
+++ b/components/engine/container/state.go
@@ -65,6 +65,10 @@ func NewState() *State {
 }
 }

+func (s State) GetWaitStop() chan struct{} {
+ return s.waitStop
+}
+
 // String returns a human-readable description of the state
 func (s *State) String() string {
 if s.Running {
@@ -179,6 +183,10 @@ const (
 // otherwise, the results Err() method will return an error indicating why the
 // wait operation failed.
 func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus {
+ return s.Wait3(ctx, condition, nil)
+}
+
+func (s *State) Wait3(ctx context.Context, condition WaitCondition, waitStop chan struct{}) <-chan StateStatus {
 s.Lock()
 defer s.Unlock()

@@ -197,9 +205,10 @@ func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateS

 // If we are waiting only for removal, the waitStop channel should
 // remain nil and block forever.
- var waitStop chan struct{}
 if condition < WaitConditionRemoved {
- waitStop = s.waitStop
+ if waitStop == nil {
+ waitStop = s.waitStop
+ }
 }

 // Always wait for removal, just in case the container gets removed
diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go
index d185065..4c8ccf9 100644
--- a/components/engine/daemon/kill.go
+++ b/components/engine/daemon/kill.go
@@ -132,6 +132,8 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
 return nil
 }

+ waitStop := container.GetWaitStop()
+
 // 1. Send SIGKILL
 if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil {
 // While normally we might "return err" here we're not going to
@@ -166,7 +168,7 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {

 // Wait for exit with no timeout.
 // Ignore returned status.
- <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning)
+ <-container.Wait3(context.Background(), containerpkg.WaitConditionNotRunning, waitStop)

 return nil
 }
--
1.8.3.1
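The kill fix above hinges on capturing the stop channel before SIGKILL is sent, so a later wait cannot pick up a freshly re-created channel. A minimal Go sketch of that pattern; the State type here is a simplified stand-in, not the real container state:

package main

import (
	"fmt"
	"sync"
	"time"
)

// State is a simplified stand-in for the container state in the patch.
type State struct {
	mu       sync.Mutex
	waitStop chan struct{}
}

// GetWaitStop snapshots the current stop channel.
func (s *State) GetWaitStop() chan struct{} {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.waitStop
}

// SetStopped closes the current channel and installs a new one, which is why
// a waiter that re-reads s.waitStop afterwards could block forever.
func (s *State) SetStopped() {
	s.mu.Lock()
	defer s.mu.Unlock()
	close(s.waitStop)
	s.waitStop = make(chan struct{})
}

func main() {
	s := &State{waitStop: make(chan struct{})}
	waitStop := s.GetWaitStop() // capture before triggering the stop
	go func() {
		time.Sleep(10 * time.Millisecond)
		s.SetStopped()
	}()
	<-waitStop // wakes up even though s.waitStop has been replaced
	fmt.Println("observed stop on the captured channel")
}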
@@ -0,0 +1,82 @@
From 9ddd6e47a90ac056d242969ff72bf75a43cc0004 Mon Sep 17 00:00:00 2001
From: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
Date: Thu, 29 Nov 2018 16:14:35 +0900
Subject: [PATCH] pkg/archive: [backport] fix TestTarUntarWithXattr failure on recent
 kernel

Recent kernel has strict check for security.capability value.
Fix #38289

Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
---
 Dockerfile | 1 +
 pkg/archive/archive_unix_test.go | 20 ++++++++++++++------
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/components/engine/Dockerfile b/components/engine/Dockerfile
index b0895cf5e0..8337653e19 100644
--- a/components/engine/Dockerfile
+++ b/components/engine/Dockerfile
@@ -182,6 +182,7 @@ RUN apt-get update && apt-get install -y \
 btrfs-tools \
 iptables \
 jq \
+ libcap2-bin \
 libdevmapper-dev \
 libudev-dev \
 libsystemd-dev \
diff --git a/components/engine/pkg/archive/archive_unix_test.go b/components/engine/pkg/archive/archive_unix_test.go
index 83deab0840..dc4e1fdae6 100644
--- a/components/engine/pkg/archive/archive_unix_test.go
+++ b/components/engine/pkg/archive/archive_unix_test.go
@@ -7,6 +7,7 @@ import (
 "fmt"
 "io/ioutil"
 "os"
+ "os/exec"
 "path/filepath"
 "strings"
 "syscall"
@@ -222,6 +223,13 @@ func TestTarWithBlockCharFifo(t *testing.T) {
 // TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows
 func TestTarUntarWithXattr(t *testing.T) {
 skip.If(t, os.Getuid() != 0, "skipping test that requires root")
+ if _, err := exec.LookPath("setcap"); err != nil {
+ t.Skip("setcap not installed")
+ }
+ if _, err := exec.LookPath("getcap"); err != nil {
+ t.Skip("getcap not installed")
+ }
+
 origin, err := ioutil.TempDir("", "docker-test-untar-origin")
 assert.NilError(t, err)
 defer os.RemoveAll(origin)
@@ -232,8 +240,9 @@ func TestTarUntarWithXattr(t *testing.T) {
 assert.NilError(t, err)
 err = ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700)
 assert.NilError(t, err)
- err = system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0)
- assert.NilError(t, err)
+ // there is no known Go implementation of setcap/getcap with support for v3 file capability
+ out, err := exec.Command("setcap", "cap_block_suspend+ep", filepath.Join(origin, "2")).CombinedOutput()
+ assert.NilError(t, err, string(out))

 for _, c := range []Compression{
 Uncompressed,
@@ -251,10 +260,9 @@ func TestTarUntarWithXattr(t *testing.T) {
 if len(changes) != 1 || changes[0].Path != "/3" {
 t.Fatalf("Unexpected differences after tarUntar: %v", changes)
 }
- capability, _ := system.Lgetxattr(filepath.Join(origin, "2"), "security.capability")
- if capability == nil && capability[0] != 0x00 {
- t.Fatalf("Untar should have kept the 'security.capability' xattr.")
- }
+ out, err := exec.Command("getcap", filepath.Join(origin, "2")).CombinedOutput()
+ assert.NilError(t, err, string(out))
+ assert.Check(t, is.Contains(string(out), "= cap_block_suspend+ep"), "untar should have kept the 'security.capability' xattr")
 }
 }

--
2.27.0
patch/0200-docker-fix-unit-testcase-error.patch (new file, 148 lines)
@@ -0,0 +1,148 @@
From f2656c9524e517878131556988548e28e092b9a9 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 7 Mar 2022 12:00:11 +0800
Subject: [PATCH] docker: fix unit testcase error

---
 components/engine/client/hijack_test.go | 3 ++-
 components/engine/daemon/daemon_unix_test.go | 10 +++++-----
 .../daemon/graphdriver/quota/projectquota_test.go | 2 +-
 components/engine/opts/hosts_test.go | 8 ++++----
 components/engine/pkg/pidfile/pidfile.go | 2 +-
 components/engine/registry/registry_mock_test.go | 2 +-
 components/engine/registry/registry_test.go | 3 ++-
 7 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/components/engine/client/hijack_test.go b/components/engine/client/hijack_test.go
index d71dc9ea..05e8ca71 100644
--- a/components/engine/client/hijack_test.go
+++ b/components/engine/client/hijack_test.go
@@ -72,7 +72,8 @@ func TestTLSCloseWriter(t *testing.T) {
 }
 }()

- ts.StartTLS()
+ // certificate file in golang has been deleted
+ ts.Start()
 defer ts.Close()

 serverURL, err := url.Parse(ts.URL)
diff --git a/components/engine/daemon/daemon_unix_test.go b/components/engine/daemon/daemon_unix_test.go
index d9bba54a..8493a4a1 100644
--- a/components/engine/daemon/daemon_unix_test.go
+++ b/components/engine/daemon/daemon_unix_test.go
@@ -270,27 +270,27 @@ func TestNetworkOptions(t *testing.T) {
 func TestGetContainerMountId(t *testing.T) {
 id := "56e143922c405419a38b23bfbccc92284f35525e3f2ad7011ea904501ccd1219"

- id1 := getContainerMountId("/var/lib/docker/aufs/mnt/" + id)
+ _, id1 := getContainerMountId("/var/lib/docker/aufs/mnt/" + id)
 if id1 != id {
 t.Fatalf("Expected container mount id [%s], but got [%s]", id, id1)
 }

- id1 = getContainerMountId("/var/lib/docker/devicemapper/mnt/" + id)
+ _, id1 = getContainerMountId("/var/lib/docker/devicemapper/mnt/" + id)
 if id1 != id {
 t.Fatalf("Expected container mount id [%s], but got [%s]", id, id1)
 }

- id1 = getContainerMountId("/var/lib/docker/overlay/" + id + "/merged")
+ _, id1 = getContainerMountId("/var/lib/docker/overlay/" + id + "/merged")
 if id1 != id {
 t.Fatalf("Expected container mount id [%s], but got [%s]", id, id1)
 }

- id1 = getContainerMountId("/var/lib/docker/zfs/graph/" + id)
+ _, id1 = getContainerMountId("/var/lib/docker/zfs/graph/" + id)
 if id1 != id {
 t.Fatalf("Expected container mount id [%s], but got [%s]", id, id1)
 }

- id1 = getContainerMountId("/var/lib/docker/devicemapper_err/mnt" + id)
+ _, id1 = getContainerMountId("/var/lib/docker/devicemapper_err/mnt" + id)
 if id1 != "" {
 t.Fatalf("Expected a empty container mount id, but got [%s]", id1)
 }
diff --git a/components/engine/daemon/graphdriver/quota/projectquota_test.go b/components/engine/daemon/graphdriver/quota/projectquota_test.go
index aa164cc4..1a5ac693 100644
--- a/components/engine/daemon/graphdriver/quota/projectquota_test.go
+++ b/components/engine/daemon/graphdriver/quota/projectquota_test.go
@@ -111,7 +111,7 @@ func wrapQuotaTest(testFunc func(t *testing.T, ctrl *Control, mountPoint, testDi
 assert.NilError(t, err)
 defer os.RemoveAll(testDir)

- ctrl, err := NewControl(testDir)
+ ctrl, err := NewControl(testDir, "xfs")
 assert.NilError(t, err)

 testSubDir, err := ioutil.TempDir(testDir, "quota-test")
diff --git a/components/engine/opts/hosts_test.go b/components/engine/opts/hosts_test.go
index cd8c3f91..fbe4b3cc 100644
--- a/components/engine/opts/hosts_test.go
+++ b/components/engine/opts/hosts_test.go
@@ -53,8 +53,8 @@ func TestParseHost(t *testing.T) {
 func TestParseDockerDaemonHost(t *testing.T) {
 invalids := map[string]string{

- "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d",
- "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path",
+ "tcp:a.b.c.d": `parse tcp://tcp:a.b.c.d: invalid port ":a.b.c.d" after host`,
+ "tcp:a.b.c.d/path": `parse tcp://tcp:a.b.c.d/path: invalid port ":a.b.c.d" after host`,
 "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1",
 "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375",
 "tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock",
@@ -99,8 +99,8 @@ func TestParseTCP(t *testing.T) {
 defaultHTTPHost = "tcp://127.0.0.1:2376"
 )
 invalids := map[string]string{
- "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d",
- "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path",
+ "tcp:a.b.c.d": `parse tcp://tcp:a.b.c.d: invalid port ":a.b.c.d" after host`,
+ "tcp:a.b.c.d/path": `parse tcp://tcp:a.b.c.d/path: invalid port ":a.b.c.d" after host`,
 "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1",
 "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375",
 }
diff --git a/components/engine/pkg/pidfile/pidfile.go b/components/engine/pkg/pidfile/pidfile.go
index 485c0013..ab7484a3 100644
--- a/components/engine/pkg/pidfile/pidfile.go
+++ b/components/engine/pkg/pidfile/pidfile.go
@@ -33,7 +33,7 @@ func isSameApplication(pid int) (bool, error) {
 for sc.Scan() {
 lens := strings.Split(sc.Text(), ":")
 if len(lens) == 2 && strings.TrimSpace(lens[0]) == "Name" {
- if strings.TrimSpace(lens[1]) == os.Args[0] {
+ if _, filename := filepath.Split(os.Args[0]); strings.TrimSpace(lens[1]) == strings.TrimSpace(filename) || strings.TrimSpace(lens[1]) == os.Args[0] {
 return true, nil
 }
 return false, nil
diff --git a/components/engine/registry/registry_mock_test.go b/components/engine/registry/registry_mock_test.go
index bf17eb9f..b80aed15 100644
--- a/components/engine/registry/registry_mock_test.go
+++ b/components/engine/registry/registry_mock_test.go
@@ -112,7 +112,7 @@ func init() {
 r.HandleFunc("/v2/version", handlerGetPing).Methods("GET")

 testHTTPServer = httptest.NewServer(handlerAccessLog(r))
- testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r))
+ testHTTPSServer = httptest.NewServer(handlerAccessLog(r))

 // override net.LookupIP
 lookupIP = func(host string) ([]net.IP, error) {
diff --git a/components/engine/registry/registry_test.go b/components/engine/registry/registry_test.go
index b7459471..f909685e 100644
--- a/components/engine/registry/registry_test.go
+++ b/components/engine/registry/registry_test.go
@@ -75,7 +75,8 @@ func TestPingRegistryEndpoint(t *testing.T) {
 }

 func TestEndpoint(t *testing.T) {
- skip.If(t, os.Getuid() != 0, "skipping test that requires root")
+ // certificate file in golang has been deleted
+ skip.If(t, os.Getuid() == 0, "skipping test that requires root")
 // Simple wrapper to fail test if err != nil
 expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint {
 endpoint, err := NewV1Endpoint(index, "", nil)
--
2.27.0
@@ -0,0 +1,44 @@
From 8b41a404dcb0aa7c377b18b5f0627ed379371245 Mon Sep 17 00:00:00 2001
From: jingrui <jingrui@huawei.com>
Date: Thu, 18 Mar 2021 17:28:20 +0800
Subject: [PATCH] docker: use info level for create/start/stop command

Signed-off-by: jingrui <jingrui@huawei.com>
---
 .../engine/api/server/middleware/debug.go | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/components/engine/api/server/middleware/debug.go b/components/engine/api/server/middleware/debug.go
index 31165bf91..2c039aa5d 100644
--- a/components/engine/api/server/middleware/debug.go
+++ b/components/engine/api/server/middleware/debug.go
@@ -13,10 +13,25 @@ import (
 "github.com/sirupsen/logrus"
 )

+func isKeyCmd(method string, uri string) bool {
+ if method != "POST" {
+ return false
+ }
+ if !strings.Contains(uri, "containers") {
+ return false
+ }
+ return strings.Contains(uri, "create") || strings.Contains(uri, "start") || strings.Contains(uri, "stop") || strings.Contains(uri, "kill")
+}
+
 // DebugRequestMiddleware dumps the request to logger
 func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
- logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
+ if isKeyCmd(r.Method, r.RequestURI) {
+ agent, _ := r.Header["User-Agent"]
+ logrus.Infof("Calling %s %s agent=%v", r.Method, r.RequestURI, agent)
+ } else {
+ logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
+ }

 if r.Method != "POST" {
 return handler(ctx, w, r, vars)
--
2.23.0
@@ -0,0 +1,56 @@
From fa960e384ada593add8e14c4cbc4da5a4ebf095e Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Fri, 16 Apr 2021 19:49:45 +0800
Subject: [PATCH] docker: [backport] Fix for lack of synchronization in daemon/update.go

Conflict:NA
Reference:https://github.com/moby/moby/pull/41999/commits/58825ffc3243f13795b36f430726ae8e3e14bed0

---
 components/engine/daemon/update.go | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/components/engine/daemon/update.go b/components/engine/daemon/update.go
index 0ebb139d3..b38db991b 100644
--- a/components/engine/daemon/update.go
+++ b/components/engine/daemon/update.go
@@ -42,20 +42,25 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro

 restoreConfig := false
 backupHostConfig := *container.HostConfig
+
 defer func() {
 if restoreConfig {
 container.Lock()
- container.HostConfig = &backupHostConfig
- container.CheckpointTo(daemon.containersReplica)
+ if !container.RemovalInProgress && !container.Dead {
+ container.HostConfig = &backupHostConfig
+ container.CheckpointTo(daemon.containersReplica)
+ }
 container.Unlock()
 }
 }()

+ container.Lock()
+
 if container.RemovalInProgress || container.Dead {
+ container.Unlock()
 return errCannotUpdate(container.ID, fmt.Errorf("container is marked for removal and cannot be \"update\""))
 }

- container.Lock()
 if err := container.UpdateContainer(hostConfig); err != nil {
 restoreConfig = true
 container.Unlock()
@@ -66,6 +71,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
 container.Unlock()
 return errCannotUpdate(container.ID, err)
 }
+
 container.Unlock()

 // if Restart Policy changed, we need to update container monitor
--
2.27.0
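The ordering the backport enforces: take the container lock before checking RemovalInProgress/Dead, so the check and the update form one critical section. A minimal sketch of that shape with a hypothetical container type (field names are illustrative, not the daemon's real API):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// ctr is a hypothetical stand-in for the container object in daemon/update.go.
type ctr struct {
	sync.Mutex
	RemovalInProgress bool
	Dead              bool
	CPUShares         int64
}

// update checks the removal flags and applies the change under one lock,
// mirroring the locking order the patch introduces.
func update(c *ctr, cpuShares int64) error {
	c.Lock()
	if c.RemovalInProgress || c.Dead {
		c.Unlock()
		return errors.New("container is marked for removal and cannot be updated")
	}
	c.CPUShares = cpuShares // stands in for container.UpdateContainer(hostConfig)
	c.Unlock()
	return nil
}

func main() {
	c := &ctr{}
	fmt.Println(update(c, 512), c.CPUShares)
}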
@@ -0,0 +1,87 @@
From f29dda9acd7a071ab2e4a86f820be236a23838f0 Mon Sep 17 00:00:00 2001
From: Miloslav Trmač <mitr@redhat.com>
Date: Thu, 6 Sep 2018 23:24:06 +0200
Subject: [PATCH] docker: [backport] Don't fail on two concurrent reference.store.AddDigest calls

reference.store.addReference fails when adding a digest reference
that already exists (regardless of the reference target). Both
callers (via reference.store.AddDigest) do check in advance, using
reference.store.Get, whether the digest reference exists before
calling AddDigest, but the reference store lock is released between
the two calls, so if another thread sets the reference in the meantime,
AddDigest may fail with
> Cannot overwrite digest ...
.

Handle this by checking that the pre-existing reference points at the
same image, i.e. that there is nothing to do, and succeeding immediately
in that case. This is even cheaper, avoids a reference.store.save() call.

(In principle, the same failure could have happened via
reference.store.AddTag, as
> Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option
but almost all callers (except for migrate/v1.Migrate, which is run
single-threaded anyway) set the "force" parameter of AddTag to true,
which makes the race invisible. This commit does not change the behavior
of that case, except for speeding it up by avoiding the
reference.store.save() call.)

The existing reference.store.Get checks are now, in a sense, redundant
as such, but their existence allows the callers to provide nice
context-dependent error messages, so this commit leaves them unchanged.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>

Conflict:NA
Reference:https://github.com/moby/moby/commit/f29dda9acd7a071ab2e4a86f820be236a23838f0

---
 components/engine/reference/store.go | 5 +++++
 components/engine/reference/store_test.go | 8 ++++++++
 2 files changed, 13 insertions(+)

diff --git a/components/engine/reference/store.go b/components/engine/reference/store.go
index b01051bf58..b942c42ca2 100644
--- a/components/engine/reference/store.go
+++ b/components/engine/reference/store.go
@@ -149,6 +149,11 @@ func (store *store) addReference(ref reference.Named, id digest.Digest, force bo
 oldID, exists := repository[refStr]

 if exists {
+ if oldID == id {
+ // Nothing to do. The caller may have checked for this using store.Get in advance, but store.mu was unlocked in the meantime, so this can legitimately happen nevertheless.
+ return nil
+ }
+
 // force only works for tags
 if digested, isDigest := ref.(reference.Canonical); isDigest {
 return errors.WithStack(conflictingTagError("Cannot overwrite digest " + digested.Digest().String()))
diff --git a/components/engine/reference/store_test.go b/components/engine/reference/store_test.go
index 1ce674cbfb..435409d358 100644
--- a/components/engine/reference/store_test.go
+++ b/components/engine/reference/store_test.go
@@ -163,6 +163,10 @@ func TestAddDeleteGet(t *testing.T) {
 if err = store.AddTag(ref4, testImageID2, false); err != nil {
 t.Fatalf("error adding to store: %v", err)
 }
+ // Write the same values again; should silently succeed
+ if err = store.AddTag(ref4, testImageID2, false); err != nil {
+ t.Fatalf("error redundantly adding to store: %v", err)
+ }

 ref5, err := reference.ParseNormalizedNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c")
 if err != nil {
@@ -171,6 +175,10 @@ func TestAddDeleteGet(t *testing.T) {
 if err = store.AddDigest(ref5.(reference.Canonical), testImageID2, false); err != nil {
 t.Fatalf("error adding to store: %v", err)
 }
+ // Write the same values again; should silently succeed
+ if err = store.AddDigest(ref5.(reference.Canonical), testImageID2, false); err != nil {
+ t.Fatalf("error redundantly adding to store: %v", err)
+ }

 // Attempt to overwrite with force == false
 if err = store.AddTag(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") {
--
2.27.0
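The change above makes the add idempotent under the store lock, since a Get-then-Add sequence can always race with another writer. A minimal sketch of the same check-inside-the-lock idea on a plain map (types and names here are illustrative, not the docker reference store API):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type refStore struct {
	mu   sync.Mutex
	refs map[string]string // reference -> image ID
}

// addDigest succeeds when the reference is new or already points at the same
// image, and only conflicts when it points somewhere else, which is the rule
// the patch adds to reference.store.addReference.
func (s *refStore) addDigest(ref, id string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if old, exists := s.refs[ref]; exists {
		if old == id {
			return nil // nothing to do; another goroutine got here first
		}
		return errors.New("cannot overwrite digest " + ref)
	}
	s.refs[ref] = id
	return nil
}

func main() {
	s := &refStore{refs: map[string]string{}}
	fmt.Println(s.addDigest("repo@sha256:abc", "img1")) // <nil>
	fmt.Println(s.addDigest("repo@sha256:abc", "img1")) // <nil>, idempotent
	fmt.Println(s.addDigest("repo@sha256:abc", "img2")) // conflict
}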
@@ -0,0 +1,40 @@
From 57bbb50663f80e78cbdb5283b28be19b64f14ea9 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Thu, 13 May 2021 11:15:40 +0800
Subject: [PATCH] docker: [backport] Unexport testcase.Cleanup to fix Go 1.14

Conflict:NA
Reference:https://github.com/gotestyourself/gotest.tools/pull/169/commits/6bc35c2eea35a967a8fe3cf05f491da2cc1793d0

---
 components/engine/vendor/gotest.tools/x/subtest/context.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/components/engine/vendor/gotest.tools/x/subtest/context.go b/components/engine/vendor/gotest.tools/x/subtest/context.go
index 878bdebf1..bcf13eed5 100644
--- a/components/engine/vendor/gotest.tools/x/subtest/context.go
+++ b/components/engine/vendor/gotest.tools/x/subtest/context.go
@@ -27,9 +27,9 @@ func (tc *testcase) Ctx() context.Context {
 return tc.ctx
 }

-// Cleanup runs all cleanup functions. Functions are run in the opposite order
+// cleanup runs all cleanup functions. Functions are run in the opposite order
 // in which they were added. Cleanup is called automatically before Run exits.
-func (tc *testcase) Cleanup() {
+func (tc *testcase) cleanup() {
 for _, f := range tc.cleanupFuncs {
 // Defer all cleanup functions so they all run even if one calls
 // t.FailNow() or panics. Deferring them also runs them in reverse order.
@@ -59,7 +59,7 @@ type parallel interface {
 func Run(t *testing.T, name string, subtest func(t TestContext)) bool {
 return t.Run(name, func(t *testing.T) {
 tc := &testcase{TB: t}
- defer tc.Cleanup()
+ defer tc.cleanup()
 subtest(tc)
 })
 }
--
2.27.0
@@ -0,0 +1,79 @@
From 782d36eae49ceff3e4fbd43c5a8112d9958dc791 Mon Sep 17 00:00:00 2001
From: Stephen Benjamin <stephen@redhat.com>
Date: Tue, 3 Sep 2019 10:56:45 -0400
Subject: [PATCH] archive: [backport] fix race condition in cmdStream

There is a race condition in pkg/archive when using `cmd.Start` for pigz
and xz where the `*bufio.Reader` could be returned to the pool while the
command is still writing to it, and then picked up and used by a new
command.

The command is wrapped in a `CommandContext` where the process will be
killed when the context is cancelled, however this is not instantaneous,
so there's a brief window while the command is still running but the
`*bufio.Reader` was already returned to the pool.

wrapReadCloser calls `cancel()`, and then `readBuf.Close()` which
eventually returns the buffer to the pool. However, because cmdStream
runs `cmd.Wait` in a go routine that we never wait for to finish, it is
not safe to return the reader to the pool yet. We need to ensure we
wait for `cmd.Wait` to finish!

Signed-off-by: Stephen Benjamin <stephen@redhat.com>
(cherry picked from commit 89dd10b06efe93d4f427057f043abf560c461281)
Signed-off-by: WangFengTu <wangfengtu@huawei.com>
---
 components/engine/pkg/archive/archive.go | 12 +++++++++++-
 components/engine/pkg/archive/archive_test.go | 4 +++-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/components/engine/pkg/archive/archive.go b/components/engine/pkg/archive/archive.go
index 070dccb756..82cd0a6c6f 100644
--- a/components/engine/pkg/archive/archive.go
+++ b/components/engine/pkg/archive/archive.go
@@ -1216,6 +1216,9 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
 return nil, err
 }

+ // Ensure the command has exited before we clean anything up
+ done := make(chan struct{})
+
 // Copy stdout to the returned pipe
 go func() {
 if err := cmd.Wait(); err != nil {
@@ -1223,9 +1226,16 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
 } else {
 pipeW.Close()
 }
+ close(done)
 }()

- return pipeR, nil
+ return ioutils.NewReadCloserWrapper(pipeR, func() error {
+ // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as
+ // cmd.Wait waits for any non-file stdout/stderr/stdin to close.
+ err := pipeR.Close()
+ <-done
+ return err
+ }), nil
 }

 // NewTempArchive reads the content of src into a temporary file, and returns the contents
diff --git a/components/engine/pkg/archive/archive_test.go b/components/engine/pkg/archive/archive_test.go
index b448bac49a..f77b7c202d 100644
--- a/components/engine/pkg/archive/archive_test.go
+++ b/components/engine/pkg/archive/archive_test.go
@@ -1356,7 +1356,9 @@ func TestPigz(t *testing.T) {
 _, err := exec.LookPath("unpigz")
 if err == nil {
 t.Log("Tested whether Pigz is used, as it installed")
- assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{}))
+ // For the command wait wrapper
+ cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper)
+ assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{}))
 } else {
 t.Log("Tested whether Pigz is not used, as it not installed")
 assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{}))
--
2.27.0
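The archive fix above ties the reader's Close to the decompressor's exit. A minimal sketch of the same idea using only the standard library (no docker/pkg/ioutils dependency; the wrapper type and example command are assumptions for illustration):

package main

import (
	"fmt"
	"io"
	"os/exec"
)

// waitCloser closes the underlying reader and then blocks until the command
// has fully exited, so nothing backing the reader is recycled too early.
type waitCloser struct {
	io.Reader
	close func() error
}

func (w *waitCloser) Close() error { return w.close() }

// cmdStream starts cmd and returns its stdout as a ReadCloser whose Close
// waits for cmd.Wait, mirroring the patched behaviour.
func cmdStream(cmd *exec.Cmd) (io.ReadCloser, error) {
	pipeR, pipeW := io.Pipe()
	cmd.Stdout = pipeW
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	done := make(chan struct{})
	go func() {
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(err)
		} else {
			pipeW.Close()
		}
		close(done)
	}()
	return &waitCloser{Reader: pipeR, close: func() error {
		err := pipeR.Close() // close first so cmd.Wait is not blocked on stdout
		<-done
		return err
	}}, nil
}

func main() {
	rc, err := cmdStream(exec.Command("echo", "hello"))
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	out, _ := io.ReadAll(rc)
	fmt.Print(string(out))
}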
@@ -0,0 +1,64 @@
From 20b8dbbf705988f94d16a401e9d4f510387cbd0d Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 7 Jun 2021 11:23:33 +0800
Subject: [PATCH] docker: fix runc data and dm left when periodically kill
 containerd

---
 components/engine/daemon/start.go | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/components/engine/daemon/start.go b/components/engine/daemon/start.go
index 07bffaa27..7a7e2b2ee 100644
--- a/components/engine/daemon/start.go
+++ b/components/engine/daemon/start.go
@@ -2,6 +2,7 @@ package daemon // import "github.com/docker/docker/daemon"

 import (
 "context"
+ "os/exec"
 "runtime"
 "time"

@@ -14,6 +15,12 @@ import (
 "github.com/sirupsen/logrus"
 )

+const RootDirectory = "/var/run/docker/runtime-runc/moby"
+
+func deleteForce(containerID string) error {
+ return exec.Command("runc", "--root", RootDirectory, "delete", "--force", containerID).Run()
+}
+
 // ContainerStart starts a container.
 func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error {
 if checkpoint != "" && !daemon.HasExperimental() {
@@ -210,7 +217,11 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint
 if err != nil {
 if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil {
 logrus.WithError(err).WithField("container", container.ID).
- Error("failed to delete failed start container")
+ Error("failed to delete failed start container, try to delete directly")
+ err := deleteForce(container.ID)
+ if err != nil {
+ logrus.Errorf("failed to directly delete container %s", container.ID)
+ }
 }
 return translateContainerdStartErr(container.Path, container.SetExitCode, err)
 }
@@ -273,6 +284,11 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
 container.CancelAttachContext()

 if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil {
- logrus.Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err)
+ logrus.Errorf("%s cleanup: failed to delete container from containerd, try to delete directly: %v", container.ID, err)
+
+ err := deleteForce(container.ID)
+ if err != nil {
+ logrus.Errorf("%s cleanup: failed to directly delete container", container.ID)
+ }
 }
 }
--
2.27.0
@@ -0,0 +1,82 @@
From 210d1acba11aee0cb4a543fa97feb9ecfc4ba532 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Tue, 15 Jun 2021 20:51:10 +0800
Subject: [PATCH] docker: fix ProcessEvent block when CloseStreams block

The ProcessEvent function will block if the CloseStreams function block in
exit event processing. The reason is the ProcessEvent function is serial
processing. So we need add a timeout mechanism to deal with it.

---
 components/engine/container/stream/streams.go | 42 ++++++++++++-------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/components/engine/container/stream/streams.go b/components/engine/container/stream/streams.go
index 585f9e8e3..1a7ef33d4 100644
--- a/components/engine/container/stream/streams.go
+++ b/components/engine/container/stream/streams.go
@@ -7,6 +7,7 @@ import (
 "io/ioutil"
 "strings"
 "sync"
+ "time"

 "github.com/containerd/containerd/cio"
 "github.com/docker/docker/pkg/broadcaster"
@@ -92,27 +93,38 @@ func (c *Config) NewNopInputPipe() {

 // CloseStreams ensures that the configured streams are properly closed.
 func (c *Config) CloseStreams() error {
- var errors []string
+ done := make(chan struct{})
+ var errorsInLine error

- if c.stdin != nil {
- if err := c.stdin.Close(); err != nil {
- errors = append(errors, fmt.Sprintf("error close stdin: %s", err))
+ go func() {
+ var errors []string
+ if c.stdin != nil {
+ if err := c.stdin.Close(); err != nil {
+ errors = append(errors, fmt.Sprintf("error close stdin: %s", err))
+ }
 }
- }

- if err := c.stdout.Clean(); err != nil {
- errors = append(errors, fmt.Sprintf("error close stdout: %s", err))
- }
+ if err := c.stdout.Clean(); err != nil {
+ errors = append(errors, fmt.Sprintf("error close stdout: %s", err))
+ }

- if err := c.stderr.Clean(); err != nil {
- errors = append(errors, fmt.Sprintf("error close stderr: %s", err))
- }
+ if err := c.stderr.Clean(); err != nil {
+ errors = append(errors, fmt.Sprintf("error close stderr: %s", err))
+ }

- if len(errors) > 0 {
- return fmt.Errorf(strings.Join(errors, "\n"))
- }
+ if len(errors) > 0 {
+ errorsInLine = fmt.Errorf(strings.Join(errors, "\n"))
+ }
+
+ close(done)
+ }()

- return nil
+ select {
+ case <-done:
+ return errorsInLine
+ case <-time.After(3 * time.Second):
+ return fmt.Errorf("close stream timeout")
+ }
 }

 // CopyToPipe connects streamconfig with a libcontainerd.IOPipe
--
2.27.0
@@ -0,0 +1,90 @@
From c79f7bc343ebb9b855e7a28282d8c9ebcaf7e63c Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Thu, 5 Aug 2021 15:12:14 +0800
Subject: [PATCH] docker: check db file size before start containerd

if the db file's metadata is damaged, the db will load failed
with error "file size too small" when starting. we need to check it
before start containerd.
---
 components/engine/cmd/dockerd/daemon.go | 45 +++++++++++++------------
 1 file changed, 24 insertions(+), 21 deletions(-)

diff --git a/components/engine/cmd/dockerd/daemon.go b/components/engine/cmd/dockerd/daemon.go
index 04bc06b92..a96c9d98b 100644
--- a/components/engine/cmd/dockerd/daemon.go
+++ b/components/engine/cmd/dockerd/daemon.go
@@ -113,28 +113,29 @@ func resumeDM() {
 }
 }

-func cleanupLocalDB(db string) {
- _, err := os.Stat(db)
- if err == nil {
- err = os.Remove(db)
- logrus.Infof("cleanup DB %s error=%v", db, err)
+func cleanupLocalDB(db string, checkSize bool) {
+ if info, err := os.Stat(db); err == nil {
+ if checkSize == false || int(info.Size()) < 2*os.Getpagesize() {
+ err = os.Remove(db)
+ logrus.Infof("cleanup DB %s error=%v", db, err)
+ }
 }
 }

 // DB files may corrupted on exception poweroff but can be rebuild at run time,
 // so we can remove DB files on OS starts avoid daemon can not startup.
 func cleanupLocalDBs(run, root string) {
+ checkSize := true
+
 // check db lock is exist, do nothing if file is existed
 dbLockPath := filepath.Join(run, "dblock")
- _, err := os.Stat(dbLockPath)
- if err == nil {
- return
- }
- if !os.IsNotExist(err) {
- logrus.Errorf("stat dblock failed %v", err)
- return
+ _, statErr := os.Stat(dbLockPath)
+ if os.IsNotExist(statErr) {
+ checkSize = false
+ logrus.Errorf("stat dblock failed %v", statErr)
+ logrus.Devour(ioutil.WriteFile(dbLockPath, []byte{}, 0600))
 }
- logrus.Devour(ioutil.WriteFile(dbLockPath, []byte{}, 0600))
+
 files, err := ioutil.ReadDir(filepath.Join(run, "containerd"))
 logrus.Devour(err)
 olds, err := ioutil.ReadDir(filepath.Join(run, "libcontainerd"))
@@ -145,17 +146,19 @@ func cleanupLocalDBs(run, root string) {
 return
 }
 }
+
 if os.Getenv("DISABLE_CRASH_FILES_DELETE") == "true" {
 return
 }
- cleanupLocalDB(filepath.Join(root, "containerd/daemon/io.containerd.metadata.v1.bolt/meta.db"))
- cleanupLocalDB(filepath.Join(root, "builder/fscache.db"))
- cleanupLocalDB(filepath.Join(root, "volumes/metadata.db"))
- cleanupLocalDB(filepath.Join(root, "network/files/local-kv.db"))
- cleanupLocalDB(filepath.Join(root, "accelerator/accel.db"))
- cleanupLocalDB(filepath.Join(root, "buildkit/metadata.db"))
- cleanupLocalDB(filepath.Join(root, "buildkit/cache.db"))
- cleanupLocalDB(filepath.Join(root, "buildkit/snapshots.db"))
+
+ cleanupLocalDB(filepath.Join(root, "containerd/daemon/io.containerd.metadata.v1.bolt/meta.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "builder/fscache.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "volumes/metadata.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "network/files/local-kv.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "accelerator/accel.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "buildkit/metadata.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "buildkit/cache.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "buildkit/snapshots.db"), checkSize)
 }

 func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
--
2.27.0
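The size test in the patch assumes a boltdb file shorter than two OS pages can never load and will fail with "file size too small", so such a file is safe to delete and rebuild. A minimal standalone check in the same spirit; the path is illustrative and the two-page threshold mirrors the patch rather than the bolt API:

package main

import (
	"fmt"
	"os"
)

// looksTruncated reports whether a database file is smaller than two OS pages,
// the same threshold the patched cleanupLocalDB applies before removing it.
func looksTruncated(path string) (bool, error) {
	info, err := os.Stat(path)
	if err != nil {
		return false, err
	}
	return int(info.Size()) < 2*os.Getpagesize(), nil
}

func main() {
	// illustrative path; the daemon checks files such as volumes/metadata.db
	bad, err := looksTruncated("/var/lib/docker/volumes/metadata.db")
	fmt.Println(bad, err)
}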
@@ -1,4 +1,4 @@
-From 6926d4e842dc26043cbdd38de5a8c0776f0d4d43 Mon Sep 17 00:00:00 2001
+From 372bbea9041ab101156c881232d83d3e3124fd25 Mon Sep 17 00:00:00 2001
 From: WangFengTu <wangfengtu@huawei.com>
 Date: Sun, 29 Aug 2021 15:49:03 +0800
 Subject: [PATCH] fix dangling unpigz
@@ -9,7 +9,7 @@ Signed-off-by: WangFengTu <wangfengtu@huawei.com>
 1 file changed, 1 insertion(+)
 
 diff --git a/components/engine/builder/dockerfile/copy.go b/components/engine/builder/dockerfile/copy.go
-index ad9b08df..c323e703 100644
+index ad9b08dfe..c323e7033 100644
 --- a/components/engine/builder/dockerfile/copy.go
 +++ b/components/engine/builder/dockerfile/copy.go
 @@ -527,6 +527,7 @@ func isArchivePath(driver containerfs.ContainerFS, path string) bool {
@@ -21,5 +21,5 @@ index ad9b08df..c323e703 100644
 _, err = r.Next()
 return err == nil
 --
-2.25.1
+2.23.0
patch/0211-docker-add-timeout-for-IO.Wait.patch (new file, 86 lines)
@@ -0,0 +1,86 @@
From 0ebaeb1830b42642ae78920afafcadc381053a1e Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 30 Aug 2021 20:44:36 +0800
Subject: [PATCH] docker:add timeout for IO.Wait

---
 .../containerd/containerd/process.go | 40 +++++++++++++------
 1 file changed, 28 insertions(+), 12 deletions(-)

diff --git a/components/engine/vendor/github.com/containerd/containerd/process.go b/components/engine/vendor/github.com/containerd/containerd/process.go
index 4d0dca9f7..a2aaa424b 100644
--- a/components/engine/vendor/github.com/containerd/containerd/process.go
+++ b/components/engine/vendor/github.com/containerd/containerd/process.go
@@ -18,6 +18,7 @@ package containerd

 import (
 "context"
+ "fmt"
 "strings"
 "syscall"
 "time"
@@ -105,6 +106,21 @@ func (p *process) Pid() uint32 {
 return p.pid
 }

+func waitTimeout(io cio.IO, timeout time.Duration) error {
+ done := make(chan struct{})
+ go func() {
+ io.Wait()
+ close(done)
+ }()
+
+ select {
+ case <-done:
+ return nil
+ case <-time.After(timeout):
+ return fmt.Errorf("Wait IO timeout")
+ }
+}
+
 // Start starts the exec process
 func (p *process) Start(ctx context.Context) error {
 r, err := p.task.client.TaskService().Start(ctx, &tasks.StartRequest{
@@ -112,19 +128,14 @@ func (p *process) Start(ctx context.Context) error {
 ExecID: p.id,
 })
 if err != nil {
- done := make(chan struct{})
- go func() {
- p.io.Cancel()
- p.io.Wait()
- p.io.Close()
- close(done)
- }()
- select {
- case <-time.After(30 * time.Second):
+ p.io.Cancel()
+
+ errWait := waitTimeout(p.io, 30*time.Second)
+ if errWait != nil {
 logrus.Warnf("process start failed with error %v, wait io close timeout, some fifo io may be dropped.", err)
- case <-done:
- // ok
 }
+ p.io.Close()
+
 return errdefs.FromGRPC(err)
 }
 p.pid = r.Pid
@@ -221,7 +232,12 @@ func (p *process) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitS
 }
 if p.io != nil {
 p.io.Cancel()
- p.io.Wait()
+
+ err := waitTimeout(p.io, 3*time.Second)
+ if err != nil {
+ logrus.Warnf("Wait io close timeout, some fifo io may be dropped.")
+ }
+
 p.io.Close()
 }
 return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil
--
2.27.0
patch/0212-docker-fix-time-Ticker-leak.patch (new file, 38 lines)
@ -0,0 +1,38 @@
From aa1e1d6caf6983e6242a13b4cf98497161a7abb5 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Sat, 11 Sep 2021 11:45:53 +0800
Subject: [PATCH] docker:fix time Ticker leak

The Ticker created by time.Tick cannot be recovered by the garbage
collector, so it leaks and causes high CPU usage in this case. Replace it
with time.NewTicker and stop it explicitly.
---
components/engine/daemon/freezer/freezer.go | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/components/engine/daemon/freezer/freezer.go b/components/engine/daemon/freezer/freezer.go
index 907c7aac2..6df176f2f 100644
--- a/components/engine/daemon/freezer/freezer.go
+++ b/components/engine/daemon/freezer/freezer.go
@@ -184,7 +184,8 @@ func (f *freezer) updateCgroup(state string) error {
curState = strings.TrimSpace(curState)

timeout := time.After(30 * time.Second)
- tick := time.Tick(1 * time.Millisecond)
+ ticker := time.NewTicker(1 * time.Millisecond)
+ defer ticker.Stop()
for {
select {
case <-timeout:
@@ -192,7 +193,7 @@ func (f *freezer) updateCgroup(state string) error {
return fmt.Errorf("cannot write %s to freezer for %#v", curState, err)
}
return fmt.Errorf("update freezer cgroup timeout for 30s")
- case <-tick:
+ case <-ticker.C:
// In case this loop does not exit because it doesn't get the expected
// state, let's write again this state, hoping it's going to be properly
// set this time. Otherwise, this loop could run infinitely, waiting for
--
2.27.0

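For illustration only, a minimal standalone Go sketch of the replacement pattern the patch above uses: a poll loop driven by time.NewTicker with an explicit Stop, instead of time.Tick whose ticker can never be stopped or collected. The pollUntil helper and its values are illustrative, not the daemon's code.

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil polls check once per interval until it succeeds or the deadline
// passes. The ticker is created with time.NewTicker and stopped explicitly,
// so it does not leak the way a time.Tick channel would.
func pollUntil(check func() bool, interval, deadline time.Duration) error {
	timeout := time.After(deadline)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-timeout:
			return errors.New("poll timed out")
		case <-ticker.C:
			if check() {
				return nil
			}
		}
	}
}

func main() {
	start := time.Now()
	// Illustrative condition: "ready" after 50ms.
	err := pollUntil(func() bool { return time.Since(start) > 50*time.Millisecond },
		time.Millisecond, time.Second)
	fmt.Println(err) // <nil>
}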
@ -0,0 +1,66 @@
From 1cbe2e6c0865f11fa264c24378bb0180cce6d414 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Wed, 22 Sep 2021 16:09:44 +0800
Subject: [PATCH] docker:fix bug where failed kills didnt fallback to unix kill

If killPossiblyDeadProcess fails, we expect killProcessDirectly to be
executed to kill the process directly. But container.Wait returns an error
when the timeout deadline is exceeded, so the killProcessDirectly function
is not executed and docker stop hangs.
---
components/engine/daemon/kill.go | 14 +++++++++-----
components/engine/daemon/stop.go | 6 ++++--
2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go
index 4c8ccf93d..593275cf8 100644
--- a/components/engine/daemon/kill.go
+++ b/components/engine/daemon/kill.go
@@ -153,8 +153,8 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()

- if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil {
- return err
+ if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() == nil {
+ return nil
}
}

@@ -166,9 +166,13 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
return err
}

- // Wait for exit with no timeout.
- // Ignore returned status.
- <-container.Wait3(context.Background(), containerpkg.WaitConditionNotRunning, waitStop)
+ // wait for container to exit one last time, if it doesn't then kill didnt work, so return error
+ ctx2, cancel2 := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel2()
+
+ if status := <-container.Wait3(ctx2, containerpkg.WaitConditionNotRunning, waitStop); status.Err() != nil {
+ return errors.New("tried to kill container, but did not receive an exit event")
+ }

return nil
}
diff --git a/components/engine/daemon/stop.go b/components/engine/daemon/stop.go
index 40bc36dfd..741f5d5dd 100644
--- a/components/engine/daemon/stop.go
+++ b/components/engine/daemon/stop.go
@@ -82,8 +82,10 @@ func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds i
logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal)
// 3. If it doesn't, then send SIGKILL
if err := daemon.Kill(container); err != nil {
- // Wait without a timeout, ignore result.
- <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning)
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+
+ <-container.Wait(ctx, containerpkg.WaitConditionNotRunning)
logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
}
}
--
2.27.0

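For illustration only, a minimal standalone Go sketch of the corrected check in the patch above: container.Wait yields a status whose Err() is non-nil when the 2-second context expires, so only a nil error may be treated as "the container stopped"; anything else must fall through to the direct-kill path. The wait function here is a hypothetical stand-in, not the daemon's code.

package main

import (
	"context"
	"fmt"
	"time"
)

// killedInTime reports whether the "not running" status arrived before the
// context deadline. Only a nil error means the container actually stopped;
// a deadline error must fall through to the direct SIGKILL fallback.
func killedInTime(wait func(ctx context.Context) <-chan error) bool {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	err := <-wait(ctx)
	return err == nil
}

func main() {
	// Simulates a container that never reports "not running": the channel
	// only yields the context's deadline error, so the fallback must run.
	neverStops := func(ctx context.Context) <-chan error {
		ch := make(chan error, 1)
		go func() {
			<-ctx.Done()
			ch <- ctx.Err()
		}()
		return ch
	}

	if !killedInTime(neverStops) {
		fmt.Println("falling back to direct SIGKILL")
	}
}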
31
patch/0214-docker-do-not-check-result-of-issueDiscard.patch
Normal file
31
patch/0214-docker-do-not-check-result-of-issueDiscard.patch
Normal file
@ -0,0 +1,31 @@
From dd4eb547134482edc9d3248870480c3f24cab655 Mon Sep 17 00:00:00 2001
From: WangFengTu <wangfengtu@huawei.com>
Date: Mon, 18 Oct 2021 16:14:15 +0800
Subject: [PATCH] do not check result of issueDiscard

If the device does not exist, issueDiscard will fail.
We expect deleteDevice to succeed even if the device does not exist.

Signed-off-by: WangFengTu <wangfengtu@huawei.com>
---
components/engine/daemon/graphdriver/devmapper/deviceset.go | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/components/engine/daemon/graphdriver/devmapper/deviceset.go b/components/engine/daemon/graphdriver/devmapper/deviceset.go
index 9b6cb0212..caa0a64cc 100644
--- a/components/engine/daemon/graphdriver/devmapper/deviceset.go
+++ b/components/engine/daemon/graphdriver/devmapper/deviceset.go
@@ -2078,9 +2078,7 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
defer devices.closeTransaction()

if devices.doBlkDiscard {
- if err := devices.issueDiscard(info); err != nil {
- return err
- }
+ devices.issueDiscard(info)
}

// Try to deactivate device in case it is active.
--
2.27.0

69
patch/0215-docker-add-info-log-for-pulling-image.patch
Normal file
69
patch/0215-docker-add-info-log-for-pulling-image.patch
Normal file
@ -0,0 +1,69 @@
From deb30c8d68ff1199b4cbe4822fc8336ff65f6e1f Mon Sep 17 00:00:00 2001
From: WangFengTu <wangfengtu@huawei.com>
Date: Wed, 3 Nov 2021 13:34:53 +0800
Subject: [PATCH] add info log for pulling image

Signed-off-by: WangFengTu <wangfengtu@huawei.com>
---
.../api/server/router/image/image_routes.go | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/components/engine/api/server/router/image/image_routes.go b/components/engine/api/server/router/image/image_routes.go
index b7bb340e9..2c14945d2 100644
--- a/components/engine/api/server/router/image/image_routes.go
+++ b/components/engine/api/server/router/image/image_routes.go
@@ -20,12 +20,14 @@ import (
"github.com/docker/docker/registry"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)

// Creates an image from Pull or from Import
func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {

if err := httputils.ParseForm(r); err != nil {
+ logrus.Errorf("parse image create http request failed: %v", err)
return err
}

@@ -37,16 +39,26 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
err error
output = ioutils.NewWriteFlusher(w)
platform *specs.Platform
+ sp specs.Platform
)
defer output.Close()

+ logrus.Infof("received image create request, name:%v:%v repo:%v", image, tag, repo)
+ defer func() {
+ if err != nil {
+ logrus.Errorf("image create request process failed, name:%v:%v repo:%v error: %v", image, tag, repo, err)
+ } else {
+ logrus.Infof("image create request process success, name:%v:%v repo:%v", image, tag, repo)
+ }
+ }()
+
w.Header().Set("Content-Type", "application/json")

version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.32") {
apiPlatform := r.FormValue("platform")
if apiPlatform != "" {
- sp, err := platforms.Parse(apiPlatform)
+ sp, err = platforms.Parse(apiPlatform)
if err != nil {
return err
}
@@ -70,7 +82,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
authConfig := &types.AuthConfig{}
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
- if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
+ if err = json.NewDecoder(authJSON).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
authConfig = &types.AuthConfig{}
--
2.23.0

@ -0,0 +1,72 @@
From 3fab78a174b23d012a71f96fd4cdc7590706323e Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 8 Nov 2021 20:23:08 +0800
Subject: [PATCH] docker: Adding logs for debugging in docker stop

Make the following logging changes for debugging:
1. add the container id to logs
2. add logs for each "kill"
3. sync with community
---
components/engine/daemon/container_operations_unix.go | 2 +-
components/engine/daemon/stop.go | 10 ++++++----
2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/components/engine/daemon/container_operations_unix.go b/components/engine/daemon/container_operations_unix.go
index df2f3261f..2ea167ca2 100644
--- a/components/engine/daemon/container_operations_unix.go
+++ b/components/engine/daemon/container_operations_unix.go
@@ -345,7 +345,6 @@ func killProcessDirectly(cntr *container.Container) error {
if status.Err() != nil {
// Ensure that we don't kill ourselves
if pid := cntr.GetPID(); pid != 0 {
- logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID))
pattern := fmt.Sprintf("/var/run/docker/containerd/exit/moby/%s.%d.*", cntr.ID, pid)
efiles, err := filepath.Glob(pattern)
if err != nil {
@@ -356,6 +355,7 @@ func killProcessDirectly(cntr *container.Container) error {
return errNoSuchProcess{pid, 9}
}

+ logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID))
if err := unix.Kill(pid, 9); err != nil {
if err != unix.ESRCH {
return err
diff --git a/components/engine/daemon/stop.go b/components/engine/daemon/stop.go
index 741f5d5dd..633a34aab 100644
--- a/components/engine/daemon/stop.go
+++ b/components/engine/daemon/stop.go
@@ -48,7 +48,7 @@ func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds i
stopSignal := container.StopSignal()
// 1. Send a stop signal
if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil {
- logrus.Infof("docker send %d signal to stop container get error: %v", stopSignal, err)
+ logrus.Infof("docker send %d signal to stop container %v get error: %v", stopSignal, container.ID, err)
// While normally we might "return err" here we're not going to
// because if we can't stop the container by this point then
// it's probably because it's already stopped. Meaning, between
@@ -63,7 +63,7 @@ func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds i
defer cancel()

if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil {
- logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal)
+ logrus.Infof("Container %v failed to stop after sending signal %d to the process, force killing", container.ID, stopSignal)
if err := daemon.killPossiblyDeadProcess(container, 9); err != nil {
return err
}
@@ -85,8 +85,10 @@ func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds i
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()

- <-container.Wait(ctx, containerpkg.WaitConditionNotRunning)
- logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
+ if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil {
+ logrus.WithError(err).WithField("container", container.ID).Error("Error killing the container")
+ return err
+ }
}
}

--
2.27.0

@ -0,0 +1,45 @@
From b86b55f6bdad46b2fcb955402c512305eb36e90c Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 15 Nov 2021 15:40:55 +0800
Subject: [PATCH] docker: add log for easy debug in exit event handler

---
components/engine/daemon/monitor.go | 2 +-
components/engine/libcontainerd/client_daemon.go | 7 +++++++
2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/components/engine/daemon/monitor.go b/components/engine/daemon/monitor.go
index 1b577c0da..0aadf33fd 100644
--- a/components/engine/daemon/monitor.go
+++ b/components/engine/daemon/monitor.go
@@ -58,8 +58,8 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerd.EventType, ei libc
daemon.LogContainerEvent(c, "oom")
case libcontainerd.EventExit:
if int(ei.Pid) == c.Pid {
+ logrus.Infof("handle container %s exit event pid=%d", c.ID, c.Pid)
c.Lock()
- logrus.Infof("handle exit event cid=%s pid=%d", c.ID, c.Pid)
_, _, err := daemon.containerd.DeleteTask(context.Background(), c.ID)
if err != nil {
logrus.WithError(err).Warnf("failed to delete container %s from containerd", c.ID)
diff --git a/components/engine/libcontainerd/client_daemon.go b/components/engine/libcontainerd/client_daemon.go
index 9c65e54c3..62e0f58d5 100755
--- a/components/engine/libcontainerd/client_daemon.go
+++ b/components/engine/libcontainerd/client_daemon.go
@@ -726,6 +726,13 @@ func (c *client) processEvent(ctr *container, et EventType, ei EventInfo) {
}).Error("failed to process event")
}

+ defer func() {
+ if et == EventExit {
+ c.logger.Infof("handled exit event processID=%s containerID=%s pid=%d", ei.ProcessID, ei.ContainerID, ei.Pid)
+ }
+ }()
+
+
if et == EventExit && ei.ProcessID != ei.ContainerID {
p := ctr.getProcess(ei.ProcessID)
if p == nil {
--
2.27.0

@ -0,0 +1,26 @@
From 0f1c3dc7a112d26b45001bf0631e6ae43f7c2f39 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Sun, 21 Nov 2021 14:09:37 +0800
Subject: [PATCH] docker: change log level when containerd return "container
not found" err

---
components/engine/daemon/kill.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go
index 593275cf8..3f0972a72 100644
--- a/components/engine/daemon/kill.go
+++ b/components/engine/daemon/kill.go
@@ -105,7 +105,7 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int)
if err := daemon.kill(container, sig); err != nil {
if errdefs.IsNotFound(err) {
unpause = false
- logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Debug("container kill failed because of 'container not found' or 'no such process'")
+ logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Info("container kill failed because of 'container not found' or 'no such process'")
} else {
return errors.Wrapf(err, "Cannot kill container %s", container.ID)
}
--
2.27.0

@ -0,0 +1,82 @@
From d82a0c7617c5b05871c2cd19812e5bbe539dc1b5 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Thu, 9 Dec 2021 11:55:02 +0800
Subject: [PATCH] docker: Fix container exited after docker restart when
processEvent hang

When processEvent hangs, the container state will not become Exited in time,
so containerStop in containerRestart returns nil due to "no such container",
and containerStart in containerRestart does not execute because the container
state is still Running.
---
components/engine/container/container.go | 8 ++++++++
components/engine/daemon/container_operations_unix.go | 2 +-
components/engine/daemon/kill.go | 10 ++++++----
3 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/components/engine/container/container.go b/components/engine/container/container.go
index 7cdf07535..87cdaba2c 100644
--- a/components/engine/container/container.go
+++ b/components/engine/container/container.go
@@ -539,6 +539,14 @@ func (container *Container) StopTimeout() int {
return DefaultStopTimeout
}

+func (container *Container) WaitForState(waitCondition WaitCondition, timeout int) error {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
+ defer cancel()
+
+ status := <-container.Wait(ctx, waitCondition)
+ return status.Err()
+}
+
// InitDNSHostConfig ensures that the dns fields are never nil.
// New containers don't ever have those fields nil,
// but pre created containers can still have those nil values.
diff --git a/components/engine/daemon/container_operations_unix.go b/components/engine/daemon/container_operations_unix.go
index 2ea167ca2..e1456ce86 100644
--- a/components/engine/daemon/container_operations_unix.go
+++ b/components/engine/daemon/container_operations_unix.go
@@ -361,7 +361,7 @@ func killProcessDirectly(cntr *container.Container) error {
return err
}
e := errNoSuchProcess{pid, 9}
- logrus.Debug(e)
+ logrus.WithError(e).WithField("container", cntr.ID).Warning("no such process")
return e
}
}
diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go
index 3f0972a72..2652f7ad2 100644
--- a/components/engine/daemon/kill.go
+++ b/components/engine/daemon/kill.go
@@ -147,13 +147,12 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
// by that time the container is still running, then the error
// we got is probably valid and so we return it to the caller.
if isErrNoSuchProcess(err) {
+ // wait the container's stop amount of time to see the event is eventually processed
+ container.WaitForState(containerpkg.WaitConditionNotRunning, container.StopTimeout())
return nil
}

- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
- defer cancel()
-
- if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() == nil {
+ if waitError := container.WaitForState(containerpkg.WaitConditionNotRunning, 2); waitError == nil {
return nil
}
}
@@ -161,6 +160,9 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := killProcessDirectly(container); err != nil {
if isErrNoSuchProcess(err) {
+ // there is a case where we hit here before the exit event is processed
+ // So let's wait the container's stop timeout amount of time to see if the event is eventually processed
+ container.WaitForState(containerpkg.WaitConditionNotRunning, container.StopTimeout())
return nil
}
return err
--
2.27.0

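For illustration only, a minimal standalone Go sketch of the WaitForState wrapper added above: wait on a status channel under a context whose timeout is given in seconds. The wait function is a hypothetical stand-in for container.Wait(ctx, condition); this is not the daemon's code.

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForState waits on a status channel for at most timeoutSeconds,
// mirroring the Container.WaitForState helper added in the patch above.
func waitForState(wait func(ctx context.Context) <-chan error, timeoutSeconds int) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutSeconds)*time.Second)
	defer cancel()
	return <-wait(ctx)
}

func main() {
	// A wait that only completes when the context expires, so waitForState
	// returns the context's deadline error after roughly one second.
	slowWait := func(ctx context.Context) <-chan error {
		ch := make(chan error, 1)
		go func() {
			<-ctx.Done()
			ch <- ctx.Err()
		}()
		return ch
	}
	fmt.Println(waitForState(slowWait, 1)) // context deadline exceeded
}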
@ -0,0 +1,33 @@
From a7c1bbed0aed4c9a5c67871f7506646c07c34574 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Thu, 9 Dec 2021 20:58:32 +0800
Subject: [PATCH] docker: fix "endpoint with name container_xx already exists
in network none" error

---
components/engine/daemon/kill.go | 9 +++++++++
1 file changed, 9 insertions(+)

diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go
index 2652f7ad2..0388b16c9 100644
--- a/components/engine/daemon/kill.go
+++ b/components/engine/daemon/kill.go
@@ -163,6 +163,15 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
// there is a case where we hit here before the exit event is processed
// So let's wait the container's stop timeout amount of time to see if the event is eventually processed
container.WaitForState(containerpkg.WaitConditionNotRunning, container.StopTimeout())
+ // using mock exit event to handle container exit
+ ei := libcontainerd.EventInfo{
+ ContainerID: container.ID,
+ ProcessID: container.ID,
+ Pid: uint32(container.GetPID()),
+ ExitCode: 137,
+ ExitedAt: time.Now(),
+ }
+ daemon.ProcessEvent(container.ID, libcontainerd.EventExit, ei)
return nil
}
return err
--
2.27.0

@ -0,0 +1,25 @@
From f250af43f458e27e37f2ed2690b320d5bbf80173 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 13 Dec 2021 17:20:13 +0800
Subject: [PATCH] docker: fix "Up 292 years" in status in docker ps -a

---
components/engine/container/state.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/components/engine/container/state.go b/components/engine/container/state.go
index e9666ed92..da19cc49e 100644
--- a/components/engine/container/state.go
+++ b/components/engine/container/state.go
@@ -283,7 +283,7 @@ func (s *State) SetRunning(pid int, initial bool) {
}
s.ExitCodeValue = 0
s.Pid = pid
- if initial {
+ if initial || s.StartedAt.IsZero() {
s.StartedAt = time.Now().UTC()
}
}
--
2.27.0

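For illustration only, a minimal Go sketch of why the status above reads "Up 292 years": when StartedAt is left at the zero time.Time, the computed uptime saturates at the maximum time.Duration, which is roughly 292 years; the patch resets StartedAt whenever it is still the zero value. The variable names here are illustrative.

package main

import (
	"fmt"
	"time"
)

func main() {
	// A zero StartedAt makes the computed uptime overflow: time.Time.Sub
	// saturates at the maximum time.Duration, about 292 years.
	var startedAt time.Time // zero value, as for a container restored without a start time
	up := time.Since(startedAt)
	fmt.Println(up.Hours() / 24 / 365) // ≈ 292

	// The fix above resets StartedAt when it is still the zero value.
	if startedAt.IsZero() {
		startedAt = time.Now().UTC()
	}
	fmt.Println(time.Since(startedAt) < time.Second) // true
}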
111
patch/0222-docker-Use-original-process-spec-for-execs.patch
Normal file
111
patch/0222-docker-Use-original-process-spec-for-execs.patch
Normal file
@ -0,0 +1,111 @@
From 3d3d7570714a8ab60b979eaba39309b6e8fcf75e Mon Sep 17 00:00:00 2001
From: Michael Crosby <crosbymichael@gmail.com>
Date: Wed, 13 Mar 2019 16:04:28 -0400
Subject: [PATCH] Use original process spec for execs

Fixes #38865

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
(cherry picked from commit 7603c22c7365d7d7150597fe396e0707d6e561da)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

Conflict:NA
Reference:https://github.com/docker/engine/pull/178/commits/3d3d7570714a8ab60b979eaba39309b6e8fcf75e

---
components/engine/daemon/exec.go | 24 ++++++++++++++++++------
components/engine/integration/container/exec_test.go | 15 +++++++++++++++
components/engine/integration/internal/container/ops.go | 7 +++++++
3 files changed, 40 insertions(+), 6 deletions(-)

diff --git a/components/engine/daemon/exec.go b/components/engine/daemon/exec.go
index f0b43d7253..abb239b520 100644
--- a/components/engine/daemon/exec.go
+++ b/components/engine/daemon/exec.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
+ "runtime"
"strings"
"time"

@@ -16,7 +17,7 @@ import (
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/term"
- specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -217,12 +218,23 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
ec.StreamConfig.NewNopInputPipe()
}

- p := &specs.Process{
- Args: append([]string{ec.Entrypoint}, ec.Args...),
- Env: ec.Env,
- Terminal: ec.Tty,
- Cwd: ec.WorkingDir,
+ p := &specs.Process{}
+ if runtime.GOOS != "windows" {
+ container, err := d.containerdCli.LoadContainer(ctx, ec.ContainerID)
+ if err != nil {
+ return err
+ }
+ spec, err := container.Spec(ctx)
+ if err != nil {
+ return err
+ }
+ p = spec.Process
}
+ p.Args = append([]string{ec.Entrypoint}, ec.Args...)
+ p.Env = ec.Env
+ p.Cwd = ec.WorkingDir
+ p.Terminal = ec.Tty
+
if p.Cwd == "" {
p.Cwd = "/"
}
diff --git a/components/engine/integration/container/exec_test.go b/components/engine/integration/container/exec_test.go
index 20b1f3e8b5..0c3e01af41 100644
--- a/components/engine/integration/container/exec_test.go
+++ b/components/engine/integration/container/exec_test.go
@@ -118,3 +118,18 @@ func TestExec(t *testing.T) {
assert.Assert(t, is.Contains(out, "PWD=/tmp"), "exec command not running in expected /tmp working directory")
assert.Assert(t, is.Contains(out, "FOO=BAR"), "exec command not running with expected environment variable FOO")
}
+
+func TestExecUser(t *testing.T) {
+ skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.39"), "broken in earlier versions")
+ skip.If(t, testEnv.OSType == "windows", "FIXME. Probably needs to wait for container to be in running state.")
+ defer setupTest(t)()
+ ctx := context.Background()
+ client := testEnv.APIClient()
+
+ cID := container.Run(t, ctx, client, container.WithTty(true), container.WithUser("1:1"))
+
+ result, err := container.Exec(ctx, client, cID, []string{"id"})
+ assert.NilError(t, err)
+
+ assert.Assert(t, is.Contains(result.Stdout(), "uid=1(daemon) gid=1(daemon)"), "exec command not running as uid/gid 1")
+}
diff --git a/components/engine/integration/internal/container/ops.go b/components/engine/integration/internal/container/ops.go
index df5598b62f..b2d170b4df 100644
--- a/components/engine/integration/internal/container/ops.go
+++ b/components/engine/integration/internal/container/ops.go
@@ -134,3 +134,10 @@ func WithAutoRemove(c *TestContainerConfig) {
}
c.HostConfig.AutoRemove = true
}
+
+// WithUser sets the user
+func WithUser(user string) func(c *TestContainerConfig) {
+ return func(c *TestContainerConfig) {
+ c.Config.User = user
+ }
+}
--
2.27.0

@ -1,7 +1,7 @@
From 5d90b8a13a76e296a27b63896f86a109158dcdd5 Mon Sep 17 00:00:00 2001
From d3bf68367fe708a1d74d89a8d57c9b85c4fd292d Mon Sep 17 00:00:00 2001
From: build <build@obs.com>
Date: Fri, 10 Jun 2022 15:11:21 +0800
Subject: [PATCH] 2022
Date: Thu, 16 Jun 2022 09:53:40 +0800
Subject: [PATCH] CVE-2022-24769

Signed-off-by: build <build@obs.com>
---
38
series.conf
38
series.conf
@ -188,9 +188,37 @@ patch/0186-docker-fix-execCommands-leak-in-health-check.patch
patch/0188-docker-check-containerd-pid-before-kill-it.patch
patch/0189-docker-fix-Access-to-remapped-root-allows-privilege-.patch
patch/0190-docker-fix-CVE-2021-21285.patch
patch/0191-rollback-if-docker-restart-when-doing-BlkDiscard.patch
patch/0192-fix-dangling-unpigz.patch
patch/0193-docker-add-clone3-to-seccomp-whitelist-to-fix-curl-f.patch
patch/0194-docker-update-seccomp-whitelist-to-Linux-5.10-syscal.patch
patch/0195-docker-fix-CVE-2022-24769.patch
patch/0191-docker-add-clone3-to-seccomp-whitelist-to-fix-curl-f.patch
patch/0192-docker-update-seccomp-whitelist-to-Linux-5.10-syscal.patch
patch/0193-docker-fix-images-filter-when-use-multi-reference.patch
patch/0194-docker-fix-docker-rmi-stucking.patch
patch/0195-docker-fix-network-sandbox-not-cleaned-up-on-failure.patch
patch/0196-docker-fix-container-status-not-consistent-with-its-.patch
patch/0197-docker-fix-hijack-hang.patch
patch/0198-docker-fix-docker-kill-command-block.patch
patch/0199-docker-pkg-archive-fix-TestTarUntarWithXattr-failure-on-rec.patch
patch/0200-docker-fix-unit-testcase-error.patch
patch/0201-docker-use-info-level-for-create-start-stop-command.patch
patch/0202-docker-rollback-if-docker-restart-when-doing-BlkDiscard.patch
patch/0203-docker-Fix-for-lack-of-syncromization-in-daemon-update.go.patch
patch/0204-docker-Don-t-fail-on-two-concurrent-reference.store.AddDige.patch
patch/0205-docker-Unexport-testcase.Cleanup-to-fix-Go-1.14.patch
patch/0206-docker-archive-fix-race-condition-in-cmdStream.patch
patch/0207-docker-fix-runc-data-and-dm-left-when-periodically-kill-containerd.patch
patch/0208-docker-fix-ProcessEvent-block-when-CloseStreams-block.patch
patch/0209-docker-check-db-file-size-before-start-containerd.patch
patch/0210-docker-fix-dangling-unpigz.patch
patch/0211-docker-add-timeout-for-IO.Wait.patch
patch/0212-docker-fix-time-Ticker-leak.patch
patch/0213-docker-fix-bug-where-failed-kills-didnt-fallback-to-unix-kill.patch
patch/0214-docker-do-not-check-result-of-issueDiscard.patch
patch/0215-docker-add-info-log-for-pulling-image.patch
patch/0216-docker-Adding-logs-for-debugging-in-docker-stop.patch
patch/0217-docker-add-log-for-easy-debug-in-exit-event-handler.patch
patch/0218-docker-change-log-level-when-containerd-return-conta.patch
patch/0219-docker-Fix-container-exited-after-docker-restart-whe.patch
patch/0220-docker-fix-endpoint-with-name-container_xx-already-e.patch
patch/0221-docker-fix-Up-292-years-in-status-in-docker-ps-a.patch
patch/0222-docker-Use-original-process-spec-for-execs.patch
patch/0223-docker-fix-CVE-2022-24769.patch
#end
