diff --git a/VERSION-openeuler b/VERSION-openeuler
index e80a61f..fedb281 100644
--- a/VERSION-openeuler
+++ b/VERSION-openeuler
@@ -1 +1 @@
-0.9.5-21
+0.9.6-1
diff --git a/apply-patches b/apply-patches
index 1b64e8c..c5733c8 100755
--- a/apply-patches
+++ b/apply-patches
@@ -11,7 +11,7 @@
cwd=${PWD}
src=${cwd}/${pkg}
tar_file=v"$(awk -F"-" '{print $1}' < VERSION-openeuler)".tar.gz
-tar -zxvf "${tar_file}"
+mkdir ${src} && tar -zxvf "${tar_file}" -C ${src} --strip-components 1
if [ ! -d patch ]; then
tar -zxvf patch.tar.gz
fi
diff --git a/git-commit b/git-commit
index 1b7ea05..7a8f4f0 100644
--- a/git-commit
+++ b/git-commit
@@ -1 +1 @@
-ec7c7a741944af0725c3446c6fe09513269a18c7
+8de0b373053da17a3dc3e63cda4ea2b8e49655ea
diff --git a/isula-build.spec b/isula-build.spec
index cb2905d..60114a8 100644
--- a/isula-build.spec
+++ b/isula-build.spec
@@ -1,8 +1,8 @@
%global is_systemd 1
Name: isula-build
-Version: 0.9.5
-Release: 21
+Version: 0.9.6
+Release: 1
Summary: A tool to build container images
License: Mulan PSL V2
URL: https://gitee.com/openeuler/isula-build
@@ -85,6 +85,12 @@ fi
/usr/share/bash-completion/completions/isula-build
%changelog
+* Mon Nov 29 2021 DCCooper <1866858@gmail.com> - 0.9.6-1
+- Type:enhancement
+- CVE:NA
+- SUG:restart
+- DESC:Bump version to 0.9.6
+
* Wed Nov 17 2021 jingxiaolu - 0.9.5-21
- Type:enhancement
- CVE:NA
diff --git a/patch/0013-vendor-change-auth.json-file-mode-from-0700-to-0600.patch b/patch/0013-vendor-change-auth.json-file-mode-from-0700-to-0600.patch
deleted file mode 100644
index 26c3fc0..0000000
--- a/patch/0013-vendor-change-auth.json-file-mode-from-0700-to-0600.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From bde19bc4f9fce45ea09974fdd138cad111b9269c Mon Sep 17 00:00:00 2001
-From: lixiang
-Date: Mon, 24 Aug 2020 10:17:20 +0800
-Subject: [PATCH] vendor:change auth.json file mode from 0700 to 0600
-
-reason: change auth.json file mode from 0700 to 0600
-See details in https://github.com/containers/image/issues/974
-
-Signed-off-by: lixiang
---
- .../github.com/containers/image/v5/pkg/docker/config/config.go | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
-index ce85af18..e0b8b9b1 100644
---- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
-+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
-@@ -326,7 +326,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
- }
-
- dir := filepath.Dir(path)
-- if err = os.MkdirAll(dir, 0700); err != nil {
-+ if err = os.MkdirAll(dir, 0600); err != nil {
- return err
- }
-
--
-2.19.1
-
diff --git a/patch/0030-xattr-support-ima-and-evm.patch b/patch/0030-xattr-support-ima-and-evm.patch
index 7287de1..c2b3ebd 100644
--- a/patch/0030-xattr-support-ima-and-evm.patch
+++ b/patch/0030-xattr-support-ima-and-evm.patch
@@ -14,14 +14,14 @@ diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendo
index 4472511..479ade8 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
-@@ -396,7 +396,7 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
+@@ -397,7 +397,7 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
}
- for _, xattr := range []string{"security.capability", "security.ima"} {
+ for _, xattr := range []string{"security.capability", "security.ima", "security.evm"} {
capability, err := system.Lgetxattr(path, xattr)
- if err != nil && err != system.EOPNOTSUPP && err != system.ErrNotSupportedPlatform {
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path)
--
2.27.0
diff --git a/patch/0033-isula-build-remove-docker-releated-path-for-authenti.patch b/patch/0033-isula-build-remove-docker-releated-path-for-authenti.patch
index 79558ef..4d9a6f9 100644
--- a/patch/0033-isula-build-remove-docker-releated-path-for-authenti.patch
+++ b/patch/0033-isula-build-remove-docker-releated-path-for-authenti.patch
@@ -12,17 +12,17 @@ diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b
index cf82ee8..1165d15 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
-@@ -146,14 +146,7 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath {
+@@ -219,14 +219,7 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
paths = append(paths,
authPath{path: filepath.Join(dockerConfig, "config.json"), legacyFormat: false},
)
- } else {
- paths = append(paths,
-- authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false},
- )
}
- paths = append(paths,
-- authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
- )
return paths
}
diff --git a/patch/0037-isula-build-fix-goroutine-leak-problem.patch b/patch/0037-isula-build-fix-goroutine-leak-problem.patch
deleted file mode 100644
index 7d3b118..0000000
--- a/patch/0037-isula-build-fix-goroutine-leak-problem.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From 56012b7a20cd09c91788f610321fefe82f4bbb5f Mon Sep 17 00:00:00 2001
-From: yangfeiyu
-Date: Mon, 7 Sep 2020 20:57:34 +0800
-Subject: [PATCH] isula-build: fix goroutine leak problem
-
-reason:
-when import a zstd tar file, goroutine will leak because of
-the unclosing channel of tar stream
-
-Signed-off-by: yangfeiyu
---
- vendor/github.com/containers/storage/layers.go | 1 +
- vendor/github.com/containers/storage/pkg/archive/archive.go | 1 +
- 2 files changed, 2 insertions(+)
-
-diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
-index 1fc25bab..2d2cf08e 100644
---- a/vendor/github.com/containers/storage/layers.go
-+++ b/vendor/github.com/containers/storage/layers.go
-@@ -1329,6 +1329,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
- if err != nil {
- return -1, err
- }
-+ defer uncompressed.Close()
- uncompressedDigest := digest.Canonical.Digester()
- uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash())
- uidLog := make(map[uint32]struct{})
-diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
-index dd3b7506..58c4d184 100755
---- a/vendor/github.com/containers/storage/pkg/archive/archive.go
-+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
-@@ -139,6 +139,7 @@ func IsArchivePath(path string) bool {
- if err != nil {
- return false
- }
-+ defer rdr.Close()
- r := tar.NewReader(rdr)
- _, err = r.Next()
- return err ==
nil --- -2.23.0 - diff --git a/patch/0039-bugfix-remove-Healthcheck-field-when-build-from-scra.patch b/patch/0039-bugfix-remove-Healthcheck-field-when-build-from-scra.patch deleted file mode 100644 index 877717d..0000000 --- a/patch/0039-bugfix-remove-Healthcheck-field-when-build-from-scra.patch +++ /dev/null @@ -1,26 +0,0 @@ -From dd2d69a851cba9619196166b78564093861ce46b Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Tue, 9 Feb 2021 19:11:05 +0800 -Subject: [PATCH] bugfix: remove Healthcheck field when build from scratch - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - image/image.go | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/image/image.go b/image/image.go -index b5b2264..bbbc7b9 100644 ---- a/image/image.go -+++ b/image/image.go -@@ -289,7 +289,7 @@ func createScratchV2Image() *docker.Image { - ExposedPorts: make(docker.PortSet), - Env: make([]string, 0, 0), - Cmd: make([]string, 0, 0), -- Healthcheck: &docker.HealthConfig{}, -+ Healthcheck: nil, - Volumes: make(map[string]struct{}), - Entrypoint: make([]string, 0, 0), - OnBuild: make([]string, 0, 0), --- -1.8.3.1 - diff --git a/patch/0040-vendor-update-tabulate-vendor-to-support-eliminate-s.patch b/patch/0040-vendor-update-tabulate-vendor-to-support-eliminate-s.patch deleted file mode 100644 index a842e05..0000000 --- a/patch/0040-vendor-update-tabulate-vendor-to-support-eliminate-s.patch +++ /dev/null @@ -1,121 +0,0 @@ -From e38c2ef1e4dc0f7579027deb7c36cba2516e8161 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Wed, 10 Feb 2021 10:09:31 +0800 -Subject: [PATCH 1/2] vendor:update tabulate vendor to support eliminate space - line - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - go.mod | 2 +- - go.sum | 4 +-- - vendor/github.com/bndr/gotabulate/tabulate.go | 25 +++++++++++++------ - vendor/modules.txt | 2 +- - 4 files changed, 22 insertions(+), 11 deletions(-) - -diff --git a/go.mod b/go.mod -index 336dd2cc..b02071c9 100644 ---- a/go.mod -+++ b/go.mod -@@ -5,7 +5,7 @@ go 1.13 - require ( - github.com/BurntSushi/toml v0.3.1 - github.com/blang/semver v4.0.0+incompatible // indirect -- github.com/bndr/gotabulate v1.1.3-0.20170315142410-bc555436bfd5 -+ github.com/bndr/gotabulate v1.1.3-0.20210209140214-21a495b00e22 - github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340 // indirect - github.com/containerd/containerd v1.4.0-rc.0 - github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe // indirect -diff --git a/go.sum b/go.sum -index 1ecfa084..3a52a22f 100644 ---- a/go.sum -+++ b/go.sum -@@ -43,8 +43,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r - github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= - github.com/blang/semver v4.0.0+incompatible h1:hnDL+Ci6ZJmEDIbUvCUt3Gh3QsnkdiIj88cWsqe4C4I= - github.com/blang/semver v4.0.0+incompatible/go.mod h1:u4Z/LRonWXLVIJgtpeY3+xwWiIhiJ9ilXrKVGnfHe/c= --github.com/bndr/gotabulate v1.1.3-0.20170315142410-bc555436bfd5 h1:D48YSLPNJ8WpdwDqYF8bMMKUB2bgdWEiFx1MGwPIdbs= --github.com/bndr/gotabulate v1.1.3-0.20170315142410-bc555436bfd5/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U= -+github.com/bndr/gotabulate v1.1.3-0.20210209140214-21a495b00e22 h1:IsKzSX8XqgT8xSo4nxtTOH7014e1L+vPB1wh3IqkWr0= -+github.com/bndr/gotabulate v1.1.3-0.20210209140214-21a495b00e22/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U= - github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= - 
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= - github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -diff --git a/vendor/github.com/bndr/gotabulate/tabulate.go b/vendor/github.com/bndr/gotabulate/tabulate.go -index a2e43265..e6684d22 100644 ---- a/vendor/github.com/bndr/gotabulate/tabulate.go -+++ b/vendor/github.com/bndr/gotabulate/tabulate.go -@@ -1,10 +1,13 @@ - package gotabulate - --import "fmt" --import "bytes" --import "github.com/mattn/go-runewidth" --import "unicode/utf8" --import "math" -+import ( -+ "bytes" -+ "fmt" -+ "math" -+ "unicode/utf8" -+ -+ "github.com/mattn/go-runewidth" -+) - - // Basic Structure of TableFormat - type TableFormat struct { -@@ -84,6 +87,7 @@ type Tabulate struct { - WrapStrings bool - WrapDelimiter rune - SplitConcat string -+ DenseMode bool - } - - // Represents normalized tabulate Row -@@ -292,7 +296,7 @@ func (t *Tabulate) Render(format ...interface{}) string { - // Add Data Rows - for index, element := range t.Data { - lines = append(lines, t.buildRow(t.padRow(element.Elements, t.TableFormat.Padding), padded_widths, cols, t.TableFormat.DataRow)) -- if index < len(t.Data)-1 { -+ if !t.DenseMode && index < len(t.Data)-1 { - if element.Continuos != true && !inSlice("betweenLine", t.HideLines) { - lines = append(lines, t.buildLine(padded_widths, cols, t.TableFormat.LineBetweenRows)) - } -@@ -385,7 +389,8 @@ func (t *Tabulate) SetEmptyString(empty string) { - // Can be: - // top - Top line of the table, - // belowheader - Line below the header, --// bottom - Bottom line of the table -+// bottomLine - Bottom line of the table -+// betweenLine - Between line of the table - func (t *Tabulate) SetHideLines(hide []string) { - t.HideLines = hide - } -@@ -401,6 +406,12 @@ func (t *Tabulate) SetMaxCellSize(max int) { - t.MaxSize = max - } - -+// Sets dense mode -+// Under dense mode, no space line between rows -+func (t *Tabulate) SetDenseMode() { -+ t.DenseMode = true -+} -+ - func (t *Tabulate) splitElement(e string) (bool, string) { - //check if we are not attempting to smartly wrap - if t.WrapDelimiter == 0 { -diff --git a/vendor/modules.txt b/vendor/modules.txt -index bb224e3e..0017d4a3 100644 ---- a/vendor/modules.txt -+++ b/vendor/modules.txt -@@ -33,7 +33,7 @@ github.com/acarl005/stripansi - github.com/beorn7/perks/quantile - # github.com/blang/semver v4.0.0+incompatible - github.com/blang/semver --# github.com/bndr/gotabulate v1.1.3-0.20170315142410-bc555436bfd5 -+# github.com/bndr/gotabulate v1.1.3-0.20210209140214-21a495b00e22 - github.com/bndr/gotabulate - # github.com/cespare/xxhash/v2 v2.1.1 - github.com/cespare/xxhash/v2 --- -2.27.0 - diff --git a/patch/0041-enhancement-remove-empty-lines-when-showing-image-li.patch b/patch/0041-enhancement-remove-empty-lines-when-showing-image-li.patch deleted file mode 100644 index 2a97023..0000000 --- a/patch/0041-enhancement-remove-empty-lines-when-showing-image-li.patch +++ /dev/null @@ -1,24 +0,0 @@ -From 647011394abab7da261e827a3148c0c89467a6f8 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Wed, 10 Feb 2021 10:16:17 +0800 -Subject: [PATCH 2/2] enhancement: remove empty lines when showing image list - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - cmd/cli/images.go | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/cmd/cli/images.go b/cmd/cli/images.go -index bded3617..19777ce8 100644 ---- a/cmd/cli/images.go -+++ b/cmd/cli/images.go -@@ -100,5 +100,6 @@ func 
formatAndPrint(images []*pb.ListResponse_ImageInfo) { - tabulate := gotabulate.Create(lines) - tabulate.SetHeaders(title) - tabulate.SetAlign("left") -+ tabulate.SetDenseMode() - fmt.Print(tabulate.Render("simple")) - } --- -2.27.0 - diff --git a/patch/0042-fix-some-make-checkall-golangci-lint-flaws.patch b/patch/0042-fix-some-make-checkall-golangci-lint-flaws.patch deleted file mode 100644 index c6f5b1e..0000000 --- a/patch/0042-fix-some-make-checkall-golangci-lint-flaws.patch +++ /dev/null @@ -1,136 +0,0 @@ -From 34fdae49f82410a8bcc9c6f5940af01a24538de6 Mon Sep 17 00:00:00 2001 -From: meilier -Date: Thu, 4 Feb 2021 18:40:31 +0800 -Subject: [PATCH 01/10] fix some make checkall golangci-lint flaws - ---- - builder/dockerfile/container/container_src.go | 2 +- - builder/dockerfile/container/help.go | 2 +- - daemon/load.go | 4 ++-- - daemon/login.go | 4 ++-- - daemon/logout.go | 4 ++-- - daemon/save.go | 2 -- - pkg/manifest/list.go | 2 +- - util/util.go | 2 -- - 8 files changed, 9 insertions(+), 13 deletions(-) - -diff --git a/builder/dockerfile/container/container_src.go b/builder/dockerfile/container/container_src.go -index ff52ee2c..9426ec76 100644 ---- a/builder/dockerfile/container/container_src.go -+++ b/builder/dockerfile/container/container_src.go -@@ -98,7 +98,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, - return nil, -1, errors.Wrapf(err, "blob file %q is not exit", blobFile) - } - -- layerFile, err := os.OpenFile(blobFile, os.O_RDONLY, constant.DefaultRootFileMode) -+ layerFile, err := os.OpenFile(filepath.Clean(blobFile), os.O_RDONLY, constant.DefaultRootFileMode) - if err != nil { - return nil, -1, errors.Wrapf(err, "open the blob file %q failed", blobFile) - } -diff --git a/builder/dockerfile/container/help.go b/builder/dockerfile/container/help.go -index c5aa381d..475b479d 100644 ---- a/builder/dockerfile/container/help.go -+++ b/builder/dockerfile/container/help.go -@@ -170,7 +170,7 @@ func (ref *Reference) saveLayerToStorage(path string, layer *storage.Layer) (dif - }() - - filename := filepath.Join(path, "layer") -- layerFile, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, constant.DefaultRootFileMode) -+ layerFile, err := os.OpenFile(filepath.Clean(filename), os.O_CREATE|os.O_WRONLY, constant.DefaultRootFileMode) - if err != nil { - return "", des, errors.Wrapf(err, "error opening file: %s", filename) - } -diff --git a/daemon/load.go b/daemon/load.go -index 08fb5b1f..d756f9ed 100644 ---- a/daemon/load.go -+++ b/daemon/load.go -@@ -55,8 +55,8 @@ func (b *Backend) Load(req *pb.LoadRequest, stream pb.Control_LoadServer) error - ) - opts := b.getLoadOptions(req) - -- if err := util.CheckLoadFile(req.Path); err != nil { -- return err -+ if cErr := util.CheckLoadFile(req.Path); cErr != nil { -+ return cErr - } - - repoTags, err = tryToParseImageFormatFromTarball(b.daemon.opts.DataRoot, &opts) -diff --git a/daemon/login.go b/daemon/login.go -index e3399983..6eeda28e 100644 ---- a/daemon/login.go -+++ b/daemon/login.go -@@ -60,8 +60,8 @@ func (b *Backend) Login(ctx context.Context, req *pb.LoginRequest) (*pb.LoginRes - } - - if loginWithAuthFile(req) { -- auth, err := config.GetCredentials(sysCtx, req.Server) -- if err != nil || auth.Password == "" { -+ auth, gErr := config.GetCredentials(sysCtx, req.Server) -+ if gErr != nil || auth.Password == "" { - auth = types.DockerAuthConfig{} - return &pb.LoginResponse{Content: errTryToUseAuth}, errors.Errorf("failed to read auth file: %v", errTryToUseAuth) - } -diff --git a/daemon/logout.go 
b/daemon/logout.go -index 355b1f7a..d1fbebcb 100644 ---- a/daemon/logout.go -+++ b/daemon/logout.go -@@ -47,8 +47,8 @@ func (b *Backend) Logout(ctx context.Context, req *pb.LogoutRequest) (*pb.Logout - } - - if req.All { -- if err := config.RemoveAllAuthentication(sysCtx); err != nil { -- return &pb.LogoutResponse{Result: "Remove authentications failed"}, err -+ if rErr := config.RemoveAllAuthentication(sysCtx); rErr != nil { -+ return &pb.LogoutResponse{Result: "Remove authentications failed"}, rErr - } - logrus.Info("Success logout from all servers") - -diff --git a/daemon/save.go b/daemon/save.go -index 3dce7bdf..c6411e04 100644 ---- a/daemon/save.go -+++ b/daemon/save.go -@@ -17,7 +17,6 @@ import ( - "context" - "os" - -- "github.com/containers/image/v5/docker/archive" - "github.com/containers/image/v5/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -@@ -34,7 +33,6 @@ import ( - ) - - type saveOptions struct { -- writer *archive.Writer - sysCtx *types.SystemContext - logger *logger.Logger - localStore *store.Store -diff --git a/pkg/manifest/list.go b/pkg/manifest/list.go -index 3f8b2fed..381746f7 100644 ---- a/pkg/manifest/list.go -+++ b/pkg/manifest/list.go -@@ -129,7 +129,7 @@ func (l *List) SaveListToImage(store *store.Store, imageID, name string) (string - return "", errors.Wrapf(err, "save manifest list to image %v error", imageID) - } - -- //marshal list instance information -+ // marshal list instance information - instancesBytes, err := json.Marshal(&l.instances) - if err != nil { - return "", errors.Wrap(err, "marshall list instances error") -diff --git a/util/util.go b/util/util.go -index 61458c73..3f46d796 100644 ---- a/util/util.go -+++ b/util/util.go -@@ -57,8 +57,6 @@ const ( - var ( - // DefaultRegistryPathPrefix is the map for registry and path - DefaultRegistryPathPrefix map[string]string -- // clientExporters to map exporter whether will export the image to client -- clientExporters map[string]bool - ) - - func init() { --- -2.27.0 - diff --git a/patch/0043-enhancement-add-go-test-for-RUN-panic-problem.patch b/patch/0043-enhancement-add-go-test-for-RUN-panic-problem.patch deleted file mode 100644 index ec828c8..0000000 --- a/patch/0043-enhancement-add-go-test-for-RUN-panic-problem.patch +++ /dev/null @@ -1,66 +0,0 @@ -From c0d4159d7719fb94b4b421415b8f367c6f61c68e Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Mon, 8 Feb 2021 10:22:33 +0800 -Subject: [PATCH 02/10] enhancement: add go test for RUN panic problem - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - builder/dockerfile/parser/parser_test.go | 18 ++++++++++++++---- - .../testfiles/preprocess/run_with_directive | 2 ++ - 2 files changed, 16 insertions(+), 4 deletions(-) - create mode 100644 builder/dockerfile/parser/testfiles/preprocess/run_with_directive - -diff --git a/builder/dockerfile/parser/parser_test.go b/builder/dockerfile/parser/parser_test.go -index f0cce1e9..3da5bea6 100644 ---- a/builder/dockerfile/parser/parser_test.go -+++ b/builder/dockerfile/parser/parser_test.go -@@ -69,8 +69,9 @@ func TestPreProcess(t *testing.T) { - - func TestFormat(t *testing.T) { - type testcase struct { -- name string -- expect int -+ name string -+ expect int -+ wantErr bool - } - var testcases = []testcase{ - { -@@ -89,6 +90,10 @@ func TestFormat(t *testing.T) { - name: "yum_config", - expect: 8, - }, -+ { -+ name: "run_with_directive", -+ wantErr: true, -+ }, - } - - for _, tc := range testcases { -@@ -103,8 +108,13 @@ func TestFormat(t *testing.T) { - d, err := 
newDirective(bytes.NewReader(buf.Bytes())) - assert.NilError(t, err) - lines, err := format(rows, d) -- assert.NilError(t, err) -- assert.Equal(t, len(lines), tc.expect) -+ if (err != nil) != tc.wantErr { -+ t.Errorf("Testing failed. Expected: %v, got: %v", tc.wantErr, err) -+ } -+ if !tc.wantErr { -+ assert.NilError(t, err, file) -+ assert.Equal(t, len(lines), tc.expect) -+ } - }) - } - } -diff --git a/builder/dockerfile/parser/testfiles/preprocess/run_with_directive b/builder/dockerfile/parser/testfiles/preprocess/run_with_directive -new file mode 100644 -index 00000000..3f3465d3 ---- /dev/null -+++ b/builder/dockerfile/parser/testfiles/preprocess/run_with_directive -@@ -0,0 +1,2 @@ -+FROM scratch -+RUN \ --- -2.27.0 - diff --git a/patch/0044-fix-load-oci-image-panic.patch b/patch/0044-fix-load-oci-image-panic.patch deleted file mode 100644 index cc4bd3f..0000000 --- a/patch/0044-fix-load-oci-image-panic.patch +++ /dev/null @@ -1,340 +0,0 @@ -From 947fc1ef0c103f687e195c467ddabd3cf0aa746f Mon Sep 17 00:00:00 2001 -From: meilier -Date: Sat, 20 Feb 2021 00:42:55 +0800 -Subject: [PATCH 06/10] fix load oci image panic - ---- - cmd/cli/save.go | 3 + - cmd/cli/save_test.go | 18 +++++ - daemon/load.go | 11 +-- - daemon/load_test.go | 188 +++++++++++++++++++++++++++++++++++-------- - 4 files changed, 181 insertions(+), 39 deletions(-) - -diff --git a/cmd/cli/save.go b/cmd/cli/save.go -index 64dc8acc..fe676731 100644 ---- a/cmd/cli/save.go -+++ b/cmd/cli/save.go -@@ -72,6 +72,9 @@ func saveCommand(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("save accepts at least one image") - } -+ if saveOpts.format == exporter.OCITransport && len(args) >= 2 { -+ return errors.New("oci image format now only supports saving single image") -+ } - if err := exporter.CheckImageFormat(saveOpts.format); err != nil { - return err - } -diff --git a/cmd/cli/save_test.go b/cmd/cli/save_test.go -index 4183aa8b..3fe6bf81 100644 ---- a/cmd/cli/save_test.go -+++ b/cmd/cli/save_test.go -@@ -38,6 +38,8 @@ func TestSaveCommand(t *testing.T) { - wantErr bool - } - -+ // For normal cases, default err is "invalid socket path: unix:///var/run/isula_build.sock". -+ // As daemon is not running as we run unit test. 
- var testcases = []testcase{ - { - name: "TC1 - normal case with format docker", -@@ -103,6 +105,22 @@ func TestSaveCommand(t *testing.T) { - errString: "colon in path", - format: "docker", - }, -+ { -+ name: "TC9 - normal case save multiple images with format docker", -+ path: tmpDir.Join("test9"), -+ args: []string{"testImage1", "testImage2"}, -+ wantErr: true, -+ errString: "isula_build.sock", -+ format: "docker", -+ }, -+ { -+ name: "TC10 - abnormal case save multiple images with format oci", -+ path: tmpDir.Join("test10"), -+ args: []string{"testImage1", "testImage2"}, -+ wantErr: true, -+ errString: "oci image format now only supports saving single image", -+ format: "oci", -+ }, - } - - for _, tc := range testcases { -diff --git a/daemon/load.go b/daemon/load.go -index d756f9ed..b557d386 100644 ---- a/daemon/load.go -+++ b/daemon/load.go -@@ -147,7 +147,6 @@ func tryToParseImageFormatFromTarball(dataRoot string, opts *loadOptions) ([][]s - - func getDockerRepoTagFromImageTar(systemContext *types.SystemContext, path string) ([][]string, error) { - // tmp dir will be removed after NewSourceFromFileWithContext -- - tarfileSource, err := tarfile.NewSourceFromFileWithContext(systemContext, path) - if err != nil { - return nil, errors.Wrapf(err, "failed to get the source of loading tar file") -@@ -168,8 +167,7 @@ func getDockerRepoTagFromImageTar(systemContext *types.SystemContext, path strin - - func getOCIRepoTagFromImageTar(systemContext *types.SystemContext, path string) ([][]string, error) { - var ( -- allRepoTags [][]string -- err error -+ err error - ) - - srcRef, err := alltransports.ParseImageName(exporter.FormatTransport(exporter.OCIArchiveTransport, path)) -@@ -179,14 +177,13 @@ func getOCIRepoTagFromImageTar(systemContext *types.SystemContext, path string) - - tarManifest, err := ociarchive.LoadManifestDescriptorWithContext(systemContext, srcRef) - if err != nil { -- return nil, errors.Wrapf(err, "failed to loadmanifest descriptor of oci image format") -+ return nil, errors.Wrapf(err, "failed to load manifest descriptor of oci image format") - } - -- // If index.json has no reference name, compute the image digest instead - // For now, we only support load single image in archive file - if _, ok := tarManifest.Annotations[imgspecv1.AnnotationRefName]; ok { -- allRepoTags = [][]string{{tarManifest.Annotations[imgspecv1.AnnotationRefName]}} -+ return [][]string{{tarManifest.Annotations[imgspecv1.AnnotationRefName]}}, nil - } - -- return allRepoTags, nil -+ return [][]string{{}}, nil - } -diff --git a/daemon/load_test.go b/daemon/load_test.go -index 0513a889..cbcb5d8f 100644 ---- a/daemon/load_test.go -+++ b/daemon/load_test.go -@@ -30,6 +30,12 @@ import ( - "isula.org/isula-build/store" - ) - -+const ( -+ loadedTarFile = "load.tar" -+ manifestJSONFile = "manifest.json" -+ indexJSONFile = "index.json" -+) -+ - var ( - localStore store.Store - daemon *Daemon -@@ -51,10 +57,10 @@ func (x *controlLoadServer) Context() context.Context { - return context.Background() - } - --func prepareLoadTar(dir *fs.Dir) error { -- manifest := dir.Join("manifest.json") -+func prepareLoadTar(dir *fs.Dir, jsonFile string) error { -+ manifest := dir.Join(jsonFile) - -- fi, err := os.Create(dir.Join("load.tar")) -+ fi, err := os.Create(dir.Join(loadedTarFile)) - if err != nil { - return nil - } -@@ -88,9 +94,9 @@ func prepareLoadTar(dir *fs.Dir) error { - - } - --func prepareForLoad(t *testing.T, manifest string) *fs.Dir { -- tmpDir := fs.NewDir(t, t.Name(), fs.WithFile("manifest.json", manifest)) -- 
if err := prepareLoadTar(tmpDir); err != nil { -+func prepareForLoad(t *testing.T, jsonFile, manifest string) *fs.Dir { -+ tmpDir := fs.NewDir(t, t.Name(), fs.WithFile(jsonFile, manifest)) -+ if err := prepareLoadTar(tmpDir, jsonFile); err != nil { - tmpDir.Remove() - return nil - } -@@ -119,34 +125,152 @@ func clean(dir *fs.Dir) { - dir.Remove() - } - --func TestLoad(t *testing.T) { -- manifestJSON := -- `[ -- { -- "Config":"76a4dd2d5d6a18323ac8d90f959c3c8562bf592e2a559bab9b462ab600e9e5fc.json", -- "RepoTags":[ -- "hello:latest" -- ], -- "Layers":[ -- "6eb4c21cc3fcb729a9df230ae522c1d3708ca66e5cf531713dbfa679837aa287.tar", -- "37841116ad3b1eeea972c75ab8bad05f48f721a7431924bc547fc91c9076c1c8.tar" -- ] -+func TestLoadSingleImage(t *testing.T) { -+ testcases := []struct { -+ name string -+ manifest string -+ format string -+ tarPath string -+ withTag bool -+ wantErr bool -+ errString string -+ }{ -+ { -+ name: "TC1 normal case load docker tar", -+ manifest: `[ -+ { -+ "Config":"76a4dd2d5d6a18323ac8d90f959c3c8562bf592e2a559bab9b462ab600e9e5fc.json", -+ "RepoTags":[ -+ "hello:latest" -+ ], -+ "Layers":[ -+ "6eb4c21cc3fcb729a9df230ae522c1d3708ca66e5cf531713dbfa679837aa287.tar", -+ "37841116ad3b1eeea972c75ab8bad05f48f721a7431924bc547fc91c9076c1c8.tar" -+ ] -+ } -+ ]`, -+ format: "docker", -+ withTag: true, -+ }, -+ { -+ name: "TC2 normal case load oci tar", -+ manifest: `{ -+ "schemaVersion": 2, -+ "manifests": [ -+ { -+ "mediaType": "application/vnd.oci.image.manifest.v1+json", -+ "digest": "sha256:a65db259a719d915df30c82ce554ab3880ea567e2150d6288580408c2629b802", -+ "size": 347, -+ "annotations": { -+ "org.opencontainers.image.ref.name": "hello:latest" -+ } -+ } -+ ] -+ }`, -+ format: "oci", -+ withTag: true, -+ }, -+ { -+ name: "TC3 normal case load docker tar with no RepoTags", -+ manifest: `[ -+ { -+ "Config":"76a4dd2d5d6a18323ac8d90f959c3c8562bf592e2a559bab9b462ab600e9e5fc.json", -+ "RepoTags":[], -+ "Layers":[ -+ "6eb4c21cc3fcb729a9df230ae522c1d3708ca66e5cf531713dbfa679837aa287.tar", -+ "37841116ad3b1eeea972c75ab8bad05f48f721a7431924bc547fc91c9076c1c8.tar" -+ ] -+ } -+ ]`, -+ format: "docker", -+ withTag: false, -+ }, -+ { -+ name: "TC4 normal case load oci tar with no annotations", -+ manifest: `{ -+ "schemaVersion": 2, -+ "manifests": [ -+ { -+ "mediaType": "application/vnd.oci.image.manifest.v1+json", -+ "digest": "sha256:a65db259a719d915df30c82ce554ab3880ea567e2150d6288580408c2629b802", -+ "size": 347 -+ } -+ ] -+ }`, -+ format: "oci", -+ withTag: false, -+ }, -+ { -+ name: "TC5 abnormal case load docker tar with wrong manifestJSON", -+ manifest: `[ -+ { -+ :"76a4dd2d5d6a18323ac8d90f959c3c8562bf592e2a559bab9b462ab600e9e5fc.json", -+ "RepoTags":[ -+ "hello:latest" -+ ], -+ "Layers":[ -+ "6eb4c21cc3fcb729a9df230ae522c1d3708ca66e5cf531713dbfa679837aa287.tar", -+ "37841116ad3b1eeea972c75ab8bad05f48f721a7431924bc547fc91c9076c1c8.tar" -+ ] -+ } -+ ]`, -+ format: "docker", -+ withTag: true, -+ wantErr: true, -+ errString: "error loading index", -+ }, -+ { -+ name: "TC6 abnormal case with wrong tar path", -+ manifest: `[ -+ { -+ "Config":"76a4dd2d5d6a18323ac8d90f959c3c8562bf592e2a559bab9b462ab600e9e5fc.json", -+ "RepoTags":[ -+ "hello:latest" -+ ], -+ "Layers":[ -+ "6eb4c21cc3fcb729a9df230ae522c1d3708ca66e5cf531713dbfa679837aa287.tar", -+ "37841116ad3b1eeea972c75ab8bad05f48f721a7431924bc547fc91c9076c1c8.tar" -+ ] -+ } -+ ]`, -+ -+ tarPath: "/path/that/not/exist/load.tar", -+ format: "docker", -+ withTag: true, -+ wantErr: true, -+ errString: "no such file or directory", -+ }, -+ } -+ -+ for _, 
tc := range testcases { -+ t.Run(tc.name, func(t *testing.T) { -+ var jsonFile string -+ if tc.format == "docker" { -+ jsonFile = manifestJSONFile - } -- ]` -- dir := prepareForLoad(t, manifestJSON) -- assert.Equal(t, dir != nil, true) -- defer clean(dir) -+ if tc.format == "oci" { -+ jsonFile = indexJSONFile -+ } -+ dir := prepareForLoad(t, jsonFile, tc.manifest) -+ assert.Equal(t, dir != nil, true) -+ defer clean(dir) - -- path := dir.Join("load.tar") -- repoTags, err := tryToParseImageFormatFromTarball(daemon.opts.DataRoot, &loadOptions{path: path}) -- assert.NilError(t, err) -- assert.Equal(t, repoTags[0][0], "hello:latest") -+ path := dir.Join(loadedTarFile) -+ if tc.tarPath == "" { -+ tc.tarPath = path -+ } -+ req := &pb.LoadRequest{Path: tc.tarPath} -+ stream := &controlLoadServer{} - -- req := &pb.LoadRequest{Path: path} -- stream := &controlLoadServer{} -+ err := daemon.backend.Load(req, stream) -+ if tc.wantErr { -+ assert.ErrorContains(t, err, tc.errString) -+ return -+ } -+ assert.ErrorContains(t, err, "failed to get the image") -+ }) -+ } - -- err = daemon.backend.Load(req, stream) -- assert.ErrorContains(t, err, "failed to get the image") - } - - func TestLoadMultipleImages(t *testing.T) { -@@ -181,11 +305,11 @@ func TestLoadMultipleImages(t *testing.T) { - ] - } - ]` -- dir := prepareForLoad(t, manifestJSON) -+ dir := prepareForLoad(t, manifestJSONFile, manifestJSON) - assert.Equal(t, dir != nil, true) - defer clean(dir) - -- path := dir.Join("load.tar") -+ path := dir.Join(loadedTarFile) - repoTags, err := tryToParseImageFormatFromTarball(daemon.opts.DataRoot, &loadOptions{path: path}) - assert.NilError(t, err) - assert.Equal(t, repoTags[0][0], "registry.example.com/sayhello:first") --- -2.27.0 - diff --git a/patch/0045-fix-images-command-when-only-give-repository.patch b/patch/0045-fix-images-command-when-only-give-repository.patch deleted file mode 100644 index 4176d52..0000000 --- a/patch/0045-fix-images-command-when-only-give-repository.patch +++ /dev/null @@ -1,409 +0,0 @@ -From 4e71f4409e53eadea0aa39383fba3e249072a932 Mon Sep 17 00:00:00 2001 -From: meilier -Date: Tue, 2 Feb 2021 00:46:23 +0800 -Subject: [PATCH 07/10] fix images command when only give repository - ---- - daemon/images.go | 145 +++++++++++++++++++++------------- - daemon/images_test.go | 178 ++++++++++++++++++++++++++++++++++++++++++ - image/image.go | 9 ++- - 3 files changed, 277 insertions(+), 55 deletions(-) - create mode 100644 daemon/images_test.go - -diff --git a/daemon/images.go b/daemon/images.go -index 5560d18c..e61817cc 100644 ---- a/daemon/images.go -+++ b/daemon/images.go -@@ -15,9 +15,11 @@ package daemon - - import ( - "context" -+ "fmt" - "sort" - "strings" - -+ "github.com/containers/storage" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - -@@ -29,79 +31,114 @@ import ( - ) - - const ( -- none = "" -- decimalPrefixBase = 1000 -+ none = "" -+ decimalPrefixBase = 1000 -+ minImageFieldLenWithTag = 2 - ) - -+type listOptions struct { -+ localStore *store.Store -+ logEntry *logrus.Entry -+ imageName string -+} -+ -+func (b *Backend) getListOptions(req *pb.ListRequest) listOptions { -+ return listOptions{ -+ localStore: b.daemon.localStore, -+ logEntry: logrus.WithFields(logrus.Fields{"ImageName": req.GetImageName()}), -+ imageName: req.GetImageName(), -+ } -+} -+ - // List lists all images - func (b *Backend) List(ctx context.Context, req *pb.ListRequest) (*pb.ListResponse, error) { -- logEntry := logrus.WithFields(logrus.Fields{"ImageName": req.GetImageName()}) -- 
logEntry.Info("ListRequest received") -- -- var reqRepository, reqTag string -- const minImageFieldLenWithTag = 2 -- if req.ImageName != "" { -- imageName := req.ImageName -- _, img, err := image.FindImage(b.daemon.localStore, imageName) -- if err != nil { -- logEntry.Error(err) -- return nil, errors.Wrapf(err, "find local image %v error", imageName) -- } -+ logrus.WithFields(logrus.Fields{ -+ "ImageName": req.GetImageName(), -+ }).Info("ListRequest received") - -- parts := strings.Split(imageName, ":") -- if len(parts) >= minImageFieldLenWithTag { -- reqRepository, reqTag = strings.Join(parts[0:len(parts)-1], ":"), parts[len(parts)-1] -- } -+ opts := b.getListOptions(req) - -- imageInfo := &pb.ListResponse_ImageInfo{ -- Repository: reqRepository, -- Tag: reqTag, -- Id: img.ID, -- Created: img.Created.Format(constant.LayoutTime), -- Size_: getImageSize(b.daemon.localStore, img.ID), -- } -+ slashLastIndex := strings.LastIndex(opts.imageName, "/") -+ colonLastIndex := strings.LastIndex(opts.imageName, ":") -+ if opts.imageName != "" && strings.Contains(opts.imageName, ":") && colonLastIndex > slashLastIndex { -+ return listOneImage(opts) -+ } -+ return listImages(opts) -+} - -- return &pb.ListResponse{Images: []*pb.ListResponse_ImageInfo{imageInfo}}, nil -+func listOneImage(opts listOptions) (*pb.ListResponse, error) { -+ _, image, err := image.FindImage(opts.localStore, opts.imageName) -+ if err != nil { -+ opts.logEntry.Error(err) -+ return nil, errors.Wrapf(err, "find local image %v error", opts.imageName) - } - -- images, err := b.daemon.localStore.Images() -+ result := make([]*pb.ListResponse_ImageInfo, 0, len(image.Names)) -+ appendImageToResult(&result, image, opts.localStore) -+ -+ for _, info := range result { -+ if opts.imageName == fmt.Sprintf("%s:%s", info.Repository, info.Tag) { -+ result = []*pb.ListResponse_ImageInfo{info} -+ } -+ } -+ -+ return &pb.ListResponse{Images: result}, nil -+} -+ -+func listImages(opts listOptions) (*pb.ListResponse, error) { -+ images, err := opts.localStore.Images() - if err != nil { -- logEntry.Error(err) -+ opts.logEntry.Error(err) - return &pb.ListResponse{}, errors.Wrap(err, "failed list images from local storage") - } -+ - sort.Slice(images, func(i, j int) bool { - return images[i].Created.After(images[j].Created) - }) - result := make([]*pb.ListResponse_ImageInfo, 0, len(images)) -- for _, image := range images { -- names := image.Names -- if len(names) == 0 { -- names = []string{none} -+ for i := range images { -+ appendImageToResult(&result, &images[i], opts.localStore) -+ } -+ -+ if opts.imageName == "" { -+ return &pb.ListResponse{Images: result}, nil -+ } -+ -+ sameRepositoryResult := make([]*pb.ListResponse_ImageInfo, 0, len(images)) -+ for _, info := range result { -+ if opts.imageName == info.Repository || strings.HasPrefix(info.Id, opts.imageName) { -+ sameRepositoryResult = append(sameRepositoryResult, info) -+ } -+ } -+ -+ if len(sameRepositoryResult) == 0 { -+ return &pb.ListResponse{}, errors.Errorf("failed to list images with repository %q in local storage", opts.imageName) -+ } -+ return &pb.ListResponse{Images: sameRepositoryResult}, nil -+} -+ -+func appendImageToResult(result *[]*pb.ListResponse_ImageInfo, image *storage.Image, store *store.Store) { -+ names := image.Names -+ if len(names) == 0 { -+ names = []string{none} -+ } -+ -+ for _, name := range names { -+ repository, tag := name, none -+ parts := strings.Split(name, ":") -+ if len(parts) >= minImageFieldLenWithTag { -+ repository, tag = 
strings.Join(parts[0:len(parts)-1], ":"), parts[len(parts)-1] - } -- for _, name := range names { -- repository, tag := name, none -- parts := strings.Split(name, ":") -- if len(parts) >= minImageFieldLenWithTag { -- repository, tag = strings.Join(parts[0:len(parts)-1], ":"), parts[len(parts)-1] -- } -- if reqRepository != "" && reqRepository != repository { -- continue -- } -- if reqTag != "" && reqTag != tag { -- continue -- } -- -- imageInfo := &pb.ListResponse_ImageInfo{ -- Repository: repository, -- Tag: tag, -- Id: image.ID, -- Created: image.Created.Format(constant.LayoutTime), -- Size_: getImageSize(b.daemon.localStore, image.ID), -- } -- result = append(result, imageInfo) -+ -+ imageInfo := &pb.ListResponse_ImageInfo{ -+ Repository: repository, -+ Tag: tag, -+ Id: image.ID, -+ Created: image.Created.Format(constant.LayoutTime), -+ Size_: getImageSize(store, image.ID), - } -+ *result = append(*result, imageInfo) - } -- return &pb.ListResponse{Images: result}, nil - } - - func getImageSize(store *store.Store, id string) string { -diff --git a/daemon/images_test.go b/daemon/images_test.go -new file mode 100644 -index 00000000..a970ce0b ---- /dev/null -+++ b/daemon/images_test.go -@@ -0,0 +1,178 @@ -+// Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+// isula-build licensed under the Mulan PSL v2. -+// You can use this software according to the terms and conditions of the Mulan PSL v2. -+// You may obtain a copy of Mulan PSL v2 at: -+// http://license.coscl.org.cn/MulanPSL2 -+// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+// PURPOSE. -+// See the Mulan PSL v2 for more details. 
-+// Author: Weizheng Xing -+// Create: 2021-02-03 -+// Description: This file tests List interface -+ -+package daemon -+ -+import ( -+ "context" -+ "fmt" -+ "testing" -+ -+ "github.com/bndr/gotabulate" -+ "github.com/containers/storage" -+ "github.com/containers/storage/pkg/stringid" -+ "gotest.tools/v3/assert" -+ -+ constant "isula.org/isula-build" -+ pb "isula.org/isula-build/api/services" -+) -+ -+func TestList(t *testing.T) { -+ d := prepare(t) -+ defer tmpClean(d) -+ -+ options := &storage.ImageOptions{} -+ img, err := d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{"image:test1"}, "", "", options) -+ if err != nil { -+ t.Fatalf("create image with error: %v", err) -+ } -+ _, err = d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{"image:test2"}, "", "", options) -+ if err != nil { -+ t.Fatalf("create image with error: %v", err) -+ } -+ _, err = d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{"egami:test"}, "", "", options) -+ if err != nil { -+ t.Fatalf("create image with error: %v", err) -+ } -+ // image with no name and tag -+ _, err = d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{}, "", "", options) -+ if err != nil { -+ t.Fatalf("create image with error: %v", err) -+ } -+ d.Daemon.localStore.SetNames(img.ID, append(img.Names, "image:test1-backup")) -+ // image who's repo contains port -+ _, err = d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{"hub.example.com:8080/image:test"}, "", "", options) -+ if err != nil { -+ t.Fatalf("create image with error: %v", err) -+ } -+ -+ testcases := []struct { -+ name string -+ req *pb.ListRequest -+ wantErr bool -+ errString string -+ }{ -+ { -+ name: "normal case list specific image with repository[:tag]", -+ req: &pb.ListRequest{ -+ ImageName: "image:test1", -+ }, -+ wantErr: false, -+ }, -+ { -+ name: "normal case list specific image with image id", -+ req: &pb.ListRequest{ -+ ImageName: img.ID, -+ }, -+ wantErr: false, -+ }, -+ { -+ name: "normal case list all images", -+ req: &pb.ListRequest{ -+ ImageName: "", -+ }, -+ wantErr: false, -+ }, -+ { -+ name: "normal case list all images with repository", -+ req: &pb.ListRequest{ -+ ImageName: "image", -+ }, -+ wantErr: false, -+ }, -+ { -+ name: "abnormal case no image found in local store", -+ req: &pb.ListRequest{ -+ ImageName: "coffee:costa", -+ }, -+ wantErr: true, -+ errString: "failed to parse image", -+ }, -+ { -+ name: "abnormal case no repository", -+ req: &pb.ListRequest{ -+ ImageName: "coffee", -+ }, -+ wantErr: true, -+ errString: "failed to list images with repository", -+ }, -+ { -+ name: "abnormal case ImageName only contains latest tag", -+ req: &pb.ListRequest{ -+ ImageName: ":latest", -+ }, -+ wantErr: true, -+ errString: "invalid reference format", -+ }, -+ { -+ name: "normal case ImageName contains port number and tag", -+ req: &pb.ListRequest{ -+ ImageName: "hub.example.com:8080/image:test", -+ }, -+ wantErr: false, -+ }, -+ { -+ name: "normal case ImageName contains port number", -+ req: &pb.ListRequest{ -+ ImageName: "hub.example.com:8080/image", -+ }, -+ wantErr: false, -+ }, -+ { -+ name: "abnormal case wrong ImageName", -+ req: &pb.ListRequest{ -+ ImageName: "hub.example.com:8080/", -+ }, -+ wantErr: true, -+ errString: "failed to list images with repository", -+ }, -+ } -+ -+ for _, tc := range testcases { -+ t.Run(tc.name, func(t *testing.T) { -+ ctx := context.TODO() -+ resp, err := d.Daemon.backend.List(ctx, tc.req) -+ -+ if tc.wantErr == true { -+ 
assert.ErrorContains(t, err, tc.errString) -+ } -+ if tc.wantErr == false { -+ assert.NilError(t, err) -+ formatAndPrint(resp.Images) -+ } -+ }) -+ } -+} -+ -+func formatAndPrint(images []*pb.ListResponse_ImageInfo) { -+ emptyStr := `----------- ---- --------- -------- -+ REPOSITORY TAG IMAGE ID CREATED -+ ----------- ---- --------- --------` -+ lines := make([][]string, 0, len(images)) -+ title := []string{"REPOSITORY", "TAG", "IMAGE ID", "CREATED", "SIZE"} -+ for _, image := range images { -+ if image == nil { -+ continue -+ } -+ line := []string{image.Repository, image.Tag, image.Id[:constant.DefaultIDLen], image.Created, image.Size_} -+ lines = append(lines, line) -+ } -+ if len(lines) == 0 { -+ fmt.Println(emptyStr) -+ return -+ } -+ tabulate := gotabulate.Create(lines) -+ tabulate.SetHeaders(title) -+ tabulate.SetAlign("left") -+ fmt.Print(tabulate.Render("simple")) -+} -diff --git a/image/image.go b/image/image.go -index bbbc7b94..36785bdf 100644 ---- a/image/image.go -+++ b/image/image.go -@@ -590,12 +590,19 @@ func ResolveName(name string, sc *types.SystemContext, store *store.Store) ([]st - } - - func tryResolveNameInStore(name string, store *store.Store) string { -+ defaultTag := "latest" -+ - logrus.Infof("Try to find image: %s in local storage", name) - img, err := store.Image(name) -+ if err == nil { -+ return img.ID -+ } -+ -+ logrus.Infof("Try to find image: %s:%s in local storage", name, defaultTag) -+ img, err = store.Image(fmt.Sprintf("%s:%s", name, defaultTag)) - if err != nil { - return "" - } -- - return img.ID - } - --- -2.27.0 - diff --git a/patch/0046-check-if-add-default-tag-to-image-name-when-using-pu.patch b/patch/0046-check-if-add-default-tag-to-image-name-when-using-pu.patch deleted file mode 100644 index 70308be..0000000 --- a/patch/0046-check-if-add-default-tag-to-image-name-when-using-pu.patch +++ /dev/null @@ -1,336 +0,0 @@ -From 6ce9d998d0b8e15d7a673626a54477a0bfc9f768 Mon Sep 17 00:00:00 2001 -From: meilier -Date: Wed, 3 Feb 2021 01:04:17 +0800 -Subject: [PATCH 08/10] check if add default tag to image name when using push - and save command - ---- - daemon/pull_test.go | 6 +- - daemon/push.go | 6 ++ - daemon/push_test.go | 13 ++- - daemon/save.go | 8 ++ - daemon/save_test.go | 193 ++++++++++++++++++++++++++++++++++++++++++++ - image/image.go | 23 ++++++ - 6 files changed, 246 insertions(+), 3 deletions(-) - create mode 100644 daemon/save_test.go - -diff --git a/daemon/pull_test.go b/daemon/pull_test.go -index 67459d19..27a4d6e8 100644 ---- a/daemon/pull_test.go -+++ b/daemon/pull_test.go -@@ -58,7 +58,6 @@ func (c *controlPullServer) Send(response *pb.PullResponse) error { - - func init() { - reexec.Init() -- - } - - func prepare(t *testing.T) daemonTestOptions { -@@ -100,7 +99,10 @@ func TestPull(t *testing.T) { - defer tmpClean(d) - - options := &storage.ImageOptions{} -- d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{"image:test"}, "", "", options) -+ _, err := d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{"image:test"}, "", "", options) -+ if err != nil { -+ t.Fatalf("create image with error: %v", err) -+ } - - testcases := []struct { - name string -diff --git a/daemon/push.go b/daemon/push.go -index e6053dd8..4e3a6ed9 100644 ---- a/daemon/push.go -+++ b/daemon/push.go -@@ -63,6 +63,12 @@ func (b *Backend) Push(req *pb.PushRequest, stream pb.Control_PushServer) error - return err - } - -+ imageName, err := image.CheckAndAddDefaultTag(opt.imageName, opt.localStore) -+ if err != nil { -+ return err -+ } -+ 
opt.imageName = imageName -+ - manifestType, gErr := exporter.GetManifestType(opt.format) - if gErr != nil { - return gErr -diff --git a/daemon/push_test.go b/daemon/push_test.go -index 573e97fe..f4a9e2b1 100644 ---- a/daemon/push_test.go -+++ b/daemon/push_test.go -@@ -79,10 +79,21 @@ func TestPush(t *testing.T) { - Format: "oci", - }, - }, -+ { -+ name: "manifestNotExist fine without tag latest", -+ pushRequest: &pb.PushRequest{ -+ PushID: stringid.GenerateNonCryptoID()[:constant.DefaultIDLen], -+ ImageName: "127.0.0.1/no-repository/no-name", -+ Format: "oci", -+ }, -+ }, - } - - options := &storage.ImageOptions{} -- d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{"127.0.0.1/no-repository/no-name:latest"}, "", "", options) -+ _, err := d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{"127.0.0.1/no-repository/no-name:latest"}, "", "", options) -+ if err != nil { -+ t.Fatalf("create image with error: %v", err) -+ } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { -diff --git a/daemon/save.go b/daemon/save.go -index c6411e04..fd6174b4 100644 ---- a/daemon/save.go -+++ b/daemon/save.go -@@ -79,6 +79,14 @@ func (b *Backend) Save(req *pb.SaveRequest, stream pb.Control_SaveServer) error - return errors.New("wrong image format provided") - } - -+ for i, imageName := range opts.oriImgList { -+ nameWithTag, cErr := image.CheckAndAddDefaultTag(imageName, opts.localStore) -+ if cErr != nil { -+ return cErr -+ } -+ opts.oriImgList[i] = nameWithTag -+ } -+ - defer func() { - if err != nil { - if rErr := os.Remove(opts.outputPath); rErr != nil && !os.IsNotExist(rErr) { -diff --git a/daemon/save_test.go b/daemon/save_test.go -new file mode 100644 -index 00000000..a59086a8 ---- /dev/null -+++ b/daemon/save_test.go -@@ -0,0 +1,193 @@ -+// Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+// isula-build licensed under the Mulan PSL v2. -+// You can use this software according to the terms and conditions of the Mulan PSL v2. -+// You may obtain a copy of Mulan PSL v2 at: -+// http://license.coscl.org.cn/MulanPSL2 -+// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+// PURPOSE. -+// See the Mulan PSL v2 for more details. 
-+// Author: Weizheng Xing -+// Create: 2020-02-03 -+// Description: This file tests Save interface -+ -+package daemon -+ -+import ( -+ "context" -+ "testing" -+ -+ "github.com/containers/storage" -+ "github.com/containers/storage/pkg/reexec" -+ "github.com/containers/storage/pkg/stringid" -+ "github.com/pkg/errors" -+ "golang.org/x/sync/errgroup" -+ "google.golang.org/grpc" -+ "gotest.tools/v3/assert" -+ "gotest.tools/v3/fs" -+ -+ constant "isula.org/isula-build" -+ pb "isula.org/isula-build/api/services" -+ _ "isula.org/isula-build/exporter/register" -+ "isula.org/isula-build/pkg/logger" -+) -+ -+type controlSaveServer struct { -+ grpc.ServerStream -+} -+ -+func (c *controlSaveServer) Context() context.Context { -+ return context.Background() -+} -+ -+func (c *controlSaveServer) Send(response *pb.SaveResponse) error { -+ if response.Log == "error" { -+ return errors.New("error happened") -+ } -+ return nil -+} -+ -+func init() { -+ reexec.Init() -+} -+ -+func TestSave(t *testing.T) { -+ d := prepare(t) -+ defer tmpClean(d) -+ -+ //TODO: create image manually and save -+ options := &storage.ImageOptions{} -+ img, err := d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{"image:latest"}, "", "", options) -+ if err != nil { -+ t.Fatalf("create image with error: %v", err) -+ } -+ -+ _, err = d.Daemon.localStore.CreateImage(stringid.GenerateRandomID(), []string{"image2:test"}, "", "", options) -+ if err != nil { -+ t.Fatalf("create image with error: %v", err) -+ } -+ -+ tempTarfileDir := fs.NewDir(t, t.Name()) -+ defer tempTarfileDir.Remove() -+ -+ testcases := []struct { -+ name string -+ req *pb.SaveRequest -+ wantErr bool -+ errString string -+ }{ -+ { -+ name: "normal case save with repository[:tag]", -+ req: &pb.SaveRequest{ -+ SaveID: stringid.GenerateNonCryptoID()[:constant.DefaultIDLen], -+ Images: []string{"image:latest"}, -+ Path: tempTarfileDir.Join("repotag.tar"), -+ Format: "docker", -+ }, -+ wantErr: true, -+ errString: "file does not exist", -+ }, -+ { -+ name: "normal case save with repository add default latest", -+ req: &pb.SaveRequest{ -+ SaveID: stringid.GenerateNonCryptoID()[:constant.DefaultIDLen], -+ Images: []string{"image"}, -+ Path: tempTarfileDir.Join("repolatest.tar"), -+ Format: "oci", -+ }, -+ wantErr: true, -+ errString: "file does not exist", -+ }, -+ { -+ name: "normal case with imageid", -+ req: &pb.SaveRequest{ -+ SaveID: stringid.GenerateNonCryptoID()[:constant.DefaultIDLen], -+ Images: []string{img.ID}, -+ Path: tempTarfileDir.Join("imageid.tar"), -+ Format: "docker", -+ }, -+ wantErr: true, -+ errString: "file does not exist", -+ }, -+ { -+ name: "normal case save multiple images with repository and ID", -+ req: &pb.SaveRequest{ -+ SaveID: stringid.GenerateNonCryptoID()[:constant.DefaultIDLen], -+ Images: []string{"image2:test", img.ID}, -+ Path: tempTarfileDir.Join("double.tar"), -+ Format: "docker", -+ }, -+ wantErr: true, -+ errString: "file does not exist", -+ }, -+ { -+ name: "abnormal case save image that not exist in local store", -+ req: &pb.SaveRequest{ -+ SaveID: stringid.GenerateNonCryptoID()[:constant.DefaultIDLen], -+ Images: []string{"noexist", img.ID}, -+ Path: tempTarfileDir.Join("notexist.tar"), -+ Format: "docker", -+ }, -+ wantErr: true, -+ errString: "failed to parse image", -+ }, -+ { -+ name: "abnormal case wrong image format", -+ req: &pb.SaveRequest{ -+ SaveID: stringid.GenerateNonCryptoID()[:constant.DefaultIDLen], -+ Images: []string{"image", img.ID}, -+ Path: tempTarfileDir.Join("image.tar"), -+ Format: 
"dock", -+ }, -+ wantErr: true, -+ errString: "wrong image format provided", -+ }, -+ } -+ -+ for _, tc := range testcases { -+ t.Run(tc.name, func(t *testing.T) { -+ stream := &controlSaveServer{} -+ -+ err := d.Daemon.backend.Save(tc.req, stream) -+ if tc.wantErr == true { -+ assert.ErrorContains(t, err, tc.errString) -+ } -+ if tc.wantErr == false { -+ assert.NilError(t, err) -+ } -+ }) -+ } -+ -+} -+ -+func TestSaveHandler(t *testing.T) { -+ ctx := context.TODO() -+ eg, _ := errgroup.WithContext(ctx) -+ -+ eg.Go(saveHandlerPrint("Push Response")) -+ eg.Go(saveHandlerPrint("")) -+ eg.Go(saveHandlerPrint("error")) -+ -+ eg.Wait() -+} -+ -+func saveHandlerPrint(message string) func() error { -+ return func() error { -+ stream := &controlSaveServer{} -+ cliLogger := logger.NewCliLogger(constant.CliLogBufferLen) -+ -+ ctx := context.TODO() -+ eg, _ := errgroup.WithContext(ctx) -+ -+ eg.Go(messageHandler(stream, cliLogger)) -+ eg.Go(func() error { -+ cliLogger.Print(message) -+ cliLogger.CloseContent() -+ return nil -+ }) -+ -+ eg.Wait() -+ -+ return nil -+ } -+} -diff --git a/image/image.go b/image/image.go -index 36785bdf..1e480391 100644 ---- a/image/image.go -+++ b/image/image.go -@@ -689,3 +689,26 @@ func tryResolveNameInRegistries(name string, sc *types.SystemContext) ([]string, - } - return candidates, exporter.DockerTransport - } -+ -+// CheckAndAddDefaultTag checks if src is format of repository[:tag], add default tag if src without tag -+func CheckAndAddDefaultTag(imageName string, store *store.Store) (string, error) { -+ _, img, err := FindImage(store, imageName) -+ if err != nil { -+ return "", errors.Wrapf(err, "find src image: %q failed", imageName) -+ } -+ -+ defaultTag := "latest" -+ for _, name := range img.Names { -+ // for imageName is the format of repository[:tag] -+ if imageName == name { -+ return imageName, nil -+ } -+ // for name is the format of repository -+ if fmt.Sprintf("%s:%s", imageName, defaultTag) == name { -+ return name, nil -+ } -+ } -+ -+ // for imageName is the format of imageID -+ return imageName, nil -+} --- -2.27.0 - diff --git a/patch/0047-checkAndExpandTag-return-empty-when-tag-is-empty.patch b/patch/0047-checkAndExpandTag-return-empty-when-tag-is-empty.patch deleted file mode 100644 index a3b9e30..0000000 --- a/patch/0047-checkAndExpandTag-return-empty-when-tag-is-empty.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 04dc1756a397edcf99caffd22a85902973a7c40f Mon Sep 17 00:00:00 2001 -From: meilier -Date: Wed, 3 Feb 2021 01:05:37 +0800 -Subject: [PATCH 09/10] checkAndExpandTag return empty when tag is empty - ---- - builder/dockerfile/builder.go | 2 +- - builder/dockerfile/builder_test.go | 2 +- - daemon/import.go | 4 +++- - 3 files changed, 5 insertions(+), 3 deletions(-) - -diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go -index f860e60b..42229746 100644 ---- a/builder/dockerfile/builder.go -+++ b/builder/dockerfile/builder.go -@@ -637,7 +637,7 @@ func parseOutputTag(output string) string { - // CheckAndExpandTag checks tag name. If it not include a tag, "latest" will be added. 
- func CheckAndExpandTag(tag string) (reference.Named, string, error) { - if tag == "" { -- return nil, ":", nil -+ return nil, "", nil - } - - newTag := tag -diff --git a/builder/dockerfile/builder_test.go b/builder/dockerfile/builder_test.go -index f8de41f1..3b7513be 100644 ---- a/builder/dockerfile/builder_test.go -+++ b/builder/dockerfile/builder_test.go -@@ -1533,7 +1533,7 @@ func TestCheckAndExpandTag(t *testing.T) { - { - name: "test 9", - tag: "", -- output: ":", -+ output: "", - wantErr: false, - }, - { -diff --git a/daemon/import.go b/daemon/import.go -index 21ffeaa3..3d7c0d03 100644 ---- a/daemon/import.go -+++ b/daemon/import.go -@@ -121,7 +121,9 @@ func (b *Backend) Import(req *pb.ImportRequest, stream pb.Control_ImportServer) - return errors.Wrapf(err, "error locating image %q in local storage after import", transports.ImageName(dstRef)) - } - imageID = img.ID -- img.Names = append(img.Names, reference) -+ if reference != "" { -+ img.Names = append(img.Names, reference) -+ } - newNames := util.CopyStringsWithoutSpecificElem(img.Names, tmpName) - if err = localStore.SetNames(img.ID, newNames); err != nil { - return errors.Wrapf(err, "failed to prune temporary name from image %q", imageID) --- -2.27.0 - diff --git a/patch/0048-trim-space-when-counting-length-of-fields-to-avoid-p.patch b/patch/0048-trim-space-when-counting-length-of-fields-to-avoid-p.patch deleted file mode 100644 index b335c4f..0000000 --- a/patch/0048-trim-space-when-counting-length-of-fields-to-avoid-p.patch +++ /dev/null @@ -1,111 +0,0 @@ -From af2e9918063d2797ba9f16306a4e7d2bbb0e85f7 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Tue, 2 Mar 2021 15:48:52 +0800 -Subject: [PATCH 10/10] trim space when counting length of fields to avoid - panic - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - builder/dockerfile/parser/parser.go | 2 +- - builder/dockerfile/parser/parser_test.go | 22 ++++++++++++++++++- - .../testfiles/preprocess/cmd_with_directive | 2 ++ - .../preprocess/cmd_with_directive_with_space | 2 ++ - .../preprocess/entrypoint_with_directive | 2 ++ - .../entrypoint_with_directive_with_space | 2 ++ - .../preprocess/run_with_directive_with_space | 2 ++ - 7 files changed, 32 insertions(+), 2 deletions(-) - create mode 100644 builder/dockerfile/parser/testfiles/preprocess/cmd_with_directive - create mode 100644 builder/dockerfile/parser/testfiles/preprocess/cmd_with_directive_with_space - create mode 100644 builder/dockerfile/parser/testfiles/preprocess/entrypoint_with_directive - create mode 100644 builder/dockerfile/parser/testfiles/preprocess/entrypoint_with_directive_with_space - create mode 100644 builder/dockerfile/parser/testfiles/preprocess/run_with_directive_with_space - -diff --git a/builder/dockerfile/parser/parser.go b/builder/dockerfile/parser/parser.go -index 3caa516a..a21a3f59 100644 ---- a/builder/dockerfile/parser/parser.go -+++ b/builder/dockerfile/parser/parser.go -@@ -161,7 +161,7 @@ func format(rows []*rowLine, d *directive) ([]*parser.Line, error) { - fields := strings.SplitN(logicLine, " ", 2) - const validLineLen = 2 - // we do not allow empty raw command been passed -- if len(fields) < validLineLen || len(fields[1]) == 0 { -+ if len(fields) < validLineLen || len(strings.TrimSpace(fields[1])) == 0 { - return nil, errors.Errorf("line %q should have at least two fields", logicLine) - } - line.Command = strings.ToUpper(fields[0]) -diff --git a/builder/dockerfile/parser/parser_test.go b/builder/dockerfile/parser/parser_test.go -index 3da5bea6..870132f1 100644 
---- a/builder/dockerfile/parser/parser_test.go -+++ b/builder/dockerfile/parser/parser_test.go -@@ -91,7 +91,27 @@ func TestFormat(t *testing.T) { - expect: 8, - }, - { -- name: "run_with_directive", -+ name: "run_with_directive", -+ wantErr: true, -+ }, -+ { -+ name: "run_with_directive_with_space", -+ wantErr: true, -+ }, -+ { -+ name: "cmd_with_directive", -+ wantErr: true, -+ }, -+ { -+ name: "cmd_with_directive_with_space", -+ wantErr: true, -+ }, -+ { -+ name: "entrypoint_with_directive", -+ wantErr: true, -+ }, -+ { -+ name: "entrypoint_with_directive_with_space", - wantErr: true, - }, - } -diff --git a/builder/dockerfile/parser/testfiles/preprocess/cmd_with_directive b/builder/dockerfile/parser/testfiles/preprocess/cmd_with_directive -new file mode 100644 -index 00000000..545c278c ---- /dev/null -+++ b/builder/dockerfile/parser/testfiles/preprocess/cmd_with_directive -@@ -0,0 +1,2 @@ -+FROM scratch -+CMD \ -diff --git a/builder/dockerfile/parser/testfiles/preprocess/cmd_with_directive_with_space b/builder/dockerfile/parser/testfiles/preprocess/cmd_with_directive_with_space -new file mode 100644 -index 00000000..fc309502 ---- /dev/null -+++ b/builder/dockerfile/parser/testfiles/preprocess/cmd_with_directive_with_space -@@ -0,0 +1,2 @@ -+FROM scratch -+CMD \ -diff --git a/builder/dockerfile/parser/testfiles/preprocess/entrypoint_with_directive b/builder/dockerfile/parser/testfiles/preprocess/entrypoint_with_directive -new file mode 100644 -index 00000000..59369bea ---- /dev/null -+++ b/builder/dockerfile/parser/testfiles/preprocess/entrypoint_with_directive -@@ -0,0 +1,2 @@ -+FROM scratch -+ENTRYPOINT \ -diff --git a/builder/dockerfile/parser/testfiles/preprocess/entrypoint_with_directive_with_space b/builder/dockerfile/parser/testfiles/preprocess/entrypoint_with_directive_with_space -new file mode 100644 -index 00000000..172aa714 ---- /dev/null -+++ b/builder/dockerfile/parser/testfiles/preprocess/entrypoint_with_directive_with_space -@@ -0,0 +1,2 @@ -+FROM scratch -+ENTRYPOINT \ -diff --git a/builder/dockerfile/parser/testfiles/preprocess/run_with_directive_with_space b/builder/dockerfile/parser/testfiles/preprocess/run_with_directive_with_space -new file mode 100644 -index 00000000..c742c4c3 ---- /dev/null -+++ b/builder/dockerfile/parser/testfiles/preprocess/run_with_directive_with_space -@@ -0,0 +1,2 @@ -+FROM scratch -+RUN \ --- -2.27.0 - diff --git a/patch/0049-fix-data-and-run-root-not-effective-when-setting-con.patch b/patch/0049-fix-data-and-run-root-not-effective-when-setting-con.patch deleted file mode 100644 index d12e2e6..0000000 --- a/patch/0049-fix-data-and-run-root-not-effective-when-setting-con.patch +++ /dev/null @@ -1,120 +0,0 @@ -From 022e5f3bfe5ec9731cf2d8808780a07d7408c820 Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Thu, 20 May 2021 15:58:43 +0800 -Subject: [PATCH 1/5] fix data and run root not effective when setting - configuration.toml after upgrading containers/storage - ---- - cmd/daemon/main.go | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++---- - 1 file changed, 59 insertions(+), 4 deletions(-) - -diff --git a/cmd/daemon/main.go b/cmd/daemon/main.go -index 41d2b60..4fd5356 100644 ---- a/cmd/daemon/main.go -+++ b/cmd/daemon/main.go -@@ -213,6 +213,35 @@ func loadConfig(path string) (config.TomlConfig, error) { - return conf, err - } - -+func checkRootSetInConfig(path string) (setRunRoot, setGraphRoot bool, err error) { -+ fi, err := os.Stat(path) -+ if err != nil { -+ return false, false, err -+ } -+ -+ if !fi.Mode().IsRegular() { -+ err = 
errors.New("config file must be a regular file") -+ return false, false, err -+ } -+ -+ if err = util.CheckFileSize(path, constant.MaxFileSize); err != nil { -+ return false, false, err -+ } -+ -+ configData, err := ioutil.ReadFile(filepath.Clean(path)) -+ if err != nil { -+ return false, false, err -+ } -+ conf := struct { -+ Storage struct { -+ RunRoot string `toml:"runroot"` -+ DataRoot string `toml:"graphroot"` -+ } `toml:"storage"` -+ }{} -+ _, err = toml.Decode(string(configData), &conf) -+ return conf.Storage.RunRoot != "", conf.Storage.DataRoot != "", err -+} -+ - func mergeStorageConfig(cmd *cobra.Command) error { - store.SetDefaultConfigFilePath(constant.StorageConfigPath) - option, err := store.GetDefaultStoreOptions(true) -@@ -226,13 +255,21 @@ func mergeStorageConfig(cmd *cobra.Command) error { - } - - var storeOpt store.DaemonStoreOptions -- if option.RunRoot == "" { -+ storeOpt.RunRoot = option.RunRoot -+ storeOpt.DataRoot = option.GraphRoot -+ -+ setRunRoot, setDataRoot, err := checkRootSetInConfig(constant.StorageConfigPath) -+ if err != nil { -+ return err -+ } -+ -+ if !setRunRoot { - storeOpt.RunRoot, err = securejoin.SecureJoin(daemonOpts.RunRoot, "storage") - if err != nil { - return err - } - } -- if option.GraphRoot == "" { -+ if !setDataRoot { - storeOpt.DataRoot, err = securejoin.SecureJoin(daemonOpts.DataRoot, "storage") - if err != nil { - return err -@@ -249,7 +286,7 @@ func mergeStorageConfig(cmd *cobra.Command) error { - return nil - } - --func mergeConfig(conf config.TomlConfig, cmd *cobra.Command) { -+func mergeConfig(conf config.TomlConfig, cmd *cobra.Command) error { - if conf.Debug && !cmd.Flag("debug").Changed { - daemonOpts.Debug = true - } -@@ -271,6 +308,22 @@ func mergeConfig(conf config.TomlConfig, cmd *cobra.Command) { - if conf.DataRoot != "" && !cmd.Flag("dataroot").Changed { - daemonOpts.DataRoot = conf.DataRoot - } -+ -+ runRoot, err := securejoin.SecureJoin(daemonOpts.RunRoot, "storage") -+ if err != nil { -+ return err -+ } -+ -+ dataRoot, err := securejoin.SecureJoin(daemonOpts.DataRoot, "storage") -+ if err != nil { -+ return err -+ } -+ store.SetDefaultStoreOptions(store.DaemonStoreOptions{ -+ DataRoot: dataRoot, -+ RunRoot: runRoot, -+ }) -+ -+ return nil - } - - func setupWorkingDirectories() error { -@@ -319,7 +372,9 @@ func checkAndValidateConfig(cmd *cobra.Command) error { - os.Exit(constant.DefaultFailedCode) - } - -- mergeConfig(conf, cmd) -+ if err = mergeConfig(conf, cmd); err != nil { -+ return err -+ } - } - - // file policy.json must be exist --- -1.8.3.1 - diff --git a/patch/0050-data-and-run-root-set-unit-test.patch b/patch/0050-data-and-run-root-set-unit-test.patch deleted file mode 100644 index a55f790..0000000 --- a/patch/0050-data-and-run-root-set-unit-test.patch +++ /dev/null @@ -1,133 +0,0 @@ -From d6c6c205122386b66ef82adc4af16c3c2eb86b18 Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Mon, 31 May 2021 00:46:16 +0800 -Subject: [PATCH 2/5] data and run root set unit test - ---- - cmd/daemon/main_test.go | 103 ++++++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 103 insertions(+) - -diff --git a/cmd/daemon/main_test.go b/cmd/daemon/main_test.go -index 790fdfc..d98ea83 100644 ---- a/cmd/daemon/main_test.go -+++ b/cmd/daemon/main_test.go -@@ -18,9 +18,12 @@ import ( - "os" - "testing" - -+ "gotest.tools/v3/assert" - "gotest.tools/v3/fs" - - constant "isula.org/isula-build" -+ "isula.org/isula-build/cmd/daemon/config" -+ "isula.org/isula-build/store" - ) - - func TestSetupWorkingDirectories(t *testing.T) { 
-@@ -104,3 +107,103 @@ func TestSetupWorkingDirectories(t *testing.T) { - }) - } - } -+ -+func TestRunAndDataRootSet(t *testing.T) { -+ dataRoot := fs.NewDir(t, t.Name()) -+ runRoot := fs.NewDir(t, t.Name()) -+ -+ conf := config.TomlConfig{ -+ Debug: true, -+ Group: "isula", -+ LogLevel: "debug", -+ Runtime: "", -+ RunRoot: "", -+ DataRoot: "", -+ } -+ cmd := newDaemonCommand() -+ -+ result := store.DaemonStoreOptions{ -+ DataRoot: dataRoot.Join("storage"), -+ RunRoot: runRoot.Join("storage"), -+ } -+ -+ setStorage := func(content string) func() { -+ return func() { -+ if err := mergeConfig(conf, cmd); err != nil { -+ t.Fatalf("mrege config failed with error: %v", err) -+ } -+ -+ fileName := "storage.toml" -+ tmpDir := fs.NewDir(t, t.Name(), fs.WithFile(fileName, content)) -+ defer tmpDir.Remove() -+ -+ filePath := tmpDir.Join(fileName) -+ store.SetDefaultConfigFilePath(filePath) -+ option, err := store.GetDefaultStoreOptions(true) -+ if err != nil { -+ t.Fatalf("get default store options failed with error: %v", err) -+ } -+ -+ var storeOpt store.DaemonStoreOptions -+ storeOpt.RunRoot = option.RunRoot -+ storeOpt.DataRoot = option.GraphRoot -+ store.SetDefaultStoreOptions(storeOpt) -+ } -+ -+ } -+ -+ testcases := []struct { -+ name string -+ setF func() -+ expectation store.DaemonStoreOptions -+ }{ -+ { -+ name: "TC1 - cmd set, configuration and storage not set", -+ setF: func() { -+ cmd.PersistentFlags().Set("runroot", runRoot.Path()) -+ cmd.PersistentFlags().Set("dataroot", dataRoot.Path()) -+ checkAndValidateConfig(cmd) -+ }, -+ expectation: result, -+ }, -+ { -+ name: "TC2 - cmd and storage not set, configuration set", -+ setF: func() { -+ conf.DataRoot = dataRoot.Path() -+ conf.RunRoot = runRoot.Path() -+ checkAndValidateConfig(cmd) -+ }, -+ expectation: result, -+ }, -+ { -+ name: "TC3 - all not set", -+ setF: setStorage("[storage]"), -+ expectation: store.DaemonStoreOptions{ -+ DataRoot: "/var/lib/containers/storage", -+ RunRoot: "/var/run/containers/storage", -+ }, -+ }, -+ { -+ name: "TC4 - cmd and configuration not set, storage set", -+ setF: func() { -+ config := "[storage]\nrunroot = \"" + runRoot.Join("storage") + "\"\ngraphroot = \"" + dataRoot.Join("storage") + "\"" -+ sT := setStorage(config) -+ sT() -+ }, -+ expectation: result, -+ }, -+ } -+ -+ for _, tc := range testcases { -+ t.Run(tc.name, func(t *testing.T) { -+ tc.setF() -+ storeOptions, err := store.GetDefaultStoreOptions(false) -+ if err != nil { -+ t.Fatalf("get default store options failed with error: %v", err) -+ } -+ assert.Equal(t, tc.expectation.DataRoot, storeOptions.GraphRoot) -+ assert.Equal(t, tc.expectation.RunRoot, storeOptions.RunRoot) -+ }) -+ -+ } -+} --- -1.8.3.1 - diff --git a/patch/0051-bugfix-set-user-s-uid-and-gid-for-containers.patch b/patch/0051-bugfix-set-user-s-uid-and-gid-for-containers.patch deleted file mode 100644 index 5994c3e..0000000 --- a/patch/0051-bugfix-set-user-s-uid-and-gid-for-containers.patch +++ /dev/null @@ -1,34 +0,0 @@ -From fbd95494e6e402fd123955fbaf337696cc22c750 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Mon, 31 May 2021 20:50:24 +0800 -Subject: [PATCH 3/5] bugfix: set user's uid and gid for containers - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - builder/dockerfile/run.go | 10 ++++++++++ - 1 file changed, 10 insertions(+) - -diff --git a/builder/dockerfile/run.go b/builder/dockerfile/run.go -index 6c38b55..828fe67 100644 ---- a/builder/dockerfile/run.go -+++ b/builder/dockerfile/run.go -@@ -95,6 +95,16 @@ func (c *cmdBuilder) 
setupRuntimeSpec(command []string) (*specs.Spec, error) { - } - - // set specific runtime spec config -+ user := c.stage.docker.Config.User -+ if user != "" { -+ pair, err := util.GetChownOptions(user, c.stage.mountpoint) -+ if err != nil { -+ return nil, err -+ } -+ g.SetProcessUID(uint32(pair.UID)) -+ g.SetProcessGID(uint32(pair.GID)) -+ g.SetProcessUsername(c.stage.docker.Config.User) -+ } - g.RemoveHostname() - g.SetProcessArgs(command) - g.SetProcessTerminal(false) --- -1.8.3.1 - diff --git a/patch/0052-hack-make-isula-build-binary-static.patch b/patch/0052-hack-make-isula-build-binary-static.patch deleted file mode 100644 index be007c0..0000000 --- a/patch/0052-hack-make-isula-build-binary-static.patch +++ /dev/null @@ -1,1264 +0,0 @@ -From b575a9ae9970bbdafe132f1ac73db1d0b7ee50ba Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Wed, 19 May 2021 09:36:12 +0800 -Subject: [PATCH 4/5] hack: make isula-build binary static - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - Makefile | 5 +- - builder/dockerfile/builder.go | 12 ++--- - cmd/cli/build.go | 17 +++---- - cmd/cli/build_test.go | 7 ++- - cmd/cli/import.go | 10 +--- - cmd/cli/pull.go | 8 +--- - cmd/cli/pull_test.go | 12 ----- - cmd/cli/push.go | 13 ++---- - cmd/cli/push_test.go | 14 ------ - cmd/cli/save.go | 10 ++-- - constant.go | 14 ++++++ - daemon/import.go | 7 +++ - daemon/load.go | 10 ++-- - daemon/pull.go | 5 ++ - daemon/push.go | 13 ++++-- - daemon/save.go | 8 ++-- - exporter/common.go | 30 ++---------- - exporter/common_test.go | 88 ++++------------------------------- - exporter/docker/archive/archive.go | 3 +- - exporter/docker/daemon/daemon.go | 3 +- - exporter/docker/docker.go | 3 +- - exporter/exporter.go | 23 ---------- - exporter/isulad/isulad.go | 2 +- - exporter/manifest/manifest.go | 3 +- - exporter/oci/archive/archive.go | 3 +- - exporter/oci/oci.go | 3 +- - image/image.go | 14 +++--- - image/image_test.go | 5 +- - pkg/manifest/list.go | 3 +- - util/util.go | 49 ++++++++++++++++++++ - util/util_test.go | 94 ++++++++++++++++++++++++++++++++++++++ - 31 files changed, 254 insertions(+), 237 deletions(-) - -diff --git a/Makefile b/Makefile -index cbace59..e03254c 100644 ---- a/Makefile -+++ b/Makefile -@@ -22,7 +22,8 @@ BUILDTAGS := seccomp - BUILDFLAGS := -tags "$(BUILDTAGS)" - TMPDIR := /tmp/isula_build_tmpdir - BEFLAG := -tmpdir=${TMPDIR} --SAFEBUILDFLAGS := -buildid=IdByIsula -buildmode=pie -extldflags=-ftrapv -extldflags=-static -extldflags=-zrelro -extldflags=-znow $(LDFLAGS) $(BEFLAG) -+SAFEBUILDFLAGS := -buildid=IdByIsula -buildmode=pie -extldflags=-ftrapv -extldflags=-zrelro -extldflags=-znow $(BEFLAG) $(LDFLAGS) -+STATIC_LDFLAGS := -linkmode=external -extldflags=-static - - IMAGE_BUILDARGS := $(if $(http_proxy), --build-arg http_proxy=$(http_proxy)) - IMAGE_BUILDARGS += $(if $(https_proxy), --build-arg https_proxy=$(https_proxy)) -@@ -56,7 +57,7 @@ isula-builder: ./cmd/daemon - safe: - @echo "Safe building isula-build..." - mkdir -p ${TMPDIR} -- $(GO_BUILD) -ldflags '$(SAFEBUILDFLAGS)' -o bin/isula-build $(BUILDFLAGS) ./cmd/cli -+ $(GO_BUILD) -ldflags '$(SAFEBUILDFLAGS) $(STATIC_LDFLAGS)' -o bin/isula-build $(BUILDFLAGS) ./cmd/cli 2>/dev/null - $(GO_BUILD) -ldflags '$(SAFEBUILDFLAGS)' -o bin/isula-builder $(BUILDFLAGS) ./cmd/daemon - @echo "Safe build isula-build done!" 
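
Patch 0051 wires the image config's User field into the RUN process spec as numeric UID/GID. The real lookup (util.GetChownOptions) resolves the name inside the container rootfs; the sketch below is a much simpler host-side stand-in that only shows the "user[:group]" parsing and numeric conversion, so names and behaviour are illustrative.

package main

import (
	"fmt"
	"os/user"
	"strconv"
	"strings"
)

// resolveUser is a simplified stand-in: it accepts "name", "name:group" or
// numeric "uid[:gid]" and returns numeric IDs looked up on the host.
func resolveUser(spec string) (uid, gid int, err error) {
	name, group := spec, ""
	if i := strings.IndexByte(spec, ':'); i >= 0 {
		name, group = spec[:i], spec[i+1:]
	}

	if n, convErr := strconv.Atoi(name); convErr == nil {
		uid = n
	} else {
		u, lookErr := user.Lookup(name)
		if lookErr != nil {
			return 0, 0, lookErr
		}
		uid, _ = strconv.Atoi(u.Uid)
		gid, _ = strconv.Atoi(u.Gid) // primary group unless overridden below
	}

	if group != "" {
		if n, convErr := strconv.Atoi(group); convErr == nil {
			gid = n
		} else {
			g, lookErr := user.LookupGroup(group)
			if lookErr != nil {
				return 0, 0, lookErr
			}
			gid, _ = strconv.Atoi(g.Gid)
		}
	}
	return uid, gid, nil
}

func main() {
	fmt.Println(resolveUser("root"))
	fmt.Println(resolveUser("1000:1000"))
}
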
- -diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go -index 4222974..cbd1e58 100644 ---- a/builder/dockerfile/builder.go -+++ b/builder/dockerfile/builder.go -@@ -159,11 +159,11 @@ func NewBuilder(ctx context.Context, store *store.Store, req *pb.BuildRequest, r - } - - func (b *Builder) parseFormat(format string) error { -- if err := exporter.CheckImageFormat(format); err != nil { -+ if err := util.CheckImageFormat(format); err != nil { - return err - } - -- if format == exporter.OCITransport { -+ if format == constant.OCITransport { - b.manifestType = imgspecv1.MediaTypeImageManifest - } - -@@ -183,12 +183,12 @@ func (b *Builder) parseOutputManifest(output []string) error { - } - - transport := segments[0] -- if transport == exporter.OCITransport { -+ if transport == constant.OCITransport { - // When transport is oci, still, we need to set b.buildOpts.Output[i] starting with prefix "docker://". We only need to set the related b.outputManifestType. - // As a result, we can push oci format image into registry. When with prefix "oci://", image is exported to local dir, which is not what we expect. - // See github.com/containers/image package for more information. - b.outputManifestType = append(b.outputManifestType, imgspecv1.MediaTypeImageManifest) -- b.buildOpts.Output[i] = fmt.Sprintf("%s:%s", exporter.DockerTransport, segments[1]) -+ b.buildOpts.Output[i] = fmt.Sprintf("%s:%s", constant.DockerTransport, segments[1]) - } - b.outputManifestType = append(b.outputManifestType, manifest.DockerV2Schema2MediaType) - } -@@ -617,11 +617,11 @@ func parseOutputTag(output string) string { - - var tag string - switch { -- case (outputFields[0] == exporter.DockerDaemonTransport || outputFields[0] == exporter.IsuladTransport) && len(outputFields) > 1: -+ case (outputFields[0] == constant.DockerDaemonTransport || outputFields[0] == constant.IsuladTransport) && len(outputFields) > 1: - tag = strings.Join(outputFields[1:], ":") - case exporter.CheckArchiveFormat(outputFields[0]) == nil && len(outputFields) > archiveOutputWithoutTagLen: - tag = strings.Join(outputFields[archiveOutputWithoutTagLen:], ":") -- case exporter.CheckImageFormat(outputFields[0]) == nil && len(outputFields) > 1: -+ case util.CheckImageFormat(outputFields[0]) == nil && len(outputFields) > 1: - repoAndTag := strings.Join(outputFields[1:], ":") - // repo format regexp, "//registry.example.com/" for example - repo := regexp.MustCompile(`^\/\/[\w\.\-\:]+\/`).FindString(repoAndTag) -diff --git a/cmd/cli/build.go b/cmd/cli/build.go -index decf285..3d9f549 100644 ---- a/cmd/cli/build.go -+++ b/cmd/cli/build.go -@@ -25,7 +25,6 @@ import ( - "strings" - "time" - -- "github.com/containers/storage/pkg/stringid" - "github.com/gogo/protobuf/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -@@ -35,8 +34,6 @@ import ( - - constant "isula.org/isula-build" - pb "isula.org/isula-build/api/services" -- "isula.org/isula-build/exporter" -- _ "isula.org/isula-build/exporter/register" - "isula.org/isula-build/pkg/opts" - "isula.org/isula-build/util" - ) -@@ -107,7 +104,7 @@ func NewBuildCmd() *cobra.Command { - if util.CheckCliExperimentalEnabled() { - buildCmd.PersistentFlags().StringVar(&buildOpts.format, "format", "oci", "Image format of the built image") - } else { -- buildOpts.format = exporter.DockerTransport -+ buildOpts.format = constant.DockerTransport - } - buildCmd.PersistentFlags().StringVarP(&buildOpts.output, "output", "o", "", "Destination of output images") - 
buildCmd.PersistentFlags().BoolVar(&buildOpts.proxyFlag, "proxy", true, "Inherit proxy environment variables from host") -@@ -161,7 +158,7 @@ func buildCommand(c *cobra.Command, args []string) error { - - func newBuildOptions(args []string) error { - // unique buildID for each build progress -- buildOpts.buildID = stringid.GenerateNonCryptoID()[:constant.DefaultIDLen] -+ buildOpts.buildID = util.GenerateNonCryptoID()[:constant.DefaultIDLen] - - if len(args) < 1 { - // use current working directory as default context directory -@@ -219,7 +216,7 @@ func checkOutput(output string) ([]string, error) { - if len(transport) == 0 { - return nil, errors.New("transport should not be empty") - } -- if !exporter.IsSupport(transport) { -+ if !util.IsSupportExporter(transport) { - return nil, errors.Errorf("transport %q not support", transport) - } - -@@ -248,12 +245,12 @@ func checkAbsPath(path string) (string, error) { - func modifyLocalTransporter(transport string, absPath string, segments []string) error { - const validIsuladFieldsLen = 3 - switch transport { -- case exporter.DockerArchiveTransport, exporter.OCIArchiveTransport: -+ case constant.DockerArchiveTransport, constant.OCIArchiveTransport: - newSeg := util.CopyStrings(segments) - newSeg[1] = absPath - buildOpts.output = strings.Join(newSeg, ":") - return nil -- case exporter.IsuladTransport: -+ case constant.IsuladTransport: - if len(segments) != validIsuladFieldsLen { - return errors.Errorf("invalid isulad output format: %v", buildOpts.output) - } -@@ -275,7 +272,7 @@ func checkAndProcessOutput() error { - - transport := segments[0] - // just build, not need to export to any destination -- if !exporter.IsClientExporter(transport) { -+ if !util.IsClientExporter(transport) { - return nil - } - -@@ -325,7 +322,7 @@ func runBuild(ctx context.Context, cli Cli) (string, error) { - digest string - ) - -- if err = exporter.CheckImageFormat(buildOpts.format); err != nil { -+ if err = util.CheckImageFormat(buildOpts.format); err != nil { - return "", err - } - if err = checkAndProcessOutput(); err != nil { -diff --git a/cmd/cli/build_test.go b/cmd/cli/build_test.go -index fcdf6b4..a7fe64e 100644 ---- a/cmd/cli/build_test.go -+++ b/cmd/cli/build_test.go -@@ -25,7 +25,6 @@ import ( - "gotest.tools/v3/fs" - - constant "isula.org/isula-build" -- "isula.org/isula-build/exporter" - _ "isula.org/isula-build/exporter/register" - "isula.org/isula-build/util" - ) -@@ -175,7 +174,7 @@ func TestRunBuildWithNArchiveExporter(t *testing.T) { - format: "docker", - }, - { -- exporter: exporter.OCIArchiveTransport, -+ exporter: constant.OCIArchiveTransport, - descSpec: "oci-archive:isula:latest", - format: "oci", - }, -@@ -222,12 +221,12 @@ func TestRunBuildWithArchiveExporter(t *testing.T) { - - var testcases = []testcase{ - { -- exporter: exporter.DockerArchiveTransport, -+ exporter: constant.DockerArchiveTransport, - descSpec: "docker-archive:/tmp/image:isula:latest", - format: "docker", - }, - { -- exporter: exporter.OCIArchiveTransport, -+ exporter: constant.OCIArchiveTransport, - descSpec: "oci-archive:/tmp/image:isula:latest", - format: "oci", - }, -diff --git a/cmd/cli/import.go b/cmd/cli/import.go -index 320197b..96263db 100644 ---- a/cmd/cli/import.go -+++ b/cmd/cli/import.go -@@ -20,8 +20,6 @@ import ( - "os" - "path/filepath" - -- dockerref "github.com/containers/image/v5/docker/reference" -- "github.com/containers/storage/pkg/stringid" - "github.com/pkg/errors" - "github.com/spf13/cobra" - -@@ -76,12 +74,6 @@ func importCommand(c *cobra.Command, args 
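
The checkOutput change above keeps the CLI-side validation of --output: split off the transport before the first ':' and reject empty or unsupported ones (the support table itself moves into util.IsSupportExporter). A toy version with the transport set hard-coded:

package main

import (
	"fmt"
	"strings"
)

var supported = map[string]bool{
	"docker": true, "docker-archive": true, "docker-daemon": true,
	"oci": true, "oci-archive": true, "isulad": true, "manifest": true,
}

// splitOutput separates a spec such as "docker-archive:/tmp/img.tar:app:v1"
// into its transport and the remainder, rejecting empty or unknown transports.
func splitOutput(output string) (transport, rest string, err error) {
	const outputFields = 2
	segments := strings.SplitN(output, ":", outputFields)
	if len(segments) != outputFields || segments[0] == "" {
		return "", "", fmt.Errorf("invalid output %q", output)
	}
	if !supported[segments[0]] {
		return "", "", fmt.Errorf("transport %q not support", segments[0])
	}
	return segments[0], segments[1], nil
}

func main() {
	fmt.Println(splitOutput("docker-archive:/tmp/img.tar:app:v1"))
	fmt.Println(splitOutput("ftp://somewhere"))
}
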
[]string) error { - } - - func runImport(ctx context.Context, cli Cli) error { -- if importOpts.reference != "" { -- if _, err := dockerref.Parse(importOpts.reference); err != nil { -- return err -- } -- } -- - if !filepath.IsAbs(importOpts.source) { - pwd, err := os.Getwd() - if err != nil { -@@ -90,7 +82,7 @@ func runImport(ctx context.Context, cli Cli) error { - importOpts.source = util.MakeAbsolute(importOpts.source, pwd) - } - -- importOpts.importID = stringid.GenerateNonCryptoID()[:constant.DefaultIDLen] -+ importOpts.importID = util.GenerateNonCryptoID()[:constant.DefaultIDLen] - - stream, err := cli.Client().Import(ctx, &pb.ImportRequest{ - Source: importOpts.source, -diff --git a/cmd/cli/pull.go b/cmd/cli/pull.go -index 316c548..02951ec 100644 ---- a/cmd/cli/pull.go -+++ b/cmd/cli/pull.go -@@ -18,13 +18,12 @@ import ( - "fmt" - "io" - -- dockerref "github.com/containers/image/v5/docker/reference" -- "github.com/containers/storage/pkg/stringid" - "github.com/pkg/errors" - "github.com/spf13/cobra" - - constant "isula.org/isula-build" - pb "isula.org/isula-build/api/services" -+ "isula.org/isula-build/util" - ) - - const ( -@@ -46,9 +45,6 @@ func pullCommand(c *cobra.Command, args []string) error { - if len(args) != 1 { - return errors.New("pull requires exactly one argument") - } -- if _, err := dockerref.Parse(args[0]); err != nil { -- return err -- } - - ctx := context.TODO() - cli, err := NewClient(ctx) -@@ -60,7 +56,7 @@ func pullCommand(c *cobra.Command, args []string) error { - } - - func runPull(ctx context.Context, cli Cli, imageName string) error { -- pullID := stringid.GenerateNonCryptoID()[:constant.DefaultIDLen] -+ pullID := util.GenerateNonCryptoID()[:constant.DefaultIDLen] - - pullStream, err := cli.Client().Pull(ctx, &pb.PullRequest{ - PullID: pullID, -diff --git a/cmd/cli/pull_test.go b/cmd/cli/pull_test.go -index a4dbd04..0a11b0b 100644 ---- a/cmd/cli/pull_test.go -+++ b/cmd/cli/pull_test.go -@@ -40,18 +40,6 @@ func TestPullCommand(t *testing.T) { - wantErr: true, - errString: "pull requires exactly one argument", - }, -- { -- name: "abnormal case with empty args", -- args: []string{""}, -- wantErr: true, -- errString: "repository name must have at least one component", -- }, -- { -- name: "abnormal case with invalid args", -- args: []string{"busybox-:latest"}, -- wantErr: true, -- errString: "invalid reference format", -- }, - } - - for _, tc := range testcases { -diff --git a/cmd/cli/push.go b/cmd/cli/push.go -index 7502abd..061c17b 100644 ---- a/cmd/cli/push.go -+++ b/cmd/cli/push.go -@@ -18,14 +18,11 @@ import ( - "fmt" - "io" - -- dockerref "github.com/containers/image/v5/docker/reference" -- "github.com/containers/storage/pkg/stringid" - "github.com/pkg/errors" - "github.com/spf13/cobra" - - constant "isula.org/isula-build" - pb "isula.org/isula-build/api/services" -- "isula.org/isula-build/exporter" - "isula.org/isula-build/util" - ) - -@@ -50,7 +47,7 @@ func NewPushCmd() *cobra.Command { - if util.CheckCliExperimentalEnabled() { - pushCmd.PersistentFlags().StringVarP(&pushOpts.format, "format", "f", "oci", "Format for image pushing to a registry") - } else { -- pushOpts.format = exporter.DockerTransport -+ pushOpts.format = constant.DockerTransport - } - return pushCmd - } -@@ -59,10 +56,8 @@ func pushCommand(c *cobra.Command, args []string) error { - if len(args) != 1 { - return errors.New("push requires exactly one argument") - } -- if _, err := dockerref.Parse(args[0]); err != nil { -- return err -- } -- if err := 
exporter.CheckImageFormat(pushOpts.format); err != nil { -+ -+ if err := util.CheckImageFormat(pushOpts.format); err != nil { - return err - } - -@@ -76,7 +71,7 @@ func pushCommand(c *cobra.Command, args []string) error { - } - - func runPush(ctx context.Context, cli Cli, imageName string) error { -- pushID := stringid.GenerateNonCryptoID()[:constant.DefaultIDLen] -+ pushID := util.GenerateNonCryptoID()[:constant.DefaultIDLen] - - pushStream, err := cli.Client().Push(ctx, &pb.PushRequest{ - PushID: pushID, -diff --git a/cmd/cli/push_test.go b/cmd/cli/push_test.go -index 0f8db2e..27caef3 100644 ---- a/cmd/cli/push_test.go -+++ b/cmd/cli/push_test.go -@@ -43,20 +43,6 @@ func TestPushCommand(t *testing.T) { - errString: "push requires exactly one argument", - }, - { -- name: "abnormal case with empty args", -- args: []string{""}, -- format: "docker", -- wantErr: true, -- errString: "repository name must have at least one component", -- }, -- { -- name: "abnormal case with invalid args", -- args: []string{"busybox-:latest"}, -- format: "oci", -- wantErr: true, -- errString: "invalid reference format", -- }, -- { - name: "normal case with image format oci", - args: []string{"openeuler:latest"}, - format: "oci", -diff --git a/cmd/cli/save.go b/cmd/cli/save.go -index fe67673..cb78ecf 100644 ---- a/cmd/cli/save.go -+++ b/cmd/cli/save.go -@@ -21,13 +21,11 @@ import ( - "path/filepath" - "strings" - -- "github.com/containers/storage/pkg/stringid" - "github.com/pkg/errors" - "github.com/spf13/cobra" - - constant "isula.org/isula-build" - pb "isula.org/isula-build/api/services" -- "isula.org/isula-build/exporter" - "isula.org/isula-build/util" - ) - -@@ -59,7 +57,7 @@ func NewSaveCmd() *cobra.Command { - if util.CheckCliExperimentalEnabled() { - saveCmd.PersistentFlags().StringVarP(&saveOpts.format, "format", "f", "oci", "Format of image saving to local tarball") - } else { -- saveOpts.format = exporter.DockerTransport -+ saveOpts.format = constant.DockerTransport - } - - return saveCmd -@@ -72,10 +70,10 @@ func saveCommand(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("save accepts at least one image") - } -- if saveOpts.format == exporter.OCITransport && len(args) >= 2 { -+ if saveOpts.format == constant.OCITransport && len(args) >= 2 { - return errors.New("oci image format now only supports saving single image") - } -- if err := exporter.CheckImageFormat(saveOpts.format); err != nil { -+ if err := util.CheckImageFormat(saveOpts.format); err != nil { - return err - } - if err := checkSavePath(); err != nil { -@@ -115,7 +113,7 @@ func checkSavePath() error { - } - - func runSave(ctx context.Context, cli Cli, args []string) error { -- saveOpts.saveID = stringid.GenerateNonCryptoID()[:constant.DefaultIDLen] -+ saveOpts.saveID = util.GenerateNonCryptoID()[:constant.DefaultIDLen] - saveOpts.images = args - - saveStream, err := cli.Client().Save(ctx, &pb.SaveRequest{ -diff --git a/constant.go b/constant.go -index 30c1653..9926728 100644 ---- a/constant.go -+++ b/constant.go -@@ -75,6 +75,20 @@ const ( - BuildContainerImageType = "ctr-img" - // BufferSize is default buffer size for file transportation - BufferSize = 32 * 1024 -+ // DockerTransport used to export docker image format images to registry -+ DockerTransport = "docker" -+ // DockerArchiveTransport used to export docker image format images to local tarball -+ DockerArchiveTransport = "docker-archive" -+ // DockerDaemonTransport used to export images to docker daemon -+ DockerDaemonTransport = "docker-daemon" 
-+ // OCITransport used to export oci image format images to registry -+ OCITransport = "oci" -+ // OCIArchiveTransport used to export oci image format images to local tarball -+ OCIArchiveTransport = "oci-archive" -+ // IsuladTransport use to export images to isulad -+ IsuladTransport = "isulad" -+ // ManifestTransport used to export manifest list -+ ManifestTransport = "manifest" - ) - - var ( -diff --git a/daemon/import.go b/daemon/import.go -index 3d7c0d0..40a0a92 100644 ---- a/daemon/import.go -+++ b/daemon/import.go -@@ -18,6 +18,7 @@ import ( - "path/filepath" - - cp "github.com/containers/image/v5/copy" -+ dockerref "github.com/containers/image/v5/docker/reference" - is "github.com/containers/image/v5/storage" - "github.com/containers/image/v5/tarball" - "github.com/containers/image/v5/transports" -@@ -49,6 +50,12 @@ func (b *Backend) Import(req *pb.ImportRequest, stream pb.Control_ImportServer) - logEntry := logrus.WithFields(logrus.Fields{"ImportID": importID}) - logEntry.Info("ImportRequest received") - -+ if reference != "" { -+ if _, err := dockerref.Parse(reference); err != nil { -+ return err -+ } -+ } -+ - tmpName := importID + "-import-tmp" - dstRef, err := is.Transport.ParseStoreReference(localStore, tmpName) - if err != nil { -diff --git a/daemon/load.go b/daemon/load.go -index b557d38..f2d818f 100644 ---- a/daemon/load.go -+++ b/daemon/load.go -@@ -127,16 +127,16 @@ func tryToParseImageFormatFromTarball(dataRoot string, opts *loadOptions) ([][]s - - allRepoTags, err = getDockerRepoTagFromImageTar(systemContext, opts.path) - if err == nil { -- logrus.Infof("Parse image successful with %q format", exporter.DockerTransport) -- opts.format = exporter.DockerArchiveTransport -+ logrus.Infof("Parse image successful with %q format", constant.DockerTransport) -+ opts.format = constant.DockerArchiveTransport - return allRepoTags, nil - } - logrus.Warnf("Try to Parse image of docker format failed with error: %v", err) - - allRepoTags, err = getOCIRepoTagFromImageTar(systemContext, opts.path) - if err == nil { -- logrus.Infof("Parse image successful with %q format", exporter.OCITransport) -- opts.format = exporter.OCIArchiveTransport -+ logrus.Infof("Parse image successful with %q format", constant.OCITransport) -+ opts.format = constant.OCIArchiveTransport - return allRepoTags, nil - } - logrus.Warnf("Try to parse image of oci format failed with error: %v", err) -@@ -170,7 +170,7 @@ func getOCIRepoTagFromImageTar(systemContext *types.SystemContext, path string) - err error - ) - -- srcRef, err := alltransports.ParseImageName(exporter.FormatTransport(exporter.OCIArchiveTransport, path)) -+ srcRef, err := alltransports.ParseImageName(exporter.FormatTransport(constant.OCIArchiveTransport, path)) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse image name of oci image format") - } -diff --git a/daemon/pull.go b/daemon/pull.go -index 56be755..6d2e33d 100644 ---- a/daemon/pull.go -+++ b/daemon/pull.go -@@ -16,6 +16,7 @@ package daemon - import ( - "context" - -+ dockerref "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -@@ -53,6 +54,10 @@ func (b *Backend) Pull(req *pb.PullRequest, stream pb.Control_PullServer) error - imageName: req.GetImageName(), - } - -+ if _, err := dockerref.Parse(opt.imageName); err != nil { -+ return err -+ } -+ - ctx := context.WithValue(stream.Context(), util.LogFieldKey(util.LogKeySessionID), req.GetPullID()) - eg, egCtx := 
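
With the CLI-side dockerref.Parse calls removed, Import, Pull and Push now validate the image name in the daemon, as the hunks above show. Assuming the same github.com/containers/image/v5 docker/reference package the diff imports, the behaviour the deleted CLI tests relied on can be reproduced in isolation:

package main

import (
	"fmt"

	dockerref "github.com/containers/image/v5/docker/reference"
)

func main() {
	// A well-formed name parses; a malformed one such as "busybox-:latest"
	// is rejected, which is what the deleted CLI tests used to assert.
	for _, name := range []string{"openeuler:latest", "busybox-:latest", ""} {
		if _, err := dockerref.Parse(name); err != nil {
			fmt.Printf("%q rejected: %v\n", name, err)
			continue
		}
		fmt.Printf("%q accepted\n", name)
	}
}
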
errgroup.WithContext(ctx) - eg.Go(pullHandler(egCtx, opt)) -diff --git a/daemon/push.go b/daemon/push.go -index 4e3a6ed..e36198d 100644 ---- a/daemon/push.go -+++ b/daemon/push.go -@@ -16,6 +16,7 @@ package daemon - import ( - "context" - -+ dockerref "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -@@ -59,7 +60,11 @@ func (b *Backend) Push(req *pb.PushRequest, stream pb.Control_PushServer) error - format: req.GetFormat(), - } - -- if err := exporter.CheckImageFormat(opt.format); err != nil { -+ if err := util.CheckImageFormat(opt.format); err != nil { -+ return err -+ } -+ -+ if _, err := dockerref.Parse(opt.imageName); err != nil { - return err - } - -@@ -111,11 +116,11 @@ func pushHandler(ctx context.Context, options pushOptions) func() error { - ManifestType: options.manifestType, - } - -- if err := exporter.Export(options.imageName, exporter.FormatTransport(exporter.DockerTransport, options.imageName), -+ if err := exporter.Export(options.imageName, exporter.FormatTransport(constant.DockerTransport, options.imageName), - exOpts, options.localStore); err != nil { - logrus.WithField(util.LogKeySessionID, options.pushID). -- Errorf("Push image %q of format %q failed with %v", options.imageName, exporter.DockerTransport, err) -- return errors.Wrapf(err, "push image %q of format %q failed", options.imageName, exporter.DockerTransport) -+ Errorf("Push image %q of format %q failed with %v", options.imageName, constant.DockerTransport, err) -+ return errors.Wrapf(err, "push image %q of format %q failed", options.imageName, constant.DockerTransport) - } - - return nil -diff --git a/daemon/save.go b/daemon/save.go -index fd6174b..de644c3 100644 ---- a/daemon/save.go -+++ b/daemon/save.go -@@ -71,10 +71,10 @@ func (b *Backend) Save(req *pb.SaveRequest, stream pb.Control_SaveServer) error - opts := b.getSaveOptions(req) - - switch opts.format { -- case exporter.DockerTransport: -- opts.format = exporter.DockerArchiveTransport -- case exporter.OCITransport: -- opts.format = exporter.OCIArchiveTransport -+ case constant.DockerTransport: -+ opts.format = constant.DockerArchiveTransport -+ case constant.OCITransport: -+ opts.format = constant.OCIArchiveTransport - default: - return errors.New("wrong image format provided") - } -diff --git a/exporter/common.go b/exporter/common.go -index 8f390ac..bded6ec 100644 ---- a/exporter/common.go -+++ b/exporter/common.go -@@ -209,17 +209,7 @@ func NewPolicyContext(sc *types.SystemContext) (*signature.PolicyContext, error) - // CheckArchiveFormat used to check if save or load image format is either docker-archive or oci-archive - func CheckArchiveFormat(format string) error { - switch format { -- case DockerArchiveTransport, OCIArchiveTransport: -- return nil -- default: -- return errors.New("wrong image format provided") -- } --} -- --// CheckImageFormat used to check if the image format is either docker or oci --func CheckImageFormat(format string) error { -- switch format { -- case DockerTransport, OCITransport: -+ case constant.DockerArchiveTransport, constant.OCIArchiveTransport: - return nil - default: - return errors.New("wrong image format provided") -@@ -228,7 +218,7 @@ func CheckImageFormat(format string) error { - - // FormatTransport for formatting transport with corresponding path - func FormatTransport(transport, path string) string { -- if transport == DockerTransport { -+ if transport == constant.DockerTransport { - return fmt.Sprintf("%s://%s", 
transport, path) - } - return fmt.Sprintf("%s:%s", transport, path) -@@ -238,23 +228,13 @@ func FormatTransport(transport, path string) string { - func GetManifestType(format string) (string, error) { - var manifestType string - switch format { -- case OCITransport: -+ case constant.OCITransport: - manifestType = imgspecv1.MediaTypeImageManifest -- case DockerTransport: -+ case constant.DockerTransport: - manifestType = manifest.DockerV2Schema2MediaType - default: -- return "", errors.Errorf("unknown format %q. Choose one of the supported formats: %s,%s", format, DockerTransport, OCITransport) -+ return "", errors.Errorf("unknown format %q. Choose one of the supported formats: %s,%s", format, constant.DockerTransport, constant.OCITransport) - } - return manifestType, nil - } - --// IsClientExporter used to determinate exporter whether need to send the image to client --func IsClientExporter(exporter string) bool { -- clientExporters := map[string]bool{ -- DockerArchiveTransport: true, -- OCIArchiveTransport: true, -- IsuladTransport: true, -- } -- _, ok := clientExporters[exporter] -- return ok --} -diff --git a/exporter/common_test.go b/exporter/common_test.go -index ca29296..7434d3b 100644 ---- a/exporter/common_test.go -+++ b/exporter/common_test.go -@@ -21,6 +21,8 @@ import ( - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "gotest.tools/v3/assert" - "gotest.tools/v3/fs" -+ -+ "isula.org/isula-build" - ) - - func TestFormatTransport(t *testing.T) { -@@ -36,13 +38,13 @@ func TestFormatTransport(t *testing.T) { - }{ - { - name: "docker format transport", -- transport: DockerTransport, -+ transport: constant.DockerTransport, - path: "registry.example.com/library/image:test", - result: "docker://registry.example.com/library/image:test", - }, - { - name: "oci-archive format transport", -- transport: OCIArchiveTransport, -+ transport: constant.OCIArchiveTransport, - path: ociArchiveFilePath, - result: "oci-archive:", - }, -@@ -59,43 +61,6 @@ func TestFormatTransport(t *testing.T) { - } - } - --func TestCheckImageFormat(t *testing.T) { -- testcases := []struct { -- name string -- format string -- wantErr bool -- errString string -- }{ -- { -- name: "docker image format", -- format: DockerTransport, -- wantErr: false, -- }, -- { -- name: "oci image format", -- format: OCITransport, -- wantErr: false, -- }, -- { -- name: "unknown image format", -- format: "you guess", -- wantErr: true, -- }, -- } -- for _, tc := range testcases { -- t.Run(tc.name, func(t *testing.T) { -- err := CheckImageFormat(tc.format) -- if tc.wantErr { -- assert.Error(t, err, "wrong image format provided") -- return -- } -- if !tc.wantErr { -- assert.NilError(t, err) -- } -- }) -- } --} -- - func TestCheckArchiveFormat(t *testing.T) { - testcases := []struct { - name string -@@ -105,12 +70,12 @@ func TestCheckArchiveFormat(t *testing.T) { - }{ - { - name: "docker-archive image format", -- format: DockerArchiveTransport, -+ format: constant.DockerArchiveTransport, - wantErr: false, - }, - { - name: "oci-archive imagee format", -- format: OCIArchiveTransport, -+ format: constant.OCIArchiveTransport, - wantErr: false, - }, - { -@@ -143,13 +108,13 @@ func TestGetManifestType(t *testing.T) { - }{ - { - name: "docker format manifest type", -- format: DockerTransport, -+ format: constant.DockerTransport, - manifest: manifest.DockerV2Schema2MediaType, - wantErr: false, - }, - { - name: "oci format manifest type", -- format: OCITransport, -+ format: constant.OCITransport, - manifest: 
imgspecv1.MediaTypeImageManifest, - wantErr: false, - }, -@@ -173,40 +138,3 @@ func TestGetManifestType(t *testing.T) { - } - } - --func TestIsClientExporter(t *testing.T) { -- testcases := []struct { -- name string -- exporter string -- wantResult bool -- }{ -- { -- name: "normal docker archive exporter", -- exporter: DockerArchiveTransport, -- wantResult: true, -- }, -- { -- name: "normal oci archive exporter", -- exporter: OCIArchiveTransport, -- wantResult: true, -- }, -- { -- name: "normal isulad exporter", -- exporter: IsuladTransport, -- wantResult: true, -- }, -- { -- name: "abnormal unkown", -- exporter: "unkown", -- wantResult: false, -- }, -- } -- -- for _, tc := range testcases { -- t.Run(tc.name, func(t *testing.T) { -- isExporter := IsClientExporter(tc.exporter) -- if isExporter != tc.wantResult { -- t.Fatal("test client exporter failed") -- } -- }) -- } --} -diff --git a/exporter/docker/archive/archive.go b/exporter/docker/archive/archive.go -index 5da3f53..04654cf 100644 ---- a/exporter/docker/archive/archive.go -+++ b/exporter/docker/archive/archive.go -@@ -25,6 +25,7 @@ import ( - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - -+ constant "isula.org/isula-build" - "isula.org/isula-build/exporter" - "isula.org/isula-build/image" - "isula.org/isula-build/store" -@@ -47,7 +48,7 @@ var DockerArchiveExporter = dockerArchiveExporter{ - } - - func (d *dockerArchiveExporter) Name() string { -- return exporter.DockerArchiveTransport -+ return constant.DockerArchiveTransport - } - - func (d *dockerArchiveExporter) Init(opts exporter.ExportOptions, src, destSpec string, localStore *store.Store) error { -diff --git a/exporter/docker/daemon/daemon.go b/exporter/docker/daemon/daemon.go -index c7e06b7..308495c 100644 ---- a/exporter/docker/daemon/daemon.go -+++ b/exporter/docker/daemon/daemon.go -@@ -21,6 +21,7 @@ import ( - "github.com/containers/image/v5/types" - "github.com/pkg/errors" - -+ constant "isula.org/isula-build" - "isula.org/isula-build/exporter" - "isula.org/isula-build/image" - "isula.org/isula-build/store" -@@ -40,7 +41,7 @@ var _dockerDaemonExporter = dockerDaemonExporter{ - } - - func (d *dockerDaemonExporter) Name() string { -- return exporter.DockerDaemonTransport -+ return constant.DockerDaemonTransport - } - - func (d *dockerDaemonExporter) Init(opts exporter.ExportOptions, src, destSpec string, localStore *store.Store) error { -diff --git a/exporter/docker/docker.go b/exporter/docker/docker.go -index e830811..987ee94 100644 ---- a/exporter/docker/docker.go -+++ b/exporter/docker/docker.go -@@ -21,6 +21,7 @@ import ( - "github.com/containers/image/v5/types" - "github.com/pkg/errors" - -+ constant "isula.org/isula-build" - "isula.org/isula-build/exporter" - "isula.org/isula-build/image" - "isula.org/isula-build/store" -@@ -41,7 +42,7 @@ var _dockerExporter = dockerExporter{ - } - - func (d *dockerExporter) Name() string { -- return exporter.DockerTransport -+ return constant.DockerTransport - } - - func (d *dockerExporter) Init(opts exporter.ExportOptions, src, destSpec string, localStore *store.Store) error { -diff --git a/exporter/exporter.go b/exporter/exporter.go -index af1148f..60af001 100644 ---- a/exporter/exporter.go -+++ b/exporter/exporter.go -@@ -21,29 +21,6 @@ import ( - "isula.org/isula-build/store" - ) - --const ( -- // DockerTransport used to export docker image format images to registry -- DockerTransport = "docker" -- -- // DockerArchiveTransport used to export docker image format images to local tarball -- DockerArchiveTransport = 
"docker-archive" -- -- // DockerDaemonTransport used to export images to docker daemon -- DockerDaemonTransport = "docker-daemon" -- -- // OCITransport used to export oci image format images to registry -- OCITransport = "oci" -- -- // OCIArchiveTransport used to export oci image format images to local tarball -- OCIArchiveTransport = "oci-archive" -- -- // IsuladTransport use to export images to isulad -- IsuladTransport = "isulad" -- -- // ManifestTransport used to export manifest list -- ManifestTransport = "manifest" --) -- - type exportHub struct { - items map[string]Exporter - sync.RWMutex -diff --git a/exporter/isulad/isulad.go b/exporter/isulad/isulad.go -index dd41d2d..73b0496 100644 ---- a/exporter/isulad/isulad.go -+++ b/exporter/isulad/isulad.go -@@ -46,7 +46,7 @@ var _isuladExporter = isuladExporter{ - } - - func (d *isuladExporter) Name() string { -- return exporter.IsuladTransport -+ return constant.IsuladTransport - } - - func (d *isuladExporter) Init(opts exporter.ExportOptions, src, destSpec string, localStore *store.Store) error { -diff --git a/exporter/manifest/manifest.go b/exporter/manifest/manifest.go -index b6b8d2a..1b06788 100644 ---- a/exporter/manifest/manifest.go -+++ b/exporter/manifest/manifest.go -@@ -22,6 +22,7 @@ import ( - "github.com/containers/image/v5/types" - "github.com/pkg/errors" - -+ constant "isula.org/isula-build" - "isula.org/isula-build/exporter" - "isula.org/isula-build/pkg/manifest" - "isula.org/isula-build/store" -@@ -42,7 +43,7 @@ var _manifestExporter = manifestExporter{ - } - - func (d *manifestExporter) Name() string { -- return exporter.ManifestTransport -+ return constant.ManifestTransport - } - - func (d *manifestExporter) Init(opts exporter.ExportOptions, src, destSpec string, localStore *store.Store) error { -diff --git a/exporter/oci/archive/archive.go b/exporter/oci/archive/archive.go -index 03f5e4f..9c39df3 100644 ---- a/exporter/oci/archive/archive.go -+++ b/exporter/oci/archive/archive.go -@@ -22,6 +22,7 @@ import ( - "github.com/containers/image/v5/types" - "github.com/pkg/errors" - -+ constant "isula.org/isula-build" - "isula.org/isula-build/exporter" - "isula.org/isula-build/image" - "isula.org/isula-build/store" -@@ -42,7 +43,7 @@ var _ociArchiveExporter = ociArchiveExporter{ - } - - func (o *ociArchiveExporter) Name() string { -- return exporter.OCIArchiveTransport -+ return constant.OCIArchiveTransport - } - - func (o *ociArchiveExporter) Init(opts exporter.ExportOptions, src, destSpec string, localStore *store.Store) error { -diff --git a/exporter/oci/oci.go b/exporter/oci/oci.go -index 2328a77..c0d703d 100644 ---- a/exporter/oci/oci.go -+++ b/exporter/oci/oci.go -@@ -20,6 +20,7 @@ import ( - "github.com/containers/image/v5/types" - "github.com/pkg/errors" - -+ constant "isula.org/isula-build" - "isula.org/isula-build/exporter" - "isula.org/isula-build/image" - "isula.org/isula-build/store" -@@ -39,7 +40,7 @@ var _ociExporter = ociExporter{ - } - - func (o *ociExporter) Name() string { -- return exporter.OCITransport -+ return constant.OCITransport - } - - func (o *ociExporter) Init(opts exporter.ExportOptions, src, destSpec string, localStore *store.Store) error { -diff --git a/image/image.go b/image/image.go -index 1e48039..91ab720 100644 ---- a/image/image.go -+++ b/image/image.go -@@ -152,7 +152,7 @@ func PullAndGetImageInfo(opt *PrepareImageOptions) (types.ImageReference, *stora - ) - - imageName := exporter.FormatTransport(transport, strImage) -- if transport == exporter.DockerArchiveTransport { -+ if transport == 
constant.DockerArchiveTransport { - srcRef, pErr = alltransports.ParseImageName(imageName + ":@" + strconv.Itoa(opt.ManifestIndex)) - } else { - srcRef, pErr = alltransports.ParseImageName(imageName) -@@ -255,7 +255,7 @@ func getLocalImageNameFromRef(store storage.Store, srcRef types.ImageReference) - return stringid.GenerateRandomID() + ":" + stringid.GenerateRandomID(), nil - } - -- if srcRef.Transport().Name() != exporter.DockerTransport { -+ if srcRef.Transport().Name() != constant.DockerTransport { - return "", errors.Errorf("the %s transport is not supported yet", srcRef.Transport().Name()) - } - -@@ -612,11 +612,11 @@ func tryResolveNameWithTransport(name string) (string, string) { - if len(splits) == 2 { - if trans := transports.Get(splits[0]); trans != nil { - switch trans.Name() { -- case exporter.DockerTransport: -+ case constant.DockerTransport: - // trim prefix if dest like docker://registry.example.com format - dest := strings.TrimPrefix(splits[1], "//") - return dest, trans.Name() -- case exporter.DockerArchiveTransport, exporter.OCIArchiveTransport: -+ case constant.DockerArchiveTransport, constant.OCIArchiveTransport: - dest := strings.TrimSpace(splits[1]) - return dest, trans.Name() - } -@@ -632,7 +632,7 @@ func tryResolveNameWithDockerReference(name string) (string, string, error) { - return "", "", errors.Wrapf(err, "error parsing image name %q", name) - } - if named.String() == name { -- return name, exporter.DockerTransport, nil -+ return name, constant.DockerTransport, nil - } - - domain := reference.Domain(named) -@@ -648,7 +648,7 @@ func tryResolveNameWithDockerReference(name string) (string, string, error) { - } - defaultPrefix := pathPrefix + string(os.PathSeparator) - if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):])+tag+digest == name { -- return name, exporter.DockerTransport, nil -+ return name, constant.DockerTransport, nil - } - } - -@@ -687,7 +687,7 @@ func tryResolveNameInRegistries(name string, sc *types.SystemContext) ([]string, - candidate := path.Join(registry, middle, name) - candidates = append(candidates, candidate) - } -- return candidates, exporter.DockerTransport -+ return candidates, constant.DockerTransport - } - - // CheckAndAddDefaultTag checks if src is format of repository[:tag], add default tag if src without tag -diff --git a/image/image_test.go b/image/image_test.go -index 17176d4..c698b4d 100644 ---- a/image/image_test.go -+++ b/image/image_test.go -@@ -25,7 +25,6 @@ import ( - "gotest.tools/v3/fs" - - constant "isula.org/isula-build" -- "isula.org/isula-build/exporter" - "isula.org/isula-build/store" - ) - -@@ -65,7 +64,7 @@ func TestTryResolveNameWithDockerReference(t *testing.T) { - var testcases = []testcase{ - { - name: "docker.io/library/busybox:latest", -- expectTrans: exporter.DockerTransport, -+ expectTrans: constant.DockerTransport, - errStr: "", - }, { - name: "busybox:latest", -@@ -122,5 +121,5 @@ registries = [] - name := "busybox:latest" - candidates, transport := tryResolveNameInRegistries(name, nil) - assert.Assert(t, cmp.Contains(candidates, "localhost/busybox:latest")) -- assert.Equal(t, transport, exporter.DockerTransport) -+ assert.Equal(t, transport, constant.DockerTransport) - } -diff --git a/pkg/manifest/list.go b/pkg/manifest/list.go -index 381746f..bc6037f 100644 ---- a/pkg/manifest/list.go -+++ b/pkg/manifest/list.go -@@ -24,6 +24,7 @@ import ( - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - -+ constant "isula.org/isula-build" - pb 
"isula.org/isula-build/api/services" - "isula.org/isula-build/builder/dockerfile/container" - "isula.org/isula-build/exporter" -@@ -60,7 +61,7 @@ func NewManifestList() *List { - func (l *List) AddImage(ctx context.Context, store *store.Store, imageSpec string) (digest.Digest, error) { - img, _, err := image.ResolveFromImage(&image.PrepareImageOptions{ - Ctx: ctx, -- FromImage: exporter.FormatTransport(exporter.DockerTransport, imageSpec), -+ FromImage: exporter.FormatTransport(constant.DockerTransport, imageSpec), - SystemContext: image.GetSystemContext(), - Store: store, - }) -diff --git a/util/util.go b/util/util.go -index 3f46d79..f527608 100644 ---- a/util/util.go -+++ b/util/util.go -@@ -315,3 +315,52 @@ func ChangeGroup(path, g string) error { - } - return nil - } -+ -+// GenerateNonCryptoID generate none crypto id with length 32 -+func GenerateNonCryptoID() string { -+ b := make([]byte, 32) -+ _, err := rand.Read(b) -+ if err != nil { -+ panic(err) // This shouldn't happen -+ } -+ id := fmt.Sprintf("%x", b) -+ -+ return id -+} -+ -+// IsSupportExporter returns true when the specific exporter is supported -+func IsSupportExporter(name string) bool { -+ exporters := map[string]bool{ -+ constant.DockerTransport: true, -+ constant.DockerArchiveTransport: true, -+ constant.DockerDaemonTransport: true, -+ constant.OCITransport: true, -+ constant.OCIArchiveTransport: true, -+ constant.IsuladTransport: true, -+ constant.ManifestTransport: true, -+ } -+ _, ok := exporters[name] -+ -+ return ok -+} -+ -+// CheckImageFormat used to check if the image format is either docker or oci -+func CheckImageFormat(format string) error { -+ switch format { -+ case constant.DockerTransport, constant.OCITransport: -+ return nil -+ default: -+ return errors.New("wrong image format provided") -+ } -+} -+ -+// IsClientExporter used to determinate exporter whether need to send the image to client -+func IsClientExporter(exporter string) bool { -+ clientExporters := map[string]bool{ -+ constant.DockerArchiveTransport: true, -+ constant.OCIArchiveTransport: true, -+ constant.IsuladTransport: true, -+ } -+ _, ok := clientExporters[exporter] -+ return ok -+} -diff --git a/util/util_test.go b/util/util_test.go -index 722c2a3..db57393 100644 ---- a/util/util_test.go -+++ b/util/util_test.go -@@ -315,3 +315,97 @@ func TestSetDaemonLock(t *testing.T) { - _, err = SetDaemonLock(root, name) - assert.ErrorContains(t, err, "check if there is another daemon running") - } -+ -+func TestGenerateNonCryptoID(t *testing.T) { -+ tests := []struct { -+ name string -+ want int -+ }{ -+ { -+ name: "TC1 - generate id", -+ want:64, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if got := GenerateNonCryptoID(); len(got) != tt.want { -+ t.Errorf("GenerateNonCryptoID() = %v, want %v", got, tt.want) -+ } -+ }) -+ } -+} -+ -+func TestCheckImageFormat(t *testing.T) { -+ testcases := []struct { -+ name string -+ format string -+ wantErr bool -+ errString string -+ }{ -+ { -+ name: "docker image format", -+ format: constant.DockerTransport, -+ wantErr: false, -+ }, -+ { -+ name: "oci image format", -+ format: constant.OCITransport, -+ wantErr: false, -+ }, -+ { -+ name: "unknown image format", -+ format: "you guess", -+ wantErr: true, -+ }, -+ } -+ for _, tc := range testcases { -+ t.Run(tc.name, func(t *testing.T) { -+ err := CheckImageFormat(tc.format) -+ if tc.wantErr { -+ assert.Error(t, err, "wrong image format provided") -+ return -+ } -+ if !tc.wantErr { -+ assert.NilError(t, err) -+ } -+ }) -+ } 
-+} -+ -+func TestIsClientExporter(t *testing.T) { -+ testcases := []struct { -+ name string -+ exporter string -+ wantResult bool -+ }{ -+ { -+ name: "normal docker archive exporter", -+ exporter: constant.DockerArchiveTransport, -+ wantResult: true, -+ }, -+ { -+ name: "normal oci archive exporter", -+ exporter: constant.OCIArchiveTransport, -+ wantResult: true, -+ }, -+ { -+ name: "normal isulad exporter", -+ exporter: constant.IsuladTransport, -+ wantResult: true, -+ }, -+ { -+ name: "abnormal unkown", -+ exporter: "unkown", -+ wantResult: false, -+ }, -+ } -+ -+ for _, tc := range testcases { -+ t.Run(tc.name, func(t *testing.T) { -+ isExporter := IsClientExporter(tc.exporter) -+ if isExporter != tc.wantResult { -+ t.Fatal("test client exporter failed") -+ } -+ }) -+ } -+} --- -1.8.3.1 - diff --git a/patch/0053-integration-test-from-new-flaw-of-run-and-data-root-.patch b/patch/0053-integration-test-from-new-flaw-of-run-and-data-root-.patch deleted file mode 100644 index 1e19829..0000000 --- a/patch/0053-integration-test-from-new-flaw-of-run-and-data-root-.patch +++ /dev/null @@ -1,186 +0,0 @@ -From 78d5ee37ff4b2b3ef0a3e3031087d8cdb2e0c0cd Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Sun, 30 May 2021 20:55:07 +0800 -Subject: [PATCH 5/5] integration test from new flaw of run and data root set - ---- - Makefile | 18 ++++++--- - README.zh.md | 2 +- - tests/src/test_integration_set_new_root.sh | 60 ++++++++++++++++++++++++++++++ - tests/test.sh | 29 +++++++++++++-- - 4 files changed, 98 insertions(+), 11 deletions(-) - create mode 100644 tests/src/test_integration_set_new_root.sh - -diff --git a/Makefile b/Makefile -index cbace59..f8578a4 100644 ---- a/Makefile -+++ b/Makefile -@@ -73,13 +73,13 @@ debug: - build-image: - isula-build ctr-img build -f Dockerfile.proto ${IMAGE_BUILDARGS} -o isulad:${IMAGE_NAME}:latest . - --tests: test-integration test-unit -+tests: test-base test-unit test-integration - --.PHONY: test-integration --test-integration: -- @echo "Integration test starting..." -- @./tests/test.sh -- @echo "Integration test done!" -+.PHONY: test-base -+test-base: -+ @echo "Base test starting..." -+ @./tests/test.sh base -+ @echo "Base test done!" - - .PHONY: test-unit - test-unit: -@@ -87,6 +87,12 @@ test-unit: - @./hack/unit_test.sh - @echo "Unit test done!" - -+.PHONY: test-integration -+test-integration: -+ @echo "Integration test starting..." -+ @./tests/test.sh integration -+ @echo "Integration test done!" -+ - .PHONY: proto - proto: - @echo "Generating protobuf..." -diff --git a/README.zh.md b/README.zh.md -index 4b53ba3..15301c0 100644 ---- a/README.zh.md -+++ b/README.zh.md -@@ -106,7 +106,7 @@ sudo rpm -ivh isula-build-*.rpm - 如果需要使用`systemd`进行管理isula-build,请参考以下步骤: - - ```sh --sudo install -p -m 640 ./isula-build.service /etc/systemd/system/isula-build. -+sudo install -p -m 640 ./isula-build.service /etc/systemd/system/isula-build.service - sudo systemctl enable isula-build - sudo systemctl start isula-build - ``` -diff --git a/tests/src/test_integration_set_new_root.sh b/tests/src/test_integration_set_new_root.sh -new file mode 100644 -index 0000000..85b724a ---- /dev/null -+++ b/tests/src/test_integration_set_new_root.sh -@@ -0,0 +1,60 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. 
-+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. -+# Author: Weizheng Xing -+# Create: 2021-05-29 -+# Description: test set new run and data root in configuration.toml -+ -+run_root="/var/run/new-isula-build" -+data_root="/var/lib/new-isula-build" -+config_file="/etc/isula-build/configuration.toml" -+base_image="hub.oepkgs.net/openeuler/openeuler:21.03" -+ -+function clean() -+{ -+ isula-build ctr-img rm $base_image >/dev/null 2>&1 -+ rm -f $config_file -+ mv "$config_file".bak $config_file -+ systemctl stop isula-build -+ rm -rf $run_root $data_root -+} -+ -+# change to new data and run root -+function pre_test() -+{ -+ cp $config_file "$config_file".bak -+ sed -i "/run_root/d;/data_root/d" $config_file -+ echo "run_root = \"${run_root}\"" >> $config_file -+ echo "data_root = \"${data_root}\"" >> $config_file -+ -+ systemctl restart isula-build -+} -+ -+# check if new resources are downloaded in new root -+function do_test() -+{ -+ tree_node_befor=$(tree -L 3 $data_root | wc -l) -+ isula-build ctr-img pull $base_image >/dev/null 2>&1 -+ tree_node_after=$(tree -L 3 $data_root | wc -l) -+ -+ if [ $(($tree_node_after - $tree_node_befor)) -eq 8 ]; then -+ echo "PASS" -+ else -+ echo "Sets of run and data root are not effective" -+ clean -+ exit 1 -+ fi -+} -+ -+# clean -+pre_test -+do_test -+clean -diff --git a/tests/test.sh b/tests/test.sh -index 79fde8a..e04cc96 100755 ---- a/tests/test.sh -+++ b/tests/test.sh -@@ -2,8 +2,8 @@ - - top_dir=$(git rev-parse --show-toplevel) - --# normal test --function normal() { -+# base test -+function base() { - source "$top_dir"/tests/lib/common.sh - pre_check - start_isula_builder -@@ -33,15 +33,36 @@ function fuzz() { - exit $failed - } - -+# base test -+function integration() { -+ source "$top_dir"/tests/lib/common.sh -+ pre_check -+ systemctl restart isula-build -+ -+ while IFS= read -r testfile; do -+ printf "%-45s" "test $(basename "$testfile"): " -+ if ! bash "$testfile"; then -+ exit 1 -+ fi -+ done < <(find "$top_dir"/tests/src -maxdepth 1 -name "test_integration*" -type f -print) -+} -+ - # main function to chose which kind of test - function main() { - case "$1" in - fuzz) - fuzz "$2" - ;; -+ base) -+ base -+ ;; -+ integration) -+ integration -+ ;; - *) -- normal -- ;; -+ echo "Unknow test type." 
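
The removed integration test decides whether the new data_root is actually used by comparing "tree -L 3 | wc -l" before and after a pull. The same idea expressed in Go, counting entries down to a fixed depth under a root directory, might look like the sketch below; the path in main is just an example.

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

// countEntries counts entries at most maxDepth levels below root, roughly
// what the removed shell test measures with tree and wc.
func countEntries(root string, maxDepth int) (int, error) {
	count := 0
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if path == root {
			return nil
		}
		rel, relErr := filepath.Rel(root, path)
		if relErr != nil {
			return relErr
		}
		count++
		// Count a directory sitting at maxDepth but do not descend into it.
		if d.IsDir() && strings.Count(rel, string(os.PathSeparator)) >= maxDepth-1 {
			return fs.SkipDir
		}
		return nil
	})
	return count, err
}

func main() {
	fmt.Println(countEntries("/var/lib/containers/storage", 3))
}
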
-+ exit 1 -+ ;; - esac - } - --- -1.8.3.1 - diff --git a/patch/0054-isula-build-cleancode-for-errors.Wrap-function.patch b/patch/0054-isula-build-cleancode-for-errors.Wrap-function.patch deleted file mode 100644 index 35f834a..0000000 --- a/patch/0054-isula-build-cleancode-for-errors.Wrap-function.patch +++ /dev/null @@ -1,86 +0,0 @@ -From 80422e04f262f925458e7cee6986edb0903cef71 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Tue, 15 Jun 2021 20:01:25 +0800 -Subject: [PATCH] isula-build:cleancode for errors.Wrap function - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - cmd/cli/grpc_client.go | 2 +- - daemon/load.go | 2 +- - exporter/common.go | 2 +- - image/image.go | 6 +++--- - 4 files changed, 6 insertions(+), 6 deletions(-) - -diff --git a/cmd/cli/grpc_client.go b/cmd/cli/grpc_client.go -index 5f10fb0..cab5950 100644 ---- a/cmd/cli/grpc_client.go -+++ b/cmd/cli/grpc_client.go -@@ -82,7 +82,7 @@ func NewClient(ctx context.Context) (*GrpcClient, error) { - defer cancel() - connected, err := cli.HealthCheck(healthCtx) - if !connected || err != nil { -- return nil, errors.Wrapf(err, "Cannot connect to the isula-builder at %s. Is the isula-builder running?\nError", constant.DefaultGRPCAddress) -+ return nil, errors.Errorf( "Cannot connect to the isula-builder at %s. Is the isula-builder running?\nError: %v", constant.DefaultGRPCAddress, err) - } - - return cli, nil -diff --git a/daemon/load.go b/daemon/load.go -index f2d818f..2fb8e27 100644 ---- a/daemon/load.go -+++ b/daemon/load.go -@@ -154,7 +154,7 @@ func getDockerRepoTagFromImageTar(systemContext *types.SystemContext, path strin - - topLevelImageManifest, err := tarfileSource.LoadTarManifest() - if err != nil || len(topLevelImageManifest) == 0 { -- return nil, errors.Wrapf(err, "failed to get the top level image manifest") -+ return nil, errors.Errorf("failed to get the top level image manifest: %v", err) - } - - var allRepoTags [][]string -diff --git a/exporter/common.go b/exporter/common.go -index bded6ec..cd976d2 100644 ---- a/exporter/common.go -+++ b/exporter/common.go -@@ -132,7 +132,7 @@ func export(e Exporter, exOpts ExportOptions) (reference.Canonical, digest.Diges - - destRef, srcRef := e.GetDestRef(exOpts.ExportID), e.GetSrcRef(exOpts.ExportID) - if destRef == nil || srcRef == nil { -- return nil, "", errors.Wrapf(err, "get dest or src reference by export ID %v failed", exOpts.ExportID) -+ return nil, "", errors.Errorf("get dest or src reference by export ID %v failed %v", exOpts.ExportID, err) - } - - if manifestBytes, err = cp.Image(exOpts.Ctx, policyContext, destRef, srcRef, cpOpts); err != nil { -diff --git a/image/image.go b/image/image.go -index 91ab720..e06d253 100644 ---- a/image/image.go -+++ b/image/image.go -@@ -314,7 +314,7 @@ func createImageV2Image(ctx context.Context, fromImage types.Image, targetMIMETy - ManifestMIMEType: targetMIMEType, - }) - if err2 != nil { -- return nil, errors.Wrapf(err, "failed to convert image %q", imageName) -+ return nil, errors.Wrapf(err2, "failed to convert image %q", imageName) - } - fromImage = updatedImg - } -@@ -533,7 +533,7 @@ func ParseImagesToReference(store *store.Store, names []string) (types.ImageRefe - // For support export archive file, we need provide reference.Named field when names is the format of name[:tag] not the image ID - pRef, pErr := reference.Parse(name) - if pErr != nil { -- return nil, nil, errors.Wrapf(err, "error parse name %q", name) -+ return nil, nil, errors.Wrapf(pErr, "error parse name %q", name) - } - namedRef, isNamed := 
pRef.(reference.Named) - if !isNamed { -@@ -543,7 +543,7 @@ func ParseImagesToReference(store *store.Store, names []string) (types.ImageRefe - var nErr error - ref, nErr = is.Transport.NewStoreReference(store, namedRef, img2.ID) - if nErr != nil { -- return nil, nil, errors.Wrap(err, "error get reference from store") -+ return nil, nil, errors.Wrap(nErr, "error get reference from store") - } - } - break --- -1.8.3.1 - diff --git a/patch/0055-isula-build-change-isula-build-file-mode.patch b/patch/0055-isula-build-change-isula-build-file-mode.patch deleted file mode 100644 index 2164b67..0000000 --- a/patch/0055-isula-build-change-isula-build-file-mode.patch +++ /dev/null @@ -1,49 +0,0 @@ -From f22214ca3bcb452238d2390a06891cf6d446e8ac Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Mon, 26 Jul 2021 16:58:31 +0800 -Subject: [PATCH] isula-build: change isula-build file mode - -reason: since isula-build client file mode is too large(0551), -we decided to remove other's permission(0550) on it. -Beside, we change the public key(isula-build.pub) file -mode to 0400(from 0444), so only the owner of the public -key can read the key. -After this commit, if the non-root user want to use command -login, logout, build with args(http_proxy, https_proxy, etc...), -they need use sudo to temporarily obtain root permission. - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - Makefile | 2 +- - constant.go | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/Makefile b/Makefile -index 925968a..a9d4c93 100644 ---- a/Makefile -+++ b/Makefile -@@ -102,7 +102,7 @@ proto: - - .PHONY: install - install: -- install -D -m0551 bin/isula-build $(BINDIR) -+ install -D -m0550 bin/isula-build $(BINDIR) - install -D -m0550 bin/isula-builder $(BINDIR) - @( getent group isula > /dev/null ) || ( groupadd --system isula ) - @[ ! 
-d ${CONFIG_DIR}/${CONFIG_FILE} ] && install -dm0650 ${CONFIG_DIR} -diff --git a/constant.go b/constant.go -index 9926728..bfe399b 100644 ---- a/constant.go -+++ b/constant.go -@@ -50,7 +50,7 @@ const ( - // DefaultRootDirMode is the default root dir mode - DefaultRootDirMode = 0700 - // DefaultReadOnlyFileMode is the default root read only file mode -- DefaultReadOnlyFileMode = 0444 -+ DefaultReadOnlyFileMode = 0400 - // DefaultUmask is the working umask of isula-builder as a process, not for users - DefaultUmask = 0022 - // CliLogBufferLen is log channel buffer size --- -1.8.3.1 - diff --git a/patch/0056-isula-build-update-documents-about-file-mode.patch b/patch/0056-isula-build-update-documents-about-file-mode.patch deleted file mode 100644 index 1ab181f..0000000 --- a/patch/0056-isula-build-update-documents-about-file-mode.patch +++ /dev/null @@ -1,58 +0,0 @@ -From e024b5b7ddabca7f7ff0cebd05366146df9832ab Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Mon, 26 Jul 2021 19:04:49 +0800 -Subject: [PATCH] isula-build: update documents about file mode - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - doc/manual_en.md | 4 ++-- - doc/manual_zh.md | 4 ++-- - 2 files changed, 4 insertions(+), 4 deletions(-) - -diff --git a/doc/manual_en.md b/doc/manual_en.md -index c23b306..3064c17 100644 ---- a/doc/manual_en.md -+++ b/doc/manual_en.md -@@ -1070,7 +1070,7 @@ The isula-build component processes communicate with each other through the Unix - - | **File Path** | **File/Folder Permission** | **Description** | - | ------------------------------------------- | ------------------- | ------------------------------------------------------------ | --| /usr/bin/isula-build | 551 | Binary file of the command line tool. | -+| /usr/bin/isula-build | 550 | Binary file of the command line tool. | - | /usr/bin/isula-builder | 550 | Binary file of the isula-builder process on the server. | - | /usr/lib/systemd/system/isula-build.service | 640 | systemd configuration file, which is used to manage the isula-build service. | - | /usr/isula-build | 650 | Root directory of the isula-builder configuration file. | -@@ -1078,7 +1078,7 @@ The isula-build component processes communicate with each other through the Unix - | /etc/isula-build/policy.json | 600 | Syntax file of the signature verification policy file. | - | /etc/isula-build/registries.toml | 600 | Configuration file of each image repository, including the available image repository list and image repository blacklist. | - | /etc/isula-build/storage.toml | 600 | Configuration file for local persistent storage, including the configuration of the used storage driver. | --| /etc/isula-build/isula-build.pub | 444 | Asymmetric encryption public key file. | -+| /etc/isula-build/isula-build.pub | 400 | Asymmetric encryption public key file. | - | /var/run/isula_build.sock | 660 | Local socket of isula-builder. | - | /var/lib/isula-build | 700 | Local persistency directory. | - | /var/run/isula-build | 700 | Local runtime directory. 
| -diff --git a/doc/manual_zh.md b/doc/manual_zh.md -index c0234f1..8104305 100644 ---- a/doc/manual_zh.md -+++ b/doc/manual_zh.md -@@ -1060,7 +1060,7 @@ isula-build两个组件进程之间通过unix socket套接字文件进行通信 - - | **文件路径** | **文件/文件夹权限** | **说明** | - | ------------------------------------------- | ------------------- | ------------------------------------------------------------ | --| /usr/bin/isula-build | 551 | 命令行工具二进制文件。 | -+| /usr/bin/isula-build | 550 | 命令行工具二进制文件。 | - | /usr/bin/isula-builder | 550 | 服务端isula-builder进程二进制文件。 | - | /usr/lib/systemd/system/isula-build.service | 640 | systemd配置文件,用于管理isula-build服务。 | - | /etc/isula-build | 650 | isula-builder 配置文件根目录 | -@@ -1068,7 +1068,7 @@ isula-build两个组件进程之间通过unix socket套接字文件进行通信 - | /etc/isula-build/policy.json | 600 | 签名验证策略文件的语法文件。 | - | /etc/isula-build/registries.toml | 600 | 针对各个镜像仓库的配置文件,含可用的镜像仓库列表、镜像仓库黑名单。 | - | /etc/isula-build/storage.toml | 600 | 本地持久化存储的配置文件,包含所使用的存储驱动的配置。 | --| /etc/isula-build/isula-build.pub | 444 | 非对称加密公钥文件 | -+| /etc/isula-build/isula-build.pub | 400 | 非对称加密公钥文件 | - | /var/run/isula_build.sock | 660 | 服务端isula-builder的本地套接字。 | - | /var/lib/isula-build | 700 | 本地持久化目录。 | - | /var/run/isula-build | 700 | 本地运行时目录。 | --- -1.8.3.1 - diff --git a/patch/0057-bugfix-pidofbuilder-do-not-set-when-running-a-new-ba.patch b/patch/0057-bugfix-pidofbuilder-do-not-set-when-running-a-new-ba.patch deleted file mode 100644 index 63f03cd..0000000 --- a/patch/0057-bugfix-pidofbuilder-do-not-set-when-running-a-new-ba.patch +++ /dev/null @@ -1,27 +0,0 @@ -From c1061acaafa4120075fe83ca8075e593403cb6f8 Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Sun, 22 Aug 2021 12:21:21 +0800 -Subject: [PATCH 1/4] bugfix: pidofbuilder do not set when running a new bash - script in new child process - ---- - tests/lib/common.sh | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/tests/lib/common.sh b/tests/lib/common.sh -index f393ee1..5cd66ff 100755 ---- a/tests/lib/common.sh -+++ b/tests/lib/common.sh -@@ -13,6 +13,9 @@ - # Create: 2020-03-01 - # Description: common functions for tests - -+# cross process environment for killing isula-builder -+declare -x pidofbuilder -+ - # check if legacy builder exists - function pre_check() { - if pgrep isula-builder > /dev/null 2>&1; then --- -1.8.3.1 - diff --git a/patch/0058-shellcheck-fix-of-common.sh.patch b/patch/0058-shellcheck-fix-of-common.sh.patch deleted file mode 100644 index 32953a1..0000000 --- a/patch/0058-shellcheck-fix-of-common.sh.patch +++ /dev/null @@ -1,90 +0,0 @@ -From 29ad6f4d4de67e143ba0ab7bba1ca3668cda9797 Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Tue, 24 Aug 2021 19:23:55 +0800 -Subject: [PATCH 2/4] shellcheck fix of common.sh - ---- - tests/lib/common.sh | 28 +++++++++++++--------------- - 1 file changed, 13 insertions(+), 15 deletions(-) - -diff --git a/tests/lib/common.sh b/tests/lib/common.sh -index f393ee1..2099eac 100755 ---- a/tests/lib/common.sh -+++ b/tests/lib/common.sh -@@ -123,8 +123,7 @@ function test_build_with_oci_archive_output() { - - # test build image with docker-daemon output - function test_build_with_docker_daemon_output() { -- systemctl status docker > /dev/null 2>&1 -- if [ $? -ne 0 ]; then -+ if ! systemctl status docker > /dev/null 2>&1; then - return 0 - fi - -@@ -146,9 +145,8 @@ function test_build_with_docker_daemon_output() { - } - - # test build image with isulad output --function test_build_with_isulad_output() { -- systemctl status isulad > /dev/null 2>&1 -- if [ $? 
-ne 0 ]; then -+function test_build_with_isulad_output() { -+ if ! systemctl status isulad > /dev/null 2>&1; then - return 0 - fi - -@@ -172,41 +170,41 @@ function test_build_with_isulad_output() { - # test isula build base command - function test_isula_build_base_command() { - show_and_run_command "Build docker format image:" \ -- " isula-build ctr-img build --tag "$1"-docker:latest --output=docker-archive:/tmp/"$1"-docker.tar:"$1"-docker:latest "$2"" -+ " isula-build ctr-img build --tag $1-docker:latest --output=docker-archive:/tmp/$1-docker.tar:$1-docker:latest $2" - - show_and_run_command "Build oci format image:" \ -- "isula-build ctr-img build --tag "$1"-oci:latest --output=oci-archive:/tmp/"$1"-oci.tar:"$1"-oci:latest "$2"" -+ "isula-build ctr-img build --tag $1-oci:latest --output=oci-archive:/tmp/$1-oci.tar:$1-oci:latest $2" - - show_and_run_command "List all images:" \ - "isula-build ctr-img images" - - show_and_run_command "List docker format image:" \ -- "isula-build ctr-img images "$1"-docker:latest" -+ "isula-build ctr-img images $1-docker:latest" - - show_and_run_command "List oci format image:" \ -- "isula-build ctr-img images "$1"-oci:latest" -+ "isula-build ctr-img images $1-oci:latest" - - rm -f /tmp/"$1"-docker.tar /tmp/"$1"-oci.tar - - show_and_run_command "Save image with docker format:" \ -- "isula-build ctr-img save -f docker "$1"-docker:latest -o /tmp/"$1"-docker.tar" -+ "isula-build ctr-img save -f docker $1-docker:latest -o /tmp/$1-docker.tar" - - show_and_run_command "Save image with oci format:" \ -- "isula-build ctr-img save -f oci "$1"-oci:latest -o /tmp/"$1"-oci.tar" -+ "isula-build ctr-img save -f oci $1-oci:latest -o /tmp/$1-oci.tar" - - show_and_run_command "Load docker format images:" \ -- "isula-build ctr-img load -i /tmp/"$1"-docker.tar" -+ "isula-build ctr-img load -i /tmp/$1-docker.tar" - - show_and_run_command "Load oci format images:" \ -- "isula-build ctr-img load -i /tmp/"$1"-oci.tar" -+ "isula-build ctr-img load -i /tmp/$1-oci.tar" - - show_and_run_command "Save multipile images with docker format:" \ -- "isula-build ctr-img save -f docker "$1"-docker:latest "$1"-oci:latest -o /tmp/"$1"-all.tar" -+ "isula-build ctr-img save -f docker $1-docker:latest $1-oci:latest -o /tmp/$1-all.tar" - - rm -f /tmp/"$1"-docker.tar /tmp/"$1"-oci.tar /tmp/"$1"-all.tar - - show_and_run_command "Remove images:" \ -- "isula-build ctr-img rm "$1"-docker:latest "$1"-oci:latest" -+ "isula-build ctr-img rm $1-docker:latest $1-oci:latest" - } - - function show_and_run_command() { --- -1.8.3.1 - diff --git a/patch/0059-bugfix-fix-save-multiple-tags-single-image-failed.patch b/patch/0059-bugfix-fix-save-multiple-tags-single-image-failed.patch deleted file mode 100644 index ee957ae..0000000 --- a/patch/0059-bugfix-fix-save-multiple-tags-single-image-failed.patch +++ /dev/null @@ -1,309 +0,0 @@ -From caf11183fa91a301402a00a302e81894861f9957 Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Sun, 22 Aug 2021 12:31:04 +0800 -Subject: [PATCH 3/4] bugfix: fix save multiple tags single image failed - ---- - daemon/save.go | 145 ++++++++++++++++++++++++------------- - exporter/docker/archive/archive.go | 23 +----- - image/image.go | 18 ----- - 3 files changed, 95 insertions(+), 91 deletions(-) - -diff --git a/daemon/save.go b/daemon/save.go -index de644c3..7ad1285 100644 ---- a/daemon/save.go -+++ b/daemon/save.go -@@ -17,6 +17,7 @@ import ( - "context" - "os" - -+ "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/pkg/errors" 
- "github.com/sirupsen/logrus" -@@ -33,26 +34,30 @@ import ( - ) - - type saveOptions struct { -- sysCtx *types.SystemContext -- logger *logger.Logger -- localStore *store.Store -- logEntry *logrus.Entry -- saveID string -- outputPath string -- oriImgList []string -- format string -+ sysCtx *types.SystemContext -+ localStore *store.Store -+ saveID string -+ format string -+ oriImgList []string -+ finalImageOrdered []string -+ finalImageSet map[string][]reference.NamedTagged -+ outputPath string -+ logger *logger.Logger -+ logEntry *logrus.Entry - } - - func (b *Backend) getSaveOptions(req *pb.SaveRequest) saveOptions { - return saveOptions{ -- sysCtx: image.GetSystemContext(), -- logger: logger.NewCliLogger(constant.CliLogBufferLen), -- localStore: b.daemon.localStore, -- saveID: req.GetSaveID(), -- outputPath: req.GetPath(), -- oriImgList: req.GetImages(), -- format: req.GetFormat(), -- logEntry: logrus.WithFields(logrus.Fields{"SaveID": req.GetSaveID(), "Format": req.GetFormat()}), -+ sysCtx: image.GetSystemContext(), -+ localStore: b.daemon.localStore, -+ saveID: req.GetSaveID(), -+ format: req.GetFormat(), -+ oriImgList: req.GetImages(), -+ finalImageOrdered: make([]string, 0), -+ finalImageSet: make(map[string][]reference.NamedTagged), -+ outputPath: req.GetPath(), -+ logger: logger.NewCliLogger(constant.CliLogBufferLen), -+ logEntry: logrus.WithFields(logrus.Fields{"SaveID": req.GetSaveID(), "Format": req.GetFormat()}), - } - } - -@@ -63,28 +68,14 @@ func (b *Backend) Save(req *pb.SaveRequest, stream pb.Control_SaveServer) error - "Format": req.GetFormat(), - }).Info("SaveRequest received") - -- var ( -- ok bool -- err error -- ) -- -+ var err error - opts := b.getSaveOptions(req) - -- switch opts.format { -- case constant.DockerTransport: -- opts.format = constant.DockerArchiveTransport -- case constant.OCITransport: -- opts.format = constant.OCIArchiveTransport -- default: -- return errors.New("wrong image format provided") -+ if err = checkFormatAndExpandTag(&opts); err != nil { -+ return err - } -- -- for i, imageName := range opts.oriImgList { -- nameWithTag, cErr := image.CheckAndAddDefaultTag(imageName, opts.localStore) -- if cErr != nil { -- return cErr -- } -- opts.oriImgList[i] = nameWithTag -+ if err = filterImageName(&opts); err != nil { -+ return err - } - - defer func() { -@@ -98,26 +89,18 @@ func (b *Backend) Save(req *pb.SaveRequest, stream pb.Control_SaveServer) error - ctx := context.WithValue(stream.Context(), util.LogFieldKey(util.LogKeySessionID), opts.saveID) - eg, _ := errgroup.WithContext(ctx) - -- eg.Go(exportHandler(ctx, opts)) -+ eg.Go(exportHandler(ctx, &opts)) - eg.Go(messageHandler(stream, opts.logger)) -- errC := make(chan error, 1) - -- errC <- eg.Wait() -- defer close(errC) -- -- err, ok = <-errC -- if !ok { -- opts.logEntry.Info("Channel errC closed") -- return nil -- } -- if err != nil { -+ if err = eg.Wait(); err != nil { -+ opts.logEntry.Warnf("Save stream closed with: %v", err) - return err - } - - return nil - } - --func exportHandler(ctx context.Context, opts saveOptions) func() error { -+func exportHandler(ctx context.Context, opts *saveOptions) func() error { - return func() error { - defer func() { - opts.logger.CloseContent() -@@ -129,18 +112,22 @@ func exportHandler(ctx context.Context, opts saveOptions) func() error { - } - }() - -- for _, imageID := range opts.oriImgList { -+ for _, imageID := range opts.finalImageOrdered { -+ copyCtx := *opts.sysCtx -+ // It's ok for DockerArchiveAdditionalTags == nil, as a result, no additional tags 
will be appended to the final archive file. -+ copyCtx.DockerArchiveAdditionalTags = opts.finalImageSet[imageID] -+ - exOpts := exporter.ExportOptions{ - Ctx: ctx, -- SystemContext: opts.sysCtx, -+ SystemContext: ©Ctx, - ExportID: opts.saveID, - ReportWriter: opts.logger, - } - - if err := exporter.Export(imageID, exporter.FormatTransport(opts.format, opts.outputPath), - exOpts, opts.localStore); err != nil { -- opts.logEntry.Errorf("Save Image %s output to %s failed with: %v", imageID, opts.format, err) -- return errors.Wrapf(err, "save Image %s output to %s failed", imageID, opts.format) -+ opts.logEntry.Errorf("Save image %q in format %q failed: %v", imageID, opts.format, err) -+ return errors.Wrapf(err, "save image %q in format %q failed", imageID, opts.format) - } - } - -@@ -164,3 +151,59 @@ func messageHandler(stream pb.Control_SaveServer, cliLogger *logger.Logger) func - return nil - } - } -+ -+func checkFormatAndExpandTag(opts *saveOptions) error { -+ switch opts.format { -+ case constant.DockerTransport: -+ opts.format = constant.DockerArchiveTransport -+ case constant.OCITransport: -+ opts.format = constant.OCIArchiveTransport -+ default: -+ return errors.New("wrong image format provided") -+ } -+ -+ for i, imageName := range opts.oriImgList { -+ nameWithTag, err := image.CheckAndAddDefaultTag(imageName, opts.localStore) -+ if err != nil { -+ return errors.Wrapf(err, "check format and expand tag failed with image name %q", imageName) -+ } -+ opts.oriImgList[i] = nameWithTag -+ } -+ -+ return nil -+} -+ -+func filterImageName(opts *saveOptions) error { -+ if opts.format == constant.OCIArchiveTransport { -+ opts.finalImageOrdered = opts.oriImgList -+ return nil -+ } -+ -+ visitedImage := make(map[string]bool) -+ for _, imageName := range opts.oriImgList { -+ if _, exists := visitedImage[imageName]; exists { -+ continue -+ } -+ visitedImage[imageName] = true -+ -+ _, img, err := image.FindImageLocally(opts.localStore, imageName) -+ if err != nil { -+ return errors.Wrapf(err, "filter image name failed when finding image name %q", imageName) -+ } -+ if _, ok := opts.finalImageSet[img.ID]; !ok { -+ opts.finalImageOrdered = append(opts.finalImageOrdered, img.ID) -+ } -+ -+ ref, err := reference.Parse(imageName) -+ if err != nil { -+ return errors.Wrapf(err, "filter image name failed when parsing name %q", imageName) -+ } -+ tagged, withTag := ref.(reference.NamedTagged) -+ if !withTag { -+ continue -+ } -+ opts.finalImageSet[img.ID] = append(opts.finalImageSet[img.ID], tagged) -+ } -+ -+ return nil -+} -diff --git a/exporter/docker/archive/archive.go b/exporter/docker/archive/archive.go -index 04654cf..cc6b872 100644 ---- a/exporter/docker/archive/archive.go -+++ b/exporter/docker/archive/archive.go -@@ -21,9 +21,7 @@ import ( - "github.com/containers/image/v5/docker/archive" - "github.com/containers/image/v5/transports/alltransports" - "github.com/containers/image/v5/types" -- "github.com/docker/distribution/reference" - "github.com/pkg/errors" -- "github.com/sirupsen/logrus" - - constant "isula.org/isula-build" - "isula.org/isula-build/exporter" -@@ -91,32 +89,13 @@ func (d *dockerArchiveExporter) Init(opts exporter.ExportOptions, src, destSpec - DockerArchiveExporter.InitArchiveWriter(opts.ExportID, archWriter) - } - -- // There is a slightly difference between FindImageLocally and ParseImagesToReference to get a reference. -- // FindImageLocally or FindImage, both result a reference with a nil named field of *storageReference. 
-- // ParseImagesToReference returns a reference with non-nil named field of *storageReference that used to set destReference, if names is the format of name[:tag] with and without repository domain. -- -- // If using srcReferenceForDest to replace srcReference, When src is the format of name[:tag] without a registry domain name, -- // in which time, cp.Image() will be called and new image source will call imageMatchesRepo() to check If image matches repository or not. -- // ParseNormalizedNamed will finally called to add default docker.io/library/ prefix to name[:tag], return false result of the checking. -- // As a result, we will get error of no image matching reference found. - srcReference, _, err = image.FindImageLocally(localStore, src) - if err != nil { - return errors.Wrapf(err, "find src image: %q failed with transport %q", src, d.Name()) - } - -- imageReferenceForDest, _, err := image.ParseImagesToReference(localStore, []string{src}) -- if err != nil { -- return errors.Wrapf(err, "parse image: %q to reference failed with transport %q", src, d.Name()) -- } - archiveWriter := DockerArchiveExporter.GetArchiveWriter(opts.ExportID) -- nameAndTag, ok := imageReferenceForDest.DockerReference().(reference.NamedTagged) -- // src is the format of ImageID, ok is false -- if ok { -- destReference, err = archiveWriter.NewReference(nameAndTag) -- } else { -- logrus.Infof("Transform image reference failed, use nil instead") -- destReference, err = archiveWriter.NewReference(nil) -- } -+ destReference, err = archiveWriter.NewReference(nil) - if err != nil { - return errors.Wrapf(err, "parse dest spec: %q failed", destSpec) - } -diff --git a/image/image.go b/image/image.go -index e06d253..5ae7245 100644 ---- a/image/image.go -+++ b/image/image.go -@@ -504,8 +504,6 @@ func FindImageLocally(store *store.Store, image string) (types.ImageReference, * - } - - // ParseImagesToReference get the image reference in store --// When names is the format of ImageID (sha256), return ref with nil named field of *storageReference --// When names is the format of name[:tag] with and without repository domain, such as registry.example.com/name:tag, name:tag, return corresponding ref with non-nil named field of *storageReference with and without domain - func ParseImagesToReference(store *store.Store, names []string) (types.ImageReference, *storage.Image, error) { - var ( - ref types.ImageReference -@@ -529,22 +527,6 @@ func ParseImagesToReference(store *store.Store, names []string) (types.ImageRefe - continue - } - img = img2 -- -- // For support export archive file, we need provide reference.Named field when names is the format of name[:tag] not the image ID -- pRef, pErr := reference.Parse(name) -- if pErr != nil { -- return nil, nil, errors.Wrapf(pErr, "error parse name %q", name) -- } -- namedRef, isNamed := pRef.(reference.Named) -- if !isNamed { -- return nil, nil, errors.Errorf("reference %s has no name", pRef.String()) -- } -- -- var nErr error -- ref, nErr = is.Transport.NewStoreReference(store, namedRef, img2.ID) -- if nErr != nil { -- return nil, nil, errors.Wrap(nErr, "error get reference from store") -- } - } - break - } --- -1.8.3.1 - diff --git a/patch/0060-add-integration-test-for-saving-one-image-with-multi.patch b/patch/0060-add-integration-test-for-saving-one-image-with-multi.patch deleted file mode 100644 index f7fb0c0..0000000 --- a/patch/0060-add-integration-test-for-saving-one-image-with-multi.patch +++ /dev/null @@ -1,281 +0,0 @@ -From 87c8603713cdcbd0f2abad29c73d3909b3f4c417 Mon Sep 17 
00:00:00 2001 -From: xingweizheng -Date: Tue, 24 Aug 2021 17:14:47 +0800 -Subject: [PATCH 4/4] add integration test for saving one image with multiple - tags - ---- - Makefile | 1 + - tests/lib/common.sh | 8 +++ - ...gration_test_save_single_image_multiple_tags.sh | 58 ++++++++++++++++++++ - tests/src/integration_test_set_new_root.sh | 62 ++++++++++++++++++++++ - tests/src/test_integration_set_new_root.sh | 60 --------------------- - tests/test.sh | 9 ++-- - 6 files changed, 134 insertions(+), 64 deletions(-) - create mode 100644 tests/src/integration_test_save_single_image_multiple_tags.sh - create mode 100644 tests/src/integration_test_set_new_root.sh - delete mode 100644 tests/src/test_integration_set_new_root.sh - -diff --git a/Makefile b/Makefile -index a9d4c93..1d87625 100644 ---- a/Makefile -+++ b/Makefile -@@ -91,6 +91,7 @@ test-unit: - .PHONY: test-integration - test-integration: - @echo "Integration test starting..." -+ @./tests/test.sh base - @./tests/test.sh integration - @echo "Integration test done!" - -diff --git a/tests/lib/common.sh b/tests/lib/common.sh -index f393ee1..5e4c208 100755 ---- a/tests/lib/common.sh -+++ b/tests/lib/common.sh -@@ -219,3 +219,11 @@ function show_and_run_command() { - fi - echo "PASS" - } -+ -+function run_with_debug() { -+ if [ "${DEBUG:-0}" -eq 1 ]; then -+ $1 -+ else -+ $1 > /dev/null 2>&1 -+ fi -+} -\ No newline at end of file -diff --git a/tests/src/integration_test_save_single_image_multiple_tags.sh b/tests/src/integration_test_save_single_image_multiple_tags.sh -new file mode 100644 -index 0000000..a25786a ---- /dev/null -+++ b/tests/src/integration_test_save_single_image_multiple_tags.sh -@@ -0,0 +1,58 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. -+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. -+# Author: Weizheng Xing -+# Create: 2021-08-24 -+# Description: check if saving single image with multiple tags has been corrected -+ -+top_dir=$(git rev-parse --show-toplevel) -+# shellcheck disable=SC1091 -+source "$top_dir"/tests/lib/common.sh -+ -+image_name=add-chown-basic -+context_dir="$top_dir"/tests/data/add-chown-basic -+ -+function clean() -+{ -+ systemctl stop isula-build -+ rm -rf "$temp_tar" -+} -+ -+function pre_test() -+{ -+ temp_tar=$(mktemp -u --suffix=.tar) -+ systemctl restart isula-build -+} -+ -+function do_test() -+{ -+ if ! run_with_debug "isula-build ctr-img build -t $image_name:latest $context_dir"; then -+ echo "FAIL" -+ fi -+ -+ if ! run_with_debug "isula-build ctr-img tag $image_name:latest $image_name:latest-child"; then -+ echo "FAIL" -+ fi -+ -+ if ! run_with_debug "isula-build ctr-img save -f docker $image_name:latest $image_name:latest-child -o $temp_tar"; then -+ echo "FAIL" -+ fi -+ -+ if ! 
run_with_debug "isula-build ctr-img rm $image_name:latest $image_name:latest-child"; then -+ echo "FAIL" -+ fi -+ -+ echo "PASS" -+} -+ -+pre_test -+do_test -+clean -diff --git a/tests/src/integration_test_set_new_root.sh b/tests/src/integration_test_set_new_root.sh -new file mode 100644 -index 0000000..7238240 ---- /dev/null -+++ b/tests/src/integration_test_set_new_root.sh -@@ -0,0 +1,62 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. -+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. -+# Author: Weizheng Xing -+# Create: 2021-05-29 -+# Description: test set new run and data root in configuration.toml -+ -+top_dir=$(git rev-parse --show-toplevel) -+# shellcheck disable=SC1091 -+source "$top_dir"/tests/lib/common.sh -+ -+run_root="/var/run/new-isula-build" -+data_root="/var/lib/new-isula-build" -+config_file="/etc/isula-build/configuration.toml" -+base_image="hub.oepkgs.net/openeuler/openeuler:21.03" -+ -+function clean() -+{ -+ rm -f $config_file -+ mv "$config_file".bak $config_file -+ systemctl stop isula-build -+ rm -rf $run_root $data_root -+} -+ -+# change to new data and run root -+function pre_test() -+{ -+ cp $config_file "$config_file".bak -+ sed -i "/run_root/d;/data_root/d" $config_file -+ echo "run_root = \"${run_root}\"" >> $config_file -+ echo "data_root = \"${data_root}\"" >> $config_file -+ -+ systemctl restart isula-build -+} -+ -+# check if new resources are downloaded in new root -+function do_test() -+{ -+ tree_node_befor=$(tree -L 3 $data_root | wc -l) -+ run_with_debug "isula-build ctr-img pull $base_image" -+ tree_node_after=$(tree -L 3 $data_root | wc -l) -+ -+ if [ $((tree_node_after - tree_node_befor)) -eq 8 ] && run_with_debug "isula-build ctr-img rm $base_image"; then -+ echo "PASS" -+ else -+ echo "Sets of run and data root are not effective" -+ clean -+ exit 1 -+ fi -+} -+ -+pre_test -+do_test -+clean -diff --git a/tests/src/test_integration_set_new_root.sh b/tests/src/test_integration_set_new_root.sh -deleted file mode 100644 -index 85b724a..0000000 ---- a/tests/src/test_integration_set_new_root.sh -+++ /dev/null -@@ -1,60 +0,0 @@ --#!/bin/bash -- --# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. --# isula-build licensed under the Mulan PSL v2. --# You can use this software according to the terms and conditions of the Mulan PSL v2. --# You may obtain a copy of Mulan PSL v2 at: --# http://license.coscl.org.cn/MulanPSL2 --# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR --# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR --# PURPOSE. --# See the Mulan PSL v2 for more details. 
--# Author: Weizheng Xing --# Create: 2021-05-29 --# Description: test set new run and data root in configuration.toml -- --run_root="/var/run/new-isula-build" --data_root="/var/lib/new-isula-build" --config_file="/etc/isula-build/configuration.toml" --base_image="hub.oepkgs.net/openeuler/openeuler:21.03" -- --function clean() --{ -- isula-build ctr-img rm $base_image >/dev/null 2>&1 -- rm -f $config_file -- mv "$config_file".bak $config_file -- systemctl stop isula-build -- rm -rf $run_root $data_root --} -- --# change to new data and run root --function pre_test() --{ -- cp $config_file "$config_file".bak -- sed -i "/run_root/d;/data_root/d" $config_file -- echo "run_root = \"${run_root}\"" >> $config_file -- echo "data_root = \"${data_root}\"" >> $config_file -- -- systemctl restart isula-build --} -- --# check if new resources are downloaded in new root --function do_test() --{ -- tree_node_befor=$(tree -L 3 $data_root | wc -l) -- isula-build ctr-img pull $base_image >/dev/null 2>&1 -- tree_node_after=$(tree -L 3 $data_root | wc -l) -- -- if [ $(($tree_node_after - $tree_node_befor)) -eq 8 ]; then -- echo "PASS" -- else -- echo "Sets of run and data root are not effective" -- clean -- exit 1 -- fi --} -- --# clean --pre_test --do_test --clean -diff --git a/tests/test.sh b/tests/test.sh -index e04cc96..01f0f31 100755 ---- a/tests/test.sh -+++ b/tests/test.sh -@@ -33,18 +33,17 @@ function fuzz() { - exit $failed - } - --# base test -+# integration test - function integration() { - source "$top_dir"/tests/lib/common.sh -- pre_check - systemctl restart isula-build - - while IFS= read -r testfile; do -- printf "%-45s" "test $(basename "$testfile"): " -+ printf "%-65s" "test $(basename "$testfile"): " - if ! bash "$testfile"; then - exit 1 - fi -- done < <(find "$top_dir"/tests/src -maxdepth 1 -name "test_integration*" -type f -print) -+ done < <(find "$top_dir"/tests/src -maxdepth 1 -name "integration_test*" -type f -print) - } - - # main function to chose which kind of test -@@ -67,4 +66,6 @@ function main() { - } - - export "ISULABUILD_CLI_EXPERIMENTAL"="enabled" -+export DEBUG=0 -+ - main "$@" --- -1.8.3.1 - diff --git a/patch/0061-fix-save-single-image-error-when-id-first-with-its-n.patch b/patch/0061-fix-save-single-image-error-when-id-first-with-its-n.patch deleted file mode 100644 index 381d87e..0000000 --- a/patch/0061-fix-save-single-image-error-when-id-first-with-its-n.patch +++ /dev/null @@ -1,223 +0,0 @@ -From b7a8bfbf90d920662e0bf8119c2640ec7a6379ca Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Tue, 31 Aug 2021 22:42:18 +0800 -Subject: [PATCH] fix save single image error when id first with its name at - last - ---- - Makefile | 4 +- - daemon/save.go | 27 +++++++++---- - tests/lib/common.sh | 22 ++++++++-- - ...on_test_save_single_image_multiple_tags.sh | 40 ++++++++++++++----- - tests/src/integration_test_set_new_root.sh | 2 + - 5 files changed, 72 insertions(+), 23 deletions(-) - -diff --git a/Makefile b/Makefile -index 1d87625..d5b1c53 100644 ---- a/Makefile -+++ b/Makefile -@@ -74,7 +74,7 @@ debug: - build-image: - isula-build ctr-img build -f Dockerfile.proto ${IMAGE_BUILDARGS} -o isulad:${IMAGE_NAME}:latest . - --tests: test-base test-unit test-integration -+tests: test-unit test-integration - - .PHONY: test-base - test-base: -@@ -89,7 +89,7 @@ test-unit: - @echo "Unit test done!" - - .PHONY: test-integration --test-integration: -+test-integration: debug install - @echo "Integration test starting..." 
- @./tests/test.sh base - @./tests/test.sh integration -diff --git a/daemon/save.go b/daemon/save.go -index 7ad1285..8ba9dd1 100644 ---- a/daemon/save.go -+++ b/daemon/save.go -@@ -33,6 +33,11 @@ import ( - "isula.org/isula-build/util" - ) - -+type savedImage struct { -+ exist bool -+ tags []reference.NamedTagged -+} -+ - type saveOptions struct { - sysCtx *types.SystemContext - localStore *store.Store -@@ -40,7 +45,7 @@ type saveOptions struct { - format string - oriImgList []string - finalImageOrdered []string -- finalImageSet map[string][]reference.NamedTagged -+ finalImageSet map[string]*savedImage - outputPath string - logger *logger.Logger - logEntry *logrus.Entry -@@ -54,7 +59,7 @@ func (b *Backend) getSaveOptions(req *pb.SaveRequest) saveOptions { - format: req.GetFormat(), - oriImgList: req.GetImages(), - finalImageOrdered: make([]string, 0), -- finalImageSet: make(map[string][]reference.NamedTagged), -+ finalImageSet: make(map[string]*savedImage), - outputPath: req.GetPath(), - logger: logger.NewCliLogger(constant.CliLogBufferLen), - logEntry: logrus.WithFields(logrus.Fields{"SaveID": req.GetSaveID(), "Format": req.GetFormat()}), -@@ -114,8 +119,10 @@ func exportHandler(ctx context.Context, opts *saveOptions) func() error { - - for _, imageID := range opts.finalImageOrdered { - copyCtx := *opts.sysCtx -- // It's ok for DockerArchiveAdditionalTags == nil, as a result, no additional tags will be appended to the final archive file. -- copyCtx.DockerArchiveAdditionalTags = opts.finalImageSet[imageID] -+ if opts.format == constant.DockerArchiveTransport { -+ // It's ok for DockerArchiveAdditionalTags == nil, as a result, no additional tags will be appended to the final archive file. -+ copyCtx.DockerArchiveAdditionalTags = opts.finalImageSet[imageID].tags -+ } - - exOpts := exporter.ExportOptions{ - Ctx: ctx, -@@ -190,7 +197,11 @@ func filterImageName(opts *saveOptions) error { - if err != nil { - return errors.Wrapf(err, "filter image name failed when finding image name %q", imageName) - } -- if _, ok := opts.finalImageSet[img.ID]; !ok { -+ -+ finalImage, ok := opts.finalImageSet[img.ID] -+ if !ok { -+ finalImage = &savedImage{exist: true} -+ finalImage.tags = []reference.NamedTagged{} - opts.finalImageOrdered = append(opts.finalImageOrdered, img.ID) - } - -@@ -199,10 +210,10 @@ func filterImageName(opts *saveOptions) error { - return errors.Wrapf(err, "filter image name failed when parsing name %q", imageName) - } - tagged, withTag := ref.(reference.NamedTagged) -- if !withTag { -- continue -+ if withTag { -+ finalImage.tags = append(finalImage.tags, tagged) - } -- opts.finalImageSet[img.ID] = append(opts.finalImageSet[img.ID], tagged) -+ opts.finalImageSet[img.ID] = finalImage - } - - return nil -diff --git a/tests/lib/common.sh b/tests/lib/common.sh -index 6a207da..4dd34aa 100755 ---- a/tests/lib/common.sh -+++ b/tests/lib/common.sh -@@ -222,9 +222,23 @@ function show_and_run_command() { - } - - function run_with_debug() { -- if [ "${DEBUG:-0}" -eq 1 ]; then -- $1 -- else -- $1 > /dev/null 2>&1 -+ function fail_and_exit(){ -+ echo "FAIL" -+ echo "Run \"journalctl -xefu isula-build\" to get the log." -+ systemctl stop isula-build -+ exit 1 -+ } -+ -+ if [ "${DEBUG:-0}" -eq 0 ]; then -+ if ! $1 > /dev/null 2>&1; then -+ fail_and_exit -+ fi -+ return -+ fi -+ echo "$1" -+ if ! 
$1; then -+ fail_and_exit - fi -+ echo "------------command-delimiter-----------" -+ echo " " - } -\ No newline at end of file -diff --git a/tests/src/integration_test_save_single_image_multiple_tags.sh b/tests/src/integration_test_save_single_image_multiple_tags.sh -index a25786a..1eaeb8d 100644 ---- a/tests/src/integration_test_save_single_image_multiple_tags.sh -+++ b/tests/src/integration_test_save_single_image_multiple_tags.sh -@@ -22,6 +22,7 @@ context_dir="$top_dir"/tests/data/add-chown-basic - - function clean() - { -+ isula-build ctr-img rm -p > /dev/null 2>&1 - systemctl stop isula-build - rm -rf "$temp_tar" - } -@@ -34,21 +35,42 @@ function pre_test() - - function do_test() - { -- if ! run_with_debug "isula-build ctr-img build -t $image_name:latest $context_dir"; then -+ # get image id -+ if ! image_id1=$(isula-build ctr-img build -t $image_name:latest "$context_dir"|grep "Build success with image id: "|cut -d ":" -f 2); then - echo "FAIL" - fi -- -- if ! run_with_debug "isula-build ctr-img tag $image_name:latest $image_name:latest-child"; then -+ if ! image_id2=$(isula-build ctr-img build -t $image_name:latest2 "$context_dir"|grep "Build success with image id: "|cut -d ":" -f 2); then - echo "FAIL" - fi - -- if ! run_with_debug "isula-build ctr-img save -f docker $image_name:latest $image_name:latest-child -o $temp_tar"; then -- echo "FAIL" -- fi -+ ! run_with_debug "isula-build ctr-img tag $image_name:latest $image_name:latest-child" - -- if ! run_with_debug "isula-build ctr-img rm $image_name:latest $image_name:latest-child"; then -- echo "FAIL" -- fi -+ # save with id + name -+ ! run_with_debug "isula-build ctr-img save -f docker $image_id1 $image_name:latest-child -o $temp_tar" -+ rm -rf "$temp_tar" -+ -+ # save with name + id -+ ! run_with_debug "isula-build ctr-img save -f docker $image_name:latest-child $image_id1 -o $temp_tar" -+ rm -rf "$temp_tar" -+ -+ # save with name + name -+ ! run_with_debug "isula-build ctr-img save -f docker $image_name:latest $image_name:latest-child -o $temp_tar" -+ rm -rf "$temp_tar" -+ -+ # save with different images id1 + id2 -+ ! run_with_debug "isula-build ctr-img save -f docker $image_id1 $image_id2 -o $temp_tar" -+ rm -rf "$temp_tar" -+ -+ # save with different images "without latest tag" + id2 -+ ! run_with_debug "isula-build ctr-img save -f docker $image_name $image_id2 -o $temp_tar" -+ rm -rf "$temp_tar" -+ -+ # save with id1 + id2 + name -+ ! run_with_debug "isula-build ctr-img save -f docker $image_id1 $image_id2 $image_name:latest2 -o $temp_tar" -+ rm -rf "$temp_tar" -+ -+ ! run_with_debug "isula-build ctr-img rm $image_name:latest $image_name:latest-child" -+ ! 
run_with_debug "isula-build ctr-img rm $image_name:latest2" - - echo "PASS" - } -diff --git a/tests/src/integration_test_set_new_root.sh b/tests/src/integration_test_set_new_root.sh -index 7238240..bb11a08 100644 ---- a/tests/src/integration_test_set_new_root.sh -+++ b/tests/src/integration_test_set_new_root.sh -@@ -26,6 +26,8 @@ function clean() - { - rm -f $config_file - mv "$config_file".bak $config_file -+ -+ isula-build ctr-img rm -p > /dev/null 2>&1 - systemctl stop isula-build - rm -rf $run_root $data_root - } --- -2.27.0 - diff --git a/patch/0062-clean-code-staticcheck-fix-of-S1020-S1023-SA9003-S10.patch b/patch/0062-clean-code-staticcheck-fix-of-S1020-S1023-SA9003-S10.patch deleted file mode 100644 index 15c4aca..0000000 --- a/patch/0062-clean-code-staticcheck-fix-of-S1020-S1023-SA9003-S10.patch +++ /dev/null @@ -1,401 +0,0 @@ -From 6c355a7c9393982e648d79701a6c33ea0911a33a Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Sat, 28 Aug 2021 21:55:14 +0800 -Subject: [PATCH 13/20] clean code: staticcheck fix of S1020, S1023, SA9003, - S1002 and some U1000 - ---- - builder/dockerfile/add_copy_test.go | 1 - - builder/dockerfile/builder.go | 2 +- - builder/dockerfile/builder_test.go | 7 ++----- - builder/dockerfile/container/help.go | 2 +- - builder/dockerfile/parser/command.go | 4 ++-- - builder/dockerfile/parser/parser.go | 14 +++++++------- - builder/dockerfile/parser/util.go | 1 - - builder/dockerfile/stage_builder.go | 2 +- - cmd/cli/logout_test.go | 3 +-- - daemon/info.go | 4 ++-- - daemon/remove.go | 6 +++--- - image/image.go | 12 ++++++------ - pkg/logger/logger.go | 2 +- - pkg/manifest/list.go | 2 +- - pkg/opts/opts.go | 2 +- - pkg/stack/stack.go | 2 +- - util/cipher.go | 6 +++--- - util/common.go | 2 +- - 18 files changed, 34 insertions(+), 40 deletions(-) - -diff --git a/builder/dockerfile/add_copy_test.go b/builder/dockerfile/add_copy_test.go -index 05fac1f3..8873872a 100644 ---- a/builder/dockerfile/add_copy_test.go -+++ b/builder/dockerfile/add_copy_test.go -@@ -158,7 +158,6 @@ func TestResolveCopySource(t *testing.T) { - isAdd bool - rawSources []string - dest string -- contextDir string - } - tests := []struct { - name string -diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go -index cbd1e589..1e1e24dc 100644 ---- a/builder/dockerfile/builder.go -+++ b/builder/dockerfile/builder.go -@@ -559,7 +559,7 @@ func (b *Builder) export(imageID string) error { - } - - func (b *Builder) applyTag(imageID string) error { -- tags := make([]string, 0, 0) -+ tags := make([]string, 0) - if b.buildOpts.Tag != "" { - tags = append(tags, b.buildOpts.Tag) - } -diff --git a/builder/dockerfile/builder_test.go b/builder/dockerfile/builder_test.go -index 3b7513be..60668e1e 100644 ---- a/builder/dockerfile/builder_test.go -+++ b/builder/dockerfile/builder_test.go -@@ -21,7 +21,6 @@ import ( - "os" - "path/filepath" - "reflect" -- "regexp" - "strings" - "testing" - -@@ -725,8 +724,7 @@ func TestGetFlagsAndArgs(t *testing.T) { - // FROM alpine@digest@digest <- fail - func TestResolveImageName(t *testing.T) { - type args struct { -- s string -- reg *regexp.Regexp -+ s string - } - tests := []struct { - name string -@@ -882,8 +880,7 @@ func TestResolveImageName(t *testing.T) { - // FROM $imageName@${digest}$${digest2} <- fail - func TestResolveImageNameWithArgs(t *testing.T) { - type args struct { -- s string -- reg *regexp.Regexp -+ s string - } - tests := []struct { - name string -diff --git a/builder/dockerfile/container/help.go b/builder/dockerfile/container/help.go 
-index 475b479d..c2d5bd87 100644 ---- a/builder/dockerfile/container/help.go -+++ b/builder/dockerfile/container/help.go -@@ -69,7 +69,7 @@ func (ref *Reference) createConfigsAndManifests() (docker.Image, docker.Manifest - } - - func (ref *Reference) getContainerLayers() ([]string, error) { -- layers := make([]string, 0, 0) -+ layers := make([]string, 0) - layerID := ref.layerID - layer, err := ref.store.Layer(layerID) - if err != nil { -diff --git a/builder/dockerfile/parser/command.go b/builder/dockerfile/parser/command.go -index 1d62303d..635159ee 100644 ---- a/builder/dockerfile/parser/command.go -+++ b/builder/dockerfile/parser/command.go -@@ -322,7 +322,7 @@ func extractFlags(line *parser.Line, cmd string) (string, error) { - flagRegs := cmdFlagRegs[cmd] - parts := strings.Fields(line.Raw) - -- existFlags := make(map[string]bool, 0) -+ existFlags := make(map[string]bool) - var i int - for ; i <= len(parts)-1; i++ { - if !strings.HasPrefix(parts[i], "--") { -@@ -545,7 +545,7 @@ func parseKeyValue(line *parser.Line) error { - } - - func parseKeyEqualValuePairs(str string) []string { -- kvPairs := make([]string, 0, 0) -+ kvPairs := make([]string, 0) - - for i := 0; i <= len(str)-1; i++ { - word := []byte{} -diff --git a/builder/dockerfile/parser/parser.go b/builder/dockerfile/parser/parser.go -index a21a3f59..650c5e5c 100644 ---- a/builder/dockerfile/parser/parser.go -+++ b/builder/dockerfile/parser/parser.go -@@ -109,7 +109,7 @@ func newRowLine(num int, content string) *rowLine { - - // preprocess the Dockerfile and get the effective physical line - func preProcess(r io.Reader) []*rowLine { -- rowLines := make([]*rowLine, 0, 0) -+ rowLines := make([]*rowLine, 0) - scanner := bufio.NewScanner(r) - lineNum := 1 - for scanner.Scan() { -@@ -134,7 +134,7 @@ func format(rows []*rowLine, d *directive) ([]*parser.Line, error) { - text := rows[i].content - line := &parser.Line{ - Begin: rows[i].lineNum, -- Flags: make(map[string]string, 0), -+ Flags: make(map[string]string), - } - - var logicLine string -@@ -193,7 +193,7 @@ func constructPages(lines []*parser.Line, onbuild bool) ([]*parser.Page, error) - - var ( - pageMap = make(map[string]*parser.Page) -- pages = make([]*parser.Page, 0, 0) -+ pages = make([]*parser.Page, 0) - currentPage *parser.Page - pageNum int - ) -@@ -204,7 +204,7 @@ func constructPages(lines []*parser.Line, onbuild bool) ([]*parser.Page, error) - } - if onbuild && currentPage == nil { - currentPage = &parser.Page{ -- Lines: make([]*parser.Line, 0, 0), -+ Lines: make([]*parser.Line, 0), - Begin: line.Begin, - End: line.End, - } -@@ -227,7 +227,7 @@ func constructPages(lines []*parser.Line, onbuild bool) ([]*parser.Page, error) - Name: name, - Begin: line.Begin, - End: line.End, -- Lines: make([]*parser.Line, 0, 0), -+ Lines: make([]*parser.Line, 0), - } - // page name comes from the last cell from "FROM {image} AS {name} - // or named it with the index of stage in this dockerfile -@@ -266,7 +266,7 @@ func constructPages(lines []*parser.Line, onbuild bool) ([]*parser.Page, error) - // truncHeadingArgs Handle those ARGs before first FROM in the file - // returns the truncated lines and converted heading args - func truncHeadingArgs(lines *[]*parser.Line, onbuild bool) ([]string, error) { -- args := make([]string, 0, 0) -+ args := make([]string, 0) - if onbuild { - return args, nil - } -@@ -295,7 +295,7 @@ const ignoreFile = ".dockerignore" - // ParseIgnore parses the .dockerignore file in the provide dir, which - // must be the context directory - func (df *dockerfile) 
ParseIgnore(dir string) ([]string, error) { -- var ignores = make([]string, 0, 0) -+ var ignores = make([]string, 0) - - fullPath := path.Join(dir, ignoreFile) - if _, err := os.Stat(fullPath); err != nil { -diff --git a/builder/dockerfile/parser/util.go b/builder/dockerfile/parser/util.go -index bac13fbd..b8867f7f 100644 ---- a/builder/dockerfile/parser/util.go -+++ b/builder/dockerfile/parser/util.go -@@ -136,7 +136,6 @@ func (r *resolver) noDollar() { - // not "\$", this must be hyphen between args, such as '/' in "hub/image" or '_' in 'module_arch' - r.resolved += string(r.origin[r.idx]) - r.idx++ -- return - } - - func (r *resolver) noBrace() error { -diff --git a/builder/dockerfile/stage_builder.go b/builder/dockerfile/stage_builder.go -index 23f488cb..f01bad54 100644 ---- a/builder/dockerfile/stage_builder.go -+++ b/builder/dockerfile/stage_builder.go -@@ -330,7 +330,7 @@ func (s *stageBuilder) updateStageBuilder() error { - onbuildData = append(onbuildData, []byte(fmt.Sprintf("%s\n", item))...) - } - // OnBuild is handled, clean it here so that we can add new ONBUILDs on cmd builder if needed -- s.docker.Config.OnBuild = make([]string, 0, 0) -+ s.docker.Config.OnBuild = make([]string, 0) - - p, err := parser.NewParser(parser.DefaultParser) - if err != nil { -diff --git a/cmd/cli/logout_test.go b/cmd/cli/logout_test.go -index f5f09057..1328c2a1 100644 ---- a/cmd/cli/logout_test.go -+++ b/cmd/cli/logout_test.go -@@ -157,9 +157,8 @@ func TestNewLogoutCmd(t *testing.T) { - cmd.SetArgs(strings.Split(tt.args, " ")) - err := cmd.Execute() - if err != nil { -- -+ assert.ErrorContains(t, err, tt.errString) - } -- assert.ErrorContains(t, err, tt.errString) - }) - } - } -diff --git a/daemon/info.go b/daemon/info.go -index 8462be1b..cfc658c1 100644 ---- a/daemon/info.go -+++ b/daemon/info.go -@@ -108,8 +108,8 @@ func (b *Backend) Info(ctx context.Context, req *pb.InfoRequest) (*pb.InfoRespon - } - - func getRegistryInfo() ([]string, []string, []string, error) { -- registriesInsecure := make([]string, 0, 0) -- registriesBlock := make([]string, 0, 0) -+ registriesInsecure := make([]string, 0) -+ registriesBlock := make([]string, 0) - systemContext := image.GetSystemContext() - - registriesSearch, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext) -diff --git a/daemon/remove.go b/daemon/remove.go -index 89e68cfc..4d90bf53 100644 ---- a/daemon/remove.go -+++ b/daemon/remove.go -@@ -73,7 +73,7 @@ func (b *Backend) Remove(req *pb.RemoveRequest, stream pb.Control_RemoveServer) - continue - } - -- if removed == true { -+ if removed { - imageString := fmt.Sprintf("Untagged image: %v", imageID) - logrus.Debug(imageString) - if err = stream.Send(&pb.RemoveResponse{LayerMessage: imageString}); err != nil { -@@ -118,7 +118,7 @@ func (b *Backend) Remove(req *pb.RemoveRequest, stream pb.Control_RemoveServer) - } - - func untagImage(imageID string, store storage.Store, image *storage.Image) (bool, error) { -- newNames := make([]string, 0, 0) -+ newNames := make([]string, 0) - removed := false - for _, imgName := range image.Names { - if imgName == imageID { -@@ -128,7 +128,7 @@ func untagImage(imageID string, store storage.Store, image *storage.Image) (bool - newNames = append(newNames, imgName) - } - -- if removed == true { -+ if removed { - if err := store.SetNames(image.ID, newNames); err != nil { - return false, errors.Wrapf(err, "remove name %v from image %v error", imageID, image.ID) - } -diff --git a/image/image.go b/image/image.go -index e06d2530..1f3944d1 100644 ---- a/image/image.go 
-+++ b/image/image.go -@@ -287,19 +287,19 @@ func createScratchV2Image() *docker.Image { - ContainerConfig: docker.Config{}, - Config: &docker.Config{ - ExposedPorts: make(docker.PortSet), -- Env: make([]string, 0, 0), -- Cmd: make([]string, 0, 0), -+ Env: make([]string, 0), -+ Cmd: make([]string, 0), - Healthcheck: nil, - Volumes: make(map[string]struct{}), -- Entrypoint: make([]string, 0, 0), -- OnBuild: make([]string, 0, 0), -+ Entrypoint: make([]string, 0), -+ OnBuild: make([]string, 0), - Labels: make(map[string]string), - StopTimeout: nil, -- Shell: make([]string, 0, 0), -+ Shell: make([]string, 0), - }, - }, - RootFS: &docker.RootFS{}, -- History: make([]docker.History, 0, 0), -+ History: make([]docker.History, 0), - } - } - -diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go -index 2e4dbdd1..1d9a61df 100644 ---- a/pkg/logger/logger.go -+++ b/pkg/logger/logger.go -@@ -45,7 +45,7 @@ type Logger struct { - // NewRunTimer return an instance of RunTimer - func NewRunTimer() *RunTimer { - return &RunTimer{ -- commands: make([]string, 0, 0), -+ commands: make([]string, 0), - cmdMap: make(map[string]time.Duration), - } - } -diff --git a/pkg/manifest/list.go b/pkg/manifest/list.go -index bc6037fc..10907e1d 100644 ---- a/pkg/manifest/list.go -+++ b/pkg/manifest/list.go -@@ -53,7 +53,7 @@ func NewManifestList() *List { - SchemaVersion: container.SchemaVersion, - MediaType: manifest.DockerV2ListMediaType, - }, -- instances: make(map[digest.Digest]string, 0), -+ instances: make(map[digest.Digest]string), - } - } - -diff --git a/pkg/opts/opts.go b/pkg/opts/opts.go -index 4664d50d..8fb3b308 100644 ---- a/pkg/opts/opts.go -+++ b/pkg/opts/opts.go -@@ -61,7 +61,7 @@ func (opts *ListOpts) Type() string { - - // NewListOpts creates a new ListOpts - func NewListOpts(validator validatorFunc) ListOpts { -- values := make(map[string]string, 0) -+ values := make(map[string]string) - return ListOpts{ - Values: values, - validator: validator, -diff --git a/pkg/stack/stack.go b/pkg/stack/stack.go -index b2100ce6..8343b6b0 100644 ---- a/pkg/stack/stack.go -+++ b/pkg/stack/stack.go -@@ -64,7 +64,7 @@ func dumpStack(path string) { - ) - - for { -- stackBuf = make([]byte, bufSize, bufSize) -+ stackBuf = make([]byte, bufSize) - stackSize = runtime.Stack(stackBuf, true) - // if these two sizes equal, which means the allocated buf is not large enough to carry all - // stacks back, so enlarge the buf and try again -diff --git a/util/cipher.go b/util/cipher.go -index 8e62e76b..b2aea2a9 100644 ---- a/util/cipher.go -+++ b/util/cipher.go -@@ -61,7 +61,7 @@ func GenerateCryptoKey(s int) ([]byte, error) { - } else { - size = aesKeyLenLowerBound - } -- key := make([]byte, size, size) -+ key := make([]byte, size) - if _, err := io.ReadFull(rand.Reader, key); err != nil { - return nil, errGenCryptoKey - } -@@ -105,7 +105,7 @@ func EncryptAES(data string, aeskey string) (string, error) { - return "", errors.Errorf("generate rand data for iv failed: %v", err) - } - mode := cipher.NewCFBEncrypter(block, iv) -- encryptData := make([]byte, len(plainText), len(plainText)) -+ encryptData := make([]byte, len(plainText)) - mode.XORKeyStream(encryptData, plainText) - encryptData = append(iv, encryptData...) 
- -@@ -134,7 +134,7 @@ func DecryptAES(data string, aeskey string) (string, error) { - } - - decrypter := cipher.NewCFBDecrypter(block, cipherText[:block.BlockSize()]) -- decryptData := make([]byte, len(cipherText)-block.BlockSize(), len(cipherText)-block.BlockSize()) -+ decryptData := make([]byte, len(cipherText)-block.BlockSize()) - decrypter.XORKeyStream(decryptData, cipherText[block.BlockSize():]) - - return string(decryptData), nil -diff --git a/util/common.go b/util/common.go -index d0cd9d06..00b1b941 100644 ---- a/util/common.go -+++ b/util/common.go -@@ -44,7 +44,7 @@ func CopyMapStringString(m map[string]string) map[string]string { - - // CopyStrings copies all strings in a slice to a new slice - func CopyStrings(str []string) []string { -- result := make([]string, len(str), len(str)) -+ result := make([]string, len(str)) - copy(result, str) - return result - } --- -2.31.1 - diff --git a/patch/0063-relocation-exporter-package-and-remove-unused-const.patch b/patch/0063-relocation-exporter-package-and-remove-unused-const.patch deleted file mode 100644 index 461fd2b..0000000 --- a/patch/0063-relocation-exporter-package-and-remove-unused-const.patch +++ /dev/null @@ -1,153 +0,0 @@ -From 0715aadaf5a5850d5ff1e6f74f7abafc4418f4d5 Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Sun, 29 Aug 2021 05:05:10 +0800 -Subject: [PATCH 14/20] relocation exporter package and remove unused const - ---- - exporter/common.go | 91 +++++++++++++++++++++------------------------- - 1 file changed, 42 insertions(+), 49 deletions(-) - -diff --git a/exporter/common.go b/exporter/common.go -index cd976d21..b58f59cb 100644 ---- a/exporter/common.go -+++ b/exporter/common.go -@@ -27,7 +27,6 @@ import ( - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/signature" - "github.com/containers/image/v5/types" -- "github.com/containers/storage/pkg/archive" - securejoin "github.com/cyphar/filepath-securejoin" - "github.com/docker/distribution/reference" - "github.com/opencontainers/go-digest" -@@ -40,11 +39,6 @@ import ( - "isula.org/isula-build/util" - ) - --const ( -- // Uncompressed represents uncompressed -- Uncompressed = archive.Uncompressed --) -- - // ExportOptions is a struct for exporter - type ExportOptions struct { - SystemContext *types.SystemContext -@@ -56,6 +50,28 @@ type ExportOptions struct { - ImageListSelection cp.ImageListSelection - } - -+// parseExporter parses an exporter instance and inits it with the src and dest reference. -+func parseExporter(opts ExportOptions, src, destSpec string, localStore *store.Store) (Exporter, error) { -+ const partsNum = 2 -+ // 1. parse exporter -+ parts := strings.SplitN(destSpec, ":", partsNum) -+ if len(parts) != partsNum { -+ return nil, errors.Errorf(`invalid dest spec %q, expected colon-separated exporter:reference`, destSpec) -+ } -+ -+ ept := GetAnExporter(parts[0]) -+ if ept == nil { -+ return nil, errors.Errorf(`invalid image name: %q, unknown exporter "%s"`, src, parts[0]) -+ } -+ -+ // 2. 
Init exporter reference -+ err := ept.Init(opts, src, destSpec, localStore) -+ if err != nil { -+ return nil, errors.Wrap(err, `fail to Init exporter"`) -+ } -+ return ept, nil -+} -+ - // Export exports an image to an output destination - func Export(imageID, outputDest string, opts ExportOptions, localStore *store.Store) error { - eLog := logrus.WithField(util.LogKeySessionID, opts.Ctx.Value(util.LogFieldKey(util.LogKeySessionID))) -@@ -89,26 +105,6 @@ func Export(imageID, outputDest string, opts ExportOptions, localStore *store.St - return nil - } - --func exportToIsulad(ctx context.Context, tarPath string) error { -- // no tarPath need to export -- if len(tarPath) == 0 { -- return nil -- } -- defer func() { -- if rErr := os.Remove(tarPath); rErr != nil { -- logrus.Errorf("Remove file %s failed: %v", tarPath, rErr) -- } -- }() -- // dest here will not be influenced by external input, no security risk -- cmd := exec.CommandContext(ctx, "isula", "load", "-i", tarPath) // nolint:gosec -- if bytes, lErr := cmd.CombinedOutput(); lErr != nil { -- logrus.Errorf("Load image to isulad failed, stderr: %v, err: %v", string(bytes), lErr) -- return errors.Errorf("load image to isulad failed, stderr: %v, err: %v", string(bytes), lErr) -- } -- -- return nil --} -- - func export(e Exporter, exOpts ExportOptions) (reference.Canonical, digest.Digest, error) { - var ( - ref reference.Canonical -@@ -158,28 +154,6 @@ func export(e Exporter, exOpts ExportOptions) (reference.Canonical, digest.Diges - return ref, manifestDigest, nil - } - --// parseExporter parses an exporter instance and inits it with the src and dest reference. --func parseExporter(opts ExportOptions, src, destSpec string, localStore *store.Store) (Exporter, error) { -- const partsNum = 2 -- // 1. parse exporter -- parts := strings.SplitN(destSpec, ":", partsNum) -- if len(parts) != partsNum { -- return nil, errors.Errorf(`invalid dest spec %q, expected colon-separated exporter:reference`, destSpec) -- } -- -- ept := GetAnExporter(parts[0]) -- if ept == nil { -- return nil, errors.Errorf(`invalid image name: %q, unknown exporter "%s"`, src, parts[0]) -- } -- -- // 2. 
Init exporter reference -- err := ept.Init(opts, src, destSpec, localStore) -- if err != nil { -- return nil, errors.Wrap(err, `fail to Init exporter"`) -- } -- return ept, nil --} -- - // NewCopyOptions will return copy options - func NewCopyOptions(opts ExportOptions) *cp.Options { - cpOpts := &cp.Options{} -@@ -206,6 +180,26 @@ func NewPolicyContext(sc *types.SystemContext) (*signature.PolicyContext, error) - return policyContext, nil - } - -+func exportToIsulad(ctx context.Context, tarPath string) error { -+ // no tarPath need to export -+ if len(tarPath) == 0 { -+ return nil -+ } -+ defer func() { -+ if rErr := os.Remove(tarPath); rErr != nil { -+ logrus.Errorf("Remove file %s failed: %v", tarPath, rErr) -+ } -+ }() -+ // dest here will not be influenced by external input, no security risk -+ cmd := exec.CommandContext(ctx, "isula", "load", "-i", tarPath) // nolint:gosec -+ if bytes, lErr := cmd.CombinedOutput(); lErr != nil { -+ logrus.Errorf("Load image to isulad failed, stderr: %v, err: %v", string(bytes), lErr) -+ return errors.Errorf("load image to isulad failed, stderr: %v, err: %v", string(bytes), lErr) -+ } -+ -+ return nil -+} -+ - // CheckArchiveFormat used to check if save or load image format is either docker-archive or oci-archive - func CheckArchiveFormat(format string) error { - switch format { -@@ -237,4 +231,3 @@ func GetManifestType(format string) (string, error) { - } - return manifestType, nil - } -- --- -2.31.1 - diff --git a/patch/0064-clean-code-tidy-FindImage-function.patch b/patch/0064-clean-code-tidy-FindImage-function.patch deleted file mode 100644 index d63fb15..0000000 --- a/patch/0064-clean-code-tidy-FindImage-function.patch +++ /dev/null @@ -1,236 +0,0 @@ -From 9b1191dafa500bc55b37912898e3ebb8e9d6ec24 Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Sat, 28 Aug 2021 23:27:49 +0800 -Subject: [PATCH 15/20] clean code: tidy FindImage function - ---- - daemon/images.go | 2 +- - daemon/images_test.go | 2 +- - daemon/push_test.go | 2 +- - daemon/remove.go | 2 +- - daemon/save.go | 2 +- - daemon/save_test.go | 2 +- - exporter/docker/archive/archive.go | 4 +- - image/image.go | 67 ++++++------------------------ - image/image_test.go | 2 +- - 9 files changed, 21 insertions(+), 64 deletions(-) - -diff --git a/daemon/images.go b/daemon/images.go -index e61817cc..baeb375c 100644 ---- a/daemon/images.go -+++ b/daemon/images.go -@@ -70,7 +70,7 @@ func listOneImage(opts listOptions) (*pb.ListResponse, error) { - _, image, err := image.FindImage(opts.localStore, opts.imageName) - if err != nil { - opts.logEntry.Error(err) -- return nil, errors.Wrapf(err, "find local image %v error", opts.imageName) -+ return nil, errors.Wrapf(err, "find local image %q failed", opts.imageName) - } - - result := make([]*pb.ListResponse_ImageInfo, 0, len(image.Names)) -diff --git a/daemon/images_test.go b/daemon/images_test.go -index a970ce0b..efbf6f14 100644 ---- a/daemon/images_test.go -+++ b/daemon/images_test.go -@@ -96,7 +96,7 @@ func TestList(t *testing.T) { - ImageName: "coffee:costa", - }, - wantErr: true, -- errString: "failed to parse image", -+ errString: "not found in local store", - }, - { - name: "abnormal case no repository", -diff --git a/daemon/push_test.go b/daemon/push_test.go -index f4a9e2b1..7358d0cd 100644 ---- a/daemon/push_test.go -+++ b/daemon/push_test.go -@@ -69,7 +69,7 @@ func TestPush(t *testing.T) { - Format: "docker", - }, - wantErr: true, -- errString: "failed to parse image", -+ errString: "not found in local store", - }, - { - name: "manifestNotExist", 
-diff --git a/daemon/remove.go b/daemon/remove.go -index 89e68cfc..d4e0bbd9 100644 ---- a/daemon/remove.go -+++ b/daemon/remove.go -@@ -49,7 +49,7 @@ func (b *Backend) Remove(req *pb.RemoveRequest, stream pb.Control_RemoveServer) - } - - for _, imageID := range rmImageIDs { -- _, img, err := image.FindImageLocally(s, imageID) -+ _, img, err := image.FindImage(s, imageID) - if err != nil { - rmFailed = true - errMsg := fmt.Sprintf("Find local image %q failed: %v", imageID, err) -diff --git a/daemon/save.go b/daemon/save.go -index 7ad12851..1a2d3fed 100644 ---- a/daemon/save.go -+++ b/daemon/save.go -@@ -186,7 +186,7 @@ func filterImageName(opts *saveOptions) error { - } - visitedImage[imageName] = true - -- _, img, err := image.FindImageLocally(opts.localStore, imageName) -+ _, img, err := image.FindImage(opts.localStore, imageName) - if err != nil { - return errors.Wrapf(err, "filter image name failed when finding image name %q", imageName) - } -diff --git a/daemon/save_test.go b/daemon/save_test.go -index a59086a8..c1b37342 100644 ---- a/daemon/save_test.go -+++ b/daemon/save_test.go -@@ -129,7 +129,7 @@ func TestSave(t *testing.T) { - Format: "docker", - }, - wantErr: true, -- errString: "failed to parse image", -+ errString: "not found in local store", - }, - { - name: "abnormal case wrong image format", -diff --git a/exporter/docker/archive/archive.go b/exporter/docker/archive/archive.go -index cc6b8721..36a28811 100644 ---- a/exporter/docker/archive/archive.go -+++ b/exporter/docker/archive/archive.go -@@ -60,7 +60,7 @@ func (d *dockerArchiveExporter) Init(opts exporter.ExportOptions, src, destSpec - // destSpec could be "file:name:tag" or "file:name" or just "file" with transport "docker-archive", such as docker-archive:output.tar:name:tag - // When more than two parts, build must be called - if parts := strings.Split(destSpec, ":"); len(parts) > partsNum { -- srcReference, _, err = image.FindImageLocally(localStore, src) -+ srcReference, _, err = image.FindImage(localStore, src) - if err != nil { - return errors.Wrapf(err, "find src image: %q failed with transport %q", src, d.Name()) - } -@@ -89,7 +89,7 @@ func (d *dockerArchiveExporter) Init(opts exporter.ExportOptions, src, destSpec - DockerArchiveExporter.InitArchiveWriter(opts.ExportID, archWriter) - } - -- srcReference, _, err = image.FindImageLocally(localStore, src) -+ srcReference, _, err = image.FindImage(localStore, src) - if err != nil { - return errors.Wrapf(err, "find src image: %q failed with transport %q", src, d.Name()) - } -diff --git a/image/image.go b/image/image.go -index 5ae7245e..4a1ca881 100644 ---- a/image/image.go -+++ b/image/image.go -@@ -468,71 +468,29 @@ func ResolveImageName(s string, resolveArg func(string) string) (string, error) - return newStr, nil - } - --// FindImage get the image from storage by image describe -+// FindImage get the image from local storage by image describe - func FindImage(store *store.Store, image string) (types.ImageReference, *storage.Image, error) { -- names, _, err := ResolveName(image, nil, store) -- if err != nil { -- return nil, nil, errors.Wrapf(err, "error parsing name %q", image) -- } -- -- ref, img, err := ParseImagesToReference(store, names) -- if err != nil { -- return nil, nil, errors.Wrapf(err, "locating image %q failed", image) -- } -- return ref, img, nil --} -- --// FindImageLocally get the image from local storage by image describe --func FindImageLocally(store *store.Store, image string) (types.ImageReference, *storage.Image, error) { - // 1. 
check name valid -- if image == "" { -- return nil, nil, errors.Errorf("image name %q cannot be empty string", image) -+ if _, err := reference.Parse(image); err != nil { -+ return nil, nil, errors.Wrapf(err, "parse image %q failed", image) - } - - // 2. try to find image with name or id in local store - localName := tryResolveNameInStore(image, store) - if localName == "" { -- return nil, nil, errors.Errorf("no image %q in local store", image) -+ return nil, nil, errors.Errorf("image %q not found in local store", image) - } - -- // 3. parse to image reference -- ref, img, err := ParseImagesToReference(store, []string{localName}) -+ // 3. get image reference and storage.Image -+ ref, err := is.Transport.ParseStoreReference(store, localName) - if err != nil { -- return nil, nil, errors.Wrapf(err, "locating image %q locally failed", image) -- } -- return ref, img, nil --} -- --// ParseImagesToReference get the image reference in store --func ParseImagesToReference(store *store.Store, names []string) (types.ImageReference, *storage.Image, error) { -- var ( -- ref types.ImageReference -- img *storage.Image -- err error -- ) -- for _, name := range names { -- ref, err = is.Transport.ParseStoreReference(store, name) -- if err != nil { -- logrus.Debugf("Error parsing reference to image %q: %v", name, err) -- continue -- } -- -- var gErr error -- img, gErr = is.Transport.GetStoreImage(store, ref) -- // When name is the format of name[:rag] with out registry domain, err is storage.ErrImageUnknown -- if gErr != nil { -- img2, err2 := store.Image(name) -- if err2 != nil { -- logrus.Debugf("Error locating image %q: %v", name, err2) -- continue -- } -- img = img2 -- } -- break -+ return nil, nil, errors.Wrapf(err, "error parsing reference to image %q", localName) - } -- if ref == nil || img == nil || err != nil { -- return nil, nil, errors.Errorf("failed to parse image %v in local store", names) -+ img, err := is.Transport.GetStoreImage(store, ref) -+ if err != nil { -+ return nil, nil, errors.Wrapf(err, "failed to parse image %q in local store", localName) - } -+ - return ref, img, nil - } - -@@ -572,14 +530,13 @@ func ResolveName(name string, sc *types.SystemContext, store *store.Store) ([]st - } - - func tryResolveNameInStore(name string, store *store.Store) string { -- defaultTag := "latest" -- - logrus.Infof("Try to find image: %s in local storage", name) - img, err := store.Image(name) - if err == nil { - return img.ID - } - -+ defaultTag := "latest" - logrus.Infof("Try to find image: %s:%s in local storage", name, defaultTag) - img, err = store.Image(fmt.Sprintf("%s:%s", name, defaultTag)) - if err != nil { -diff --git a/image/image_test.go b/image/image_test.go -index c698b4d8..43d936f5 100644 ---- a/image/image_test.go -+++ b/image/image_test.go -@@ -51,7 +51,7 @@ func TestFindImageWhenImageNameIsEmpty(t *testing.T) { - - src := "" - srcReference, _, err := FindImage(&localStore, src) -- assert.ErrorContains(t, err, "locating image") -+ assert.ErrorContains(t, err, "repository name must have at least one component") - assert.Assert(t, cmp.Nil(srcReference)) - } - --- -2.31.1 - diff --git a/patch/0065-clean-code-delete-channel-within-the-same-goroutine.patch b/patch/0065-clean-code-delete-channel-within-the-same-goroutine.patch deleted file mode 100644 index b93d908..0000000 --- a/patch/0065-clean-code-delete-channel-within-the-same-goroutine.patch +++ /dev/null @@ -1,89 +0,0 @@ -From 7050a0ec5cdff61cd289bc8d03dbdd7d46bcda0d Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Sat, 28 Aug 2021 
21:56:02 +0800 -Subject: [PATCH 16/20] clean code: delete channel within the same goroutine - ---- - daemon/manifest.go | 13 ++----------- - daemon/pull.go | 13 ++----------- - daemon/push.go | 13 ++----------- - 3 files changed, 6 insertions(+), 33 deletions(-) - -diff --git a/daemon/manifest.go b/daemon/manifest.go -index fc28998f..36e76749 100644 ---- a/daemon/manifest.go -+++ b/daemon/manifest.go -@@ -212,18 +212,9 @@ func (b *Backend) ManifestPush(req *pb.ManifestPushRequest, stream pb.Control_Ma - eg, egCtx := errgroup.WithContext(stream.Context()) - eg.Go(manifestPushHandler(egCtx, opt)) - eg.Go(manifestPushMessageHandler(stream, cliLogger)) -- errC := make(chan error, 1) - -- errC <- eg.Wait() -- defer close(errC) -- -- err, ok := <-errC -- if !ok { -- logrus.WithField(util.LogKeySessionID, manifestName).Info("Channel errC closed") -- return nil -- } -- if err != nil { -- logrus.WithField(util.LogKeySessionID, manifestName).Warnf("Stream closed with: %v", err) -+ if err := eg.Wait(); err != nil { -+ logrus.WithField(util.LogKeySessionID, manifestName).Warnf("Manifest push stream closed with: %v", err) - return err - } - -diff --git a/daemon/pull.go b/daemon/pull.go -index 6d2e33d9..90be2a91 100644 ---- a/daemon/pull.go -+++ b/daemon/pull.go -@@ -62,18 +62,9 @@ func (b *Backend) Pull(req *pb.PullRequest, stream pb.Control_PullServer) error - eg, egCtx := errgroup.WithContext(ctx) - eg.Go(pullHandler(egCtx, opt)) - eg.Go(pullMessageHandler(stream, opt.logger)) -- errC := make(chan error, 1) - -- errC <- eg.Wait() -- defer close(errC) -- -- err, ok := <-errC -- if !ok { -- logrus.WithField(util.LogKeySessionID, opt.pullID).Info("Channel errC closed") -- return nil -- } -- if err != nil { -- logrus.WithField(util.LogKeySessionID, opt.pullID).Warnf("Stream closed with: %v", err) -+ if err := eg.Wait(); err != nil { -+ logrus.WithField(util.LogKeySessionID, opt.pullID).Warnf("Pull stream closed with: %v", err) - return err - } - -diff --git a/daemon/push.go b/daemon/push.go -index e36198dc..d3f5571e 100644 ---- a/daemon/push.go -+++ b/daemon/push.go -@@ -84,18 +84,9 @@ func (b *Backend) Push(req *pb.PushRequest, stream pb.Control_PushServer) error - - eg.Go(pushHandler(egCtx, opt)) - eg.Go(pushMessageHandler(stream, opt.logger)) -- errC := make(chan error, 1) - -- errC <- eg.Wait() -- defer close(errC) -- -- err, ok := <-errC -- if !ok { -- logrus.WithField(util.LogKeySessionID, opt.pushID).Info("Channel errC closed") -- return nil -- } -- if err != nil { -- logrus.WithField(util.LogKeySessionID, opt.pushID).Warnf("Stream closed with: %v", err) -+ if err := eg.Wait(); err != nil { -+ logrus.WithField(util.LogKeySessionID, opt.pushID).Warnf("Push stream closed with: %v", err) - return err - } - --- -2.31.1 - diff --git a/patch/0067-fix-golangci-lint-warnings.patch b/patch/0067-fix-golangci-lint-warnings.patch deleted file mode 100644 index 3b8815d..0000000 --- a/patch/0067-fix-golangci-lint-warnings.patch +++ /dev/null @@ -1,43 +0,0 @@ -From f08b682ec8caab1a50aff1d37c10729e941d4721 Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Tue, 31 Aug 2021 11:52:29 +0800 -Subject: [PATCH 17/20] fix golangci-lint warnings - ---- - builder/dockerfile/run.go | 6 +++--- - cmd/cli/grpc_client.go | 2 +- - 2 files changed, 4 insertions(+), 4 deletions(-) - -diff --git a/builder/dockerfile/run.go b/builder/dockerfile/run.go -index 828fe676..d33573f9 100644 ---- a/builder/dockerfile/run.go -+++ b/builder/dockerfile/run.go -@@ -97,9 +97,9 @@ func (c *cmdBuilder) setupRuntimeSpec(command []string) 
(*specs.Spec, error) { - // set specific runtime spec config - user := c.stage.docker.Config.User - if user != "" { -- pair, err := util.GetChownOptions(user, c.stage.mountpoint) -- if err != nil { -- return nil, err -+ pair, gErr := util.GetChownOptions(user, c.stage.mountpoint) -+ if gErr != nil { -+ return nil, gErr - } - g.SetProcessUID(uint32(pair.UID)) - g.SetProcessGID(uint32(pair.GID)) -diff --git a/cmd/cli/grpc_client.go b/cmd/cli/grpc_client.go -index cab59503..44c00c09 100644 ---- a/cmd/cli/grpc_client.go -+++ b/cmd/cli/grpc_client.go -@@ -82,7 +82,7 @@ func NewClient(ctx context.Context) (*GrpcClient, error) { - defer cancel() - connected, err := cli.HealthCheck(healthCtx) - if !connected || err != nil { -- return nil, errors.Errorf( "Cannot connect to the isula-builder at %s. Is the isula-builder running?\nError: %v", constant.DefaultGRPCAddress, err) -+ return nil, errors.Errorf("Cannot connect to the isula-builder at %s. Is the isula-builder running?\nError: %v", constant.DefaultGRPCAddress, err) - } - - return cli, nil --- -2.31.1 - diff --git a/patch/0068-change-golangci-lint-config-and-remove-redundant-che.patch b/patch/0068-change-golangci-lint-config-and-remove-redundant-che.patch deleted file mode 100644 index 16e7ae2..0000000 --- a/patch/0068-change-golangci-lint-config-and-remove-redundant-che.patch +++ /dev/null @@ -1,97 +0,0 @@ -From 0b2d60bd700378eea88641cdb2d6fd3ff5bdc6ee Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Wed, 1 Sep 2021 01:12:11 +0800 -Subject: [PATCH 19/20] change golangci-lint config and remove redundant check - ---- - .golangci.yml | 12 ++---------- - image/image.go | 18 +++++------------- - util/util.go | 3 ++- - 3 files changed, 9 insertions(+), 24 deletions(-) - -diff --git a/.golangci.yml b/.golangci.yml -index 46ef20b3..50a31292 100644 ---- a/.golangci.yml -+++ b/.golangci.yml -@@ -38,16 +38,7 @@ linters: - - misspell - - goconst - disable: -- - lll -- - wsl -- - depguard -- - stylecheck -- - dupl -- - interfacer -- - gosimple - - unused -- - rowserrcheck -- - goprintffuncname - fast: false - - output: -@@ -68,7 +59,8 @@ linters-settings: - gomnd: - settings: - mnd: -- checks: argument, case, condition, return -+ checks: argument, case, condition, operation, return, assign -+ ignored-numbers: 2 - gocritic: - disabled-checks: - - ifElseChain -diff --git a/image/image.go b/image/image.go -index 892f2593..b5a3757c 100644 ---- a/image/image.go -+++ b/image/image.go -@@ -254,28 +254,20 @@ func getLocalImageNameFromRef(store storage.Store, srcRef types.ImageReference) - if err := exporter.CheckArchiveFormat(srcRef.Transport().Name()); err == nil { - return stringid.GenerateRandomID() + ":" + stringid.GenerateRandomID(), nil - } -- - if srcRef.Transport().Name() != constant.DockerTransport { - return "", errors.Errorf("the %s transport is not supported yet", srcRef.Transport().Name()) - } - -- var name string - ref := srcRef.DockerReference() - if ref == nil { - return "", errors.New("get the docker reference associated with source reference failed") - } -- -- if named, ok := ref.(reference.Named); ok { -- name = named.Name() -- if tag, ok := ref.(reference.NamedTagged); ok { -- name = name + ":" + tag.Tag() -- } -- if dig, ok := ref.(reference.Canonical); ok { -- name = name + "@" + dig.Digest().String() -- } -+ name := ref.Name() -+ if tag, ok := ref.(reference.NamedTagged); ok { -+ name = name + ":" + tag.Tag() - } -- if _, err := is.Transport.ParseStoreReference(store, name); err != nil { -- return "", errors.Wrapf(err, "parsing image name 
%q failed", name) -+ if dig, ok := ref.(reference.Canonical); ok { -+ name = name + "@" + dig.Digest().String() - } - - return name, nil -diff --git a/util/util.go b/util/util.go -index f5276080..8c1e09e4 100644 ---- a/util/util.go -+++ b/util/util.go -@@ -318,7 +318,8 @@ func ChangeGroup(path, g string) error { - - // GenerateNonCryptoID generate none crypto id with length 32 - func GenerateNonCryptoID() string { -- b := make([]byte, 32) -+ nonCryptoIDLength := 32 -+ b := make([]byte, nonCryptoIDLength) - _, err := rand.Read(b) - if err != nil { - panic(err) // This shouldn't happen --- -2.31.1 - diff --git a/patch/0069-make-add-make-info-for-Makefile.patch b/patch/0069-make-add-make-info-for-Makefile.patch deleted file mode 100644 index a44c251..0000000 --- a/patch/0069-make-add-make-info-for-Makefile.patch +++ /dev/null @@ -1,151 +0,0 @@ -From a7c81c6997cb6f9eb25b227430789555f700fa4c Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Wed, 15 Sep 2021 11:32:16 +0800 -Subject: [PATCH 20/20] make: add make info for Makefile - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - Makefile | 73 +++++++++++++++++++++++++++++++++----------------------- - 1 file changed, 43 insertions(+), 30 deletions(-) - -diff --git a/Makefile b/Makefile -index d5b1c537..d41a9fdb 100644 ---- a/Makefile -+++ b/Makefile -@@ -39,22 +39,30 @@ else - export GO_BUILD=$(GO) build - endif - -+##@ Help -+.PHONY: help -+help: ## Display the help info -+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) -+ -+##@ Build -+ -+.PHONY: all ## Build both isula-build and isula-builder - all: isula-build isula-builder - - .PHONY: isula-build --isula-build: ./cmd/cli -+isula-build: ./cmd/cli ## Build isula-build only - @echo "Making isula-build..." - $(GO_BUILD) -ldflags '$(LDFLAGS)' -o bin/isula-build $(BUILDFLAGS) ./cmd/cli - @echo "isula-build done!" - - .PHONY: isula-builder --isula-builder: ./cmd/daemon -+isula-builder: ./cmd/daemon ## Build isula-builder only - @echo "Making isula-builder..." - $(GO_BUILD) -ldflags '$(LDFLAGS)' -o bin/isula-builder $(BUILDFLAGS) ./cmd/daemon - @echo "isula-builder done!" - - .PHONY: safe --safe: -+safe: ## Build binary with secure compile flag enabled - @echo "Safe building isula-build..." - mkdir -p ${TMPDIR} - $(GO_BUILD) -ldflags '$(SAFEBUILDFLAGS) $(STATIC_LDFLAGS)' -o bin/isula-build $(BUILDFLAGS) ./cmd/cli 2>/dev/null -@@ -62,7 +70,7 @@ safe: - @echo "Safe build isula-build done!" - - .PHONY: debug --debug: -+debug: ## Build binary with debug info inside - @echo "Debug building isula-build..." - @cp -f ./hack/profiling ./daemon/profiling.go - $(GO_BUILD) -ldflags '$(LDFLAGS)' -gcflags="all=-N -l" -o bin/isula-build $(BUILDFLAGS) ./cmd/cli -@@ -70,59 +78,64 @@ debug: - @rm -f ./daemon/profiling.go - @echo "Debug build isula-build done!" - --.PHONY: build-image --build-image: -- isula-build ctr-img build -f Dockerfile.proto ${IMAGE_BUILDARGS} -o isulad:${IMAGE_NAME}:latest . -+.PHONY: install -+install: ## Install binary and configs -+ install -D -m0550 bin/isula-build $(BINDIR) -+ install -D -m0550 bin/isula-builder $(BINDIR) -+ @( getent group isula > /dev/null ) || ( groupadd --system isula ) -+ @[ ! 
-d ${CONFIG_DIR}/${CONFIG_FILE} ] && install -dm0650 ${CONFIG_DIR} -+ @( [ -f ${CONFIG_DIR}/${CONFIG_FILE} ] && printf "%-20s %s\n" "${CONFIG_FILE}" "already exist in ${CONFIG_DIR}, please replace it manually." ) || install -D -m0600 ${LOCAL_CONF_PREFIX}/${CONFIG_FILE} ${CONFIG_DIR}/${CONFIG_FILE} -+ @( [ -f ${CONFIG_DIR}/${POLICY_FILE} ] && printf "%-20s %s\n" "${POLICY_FILE}" "already exist in ${CONFIG_DIR}, please replace it manually." ) || install -D -m0600 ${LOCAL_CONF_PREFIX}/${POLICY_FILE} ${CONFIG_DIR}/${POLICY_FILE} -+ @( [ -f ${CONFIG_DIR}/${REGIST_FILE} ] && printf "%-20s %s\n" "${REGIST_FILE}" "already exist in ${CONFIG_DIR}, please replace it manually." ) || install -D -m0600 ${LOCAL_CONF_PREFIX}/${REGIST_FILE} ${CONFIG_DIR}/${REGIST_FILE} -+ @( [ -f ${CONFIG_DIR}/${STORAGE_FILE} ] && printf "%-20s %s\n" "${STORAGE_FILE}" "already exist in ${CONFIG_DIR}, please replace it manually." ) || install -D -m0600 ${LOCAL_CONF_PREFIX}/${STORAGE_FILE} ${CONFIG_DIR}/${STORAGE_FILE} -+ -+ -+##@ Test - --tests: test-unit test-integration -+tests: test-base test-unit test-integration ## Test all - - .PHONY: test-base --test-base: -+test-base: ## Test base case - @echo "Base test starting..." - @./tests/test.sh base - @echo "Base test done!" - - .PHONY: test-unit --test-unit: -+test-unit: ## Test unit case - @echo "Unit test starting..." - @./hack/unit_test.sh - @echo "Unit test done!" - - .PHONY: test-integration --test-integration: debug install -+test-integration: ## Test integration case - @echo "Integration test starting..." -- @./tests/test.sh base - @./tests/test.sh integration - @echo "Integration test done!" - -+##@ Development -+ -+.PHONY: build-image -+build-image: ## Build protobuf compile environment container image -+ isula-build ctr-img build -f Dockerfile.proto ${IMAGE_BUILDARGS} -o isulad:${IMAGE_NAME}:latest . -+ - .PHONY: proto --proto: -+proto: ## Generate protobuf file - @echo "Generating protobuf..." - isula run -i --rm --runtime runc -v ${PWD}:/go/src/isula.org/isula-build ${IMAGE_NAME} ./hack/generate_proto.sh - @echo "Protobuf files have been generated!" - --.PHONY: install --install: -- install -D -m0550 bin/isula-build $(BINDIR) -- install -D -m0550 bin/isula-builder $(BINDIR) -- @( getent group isula > /dev/null ) || ( groupadd --system isula ) -- @[ ! -d ${CONFIG_DIR}/${CONFIG_FILE} ] && install -dm0650 ${CONFIG_DIR} -- @( [ -f ${CONFIG_DIR}/${CONFIG_FILE} ] && printf "%-20s %s\n" "${CONFIG_FILE}" "already exist in ${CONFIG_DIR}, please replace it manually." ) || install -D -m0600 ${LOCAL_CONF_PREFIX}/${CONFIG_FILE} ${CONFIG_DIR}/${CONFIG_FILE} -- @( [ -f ${CONFIG_DIR}/${POLICY_FILE} ] && printf "%-20s %s\n" "${POLICY_FILE}" "already exist in ${CONFIG_DIR}, please replace it manually." ) || install -D -m0600 ${LOCAL_CONF_PREFIX}/${POLICY_FILE} ${CONFIG_DIR}/${POLICY_FILE} -- @( [ -f ${CONFIG_DIR}/${REGIST_FILE} ] && printf "%-20s %s\n" "${REGIST_FILE}" "already exist in ${CONFIG_DIR}, please replace it manually." ) || install -D -m0600 ${LOCAL_CONF_PREFIX}/${REGIST_FILE} ${CONFIG_DIR}/${REGIST_FILE} -- @( [ -f ${CONFIG_DIR}/${STORAGE_FILE} ] && printf "%-20s %s\n" "${STORAGE_FILE}" "already exist in ${CONFIG_DIR}, please replace it manually." 
) || install -D -m0600 ${LOCAL_CONF_PREFIX}/${STORAGE_FILE} ${CONFIG_DIR}/${STORAGE_FILE} -+.PHONY: check -+check: ## Static check for current commit -+ @echo "Static check start for last commit" -+ @./hack/static_check.sh last -+ @echo "Static check last commit finished" - - .PHONY: checkall --checkall: -+checkall: ## Static check for whole project - @echo "Static check start for whole project" - @./hack/static_check.sh all - @echo "Static check project finished" --.PHONY: check --check: -- @echo "Static check start for last commit" -- @./hack/static_check.sh last -- @echo "Static check last commit finished" - - .PHONY: clean --clean: -+clean: ## Clean project - rm -rf ./bin --- -2.31.1 - diff --git a/patch/0070-clean-code-all-latest-tag-checks-take-the-FindImage-.patch b/patch/0070-clean-code-all-latest-tag-checks-take-the-FindImage-.patch deleted file mode 100644 index 324d4fb..0000000 --- a/patch/0070-clean-code-all-latest-tag-checks-take-the-FindImage-.patch +++ /dev/null @@ -1,391 +0,0 @@ -From d6c6a81dd3cb73685c5cdf029cf9dd5602d3d44d Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Sun, 29 Aug 2021 01:07:58 +0800 -Subject: [PATCH 4/5] clean code: all latest tag checks take the FindImage as - the entrance - ---- - builder/dockerfile/builder.go | 2 +- - constant.go | 2 + - daemon/push.go | 6 --- - daemon/save.go | 24 ++++------ - exporter/docker/archive/archive.go | 56 +++++++++++++--------- - image/image.go | 40 ++++++++-------- - image/image_test.go | 96 ++++++++++++++++++++++++++++++++++++++ - 7 files changed, 160 insertions(+), 66 deletions(-) - -diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go -index 1e1e24d..e28fac9 100644 ---- a/builder/dockerfile/builder.go -+++ b/builder/dockerfile/builder.go -@@ -646,7 +646,7 @@ func CheckAndExpandTag(tag string) (reference.Named, string, error) { - if sepLastIndex == -1 || (sepLastIndex < slashLastIndex) { - // isula - // localhost:5000/isula -- newTag += ":latest" -+ newTag = fmt.Sprintf("%s:%s", newTag, constant.DefaultTag) - } - - const longestTagFieldsLen = 3 -diff --git a/constant.go b/constant.go -index bfe399b..4d1596a 100644 ---- a/constant.go -+++ b/constant.go -@@ -89,6 +89,8 @@ const ( - IsuladTransport = "isulad" - // ManifestTransport used to export manifest list - ManifestTransport = "manifest" -+ // DefaultTag is latest -+ DefaultTag = "latest" - ) - - var ( -diff --git a/daemon/push.go b/daemon/push.go -index d3f5571..ac05383 100644 ---- a/daemon/push.go -+++ b/daemon/push.go -@@ -68,12 +68,6 @@ func (b *Backend) Push(req *pb.PushRequest, stream pb.Control_PushServer) error - return err - } - -- imageName, err := image.CheckAndAddDefaultTag(opt.imageName, opt.localStore) -- if err != nil { -- return err -- } -- opt.imageName = imageName -- - manifestType, gErr := exporter.GetManifestType(opt.format) - if gErr != nil { - return gErr -diff --git a/daemon/save.go b/daemon/save.go -index 35b67de..ee70691 100644 ---- a/daemon/save.go -+++ b/daemon/save.go -@@ -16,6 +16,7 @@ package daemon - import ( - "context" - "os" -+ "strings" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" -@@ -76,7 +77,7 @@ func (b *Backend) Save(req *pb.SaveRequest, stream pb.Control_SaveServer) error - var err error - opts := b.getSaveOptions(req) - -- if err = checkFormatAndExpandTag(&opts); err != nil { -+ if err = checkFormat(&opts); err != nil { - return err - } - if err = filterImageName(&opts); err != nil { -@@ -159,7 +160,7 @@ func messageHandler(stream 
pb.Control_SaveServer, cliLogger *logger.Logger) func - } - } - --func checkFormatAndExpandTag(opts *saveOptions) error { -+func checkFormat(opts *saveOptions) error { - switch opts.format { - case constant.DockerTransport: - opts.format = constant.DockerArchiveTransport -@@ -169,14 +170,6 @@ func checkFormatAndExpandTag(opts *saveOptions) error { - return errors.New("wrong image format provided") - } - -- for i, imageName := range opts.oriImgList { -- nameWithTag, err := image.CheckAndAddDefaultTag(imageName, opts.localStore) -- if err != nil { -- return errors.Wrapf(err, "check format and expand tag failed with image name %q", imageName) -- } -- opts.oriImgList[i] = nameWithTag -- } -- - return nil - } - -@@ -205,12 +198,11 @@ func filterImageName(opts *saveOptions) error { - opts.finalImageOrdered = append(opts.finalImageOrdered, img.ID) - } - -- ref, err := reference.Parse(imageName) -- if err != nil { -- return errors.Wrapf(err, "filter image name failed when parsing name %q", imageName) -- } -- tagged, withTag := ref.(reference.NamedTagged) -- if withTag { -+ if !strings.HasPrefix(img.ID, imageName) { -+ tagged, _, err := image.GetNamedTaggedReference(imageName) -+ if err != nil { -+ return errors.Wrapf(err, "get named tagged reference failed when saving image %q", imageName) -+ } - finalImage.tags = append(finalImage.tags, tagged) - } - opts.finalImageSet[img.ID] = finalImage -diff --git a/exporter/docker/archive/archive.go b/exporter/docker/archive/archive.go -index 36a2881..60e67fd 100644 ---- a/exporter/docker/archive/archive.go -+++ b/exporter/docker/archive/archive.go -@@ -19,7 +19,6 @@ import ( - "sync" - - "github.com/containers/image/v5/docker/archive" -- "github.com/containers/image/v5/transports/alltransports" - "github.com/containers/image/v5/types" - "github.com/pkg/errors" - -@@ -50,24 +49,47 @@ func (d *dockerArchiveExporter) Name() string { - } - - func (d *dockerArchiveExporter) Init(opts exporter.ExportOptions, src, destSpec string, localStore *store.Store) error { -- var ( -- srcReference types.ImageReference -- destReference types.ImageReference -- err error -- ) -+ var archiveFilePath string -+ - const partsNum = 2 -- // src could be form of ImageID digest or name[:tag] -+ -+ // src is an imageid digest - // destSpec could be "file:name:tag" or "file:name" or just "file" with transport "docker-archive", such as docker-archive:output.tar:name:tag -- // When more than two parts, build must be called -+ if parts := strings.Split(destSpec, ":"); len(parts) < partsNum { -+ return errors.Errorf("image name %q is invalid", destSpec) -+ } else if len(parts) == partsNum { -+ archiveFilePath = strings.SplitN(destSpec, ":", partsNum)[1] -+ } else { -+ fileNameTag := strings.SplitN(destSpec, ":", partsNum)[1] -+ archiveFilePath = strings.SplitN(fileNameTag, ":", partsNum)[0] -+ } -+ -+ if DockerArchiveExporter.GetArchiveWriter(opts.ExportID) == nil { -+ archWriter, err := archive.NewWriter(opts.SystemContext, archiveFilePath) -+ if err != nil { -+ return errors.Wrapf(err, "create archive writer failed") -+ } -+ DockerArchiveExporter.InitArchiveWriter(opts.ExportID, archWriter) -+ } -+ -+ // when destSpec is more than two parts, build operation must be called - if parts := strings.Split(destSpec, ":"); len(parts) > partsNum { -- srcReference, _, err = image.FindImage(localStore, src) -+ srcReference, _, err := image.FindImage(localStore, src) - if err != nil { - return errors.Wrapf(err, "find src image: %q failed with transport %q", src, d.Name()) - } -- destReference, err = 
alltransports.ParseImageName(destSpec) -+ -+ // removing docker.io/library/ prefix by not using alltransports.ParseImageName -+ namedTagged, _, err := image.GetNamedTaggedReference(strings.Join(parts[2:], ":")) -+ if err != nil { -+ return errors.Wrapf(err, "get named tagged reference of image %q failed", strings.Join(parts[2:], ":")) -+ } -+ archiveWriter := DockerArchiveExporter.GetArchiveWriter(opts.ExportID) -+ destReference, err := archiveWriter.NewReference(namedTagged) - if err != nil { - return errors.Wrapf(err, "parse dest spec: %q failed with transport %q", destSpec, d.Name()) - } -+ - d.Lock() - d.items[opts.ExportID] = exporter.Bus{ - SrcRef: srcReference, -@@ -79,23 +101,13 @@ func (d *dockerArchiveExporter) Init(opts exporter.ExportOptions, src, destSpec - } - - // from build or save, we can get path from the other part -- archiveFilePath := strings.SplitN(destSpec, ":", partsNum)[1] -- -- if DockerArchiveExporter.GetArchiveWriter(opts.ExportID) == nil { -- archWriter, wErr := archive.NewWriter(opts.SystemContext, archiveFilePath) -- if wErr != nil { -- return errors.Wrapf(wErr, "create archive writer failed") -- } -- DockerArchiveExporter.InitArchiveWriter(opts.ExportID, archWriter) -- } -- -- srcReference, _, err = image.FindImage(localStore, src) -+ srcReference, _, err := image.FindImage(localStore, src) - if err != nil { - return errors.Wrapf(err, "find src image: %q failed with transport %q", src, d.Name()) - } - - archiveWriter := DockerArchiveExporter.GetArchiveWriter(opts.ExportID) -- destReference, err = archiveWriter.NewReference(nil) -+ destReference, err := archiveWriter.NewReference(nil) - if err != nil { - return errors.Wrapf(err, "parse dest spec: %q failed", destSpec) - } -diff --git a/image/image.go b/image/image.go -index b6210f2..5dda185 100644 ---- a/image/image.go -+++ b/image/image.go -@@ -482,7 +482,7 @@ func FindImage(store *store.Store, image string) (types.ImageReference, *storage - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to parse image %q in local store", localName) - } -- -+ - return ref, img, nil - } - -@@ -528,9 +528,8 @@ func tryResolveNameInStore(name string, store *store.Store) string { - return img.ID - } - -- defaultTag := "latest" -- logrus.Infof("Try to find image: %s:%s in local storage", name, defaultTag) -- img, err = store.Image(fmt.Sprintf("%s:%s", name, defaultTag)) -+ logrus.Infof("Try to find image: %s:%s in local storage", name, constant.DefaultTag) -+ img, err = store.Image(fmt.Sprintf("%s:%s", name, constant.DefaultTag)) - if err != nil { - return "" - } -@@ -621,25 +620,24 @@ func tryResolveNameInRegistries(name string, sc *types.SystemContext) ([]string, - return candidates, constant.DockerTransport - } - --// CheckAndAddDefaultTag checks if src is format of repository[:tag], add default tag if src without tag --func CheckAndAddDefaultTag(imageName string, store *store.Store) (string, error) { -- _, img, err := FindImage(store, imageName) -- if err != nil { -- return "", errors.Wrapf(err, "find src image: %q failed", imageName) -+// GetNamedTaggedReference checks an image name, if it does not include a tag, default tag "latest" will be added to it. 
-+func GetNamedTaggedReference(image string) (reference.NamedTagged, string, error) { -+ if image == "" { -+ return nil, "", nil - } - -- defaultTag := "latest" -- for _, name := range img.Names { -- // for imageName is the format of repository[:tag] -- if imageName == name { -- return imageName, nil -- } -- // for name is the format of repository -- if fmt.Sprintf("%s:%s", imageName, defaultTag) == name { -- return name, nil -- } -+ if slashLastIndex, sepLastIndex := strings.LastIndex(image, "/"), strings.LastIndex(image, ":"); sepLastIndex == -1 || (sepLastIndex < slashLastIndex) { -+ image = fmt.Sprintf("%s:%s", image, constant.DefaultTag) -+ } -+ -+ ref, err := reference.Parse(image) -+ if err != nil { -+ return nil, "", errors.Wrapf(err, "filter image name failed when parsing name %q", image) -+ } -+ tagged, withTag := ref.(reference.NamedTagged) -+ if !withTag { -+ return nil, "", errors.Errorf("image %q does not contain a tag even though the default tag is added", image) - } - -- // for imageName is the format of imageID -- return imageName, nil -+ return tagged, image, nil - } -diff --git a/image/image_test.go b/image/image_test.go -index 43d936f..15b13e1 100644 ---- a/image/image_test.go -+++ b/image/image_test.go -@@ -123,3 +123,99 @@ registries = [] - assert.Assert(t, cmp.Contains(candidates, "localhost/busybox:latest")) - assert.Equal(t, transport, constant.DockerTransport) - } -+ -+func TestGetNamedTaggedReference(t *testing.T) { -+ type testcase struct { -+ name string -+ tag string -+ output string -+ wantErr bool -+ errString string -+ } -+ testcases := []testcase{ -+ { -+ name: "test 1", -+ tag: "isula/test", -+ output: "isula/test:latest", -+ wantErr: false, -+ }, -+ { -+ name: "test 2", -+ tag: "localhost:5000/test", -+ output: "localhost:5000/test:latest", -+ wantErr: false, -+ }, -+ { -+ name: "test 3", -+ tag: "isula/test:latest", -+ output: "isula/test:latest", -+ wantErr: false, -+ }, -+ { -+ name: "test 4", -+ tag: "localhost:5000/test:latest", -+ output: "localhost:5000/test:latest", -+ wantErr: false, -+ }, -+ { -+ name: "test 5", -+ tag: "localhost:5000:aaa/test:latest", -+ output: "", -+ wantErr: true, -+ errString: "invalid reference format", -+ }, -+ { -+ name: "test 6", -+ tag: "localhost:5000:aaa/test", -+ output: "", -+ wantErr: true, -+ errString: "invalid reference format", -+ }, -+ { -+ name: "test 7", -+ tag: "localhost:5000/test:latest:latest", -+ output: "", -+ wantErr: true, -+ errString: "invalid reference format", -+ }, -+ { -+ name: "test 8", -+ tag: "test:latest:latest", -+ output: "", -+ wantErr: true, -+ errString: "invalid reference format", -+ }, -+ { -+ name: "test 9", -+ tag: "", -+ output: "", -+ wantErr: false, -+ }, -+ { -+ name: "test 10", -+ tag: "abc efg:latest", -+ output: "", -+ wantErr: true, -+ errString: "invalid reference format", -+ }, -+ { -+ name: "test 11", -+ tag: "abc!@#:latest", -+ output: "", -+ wantErr: true, -+ errString: "invalid reference format", -+ }, -+ } -+ for _, tc := range testcases { -+ t.Run(tc.name, func(t *testing.T) { -+ _, tag, err := GetNamedTaggedReference(tc.tag) -+ if !tc.wantErr { -+ assert.Equal(t, tag, tc.output, tc.name) -+ } -+ if tc.wantErr { -+ assert.ErrorContains(t, err, tc.errString) -+ } -+ }) -+ -+ } -+} --- -1.8.3.1 - diff --git a/patch/0071-use-image.GetNamedTaggedReference-instead-of-dockerf.patch b/patch/0071-use-image.GetNamedTaggedReference-instead-of-dockerf.patch deleted file mode 100644 index 9e19dc8..0000000 --- 
a/patch/0071-use-image.GetNamedTaggedReference-instead-of-dockerf.patch +++ /dev/null @@ -1,236 +0,0 @@ -From 7117427081e16b18eca768bf4e3274bd3e1fbb2b Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Wed, 1 Sep 2021 22:29:12 +0800 -Subject: [PATCH 5/5] use image.GetNamedTaggedReference instead of - dockerfile.CheckAndExpandTag - ---- - builder/dockerfile/builder.go | 44 +------------------- - builder/dockerfile/builder_test.go | 84 -------------------------------------- - daemon/import.go | 2 +- - daemon/manifest.go | 3 +- - daemon/tag.go | 3 +- - 5 files changed, 5 insertions(+), 131 deletions(-) - -diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go -index e28fac9..df8c6ce 100644 ---- a/builder/dockerfile/builder.go -+++ b/builder/dockerfile/builder.go -@@ -27,7 +27,6 @@ import ( - "strings" - "time" - -- "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" - securejoin "github.com/cyphar/filepath-securejoin" - "github.com/opencontainers/go-digest" -@@ -203,14 +202,14 @@ func parseTag(output, additionalTag string) (string, string, error) { - addTag string - ) - if tag = parseOutputTag(output); tag != "" { -- _, tag, err = CheckAndExpandTag(tag) -+ _, tag, err = image.GetNamedTaggedReference(tag) - if err != nil { - return "", "", err - } - } - - if additionalTag != "" { -- _, addTag, err = CheckAndExpandTag(additionalTag) -+ _, addTag, err = image.GetNamedTaggedReference(additionalTag) - if err != nil { - return "", "", err - } -@@ -633,42 +632,3 @@ func parseOutputTag(output string) string { - - return tag - } -- --// CheckAndExpandTag checks tag name. If it not include a tag, "latest" will be added. --func CheckAndExpandTag(tag string) (reference.Named, string, error) { -- if tag == "" { -- return nil, "", nil -- } -- -- newTag := tag -- slashLastIndex := strings.LastIndex(newTag, "/") -- sepLastIndex := strings.LastIndex(newTag, ":") -- if sepLastIndex == -1 || (sepLastIndex < slashLastIndex) { -- // isula -- // localhost:5000/isula -- newTag = fmt.Sprintf("%s:%s", newTag, constant.DefaultTag) -- } -- -- const longestTagFieldsLen = 3 -- if len(strings.Split(newTag, ":")) > longestTagFieldsLen { -- // localhost:5000:5000/isula:latest -- return nil, "", errors.Errorf("invalid tag: %v", newTag) -- } -- -- oriRef, err := reference.ParseNormalizedNamed(newTag) -- if err != nil { -- return nil, "", errors.Wrapf(err, "parse tag err, invalid tag: %v", newTag) -- } -- -- tagWithoutRepo := newTag[slashLastIndex+1:] -- _, err = reference.ParseNormalizedNamed(tagWithoutRepo) -- if err != nil { -- // isula:latest:latest -- // localhost/isula:latest:latest -- // isula!@#:latest -- // isula :latest -- return nil, "", errors.Wrapf(err, "parse tag err, invalid tag: %v", newTag) -- } -- -- return oriRef, newTag, nil --} -diff --git a/builder/dockerfile/builder_test.go b/builder/dockerfile/builder_test.go -index 60668e1..c2fec6c 100644 ---- a/builder/dockerfile/builder_test.go -+++ b/builder/dockerfile/builder_test.go -@@ -1470,87 +1470,3 @@ func TestNewBuilder(t *testing.T) { - }) - } - } -- --func TestCheckAndExpandTag(t *testing.T) { -- type testcase struct { -- name string -- tag string -- output string -- wantErr bool -- } -- testcases := []testcase{ -- { -- name: "test 1", -- tag: "isula/test", -- output: "isula/test:latest", -- wantErr: false, -- }, -- { -- name: "test 2", -- tag: "localhost:5000/test", -- output: "localhost:5000/test:latest", -- wantErr: false, -- }, -- { -- name: "test 3", -- tag: "isula/test:latest", -- output: 
"isula/test:latest", -- wantErr: false, -- }, -- { -- name: "test 4", -- tag: "localhost:5000/test:latest", -- output: "localhost:5000/test:latest", -- wantErr: false, -- }, -- { -- name: "test 5", -- tag: "localhost:5000:aaa/test:latest", -- output: "", -- wantErr: true, -- }, -- { -- name: "test 6", -- tag: "localhost:5000:aaa/test", -- output: "", -- wantErr: true, -- }, -- { -- name: "test 7", -- tag: "localhost:5000/test:latest:latest", -- output: "", -- wantErr: true, -- }, -- { -- name: "test 8", -- tag: "test:latest:latest", -- output: "", -- wantErr: true, -- }, -- { -- name: "test 9", -- tag: "", -- output: "", -- wantErr: false, -- }, -- { -- name: "test 10", -- tag: "abc efg:latest", -- output: "", -- wantErr: true, -- }, -- { -- name: "test 10", -- tag: "abc!@#:latest", -- output: "", -- wantErr: true, -- }, -- } -- for _, tc := range testcases { -- _, tag, err := CheckAndExpandTag(tc.tag) -- assert.Equal(t, tag, tc.output, tc.name) -- if (err != nil) != tc.wantErr { -- t.Errorf("getCheckAndExpandTag() error = %v, wantErr %v", err, tc.wantErr) -- } -- } --} -diff --git a/daemon/import.go b/daemon/import.go -index 40a0a92..2da36be 100644 ---- a/daemon/import.go -+++ b/daemon/import.go -@@ -62,7 +62,7 @@ func (b *Backend) Import(req *pb.ImportRequest, stream pb.Control_ImportServer) - logEntry.Error(err) - return err - } -- _, reference, err = dockerfile.CheckAndExpandTag(reference) -+ _, reference, err = image.GetNamedTaggedReference(reference) - if err != nil { - logEntry.Error(err) - return err -diff --git a/daemon/manifest.go b/daemon/manifest.go -index 36e7674..940850e 100644 ---- a/daemon/manifest.go -+++ b/daemon/manifest.go -@@ -26,7 +26,6 @@ import ( - - constant "isula.org/isula-build" - pb "isula.org/isula-build/api/services" -- "isula.org/isula-build/builder/dockerfile" - "isula.org/isula-build/exporter" - "isula.org/isula-build/image" - "isula.org/isula-build/pkg/logger" -@@ -61,7 +60,7 @@ func (b *Backend) ManifestCreate(ctx context.Context, req *pb.ManifestCreateRequ - } - - // expand list name -- _, imageName, err := dockerfile.CheckAndExpandTag(manifestName) -+ _, imageName, err := image.GetNamedTaggedReference(manifestName) - if err != nil { - logrus.WithField(util.LogKeySessionID, manifestName).Errorf("Check and expand list name err: %v", err) - return &pb.ManifestCreateResponse{}, err -diff --git a/daemon/tag.go b/daemon/tag.go -index fe6a5a2..57de15c 100644 ---- a/daemon/tag.go -+++ b/daemon/tag.go -@@ -21,7 +21,6 @@ import ( - "github.com/sirupsen/logrus" - - pb "isula.org/isula-build/api/services" -- "isula.org/isula-build/builder/dockerfile" - "isula.org/isula-build/image" - ) - -@@ -40,7 +39,7 @@ func (b *Backend) Tag(ctx context.Context, req *pb.TagRequest) (*gogotypes.Empty - return emptyResp, errors.Wrapf(err, "find local image %v error", req.Image) - } - -- _, imageName, err := dockerfile.CheckAndExpandTag(req.Tag) -+ _, imageName, err := image.GetNamedTaggedReference(req.Tag) - if err != nil { - return emptyResp, err - } --- -1.8.3.1 - diff --git a/patch/0072-protocol-define-separator-protocol.patch b/patch/0072-protocol-define-separator-protocol.patch deleted file mode 100644 index 131f4af..0000000 --- a/patch/0072-protocol-define-separator-protocol.patch +++ /dev/null @@ -1,754 +0,0 @@ -From 4de32e443640b4b4481c619aeb2571d1872f9008 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Tue, 26 Oct 2021 14:18:34 +0800 -Subject: [PATCH 01/16] protocol:define separator protocol - -reason: define separator protocol -save: add 
SeparatorSave(base, lib, rename, dest) -load: add LoadID, SeparatorLoad(app, dir, base, lib, skipCheck) - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - api/services/control.pb.go | 491 ++++++++++++++++++++++++++----------- - api/services/control.proto | 36 +++ - 2 files changed, 381 insertions(+), 146 deletions(-) - -diff --git a/api/services/control.pb.go b/api/services/control.pb.go -index 0c8b6394..4f386671 100644 ---- a/api/services/control.pb.go -+++ b/api/services/control.pb.go -@@ -1054,10 +1054,16 @@ func (m *LogoutResponse) GetResult() string { - - type LoadRequest struct { - // path is the path of loading file -- Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` -- XXX_NoUnkeyedLiteral struct{} `json:"-"` -- XXX_unrecognized []byte `json:"-"` -- XXX_sizecache int32 `json:"-"` -+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` -+ // loadID is the unique ID for each time load -+ // also is the part of construct temporary path to -+ // store transport file -+ LoadID string `protobuf:"bytes,2,opt,name=loadID,proto3" json:"loadID,omitempty"` -+ // SeparatorLoad is the info to load separated image -+ Sep *SeparatorLoad `protobuf:"bytes,3,opt,name=sep,proto3" json:"sep,omitempty"` -+ XXX_NoUnkeyedLiteral struct{} `json:"-"` -+ XXX_unrecognized []byte `json:"-"` -+ XXX_sizecache int32 `json:"-"` - } - - func (m *LoadRequest) Reset() { *m = LoadRequest{} } -@@ -1091,6 +1097,104 @@ func (m *LoadRequest) GetPath() string { - return "" - } - -+func (m *LoadRequest) GetLoadID() string { -+ if m != nil { -+ return m.LoadID -+ } -+ return "" -+} -+ -+func (m *LoadRequest) GetSep() *SeparatorLoad { -+ if m != nil { -+ return m.Sep -+ } -+ return nil -+} -+ -+type SeparatorLoad struct { -+ // app is application image name -+ App string `protobuf:"bytes,1,opt,name=app,proto3" json:"app,omitempty"` -+ // dir is image tarballs directory -+ Dir string `protobuf:"bytes,2,opt,name=dir,proto3" json:"dir,omitempty"` -+ // base is base image tarball path -+ Base string `protobuf:"bytes,3,opt,name=base,proto3" json:"base,omitempty"` -+ // lib is library image tarball path -+ Lib string `protobuf:"bytes,4,opt,name=lib,proto3" json:"lib,omitempty"` -+ // skipCheck is flag to skip sha256 check sum for images -+ SkipCheck bool `protobuf:"varint,5,opt,name=skipCheck,proto3" json:"skipCheck,omitempty"` -+ // enabled is flag to indicate the separator function enabled or not -+ Enabled bool `protobuf:"varint,6,opt,name=enabled,proto3" json:"enabled,omitempty"` -+ XXX_NoUnkeyedLiteral struct{} `json:"-"` -+ XXX_unrecognized []byte `json:"-"` -+ XXX_sizecache int32 `json:"-"` -+} -+ -+func (m *SeparatorLoad) Reset() { *m = SeparatorLoad{} } -+func (m *SeparatorLoad) String() string { return proto.CompactTextString(m) } -+func (*SeparatorLoad) ProtoMessage() {} -+func (*SeparatorLoad) Descriptor() ([]byte, []int) { -+ return fileDescriptor_d71ef680555cb937, []int{19} -+} -+func (m *SeparatorLoad) XXX_Unmarshal(b []byte) error { -+ return xxx_messageInfo_SeparatorLoad.Unmarshal(m, b) -+} -+func (m *SeparatorLoad) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ return xxx_messageInfo_SeparatorLoad.Marshal(b, m, deterministic) -+} -+func (m *SeparatorLoad) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_SeparatorLoad.Merge(m, src) -+} -+func (m *SeparatorLoad) XXX_Size() int { -+ return xxx_messageInfo_SeparatorLoad.Size(m) -+} -+func (m *SeparatorLoad) XXX_DiscardUnknown() { -+ xxx_messageInfo_SeparatorLoad.DiscardUnknown(m) -+} -+ -+var 
xxx_messageInfo_SeparatorLoad proto.InternalMessageInfo -+ -+func (m *SeparatorLoad) GetApp() string { -+ if m != nil { -+ return m.App -+ } -+ return "" -+} -+ -+func (m *SeparatorLoad) GetDir() string { -+ if m != nil { -+ return m.Dir -+ } -+ return "" -+} -+ -+func (m *SeparatorLoad) GetBase() string { -+ if m != nil { -+ return m.Base -+ } -+ return "" -+} -+ -+func (m *SeparatorLoad) GetLib() string { -+ if m != nil { -+ return m.Lib -+ } -+ return "" -+} -+ -+func (m *SeparatorLoad) GetSkipCheck() bool { -+ if m != nil { -+ return m.SkipCheck -+ } -+ return false -+} -+ -+func (m *SeparatorLoad) GetEnabled() bool { -+ if m != nil { -+ return m.Enabled -+ } -+ return false -+} -+ - type LoadResponse struct { - // log is the log sent to client - Log string `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` -@@ -1103,7 +1207,7 @@ func (m *LoadResponse) Reset() { *m = LoadResponse{} } - func (m *LoadResponse) String() string { return proto.CompactTextString(m) } - func (*LoadResponse) ProtoMessage() {} - func (*LoadResponse) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{19} -+ return fileDescriptor_d71ef680555cb937, []int{20} - } - func (m *LoadResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LoadResponse.Unmarshal(m, b) -@@ -1146,7 +1250,7 @@ func (m *PushRequest) Reset() { *m = PushRequest{} } - func (m *PushRequest) String() string { return proto.CompactTextString(m) } - func (*PushRequest) ProtoMessage() {} - func (*PushRequest) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{20} -+ return fileDescriptor_d71ef680555cb937, []int{21} - } - func (m *PushRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PushRequest.Unmarshal(m, b) -@@ -1199,7 +1303,7 @@ func (m *PushResponse) Reset() { *m = PushResponse{} } - func (m *PushResponse) String() string { return proto.CompactTextString(m) } - func (*PushResponse) ProtoMessage() {} - func (*PushResponse) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{21} -+ return fileDescriptor_d71ef680555cb937, []int{22} - } - func (m *PushResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PushResponse.Unmarshal(m, b) -@@ -1240,7 +1344,7 @@ func (m *PullRequest) Reset() { *m = PullRequest{} } - func (m *PullRequest) String() string { return proto.CompactTextString(m) } - func (*PullRequest) ProtoMessage() {} - func (*PullRequest) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{22} -+ return fileDescriptor_d71ef680555cb937, []int{23} - } - func (m *PullRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PullRequest.Unmarshal(m, b) -@@ -1286,7 +1390,7 @@ func (m *PullResponse) Reset() { *m = PullResponse{} } - func (m *PullResponse) String() string { return proto.CompactTextString(m) } - func (*PullResponse) ProtoMessage() {} - func (*PullResponse) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{23} -+ return fileDescriptor_d71ef680555cb937, []int{24} - } - func (m *PullResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PullResponse.Unmarshal(m, b) -@@ -1323,17 +1427,19 @@ type SaveRequest struct { - // path is location for output tarball - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` - // format is the format of image saved to archive file, such as docker-archive, oci-archive -- Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"` -- 
XXX_NoUnkeyedLiteral struct{} `json:"-"` -- XXX_unrecognized []byte `json:"-"` -- XXX_sizecache int32 `json:"-"` -+ Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"` -+ // SeparatorSave is the info to save separated image -+ Sep *SeparatorSave `protobuf:"bytes,5,opt,name=sep,proto3" json:"sep,omitempty"` -+ XXX_NoUnkeyedLiteral struct{} `json:"-"` -+ XXX_unrecognized []byte `json:"-"` -+ XXX_sizecache int32 `json:"-"` - } - - func (m *SaveRequest) Reset() { *m = SaveRequest{} } - func (m *SaveRequest) String() string { return proto.CompactTextString(m) } - func (*SaveRequest) ProtoMessage() {} - func (*SaveRequest) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{24} -+ return fileDescriptor_d71ef680555cb937, []int{25} - } - func (m *SaveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SaveRequest.Unmarshal(m, b) -@@ -1381,6 +1487,88 @@ func (m *SaveRequest) GetFormat() string { - return "" - } - -+func (m *SaveRequest) GetSep() *SeparatorSave { -+ if m != nil { -+ return m.Sep -+ } -+ return nil -+} -+ -+type SeparatorSave struct { -+ // base is base image name -+ Base string `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"` -+ // lib is library image name -+ Lib string `protobuf:"bytes,2,opt,name=lib,proto3" json:"lib,omitempty"` -+ // rename is rename json file -+ Rename string `protobuf:"bytes,3,opt,name=rename,proto3" json:"rename,omitempty"` -+ // dest is destination file directory -+ Dest string `protobuf:"bytes,4,opt,name=dest,proto3" json:"dest,omitempty"` -+ // enabled is flag to indicate the separator function enabled or not -+ Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"` -+ XXX_NoUnkeyedLiteral struct{} `json:"-"` -+ XXX_unrecognized []byte `json:"-"` -+ XXX_sizecache int32 `json:"-"` -+} -+ -+func (m *SeparatorSave) Reset() { *m = SeparatorSave{} } -+func (m *SeparatorSave) String() string { return proto.CompactTextString(m) } -+func (*SeparatorSave) ProtoMessage() {} -+func (*SeparatorSave) Descriptor() ([]byte, []int) { -+ return fileDescriptor_d71ef680555cb937, []int{26} -+} -+func (m *SeparatorSave) XXX_Unmarshal(b []byte) error { -+ return xxx_messageInfo_SeparatorSave.Unmarshal(m, b) -+} -+func (m *SeparatorSave) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { -+ return xxx_messageInfo_SeparatorSave.Marshal(b, m, deterministic) -+} -+func (m *SeparatorSave) XXX_Merge(src proto.Message) { -+ xxx_messageInfo_SeparatorSave.Merge(m, src) -+} -+func (m *SeparatorSave) XXX_Size() int { -+ return xxx_messageInfo_SeparatorSave.Size(m) -+} -+func (m *SeparatorSave) XXX_DiscardUnknown() { -+ xxx_messageInfo_SeparatorSave.DiscardUnknown(m) -+} -+ -+var xxx_messageInfo_SeparatorSave proto.InternalMessageInfo -+ -+func (m *SeparatorSave) GetBase() string { -+ if m != nil { -+ return m.Base -+ } -+ return "" -+} -+ -+func (m *SeparatorSave) GetLib() string { -+ if m != nil { -+ return m.Lib -+ } -+ return "" -+} -+ -+func (m *SeparatorSave) GetRename() string { -+ if m != nil { -+ return m.Rename -+ } -+ return "" -+} -+ -+func (m *SeparatorSave) GetDest() string { -+ if m != nil { -+ return m.Dest -+ } -+ return "" -+} -+ -+func (m *SeparatorSave) GetEnabled() bool { -+ if m != nil { -+ return m.Enabled -+ } -+ return false -+} -+ - type SaveResponse struct { - // log is log send to cli - Log string `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` -@@ -1393,7 +1581,7 @@ func (m *SaveResponse) Reset() { *m = SaveResponse{} } - 
func (m *SaveResponse) String() string { return proto.CompactTextString(m) } - func (*SaveResponse) ProtoMessage() {} - func (*SaveResponse) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{25} -+ return fileDescriptor_d71ef680555cb937, []int{27} - } - func (m *SaveResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SaveResponse.Unmarshal(m, b) -@@ -1438,7 +1626,7 @@ func (m *MemData) Reset() { *m = MemData{} } - func (m *MemData) String() string { return proto.CompactTextString(m) } - func (*MemData) ProtoMessage() {} - func (*MemData) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{26} -+ return fileDescriptor_d71ef680555cb937, []int{28} - } - func (m *MemData) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MemData.Unmarshal(m, b) -@@ -1508,7 +1696,7 @@ func (m *MemStat) Reset() { *m = MemStat{} } - func (m *MemStat) String() string { return proto.CompactTextString(m) } - func (*MemStat) ProtoMessage() {} - func (*MemStat) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{27} -+ return fileDescriptor_d71ef680555cb937, []int{29} - } - func (m *MemStat) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MemStat.Unmarshal(m, b) -@@ -1584,7 +1772,7 @@ func (m *StorageData) Reset() { *m = StorageData{} } - func (m *StorageData) String() string { return proto.CompactTextString(m) } - func (*StorageData) ProtoMessage() {} - func (*StorageData) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{28} -+ return fileDescriptor_d71ef680555cb937, []int{30} - } - func (m *StorageData) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StorageData.Unmarshal(m, b) -@@ -1634,7 +1822,7 @@ func (m *RegistryData) Reset() { *m = RegistryData{} } - func (m *RegistryData) String() string { return proto.CompactTextString(m) } - func (*RegistryData) ProtoMessage() {} - func (*RegistryData) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{29} -+ return fileDescriptor_d71ef680555cb937, []int{31} - } - func (m *RegistryData) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RegistryData.Unmarshal(m, b) -@@ -1686,7 +1874,7 @@ func (m *InfoRequest) Reset() { *m = InfoRequest{} } - func (m *InfoRequest) String() string { return proto.CompactTextString(m) } - func (*InfoRequest) ProtoMessage() {} - func (*InfoRequest) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{30} -+ return fileDescriptor_d71ef680555cb937, []int{32} - } - func (m *InfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InfoRequest.Unmarshal(m, b) -@@ -1743,7 +1931,7 @@ func (m *InfoResponse) Reset() { *m = InfoResponse{} } - func (m *InfoResponse) String() string { return proto.CompactTextString(m) } - func (*InfoResponse) ProtoMessage() {} - func (*InfoResponse) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{31} -+ return fileDescriptor_d71ef680555cb937, []int{33} - } - func (m *InfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InfoResponse.Unmarshal(m, b) -@@ -1845,7 +2033,7 @@ func (m *ManifestCreateRequest) Reset() { *m = ManifestCreateRequest{} } - func (m *ManifestCreateRequest) String() string { return proto.CompactTextString(m) } - func (*ManifestCreateRequest) ProtoMessage() {} - func (*ManifestCreateRequest) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{32} -+ return fileDescriptor_d71ef680555cb937, []int{34} - } 
- func (m *ManifestCreateRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ManifestCreateRequest.Unmarshal(m, b) -@@ -1890,7 +2078,7 @@ func (m *ManifestCreateResponse) Reset() { *m = ManifestCreateResponse{} - func (m *ManifestCreateResponse) String() string { return proto.CompactTextString(m) } - func (*ManifestCreateResponse) ProtoMessage() {} - func (*ManifestCreateResponse) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{33} -+ return fileDescriptor_d71ef680555cb937, []int{35} - } - func (m *ManifestCreateResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ManifestCreateResponse.Unmarshal(m, b) -@@ -1933,7 +2121,7 @@ func (m *ManifestAnnotateRequest) Reset() { *m = ManifestAnnotateRequest - func (m *ManifestAnnotateRequest) String() string { return proto.CompactTextString(m) } - func (*ManifestAnnotateRequest) ProtoMessage() {} - func (*ManifestAnnotateRequest) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{34} -+ return fileDescriptor_d71ef680555cb937, []int{36} - } - func (m *ManifestAnnotateRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ManifestAnnotateRequest.Unmarshal(m, b) -@@ -2006,7 +2194,7 @@ func (m *ManifestInspectRequest) Reset() { *m = ManifestInspectRequest{} - func (m *ManifestInspectRequest) String() string { return proto.CompactTextString(m) } - func (*ManifestInspectRequest) ProtoMessage() {} - func (*ManifestInspectRequest) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{35} -+ return fileDescriptor_d71ef680555cb937, []int{37} - } - func (m *ManifestInspectRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ManifestInspectRequest.Unmarshal(m, b) -@@ -2044,7 +2232,7 @@ func (m *ManifestInspectResponse) Reset() { *m = ManifestInspectResponse - func (m *ManifestInspectResponse) String() string { return proto.CompactTextString(m) } - func (*ManifestInspectResponse) ProtoMessage() {} - func (*ManifestInspectResponse) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{36} -+ return fileDescriptor_d71ef680555cb937, []int{38} - } - func (m *ManifestInspectResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ManifestInspectResponse.Unmarshal(m, b) -@@ -2083,7 +2271,7 @@ func (m *ManifestPushRequest) Reset() { *m = ManifestPushRequest{} } - func (m *ManifestPushRequest) String() string { return proto.CompactTextString(m) } - func (*ManifestPushRequest) ProtoMessage() {} - func (*ManifestPushRequest) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{37} -+ return fileDescriptor_d71ef680555cb937, []int{39} - } - func (m *ManifestPushRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ManifestPushRequest.Unmarshal(m, b) -@@ -2128,7 +2316,7 @@ func (m *ManifestPushResponse) Reset() { *m = ManifestPushResponse{} } - func (m *ManifestPushResponse) String() string { return proto.CompactTextString(m) } - func (*ManifestPushResponse) ProtoMessage() {} - func (*ManifestPushResponse) Descriptor() ([]byte, []int) { -- return fileDescriptor_d71ef680555cb937, []int{38} -+ return fileDescriptor_d71ef680555cb937, []int{40} - } - func (m *ManifestPushResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ManifestPushResponse.Unmarshal(m, b) -@@ -2177,12 +2365,14 @@ func init() { - proto.RegisterType((*LogoutRequest)(nil), "isula.build.v1.LogoutRequest") - proto.RegisterType((*LogoutResponse)(nil), "isula.build.v1.LogoutResponse") - 
proto.RegisterType((*LoadRequest)(nil), "isula.build.v1.LoadRequest") -+ proto.RegisterType((*SeparatorLoad)(nil), "isula.build.v1.SeparatorLoad") - proto.RegisterType((*LoadResponse)(nil), "isula.build.v1.LoadResponse") - proto.RegisterType((*PushRequest)(nil), "isula.build.v1.PushRequest") - proto.RegisterType((*PushResponse)(nil), "isula.build.v1.PushResponse") - proto.RegisterType((*PullRequest)(nil), "isula.build.v1.PullRequest") - proto.RegisterType((*PullResponse)(nil), "isula.build.v1.PullResponse") - proto.RegisterType((*SaveRequest)(nil), "isula.build.v1.SaveRequest") -+ proto.RegisterType((*SeparatorSave)(nil), "isula.build.v1.SeparatorSave") - proto.RegisterType((*SaveResponse)(nil), "isula.build.v1.SaveResponse") - proto.RegisterType((*MemData)(nil), "isula.build.v1.MemData") - proto.RegisterType((*MemStat)(nil), "isula.build.v1.MemStat") -@@ -2202,124 +2392,133 @@ func init() { - func init() { proto.RegisterFile("api/services/control.proto", fileDescriptor_d71ef680555cb937) } - - var fileDescriptor_d71ef680555cb937 = []byte{ -- // 1861 bytes of a gzipped FileDescriptorProto -- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0x5f, 0x73, 0x1c, 0x47, -- 0x11, 0xe7, 0xfe, 0xe8, 0x5f, 0xdf, 0xe9, 0xac, 0x9a, 0x18, 0x67, 0xeb, 0xac, 0x24, 0x62, 0x63, -- 0x62, 0x61, 0x8a, 0xb3, 0x2c, 0x78, 0x20, 0x14, 0x50, 0xe8, 0x4f, 0x6c, 0x0e, 0x6c, 0x19, 0x56, -- 0x4a, 0x28, 0x2a, 0x55, 0xb8, 0x46, 0x77, 0xa3, 0xd3, 0x96, 0x77, 0x77, 0x96, 0x99, 0x59, 0xc5, -- 0x07, 0x5f, 0x83, 0x37, 0xe0, 0x33, 0xf0, 0x98, 0x4f, 0xc0, 0x13, 0x5f, 0x8a, 0xea, 0xf9, 0xb3, -- 0x3b, 0x7b, 0x7b, 0x27, 0x39, 0x6f, 0xfb, 0xeb, 0xee, 0xe9, 0xee, 0xe9, 0xee, 0xe9, 0xe9, 0x59, -- 0x18, 0xd2, 0x3c, 0x7e, 0x2a, 0x99, 0xb8, 0x89, 0x27, 0x4c, 0x3e, 0x9d, 0xf0, 0x4c, 0x09, 0x9e, -- 0x8c, 0x72, 0xc1, 0x15, 0x27, 0x83, 0x58, 0x16, 0x09, 0x1d, 0x5d, 0x16, 0x71, 0x32, 0x1d, 0xdd, -- 0x3c, 0x1b, 0x3e, 0x9c, 0x71, 0x3e, 0x4b, 0xd8, 0x53, 0xcd, 0xbd, 0x2c, 0xae, 0x9e, 0xb2, 0x34, -- 0x57, 0x73, 0x23, 0x3c, 0xfc, 0x64, 0x91, 0xa9, 0xe2, 0x94, 0x49, 0x45, 0xd3, 0xdc, 0x08, 0x84, -- 0xff, 0xed, 0x40, 0xff, 0x18, 0x55, 0x45, 0xec, 0xaf, 0x05, 0x93, 0x8a, 0x04, 0xb0, 0xa1, 0x55, -- 0x8f, 0x4f, 0x83, 0xd6, 0x5e, 0x6b, 0x7f, 0x2b, 0x72, 0x90, 0xec, 0xc2, 0x96, 0xfe, 0xbc, 0x98, -- 0xe7, 0x2c, 0x68, 0x6b, 0x5e, 0x45, 0x20, 0x1f, 0x03, 0xa0, 0x9f, 0xec, 0x9d, 0x3a, 0x8d, 0x45, -- 0xd0, 0xd1, 0x6c, 0x8f, 0x42, 0xf6, 0xa0, 0x77, 0x15, 0x27, 0xec, 0x04, 0x29, 0x99, 0x0a, 0xba, -- 0x5a, 0xc0, 0x27, 0x91, 0x07, 0xb0, 0xce, 0x0b, 0x95, 0x17, 0x2a, 0x58, 0xd3, 0x4c, 0x8b, 0x4a, -- 0xbb, 0x47, 0x62, 0x26, 0x83, 0xf5, 0xbd, 0x4e, 0x69, 0x17, 0x09, 0xe4, 0x3e, 0xac, 0xe5, 0x82, -- 0xbf, 0x9b, 0x07, 0x1b, 0x7b, 0xad, 0xfd, 0xcd, 0xc8, 0x00, 0xdc, 0x45, 0x1c, 0x4f, 0x51, 0x7b, -- 0xb0, 0x69, 0x76, 0x61, 0x21, 0xf9, 0x15, 0xf4, 0xf4, 0xe2, 0x73, 0x45, 0x55, 0x3c, 0x09, 0xb6, -- 0xf6, 0x5a, 0xfb, 0xbd, 0xc3, 0x87, 0xa3, 0x7a, 0x50, 0x47, 0xc7, 0x95, 0x48, 0xe4, 0xcb, 0x93, -- 0x47, 0xb0, 0x4d, 0xa7, 0xd3, 0x58, 0xc5, 0x3c, 0xa3, 0xc9, 0x05, 0x9d, 0x05, 0xa0, 0xd5, 0xd7, -- 0x89, 0x3a, 0x18, 0x34, 0x3f, 0x9a, 0x4e, 0x5f, 0xc6, 0x52, 0x05, 0x3d, 0xed, 0xb3, 0x47, 0x21, -- 0x43, 0xd8, 0x64, 0x99, 0x8a, 0xd5, 0x7c, 0x7c, 0x1a, 0xf4, 0xb5, 0x82, 0x12, 0xe3, 0x76, 0x59, -- 0x36, 0x11, 0xf3, 0x5c, 0xb1, 0x69, 0xb0, 0xad, 0x37, 0x55, 0x11, 0x30, 0x48, 0x57, 0x5c, 0xa4, -- 0x54, 0x05, 0x03, 0x13, 0x24, 0x83, 0x42, 0x0a, 0xdb, 0xe3, 0x34, 0xe7, 0x42, 0xb9, 0x3c, 0x0e, -- 0x61, 0x33, 0xd6, 0x84, 0x32, 0x91, 0x25, 0x46, 0x25, 0x92, 0x17, 
0x62, 0xe2, 0xd2, 0x68, 0x11, -- 0x9a, 0x16, 0xec, 0x8a, 0x09, 0x96, 0x4d, 0x98, 0x4d, 0x61, 0x45, 0x08, 0x43, 0x18, 0x38, 0x13, -- 0x32, 0xe7, 0x99, 0x64, 0x64, 0x07, 0x3a, 0x09, 0x9f, 0x59, 0xf5, 0xf8, 0x19, 0xbe, 0x80, 0x9e, -- 0x17, 0x3a, 0xf2, 0x73, 0x57, 0x32, 0x71, 0xca, 0xb4, 0x58, 0xef, 0x70, 0x38, 0x32, 0x25, 0x39, -- 0x72, 0x25, 0x39, 0xba, 0x70, 0x25, 0x19, 0x55, 0xc2, 0xe1, 0x8f, 0x60, 0xdb, 0x96, 0xa5, 0xb5, -- 0x85, 0x19, 0x4d, 0xe9, 0x8c, 0x55, 0x75, 0x69, 0x21, 0x8a, 0xa2, 0xb9, 0x42, 0xde, 0x59, 0xc2, -- 0xe1, 0x13, 0x18, 0x38, 0xd1, 0x4a, 0xed, 0xc4, 0x96, 0xa4, 0x95, 0xb5, 0x30, 0xfc, 0x31, 0xf4, -- 0x30, 0x57, 0x4e, 0xe9, 0x2e, 0x6c, 0x69, 0x83, 0x67, 0xd4, 0x6e, 0x65, 0x2b, 0xaa, 0x08, 0xe1, -- 0xcf, 0x00, 0x2e, 0xe8, 0xcc, 0xc9, 0xde, 0x87, 0x35, 0xcd, 0xb2, 0x72, 0x06, 0x60, 0xb4, 0x14, -- 0x9d, 0xd9, 0x90, 0xe3, 0x67, 0xf8, 0xbf, 0x16, 0xf4, 0x8d, 0x0d, 0xeb, 0xcd, 0xaf, 0x61, 0x5d, -- 0xcb, 0xca, 0xa0, 0xb5, 0xd7, 0xd9, 0xef, 0x1d, 0x7e, 0xb6, 0x58, 0x97, 0xbe, 0xf4, 0x68, 0xac, -- 0x03, 0x90, 0x5d, 0xf1, 0xc8, 0xae, 0x1a, 0xfe, 0x1d, 0xb6, 0x4a, 0x22, 0x16, 0xa1, 0x60, 0x39, -- 0x97, 0xb1, 0xe2, 0x62, 0x6e, 0x5d, 0xf1, 0x28, 0x4d, 0x7f, 0xc8, 0x00, 0xda, 0xf1, 0xd4, 0x26, -- 0xbe, 0x1d, 0x4f, 0x75, 0x70, 0x04, 0xa3, 0x58, 0x88, 0x5d, 0x1b, 0x1c, 0x03, 0x09, 0x81, 0xae, -- 0x8c, 0xff, 0xc6, 0xec, 0x49, 0xd5, 0xdf, 0xe1, 0xbf, 0x5b, 0x70, 0xef, 0x2b, 0x26, 0x64, 0xcc, -- 0x33, 0x3f, 0xbc, 0x37, 0x86, 0xe4, 0xc2, 0x6b, 0x21, 0xc6, 0x73, 0xc6, 0xad, 0xb8, 0xeb, 0x26, -- 0x25, 0x41, 0x73, 0x63, 0x75, 0xc2, 0xd3, 0x34, 0x56, 0xae, 0x12, 0x4b, 0x42, 0xd5, 0x89, 0xb0, -- 0xac, 0xba, 0x7e, 0x27, 0x8a, 0x53, 0xa6, 0xfb, 0x88, 0x3c, 0x12, 0x93, 0xeb, 0xb2, 0x8f, 0x68, -- 0x14, 0xfe, 0x11, 0xb6, 0x23, 0x96, 0xf2, 0x1b, 0xe6, 0xd5, 0x49, 0x55, 0x52, 0x1d, 0xaf, 0xa4, -- 0x30, 0x34, 0x34, 0x49, 0xb4, 0x5b, 0x9b, 0x11, 0x7e, 0x9a, 0x36, 0x53, 0x64, 0xe6, 0x58, 0xe8, -- 0x36, 0x53, 0x64, 0x98, 0xf6, 0x81, 0x53, 0x69, 0x37, 0x1c, 0x42, 0x3f, 0xa1, 0x73, 0x26, 0x5e, -- 0x31, 0x29, 0xab, 0x0a, 0xa8, 0xd1, 0xc2, 0x7f, 0xb5, 0xe0, 0x83, 0xdf, 0x32, 0x9a, 0xa8, 0xeb, -- 0x93, 0x6b, 0x36, 0x79, 0x5b, 0xae, 0x1d, 0xc3, 0xba, 0xd4, 0xd5, 0xa9, 0x57, 0x0d, 0x0e, 0x9f, -- 0x2d, 0x66, 0x7f, 0xc9, 0xa2, 0xd1, 0x39, 0xde, 0x12, 0xd9, 0xcc, 0x96, 0xb5, 0x55, 0x10, 0xfe, -- 0x02, 0xb6, 0x6b, 0x0c, 0xd2, 0x83, 0x8d, 0x2f, 0xcf, 0x7e, 0x7f, 0xf6, 0xfa, 0x4f, 0x67, 0x3b, -- 0xdf, 0x43, 0x70, 0xfe, 0x45, 0xf4, 0xd5, 0xf8, 0xec, 0xc5, 0x4e, 0x8b, 0xdc, 0x83, 0xde, 0xd9, -- 0xeb, 0x8b, 0x37, 0x8e, 0xd0, 0x0e, 0xff, 0x02, 0xfd, 0x97, 0x7c, 0x16, 0x67, 0x2e, 0x4c, 0xd8, -- 0x2d, 0x98, 0xb8, 0x61, 0xc2, 0x6e, 0xc6, 0x22, 0xec, 0x30, 0x85, 0x64, 0x22, 0xc3, 0x03, 0x61, -- 0x12, 0x58, 0x62, 0xe4, 0xe5, 0x54, 0xca, 0x6f, 0xb8, 0x70, 0xf5, 0x54, 0x62, 0x3c, 0xaf, 0x56, -- 0xff, 0x9d, 0x67, 0xf0, 0x73, 0x2d, 0xca, 0x0b, 0x75, 0x97, 0x2f, 0x8d, 0x84, 0x85, 0xfb, 0x30, -- 0x70, 0x4b, 0xad, 0x99, 0x07, 0xb0, 0x2e, 0x98, 0x2c, 0x12, 0x67, 0xc5, 0xa2, 0xf0, 0x07, 0xd0, -- 0x7b, 0xc9, 0x69, 0x79, 0x01, 0x12, 0xe8, 0xe6, 0x54, 0x5d, 0x5b, 0x21, 0xfd, 0x1d, 0xee, 0x61, -- 0x48, 0xe8, 0xf4, 0x96, 0xc6, 0xf7, 0x35, 0xf4, 0xfe, 0x50, 0xc8, 0x6b, 0xcf, 0xcf, 0xbc, 0x90, -- 0xd7, 0x65, 0x07, 0xb2, 0xa8, 0xde, 0x45, 0xda, 0x0b, 0x5d, 0xc4, 0x6b, 0xee, 0x9d, 0x5a, 0x73, -- 0x7f, 0x02, 0x7d, 0xa3, 0xdc, 0x9a, 0x1f, 0xc2, 0xa6, 0xb0, 0xdf, 0xae, 0xb7, 0x3b, 0x1c, 0x9e, -- 0xa0, 0x23, 0x49, 0x52, 0x73, 0x24, 0x49, 0x7c, 0x47, 0x10, 0xdd, 0xee, 0x88, 0x31, 0x88, 0x4a, -- 0xde, 0xc3, 0x60, 0x0c, 0xbd, 0x73, 0x5a, 0x1d, 0x2a, 
0xcc, 0x10, 0xbd, 0xa9, 0xda, 0xb4, 0x45, -- 0x48, 0xb7, 0xad, 0xad, 0xad, 0xcf, 0x9a, 0x45, 0x65, 0xb8, 0x3b, 0x55, 0xb8, 0xbd, 0x38, 0x74, -- 0x6b, 0x71, 0xd8, 0x83, 0xbe, 0x31, 0xb5, 0x32, 0x0d, 0x73, 0xd8, 0x78, 0xc5, 0xd2, 0x53, 0xaa, -- 0x28, 0xfa, 0x9c, 0xb2, 0xf4, 0x82, 0x2b, 0x9a, 0x68, 0x89, 0x4e, 0x54, 0x62, 0xac, 0xb8, 0x94, -- 0xa5, 0xcf, 0x05, 0x33, 0x7b, 0xef, 0x44, 0x0e, 0x62, 0x5c, 0xe4, 0x37, 0x34, 0x37, 0xcb, 0x3a, -- 0x9a, 0x57, 0x11, 0x50, 0x27, 0x02, 0xbd, 0xb0, 0x6b, 0x74, 0x3a, 0x1c, 0x7e, 0xdb, 0xd2, 0xb6, -- 0xf1, 0xbc, 0xe1, 0x06, 0x52, 0x96, 0x9e, 0xcf, 0xcd, 0x49, 0xee, 0x46, 0x16, 0xa1, 0xdd, 0x6b, -- 0x46, 0x73, 0x64, 0xb4, 0x35, 0xc3, 0x41, 0xb4, 0x8b, 0x9f, 0x47, 0x49, 0xc2, 0x27, 0xda, 0x6e, -- 0x37, 0xaa, 0x08, 0x8e, 0x3b, 0xce, 0xbe, 0x94, 0xc6, 0xb0, 0xe5, 0x6a, 0x02, 0x7a, 0xa5, 0xc1, -- 0x34, 0x31, 0x0d, 0xb9, 0x1b, 0x95, 0x18, 0xfb, 0x11, 0x7e, 0x47, 0x2c, 0x61, 0x54, 0xb2, 0x69, -- 0xb0, 0xae, 0xf9, 0x35, 0x5a, 0xf8, 0x06, 0x7a, 0xe7, 0x8a, 0x0b, 0x3a, 0x63, 0x3a, 0x70, 0x8f, -- 0x60, 0x5b, 0x5a, 0x28, 0xe2, 0xea, 0xa8, 0xd5, 0x89, 0xe4, 0x09, 0xec, 0x58, 0xc2, 0x31, 0x9d, -- 0xbc, 0x8d, 0xb3, 0xd9, 0x73, 0x69, 0xeb, 0xa8, 0x41, 0x0f, 0xff, 0xd1, 0x82, 0x7e, 0xc4, 0x66, -- 0xb1, 0x54, 0x62, 0xae, 0x4d, 0x3c, 0x81, 0x1d, 0x61, 0x70, 0xcc, 0xe4, 0x39, 0xa3, 0xd8, 0xac, -- 0x4d, 0x0b, 0x6e, 0xd0, 0xc9, 0x08, 0x48, 0x45, 0x1b, 0x67, 0x92, 0x4d, 0x0a, 0xc1, 0x6c, 0x11, -- 0x2d, 0xe1, 0x90, 0x7d, 0xb8, 0x57, 0x51, 0x8f, 0x13, 0x3e, 0x79, 0x1b, 0x74, 0xb4, 0xf0, 0x22, -- 0x39, 0x7c, 0x0c, 0x3d, 0x7d, 0x7b, 0x56, 0xd7, 0xc1, 0x0d, 0x13, 0x97, 0xdc, 0xd6, 0xf8, 0x66, -- 0xe4, 0x60, 0xf8, 0x9f, 0x0e, 0xf4, 0x8d, 0xa4, 0x2d, 0xbc, 0x67, 0xba, 0x7e, 0x90, 0x64, 0xa7, -- 0x9a, 0x0f, 0x17, 0x5b, 0xb5, 0xad, 0xc2, 0xc8, 0xc9, 0xe1, 0xdc, 0x69, 0xe3, 0xa2, 0x97, 0xb5, -- 0x97, 0xcf, 0x9d, 0x5e, 0x1e, 0x22, 0x5f, 0x9e, 0xfc, 0x06, 0xfa, 0xd6, 0xfd, 0xb9, 0x5e, 0xdf, -- 0xd1, 0xeb, 0x77, 0x17, 0xd7, 0xfb, 0x51, 0x8e, 0x6a, 0x2b, 0xb0, 0x4a, 0xa6, 0x48, 0xe5, 0xdc, -- 0x1d, 0xab, 0x12, 0xe3, 0xd6, 0x45, 0x91, 0x69, 0x96, 0xb9, 0x33, 0x1d, 0xc4, 0x21, 0xe2, 0xf5, -- 0xc9, 0x38, 0x2a, 0x32, 0x7c, 0x38, 0xe8, 0xea, 0xd9, 0x8a, 0x3c, 0x0a, 0xf2, 0xb5, 0x71, 0x26, -- 0xce, 0x8a, 0x54, 0xcf, 0xe0, 0x9d, 0xc8, 0xa3, 0x20, 0x7f, 0xc6, 0x23, 0x5e, 0xa8, 0x38, 0x63, -- 0x52, 0xcf, 0xe2, 0x9d, 0xc8, 0xa3, 0xd8, 0x48, 0xe2, 0xa1, 0xb1, 0xa3, 0xf8, 0xb2, 0x48, 0x22, -- 0x3b, 0x72, 0x72, 0x58, 0xd2, 0xec, 0x5d, 0xce, 0x44, 0x9c, 0xb2, 0x0c, 0x4f, 0x29, 0xe8, 0x64, -- 0xd5, 0x68, 0xe1, 0x9f, 0xe1, 0xfb, 0xaf, 0x68, 0x16, 0x5f, 0x31, 0xa9, 0x4e, 0xf4, 0xc8, 0xe2, -- 0x92, 0x1c, 0x42, 0x3f, 0xb5, 0x0c, 0x3d, 0x9b, 0xdb, 0xfb, 0xd9, 0xa7, 0xe1, 0x69, 0x73, 0xd8, -- 0x75, 0xab, 0x8a, 0x10, 0x1e, 0xc2, 0x83, 0x45, 0xd5, 0x77, 0x8e, 0xa8, 0xdf, 0xb6, 0xe0, 0x43, -- 0xb7, 0xe8, 0x28, 0xcb, 0xb8, 0xfa, 0x8e, 0x1e, 0x61, 0x2f, 0xb3, 0xd8, 0x5d, 0xb5, 0x0e, 0x63, -- 0x03, 0xd5, 0xe7, 0xc7, 0x36, 0x50, 0x7d, 0x66, 0x06, 0xd0, 0xe6, 0xd2, 0x66, 0xb9, 0xcd, 0x25, -- 0x66, 0x81, 0xcb, 0xe7, 0x8c, 0xaa, 0x42, 0x30, 0x19, 0xac, 0x99, 0xf7, 0x48, 0x45, 0xd1, 0xa5, -- 0x4f, 0x45, 0x4c, 0x33, 0x65, 0x53, 0xec, 0x60, 0xf8, 0xcb, 0x6a, 0xb7, 0xe3, 0x4c, 0xe6, 0x6c, -- 0xa2, 0xbe, 0x83, 0xdf, 0xe1, 0x4f, 0xaa, 0x6d, 0x97, 0xab, 0x6d, 0xb0, 0x08, 0x74, 0xb1, 0xfc, -- 0xf4, 0xb2, 0x7e, 0xa4, 0xbf, 0xc3, 0x57, 0xf0, 0x81, 0x13, 0xf7, 0x2f, 0xd3, 0xf7, 0x89, 0x10, -- 0xaa, 0xab, 0xa2, 0xa3, 0xbf, 0xc3, 0x11, 0xdc, 0xaf, 0xab, 0xbb, 0x7d, 0x10, 0x38, 0xfc, 0x67, -- 0x0f, 0x36, 0x4e, 0xcc, 0x5b, 0x9b, 0x9c, 
0xc2, 0x9a, 0x7e, 0x7f, 0x90, 0xdd, 0xa5, 0x4f, 0x43, -- 0xeb, 0xda, 0xf0, 0xa3, 0x15, 0xdc, 0x6a, 0xa2, 0xb3, 0xf3, 0xd7, 0x47, 0xcd, 0x93, 0xee, 0x3d, -- 0x59, 0x86, 0x1f, 0xaf, 0x62, 0x1b, 0x45, 0x07, 0x2d, 0x72, 0x04, 0x5d, 0xbd, 0xd1, 0x87, 0xcb, -- 0x9f, 0x04, 0x46, 0xcd, 0xee, 0x6d, 0xef, 0x05, 0x72, 0x0c, 0x1b, 0x6e, 0xbe, 0x7e, 0xd0, 0x78, -- 0x85, 0x7d, 0x91, 0xe6, 0x6a, 0x3e, 0xfc, 0x64, 0x51, 0xc1, 0xe2, 0x40, 0x7f, 0x02, 0x5d, 0x8c, -- 0x65, 0xd3, 0x0d, 0x2f, 0x61, 0x4d, 0x37, 0xfc, 0xf0, 0x1f, 0xb4, 0x8c, 0x92, 0x24, 0x59, 0xa6, -- 0xa4, 0x9c, 0x5c, 0x96, 0x29, 0xa9, 0x26, 0x92, 0x83, 0x16, 0xc6, 0xd6, 0xcc, 0xde, 0xcd, 0xd8, -- 0xd6, 0xc6, 0xfc, 0x66, 0x6c, 0xeb, 0x23, 0xfb, 0x41, 0x8b, 0xfc, 0x0e, 0x7a, 0xde, 0x68, 0xbd, -- 0x32, 0x38, 0x9f, 0xbe, 0xc7, 0x3c, 0x8e, 0x85, 0xa3, 0xa7, 0xdb, 0x66, 0xe1, 0xf8, 0x43, 0x75, -- 0xb3, 0x70, 0xea, 0x23, 0xf1, 0x0b, 0x58, 0x37, 0xd3, 0x2b, 0x59, 0x26, 0x58, 0x0d, 0xc4, 0xcd, -- 0xcd, 0x2d, 0x0c, 0xbd, 0x27, 0xd0, 0xc5, 0xc9, 0x75, 0x49, 0xd9, 0x54, 0x23, 0xef, 0x92, 0xb2, -- 0xf1, 0x86, 0x5d, 0x13, 0x6a, 0xf3, 0xf2, 0x6f, 0x7a, 0x53, 0xfb, 0xe9, 0xd0, 0xf4, 0xa6, 0xfe, -- 0xc3, 0xe0, 0xa0, 0x45, 0x3e, 0x87, 0xce, 0x05, 0x9d, 0x91, 0xe1, 0xa2, 0x60, 0xf5, 0x7a, 0x1e, -- 0xae, 0x08, 0x3f, 0x6e, 0x05, 0xa7, 0xbf, 0xe6, 0x56, 0xbc, 0xf1, 0xb3, 0xb9, 0x15, 0x7f, 0x60, -- 0x34, 0xc7, 0x48, 0xdf, 0x86, 0x0d, 0x25, 0xde, 0x24, 0xd0, 0x54, 0x52, 0xbb, 0xfc, 0xdf, 0xc0, -- 0xa0, 0x7e, 0x01, 0x90, 0x1f, 0x36, 0xee, 0xac, 0x65, 0x77, 0xcf, 0xf0, 0xb3, 0xbb, 0xc4, 0xac, -- 0x81, 0x73, 0xd8, 0x59, 0xbc, 0x2c, 0xc8, 0xe3, 0x55, 0x6b, 0x17, 0xae, 0x93, 0x95, 0xd1, 0xbb, -- 0x84, 0x7b, 0x0b, 0xad, 0x98, 0xac, 0xf4, 0xa7, 0xde, 0xe9, 0x87, 0x8f, 0xef, 0x94, 0xb3, 0x8e, -- 0x7f, 0x0d, 0x7d, 0xbf, 0xe1, 0x92, 0x4f, 0x57, 0x2d, 0xf4, 0x9b, 0xc5, 0xa3, 0xdb, 0x85, 0x5c, -- 0xe6, 0x2e, 0xd7, 0xf5, 0x86, 0x7e, 0xfa, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x16, 0xc7, 0x54, -- 0xaa, 0x1c, 0x15, 0x00, 0x00, -+ // 2001 bytes of a gzipped FileDescriptorProto -+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xdd, 0x73, 0x1c, 0x47, -+ 0x11, 0x67, 0xef, 0x4e, 0x5f, 0x7d, 0xa7, 0xb3, 0x6a, 0x63, 0x9c, 0xab, 0xb3, 0x92, 0xa8, 0x36, -+ 0x26, 0x16, 0xa6, 0x38, 0xc9, 0x82, 0x07, 0x42, 0x01, 0x85, 0x3e, 0x62, 0x73, 0x60, 0xcb, 0xb0, -+ 0x52, 0x42, 0x51, 0xa9, 0xc2, 0x35, 0xba, 0x1b, 0x9d, 0x16, 0xef, 0xee, 0x2c, 0x33, 0xb3, 0x8a, -+ 0x8f, 0xfc, 0x15, 0x54, 0xf1, 0xc4, 0xc7, 0xdf, 0xc0, 0x63, 0xfe, 0x02, 0x9e, 0xf8, 0xa7, 0x52, -+ 0xdd, 0x33, 0xb3, 0x1f, 0xb7, 0x27, 0xc9, 0x79, 0xdb, 0xfe, 0x98, 0x9e, 0x9e, 0xee, 0xdf, 0x74, -+ 0x4f, 0x2f, 0x0c, 0x59, 0x16, 0xed, 0x29, 0x2e, 0xaf, 0xa3, 0x09, 0x57, 0x7b, 0x13, 0x91, 0x6a, -+ 0x29, 0xe2, 0x51, 0x26, 0x85, 0x16, 0x7e, 0x3f, 0x52, 0x79, 0xcc, 0x46, 0x17, 0x79, 0x14, 0x4f, -+ 0x47, 0xd7, 0x4f, 0x87, 0x0f, 0x67, 0x42, 0xcc, 0x62, 0xbe, 0x47, 0xd2, 0x8b, 0xfc, 0x72, 0x8f, -+ 0x27, 0x99, 0x9e, 0x1b, 0xe5, 0xe1, 0x47, 0x8b, 0x42, 0x1d, 0x25, 0x5c, 0x69, 0x96, 0x64, 0x46, -+ 0x21, 0xf8, 0x5f, 0x1b, 0x7a, 0x47, 0x68, 0x2a, 0xe4, 0x7f, 0xcd, 0xb9, 0xd2, 0xfe, 0x00, 0xd6, -+ 0xc8, 0xf4, 0xf8, 0x64, 0xe0, 0xed, 0x78, 0xbb, 0x1b, 0xa1, 0x23, 0xfd, 0x6d, 0xd8, 0xa0, 0xcf, -+ 0xf3, 0x79, 0xc6, 0x07, 0x2d, 0x92, 0x95, 0x0c, 0xff, 0x43, 0x00, 0xf4, 0x93, 0xbf, 0xd5, 0x27, -+ 0x91, 0x1c, 0xb4, 0x49, 0x5c, 0xe1, 0xf8, 0x3b, 0xd0, 0xbd, 0x8c, 0x62, 0x7e, 0x8c, 0x9c, 0x54, -+ 0x0f, 0x3a, 0xa4, 0x50, 0x65, 0xf9, 0x0f, 0x60, 0x55, 0xe4, 0x3a, 0xcb, 0xf5, 0x60, 0x85, 0x84, -+ 0x96, 0x2a, 0xf6, 0x3d, 0x94, 0x33, 0x35, 0x58, 
0xdd, 0x69, 0x17, 0xfb, 0x22, 0xc3, 0xbf, 0x0f, -+ 0x2b, 0x99, 0x14, 0x6f, 0xe7, 0x83, 0xb5, 0x1d, 0x6f, 0x77, 0x3d, 0x34, 0x04, 0x9e, 0x22, 0x8a, -+ 0xa6, 0x68, 0x7d, 0xb0, 0x6e, 0x4e, 0x61, 0x49, 0xff, 0x97, 0xd0, 0xa5, 0xc5, 0x67, 0x9a, 0xe9, -+ 0x68, 0x32, 0xd8, 0xd8, 0xf1, 0x76, 0xbb, 0x07, 0x0f, 0x47, 0xf5, 0xa0, 0x8e, 0x8e, 0x4a, 0x95, -+ 0xb0, 0xaa, 0xef, 0x3f, 0x82, 0x4d, 0x36, 0x9d, 0x46, 0x3a, 0x12, 0x29, 0x8b, 0xcf, 0xd9, 0x6c, -+ 0x00, 0x64, 0xbe, 0xce, 0xa4, 0x60, 0xb0, 0xec, 0x70, 0x3a, 0x7d, 0x11, 0x29, 0x3d, 0xe8, 0x92, -+ 0xcf, 0x15, 0x8e, 0x3f, 0x84, 0x75, 0x9e, 0xea, 0x48, 0xcf, 0xc7, 0x27, 0x83, 0x1e, 0x19, 0x28, -+ 0x68, 0x3c, 0x2e, 0x4f, 0x27, 0x72, 0x9e, 0x69, 0x3e, 0x1d, 0x6c, 0xd2, 0xa1, 0x4a, 0x06, 0x06, -+ 0xe9, 0x52, 0xc8, 0x84, 0xe9, 0x41, 0xdf, 0x04, 0xc9, 0x50, 0x01, 0x83, 0xcd, 0x71, 0x92, 0x09, -+ 0xa9, 0x5d, 0x1e, 0x87, 0xb0, 0x1e, 0x11, 0xa3, 0x48, 0x64, 0x41, 0xa3, 0x11, 0x25, 0x72, 0x39, -+ 0x71, 0x69, 0xb4, 0x14, 0x6e, 0x2d, 0xf9, 0x25, 0x97, 0x3c, 0x9d, 0x70, 0x9b, 0xc2, 0x92, 0x11, -+ 0x04, 0xd0, 0x77, 0x5b, 0xa8, 0x4c, 0xa4, 0x8a, 0xfb, 0x5b, 0xd0, 0x8e, 0xc5, 0xcc, 0x9a, 0xc7, -+ 0xcf, 0xe0, 0x39, 0x74, 0x2b, 0xa1, 0xf3, 0x7f, 0xe6, 0x20, 0x13, 0x25, 0x9c, 0xd4, 0xba, 0x07, -+ 0xc3, 0x91, 0x81, 0xe4, 0xc8, 0x41, 0x72, 0x74, 0xee, 0x20, 0x19, 0x96, 0xca, 0xc1, 0x0f, 0x61, -+ 0xd3, 0xc2, 0xd2, 0xee, 0x85, 0x19, 0x4d, 0xd8, 0x8c, 0x97, 0xb8, 0xb4, 0x24, 0xaa, 0xe2, 0x76, -+ 0xb9, 0xba, 0x13, 0xc2, 0xc1, 0x13, 0xe8, 0x3b, 0xd5, 0xd2, 0xec, 0xc4, 0x42, 0xd2, 0xea, 0x5a, -+ 0x32, 0xf8, 0x11, 0x74, 0x31, 0x57, 0xce, 0xe8, 0x36, 0x6c, 0xd0, 0x86, 0xa7, 0xcc, 0x1e, 0x65, -+ 0x23, 0x2c, 0x19, 0xc1, 0x4f, 0x01, 0xce, 0xd9, 0xcc, 0xe9, 0xde, 0x87, 0x15, 0x12, 0x59, 0x3d, -+ 0x43, 0x60, 0xb4, 0x34, 0x9b, 0xd9, 0x90, 0xe3, 0x67, 0xf0, 0x7f, 0x0f, 0x7a, 0x66, 0x0f, 0xeb, -+ 0xcd, 0xaf, 0x60, 0x95, 0x74, 0xd5, 0xc0, 0xdb, 0x69, 0xef, 0x76, 0x0f, 0x3e, 0x59, 0xc4, 0x65, -+ 0x55, 0x7b, 0x34, 0xa6, 0x00, 0xa4, 0x97, 0x22, 0xb4, 0xab, 0x86, 0x5f, 0xc3, 0x46, 0xc1, 0x44, -+ 0x10, 0x4a, 0x9e, 0x09, 0x15, 0x69, 0x21, 0xe7, 0xd6, 0x95, 0x0a, 0xa7, 0xe9, 0x8f, 0xdf, 0x87, -+ 0x56, 0x34, 0xb5, 0x89, 0x6f, 0x45, 0x53, 0x0a, 0x8e, 0xe4, 0x0c, 0x81, 0xd8, 0xb1, 0xc1, 0x31, -+ 0xa4, 0xef, 0x43, 0x47, 0x45, 0x7f, 0xe3, 0xf6, 0xa6, 0xd2, 0x77, 0xf0, 0x1f, 0x0f, 0xee, 0x7d, -+ 0xc1, 0xa5, 0x8a, 0x44, 0x5a, 0x0d, 0xef, 0xb5, 0x61, 0xb9, 0xf0, 0x5a, 0x12, 0xe3, 0x39, 0x13, -+ 0x56, 0xdd, 0x55, 0x93, 0x82, 0x41, 0xd2, 0x48, 0x1f, 0x8b, 0x24, 0x89, 0xb4, 0x43, 0x62, 0xc1, -+ 0x28, 0x2b, 0x11, 0xc2, 0xaa, 0x53, 0xad, 0x44, 0x51, 0xc2, 0xa9, 0x8e, 0xa8, 0x43, 0x39, 0xb9, -+ 0x2a, 0xea, 0x08, 0x51, 0xc1, 0x1f, 0x60, 0x33, 0xe4, 0x89, 0xb8, 0xe6, 0x15, 0x9c, 0x94, 0x90, -+ 0x6a, 0x57, 0x20, 0x85, 0xa1, 0x61, 0x71, 0x4c, 0x6e, 0xad, 0x87, 0xf8, 0x69, 0xca, 0x4c, 0x9e, -+ 0x9a, 0x6b, 0x41, 0x65, 0x26, 0x4f, 0x31, 0xed, 0x7d, 0x67, 0xd2, 0x1e, 0x38, 0x80, 0x5e, 0xcc, -+ 0xe6, 0x5c, 0xbe, 0xe4, 0x4a, 0x95, 0x08, 0xa8, 0xf1, 0x82, 0x7f, 0x7b, 0xf0, 0xde, 0x6f, 0x38, -+ 0x8b, 0xf5, 0xd5, 0xf1, 0x15, 0x9f, 0xbc, 0x29, 0xd6, 0x8e, 0x61, 0x55, 0x11, 0x3a, 0x69, 0x55, -+ 0xff, 0xe0, 0xe9, 0x62, 0xf6, 0x97, 0x2c, 0x1a, 0x9d, 0x61, 0x97, 0x48, 0x67, 0x16, 0xd6, 0xd6, -+ 0x40, 0xf0, 0x73, 0xd8, 0xac, 0x09, 0xfc, 0x2e, 0xac, 0x7d, 0x7e, 0xfa, 0xbb, 0xd3, 0x57, 0x7f, -+ 0x3c, 0xdd, 0xfa, 0x1e, 0x12, 0x67, 0x9f, 0x85, 0x5f, 0x8c, 0x4f, 0x9f, 0x6f, 0x79, 0xfe, 0x3d, -+ 0xe8, 0x9e, 0xbe, 0x3a, 0x7f, 0xed, 0x18, 0xad, 0xe0, 0xcf, 0xd0, 0x7b, 0x21, 0x66, 0x51, 0xea, -+ 0xc2, 0x84, 0xd5, 0x82, 0xcb, 0x6b, 
0x2e, 0xed, 0x61, 0x2c, 0x85, 0x15, 0x26, 0x57, 0x5c, 0xa6, -+ 0x78, 0x21, 0x4c, 0x02, 0x0b, 0x1a, 0x65, 0x19, 0x53, 0xea, 0x2b, 0x21, 0x1d, 0x9e, 0x0a, 0x1a, -+ 0xef, 0xab, 0xb5, 0x7f, 0xe7, 0x1d, 0xfc, 0x94, 0x54, 0x45, 0xae, 0xef, 0xf2, 0xa5, 0x91, 0xb0, -+ 0x60, 0x17, 0xfa, 0x6e, 0xa9, 0xdd, 0xe6, 0x01, 0xac, 0x4a, 0xae, 0xf2, 0xd8, 0xed, 0x62, 0xa9, -+ 0xe0, 0x2f, 0xd0, 0x7d, 0x21, 0x58, 0xd1, 0x00, 0x7d, 0xe8, 0x64, 0x4c, 0x5f, 0x59, 0x25, 0xfa, -+ 0xc6, 0xa5, 0xb1, 0x60, 0x58, 0x50, 0x6c, 0xc1, 0x34, 0x94, 0xbf, 0x07, 0x6d, 0xc5, 0x33, 0x3a, -+ 0x61, 0xf7, 0xe0, 0x83, 0xc5, 0x74, 0x9d, 0xf1, 0x8c, 0x49, 0xa6, 0x85, 0x24, 0xf3, 0xa8, 0x19, -+ 0xfc, 0xdd, 0xc3, 0xc4, 0x54, 0xd8, 0xe4, 0x79, 0x96, 0xb9, 0x1a, 0xca, 0xb2, 0x0c, 0x39, 0xd3, -+ 0x48, 0xba, 0x7b, 0x39, 0x8d, 0x24, 0xba, 0x74, 0xc1, 0x94, 0x2b, 0xc9, 0xf4, 0x4d, 0xb5, 0x37, -+ 0xba, 0xb0, 0xe8, 0xc7, 0x4f, 0xbc, 0x15, 0xea, 0x4d, 0x94, 0x11, 0x3c, 0x08, 0xfa, 0xeb, 0x61, -+ 0xc9, 0xc0, 0x20, 0xf3, 0x94, 0x5d, 0xc4, 0x7c, 0x3a, 0x58, 0x25, 0x99, 0x23, 0x83, 0x1d, 0xcc, -+ 0x37, 0x9b, 0xde, 0x52, 0xd5, 0xbf, 0x84, 0xee, 0xef, 0x73, 0x75, 0x55, 0x49, 0x42, 0x96, 0xab, -+ 0xab, 0xa2, 0xbc, 0x5a, 0xaa, 0x5e, 0x22, 0x5b, 0x0b, 0x25, 0xb2, 0xd2, 0xb9, 0xda, 0xb5, 0xce, -+ 0xf5, 0x04, 0x7a, 0xc6, 0xb8, 0xdd, 0x7e, 0x08, 0xeb, 0xd2, 0x7e, 0xbb, 0xc6, 0xe5, 0xe8, 0xe0, -+ 0x18, 0x1d, 0x89, 0xe3, 0x9a, 0x23, 0x71, 0x5c, 0x75, 0x04, 0xa9, 0xdb, 0x1d, 0x31, 0x1b, 0xa2, -+ 0x91, 0x77, 0xd8, 0xf0, 0x9f, 0x1e, 0x74, 0xcf, 0x58, 0x59, 0x32, 0x10, 0x7f, 0xec, 0xba, 0x6c, -+ 0x42, 0x96, 0x42, 0xbe, 0x2d, 0xdc, 0x2d, 0xaa, 0x24, 0x96, 0x2a, 0xc0, 0xd4, 0xae, 0x83, 0xc9, -+ 0x06, 0xa2, 0x53, 0x0d, 0x84, 0x03, 0xd3, 0xca, 0x1d, 0x60, 0x22, 0x77, 0x08, 0x4c, 0x5f, 0x57, -+ 0xb0, 0x84, 0xdc, 0x02, 0x27, 0x5e, 0x13, 0x27, 0xad, 0x12, 0x27, 0x74, 0x0f, 0xe8, 0xd6, 0xb6, -+ 0xdd, 0x3d, 0xa0, 0x3b, 0xeb, 0x43, 0x67, 0xca, 0x95, 0xf3, 0x8a, 0xbe, 0xab, 0xa8, 0x59, 0x69, -+ 0xa0, 0xc6, 0x04, 0xe6, 0x46, 0xd4, 0xcc, 0x61, 0xed, 0x25, 0x4f, 0x4e, 0x98, 0x66, 0x18, 0xe2, -+ 0x84, 0x27, 0xe7, 0x42, 0xb3, 0x98, 0x34, 0xda, 0x61, 0x41, 0xe3, 0x16, 0x09, 0x4f, 0x9e, 0x49, -+ 0x6e, 0x52, 0xd5, 0x0e, 0x1d, 0x49, 0x80, 0xfe, 0x8a, 0x65, 0x66, 0x59, 0x9b, 0x64, 0x25, 0x03, -+ 0x6d, 0x22, 0x41, 0x0b, 0x3b, 0xc6, 0xa6, 0xa3, 0x83, 0x6f, 0x3c, 0xda, 0x1b, 0x6b, 0x1f, 0x1e, -+ 0x37, 0xe1, 0xc9, 0xd9, 0xdc, 0x54, 0xd5, 0x4e, 0x68, 0x29, 0xdc, 0xf7, 0x8a, 0xb3, 0x0c, 0x05, -+ 0x2d, 0x12, 0x38, 0x12, 0xf7, 0xc5, 0xcf, 0xc3, 0x38, 0x16, 0x13, 0xda, 0xb7, 0x13, 0x96, 0x0c, -+ 0x27, 0x1d, 0xa7, 0x9f, 0x2b, 0xb3, 0xb1, 0x95, 0x12, 0x03, 0xbd, 0x22, 0x62, 0x1a, 0x9b, 0xe6, -+ 0xd8, 0x09, 0x0b, 0x1a, 0x7b, 0x03, 0x7e, 0x87, 0x3c, 0xe6, 0x4c, 0xd9, 0x7b, 0xd8, 0x09, 0x6b, -+ 0xbc, 0xe0, 0x35, 0x74, 0xcf, 0xb4, 0x90, 0x6c, 0xc6, 0x29, 0x70, 0x8f, 0x60, 0x53, 0x59, 0x52, -+ 0x46, 0x65, 0xd9, 0xab, 0x33, 0xfd, 0x27, 0xb0, 0x65, 0x19, 0x47, 0x6c, 0xf2, 0x26, 0x4a, 0x67, -+ 0xcf, 0x94, 0x4d, 0x78, 0x83, 0x1f, 0xfc, 0xc3, 0x83, 0x5e, 0xc8, 0x67, 0x91, 0xd2, 0x72, 0x4e, -+ 0x5b, 0x3c, 0x81, 0x2d, 0x69, 0xe8, 0x88, 0xab, 0x33, 0xce, 0xb0, 0x71, 0x9a, 0x76, 0xd8, 0xe0, -+ 0xfb, 0x23, 0xf0, 0x4b, 0xde, 0x38, 0x55, 0x7c, 0x92, 0x4b, 0x6e, 0x21, 0xbf, 0x44, 0xe2, 0xef, -+ 0xc2, 0xbd, 0x92, 0x7b, 0x14, 0x8b, 0xc9, 0x9b, 0x41, 0x9b, 0x94, 0x17, 0xd9, 0xc1, 0x63, 0xe8, -+ 0xd2, 0x4b, 0xa6, 0x6c, 0xcd, 0xd7, 0x5c, 0x5e, 0x08, 0x0b, 0xe6, 0xf5, 0xd0, 0x91, 0xc1, 0x7f, -+ 0xdb, 0xd0, 0x33, 0x9a, 0x16, 0x78, 0x4f, 0x09, 0x3f, 0xc8, 0xb2, 0x2f, 0xcc, 0xf7, 0x17, 0xaf, -+ 0x8e, 0x45, 0x61, 0xe8, 
0xf4, 0x70, 0x06, 0xb0, 0x71, 0xa1, 0x65, 0xad, 0xe5, 0x33, 0x40, 0x25, -+ 0x0f, 0x61, 0x55, 0xdf, 0xff, 0x35, 0xf4, 0xac, 0xfb, 0x73, 0x5a, 0x6f, 0xca, 0xff, 0xf6, 0xe2, -+ 0xfa, 0x6a, 0x94, 0xc3, 0xda, 0x0a, 0x44, 0xc9, 0x14, 0xb9, 0x42, 0xb8, 0xeb, 0x56, 0xd0, 0x78, -+ 0x74, 0x99, 0xa7, 0x24, 0x32, 0xef, 0x17, 0x47, 0xe2, 0x83, 0xee, 0xd5, 0xf1, 0x38, 0xcc, 0x53, -+ 0x1c, 0xe2, 0x08, 0x3d, 0x1b, 0x61, 0x85, 0x83, 0x72, 0xda, 0x9c, 0xcb, 0xd3, 0x3c, 0xa1, 0x79, -+ 0xa8, 0x1d, 0x56, 0x38, 0x28, 0x9f, 0x89, 0x50, 0xe4, 0x3a, 0x4a, 0xb9, 0xa2, 0xb9, 0xa8, 0x1d, -+ 0x56, 0x38, 0x36, 0x92, 0x78, 0x69, 0xec, 0x58, 0xb4, 0x2c, 0x92, 0x28, 0x0e, 0x9d, 0x1e, 0x42, -+ 0x9a, 0xbf, 0xcd, 0xb8, 0x8c, 0x12, 0x9e, 0xe2, 0x2d, 0x05, 0x4a, 0x56, 0x8d, 0x17, 0xfc, 0x09, -+ 0xbe, 0xff, 0x92, 0xa5, 0xd1, 0x25, 0x57, 0xfa, 0x98, 0x9e, 0x8f, 0x2e, 0xc9, 0x01, 0xf4, 0x12, -+ 0x2b, 0xa0, 0x39, 0xc9, 0xbe, 0x95, 0xaa, 0x3c, 0xbc, 0x6d, 0x8e, 0x76, 0xb5, 0xb5, 0x64, 0x04, -+ 0x07, 0xf0, 0x60, 0xd1, 0xf4, 0x9d, 0xe3, 0xc2, 0x37, 0x1e, 0xbc, 0xef, 0x16, 0x1d, 0xa6, 0xa9, -+ 0xd0, 0xdf, 0xd1, 0x23, 0xac, 0x65, 0x96, 0x76, 0xcf, 0x1e, 0x47, 0x63, 0x09, 0xa5, 0xfb, 0x63, -+ 0xcb, 0x3d, 0xdd, 0x99, 0x3e, 0xb4, 0x84, 0xb2, 0x59, 0x6e, 0x09, 0x85, 0x59, 0x10, 0xea, 0x19, -+ 0x67, 0x3a, 0x97, 0x5c, 0x0d, 0x56, 0xcc, 0x6c, 0x58, 0x72, 0x08, 0xfa, 0x4c, 0x46, 0x2c, 0xd5, -+ 0x36, 0xc5, 0x8e, 0x0c, 0x7e, 0x51, 0x9e, 0x76, 0x9c, 0xaa, 0x8c, 0x4f, 0xf4, 0x77, 0xf0, 0x3b, -+ 0xf8, 0x71, 0x79, 0xec, 0x62, 0xb5, 0x0d, 0x16, 0x56, 0x7e, 0xa6, 0x19, 0x2d, 0xeb, 0x85, 0xf4, -+ 0x1d, 0xbc, 0x84, 0xf7, 0x9c, 0x7a, 0xb5, 0xf7, 0xbf, 0x4b, 0x84, 0x5c, 0x23, 0x69, 0x95, 0x8d, -+ 0x24, 0x18, 0xc1, 0xfd, 0xba, 0xb9, 0xdb, 0x1f, 0x65, 0x07, 0xff, 0xea, 0xc2, 0xda, 0xb1, 0xf9, -+ 0xef, 0xe1, 0x9f, 0xc0, 0x0a, 0xcd, 0x82, 0xfe, 0xf6, 0xd2, 0x31, 0xdd, 0xba, 0x36, 0xfc, 0xe0, -+ 0x06, 0x69, 0xf9, 0xba, 0xb6, 0x6f, 0xe1, 0x66, 0x6f, 0xad, 0x8e, 0x8f, 0xc3, 0x0f, 0x6f, 0x12, -+ 0x1b, 0x43, 0xfb, 0x9e, 0x7f, 0x08, 0x1d, 0x3a, 0xe8, 0xc3, 0xe5, 0xe3, 0x99, 0x31, 0xb3, 0x7d, -+ 0xdb, 0xec, 0xe6, 0x1f, 0xc1, 0x9a, 0x9b, 0x75, 0x1e, 0x34, 0x26, 0xe2, 0xcf, 0x92, 0x4c, 0xcf, -+ 0x87, 0x1f, 0x2d, 0x1a, 0x58, 0x1c, 0xae, 0x8e, 0xa1, 0x83, 0xb1, 0x6c, 0xba, 0x51, 0x49, 0x58, -+ 0xd3, 0x8d, 0x6a, 0xf8, 0xf7, 0x3d, 0x63, 0x24, 0x8e, 0x97, 0x19, 0x29, 0x1e, 0x5a, 0xcb, 0x8c, -+ 0x94, 0x0f, 0xa8, 0x7d, 0x0f, 0x63, 0x6b, 0xe6, 0xa0, 0x66, 0x6c, 0x6b, 0x23, 0x57, 0x33, 0xb6, -+ 0xf5, 0xf1, 0x69, 0xdf, 0xf3, 0x7f, 0x0b, 0xdd, 0xca, 0x98, 0x73, 0x63, 0x70, 0x3e, 0x7e, 0x87, -+ 0xd9, 0x08, 0x81, 0x43, 0x93, 0x46, 0x13, 0x38, 0xd5, 0x01, 0xa7, 0x09, 0x9c, 0xfa, 0x78, 0xf2, -+ 0x1c, 0x56, 0xcd, 0x24, 0xe1, 0x2f, 0x53, 0x2c, 0x87, 0x93, 0xe6, 0xe1, 0x16, 0x06, 0x90, 0x63, -+ 0xe8, 0xd0, 0x93, 0xbf, 0x09, 0x9b, 0x72, 0xfc, 0x58, 0x02, 0x9b, 0xca, 0xdb, 0xdc, 0x84, 0xda, -+ 0xfc, 0x85, 0x69, 0x7a, 0x53, 0xfb, 0x01, 0xd4, 0xf4, 0xa6, 0xfe, 0xf3, 0x66, 0xdf, 0xf3, 0x3f, -+ 0x85, 0xf6, 0x39, 0x9b, 0xf9, 0xc3, 0x45, 0xc5, 0xf2, 0x4f, 0xc6, 0xf0, 0x86, 0xf0, 0xe3, 0x51, -+ 0xe8, 0xc5, 0xd9, 0x6c, 0x9a, 0xe5, 0x63, 0xb9, 0x79, 0x94, 0xea, 0x83, 0xd1, 0x5c, 0x23, 0xea, -+ 0x86, 0x0d, 0x23, 0x95, 0x97, 0x40, 0xd3, 0x48, 0xad, 0xf9, 0xbf, 0x86, 0x7e, 0xbd, 0x01, 0xf8, -+ 0x3f, 0x68, 0xf4, 0xac, 0x65, 0xbd, 0x67, 0xf8, 0xc9, 0x5d, 0x6a, 0x76, 0x83, 0x33, 0xd8, 0x5a, -+ 0x6c, 0x16, 0xfe, 0xe3, 0x9b, 0xd6, 0x2e, 0xb4, 0x93, 0x1b, 0xa3, 0x77, 0x01, 0xf7, 0x16, 0x4a, -+ 0xb1, 0x7f, 0xa3, 0x3f, 0xf5, 0x4a, 0x3f, 0x7c, 0x7c, 0xa7, 0x9e, 0x75, 0xfc, 0x4b, 0xe8, 0x55, -+ 0x0b, 0xae, 
0xff, 0xf1, 0x4d, 0x0b, 0xab, 0xc5, 0xe2, 0xd1, 0xed, 0x4a, 0x2e, 0x73, 0x17, 0xab, -+ 0x74, 0xa0, 0x9f, 0x7c, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x24, 0x62, 0xe9, 0xd4, 0xa8, 0x16, 0x00, -+ 0x00, - } - - // Reference imports to suppress errors if they are not otherwise used. -diff --git a/api/services/control.proto b/api/services/control.proto -index e67b8e93..5eb2b576 100644 ---- a/api/services/control.proto -+++ b/api/services/control.proto -@@ -215,6 +215,27 @@ message LogoutResponse { - message LoadRequest { - // path is the path of loading file - string path = 1; -+ // loadID is the unique ID for each time load -+ // also is the part of construct temporary path to -+ // store transport file -+ string loadID = 2; -+ // SeparatorLoad is the info to load separated image -+ SeparatorLoad sep = 3; -+} -+ -+message SeparatorLoad { -+ // app is application image name -+ string app = 1; -+ // dir is image tarballs directory -+ string dir = 2; -+ // base is base image tarball path -+ string base = 3; -+ // lib is library image tarball path -+ string lib = 4; -+ // skipCheck is flag to skip sha256 check sum for images -+ bool skipCheck = 5; -+ // enabled is flag to indicate the separator function enabled or not -+ bool enabled = 6; - } - - message LoadResponse { -@@ -259,6 +280,21 @@ message SaveRequest { - string path = 3; - // format is the format of image saved to archive file, such as docker-archive, oci-archive - string format = 4; -+ // SeparatorSave is the info to save separated image -+ SeparatorSave sep = 5; -+} -+ -+message SeparatorSave { -+ // base is base image name -+ string base = 1; -+ // lib is library image name -+ string lib = 2; -+ // rename is rename json file -+ string rename = 3; -+ // dest is destination file directory -+ string dest = 4; -+ // enabled is flag to indicate the separator function enabled or not -+ bool enabled = 5; - } - - message SaveResponse { --- -2.27.0 - diff --git a/patch/0073-cli-finish-client-save-separated-image.patch b/patch/0073-cli-finish-client-save-separated-image.patch deleted file mode 100644 index 41e89af..0000000 --- a/patch/0073-cli-finish-client-save-separated-image.patch +++ /dev/null @@ -1,248 +0,0 @@ -From 8bb2cb6f3904f13d0010cc207e9b00bafe043805 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Tue, 26 Oct 2021 14:19:10 +0800 -Subject: [PATCH 02/16] cli:finish client save separated image - -reason: support isula-build client side process info for save separated image -ABI change:(client) -- --dest: destination file direcotry to store seprated image -- --base: base image name of separated images -- --lib: lib image name of separated images -- --rename: rename json file path of separated images - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - cmd/cli/save.go | 121 ++++++++++++++++++++++++++++++++++++++---------- - util/common.go | 24 ++++++++++ - 2 files changed, 121 insertions(+), 24 deletions(-) - -diff --git a/cmd/cli/save.go b/cmd/cli/save.go -index cb78ecfb..4d22798a 100644 ---- a/cmd/cli/save.go -+++ b/cmd/cli/save.go -@@ -29,8 +29,17 @@ import ( - "isula.org/isula-build/util" - ) - -+type separatorSaveOption struct { -+ baseImgName string -+ libImageName string -+ renameFile string -+ destPath string -+ enabled bool -+} -+ - type saveOptions struct { - images []string -+ sep separatorSaveOption - path string - saveID string - format string -@@ -41,7 +50,9 @@ var saveOpts saveOptions - const ( - saveExample = `isula-build ctr-img save busybox:latest -o busybox.tar - isula-build ctr-img save 21c3e96ac411 -o 
myimage.tar --isula-build ctr-img save busybox:latest alpine:3.9 -o all.tar` -+isula-build ctr-img save busybox:latest alpine:3.9 -o all.tar -+isula-build ctr-img save app:latest app1:latest -d Images -+isula-build ctr-img save app:latest app1:latest -d Images -b busybox:latest -l lib:latest -r rename.json` - ) - - // NewSaveCmd cmd for container image saving -@@ -54,6 +65,10 @@ func NewSaveCmd() *cobra.Command { - } - - saveCmd.PersistentFlags().StringVarP(&saveOpts.path, "output", "o", "", "Path to save the tarball") -+ saveCmd.PersistentFlags().StringVarP(&saveOpts.sep.destPath, "dest", "d", "Images", "Destination file directory to store separated images") -+ saveCmd.PersistentFlags().StringVarP(&saveOpts.sep.baseImgName, "base", "b", "", "Base image name of separated images") -+ saveCmd.PersistentFlags().StringVarP(&saveOpts.sep.libImageName, "lib", "l", "", "Lib image name of separated images") -+ saveCmd.PersistentFlags().StringVarP(&saveOpts.sep.renameFile, "rename", "r", "", "Rename json file path of separated images") - if util.CheckCliExperimentalEnabled() { - saveCmd.PersistentFlags().StringVarP(&saveOpts.format, "format", "f", "oci", "Format of image saving to local tarball") - } else { -@@ -67,16 +82,7 @@ func saveCommand(cmd *cobra.Command, args []string) error { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - -- if len(args) == 0 { -- return errors.New("save accepts at least one image") -- } -- if saveOpts.format == constant.OCITransport && len(args) >= 2 { -- return errors.New("oci image format now only supports saving single image") -- } -- if err := util.CheckImageFormat(saveOpts.format); err != nil { -- return err -- } -- if err := checkSavePath(); err != nil { -+ if err := saveOpts.checkSaveOpts(args); err != nil { - return err - } - -@@ -88,25 +94,79 @@ func saveCommand(cmd *cobra.Command, args []string) error { - return runSave(ctx, cli, args) - } - --func checkSavePath() error { -- if len(saveOpts.path) == 0 { -- return errors.New("output path should not be empty") -+func (sep *separatorSaveOption) check(pwd string) error { -+ if len(sep.baseImgName) != 0 { -+ if !util.IsValidImageName(sep.baseImgName) { -+ return errors.Errorf("invalid base image name %s", sep.baseImgName) -+ } - } -+ if len(sep.libImageName) != 0 { -+ if !util.IsValidImageName(sep.libImageName) { -+ return errors.Errorf("invalid lib image name %s", sep.libImageName) -+ } -+ } -+ if len(sep.destPath) == 0 { -+ sep.destPath = "Images" -+ } -+ if !filepath.IsAbs(sep.destPath) { -+ sep.destPath = util.MakeAbsolute(sep.destPath, pwd) -+ } -+ if util.IsExist(sep.destPath) { -+ return errors.Errorf("output file already exist: %q, try to remove existing tarball or rename output file", sep.destPath) -+ } -+ if len(sep.renameFile) != 0 { -+ if !filepath.IsAbs(sep.renameFile) { -+ sep.renameFile = util.MakeAbsolute(sep.renameFile, pwd) -+ } -+ } -+ -+ return nil -+} - -- if strings.Contains(saveOpts.path, ":") { -- return errors.Errorf("colon in path %q is not supported", saveOpts.path) -+func (opt *saveOptions) checkSaveOpts(args []string) error { -+ if len(args) == 0 { -+ return errors.New("save accepts at least one image") - } - -- if !filepath.IsAbs(saveOpts.path) { -- pwd, err := os.Getwd() -- if err != nil { -- return errors.New("get current path failed") -+ if strings.Contains(opt.path, ":") || strings.Contains(opt.sep.destPath, ":") { -+ return errors.Errorf("colon in path %q is not supported", opt.path) -+ } -+ pwd, err := os.Getwd() -+ if err != nil { -+ return 
errors.New("get current path failed") -+ } -+ -+ // normal save -+ if !opt.sep.isEnabled() { -+ // only check oci format when doing normal save operation -+ if opt.format == constant.OCITransport && len(args) >= 2 { -+ return errors.New("oci image format now only supports saving single image") -+ } -+ if err := util.CheckImageFormat(opt.format); err != nil { -+ return err -+ } -+ if len(opt.path) == 0 { -+ return errors.New("output path should not be empty") - } -- saveOpts.path = util.MakeAbsolute(saveOpts.path, pwd) -+ if !filepath.IsAbs(opt.path) { -+ opt.path = util.MakeAbsolute(opt.path, pwd) -+ } -+ if util.IsExist(opt.path) { -+ return errors.Errorf("output file already exist: %q, try to remove existing tarball or rename output file", opt.path) -+ } -+ return nil - } - -- if util.IsExist(saveOpts.path) { -- return errors.Errorf("output file already exist: %q, try to remove existing tarball or rename output file", saveOpts.path) -+ // separator save -+ opt.sep.enabled = true -+ if len(opt.path) != 0 { -+ return errors.New("conflict options between -o and [-b -l -r]") -+ } -+ // separate image only support docker image spec -+ opt.format = constant.DockerTransport -+ -+ if err := opt.sep.check(pwd); err != nil { -+ return err - } - - return nil -@@ -116,11 +176,20 @@ func runSave(ctx context.Context, cli Cli, args []string) error { - saveOpts.saveID = util.GenerateNonCryptoID()[:constant.DefaultIDLen] - saveOpts.images = args - -+ sep := &pb.SeparatorSave{ -+ Base: saveOpts.sep.baseImgName, -+ Lib: saveOpts.sep.libImageName, -+ Rename: saveOpts.sep.renameFile, -+ Dest: saveOpts.sep.destPath, -+ Enabled: saveOpts.sep.enabled, -+ } -+ - saveStream, err := cli.Client().Save(ctx, &pb.SaveRequest{ - Images: saveOpts.images, - Path: saveOpts.path, - SaveID: saveOpts.saveID, - Format: saveOpts.format, -+ Sep: sep, - }) - if err != nil { - return err -@@ -137,7 +206,11 @@ func runSave(ctx context.Context, cli Cli, args []string) error { - fmt.Printf("Save success with image: %s\n", saveOpts.images) - return nil - } -- return errors.Errorf("save image failed: %v", err) -+ return errors.Errorf("save image failed: %v", err.Error()) - } - } - } -+ -+func (sep *separatorSaveOption) isEnabled() bool { -+ return util.AnyFlagSet(sep.baseImgName, sep.libImageName, sep.renameFile) -+} -diff --git a/util/common.go b/util/common.go -index 00b1b941..4782b2ec 100644 ---- a/util/common.go -+++ b/util/common.go -@@ -21,6 +21,7 @@ import ( - "strings" - - securejoin "github.com/cyphar/filepath-securejoin" -+ "github.com/docker/distribution/reference" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/sys/unix" -@@ -184,3 +185,26 @@ func FormatSize(size, base float64) string { - func CheckCliExperimentalEnabled() bool { - return os.Getenv("ISULABUILD_CLI_EXPERIMENTAL") == "enabled" - } -+ -+// IsValidImageName will check the validity of image name -+func IsValidImageName(name string) bool { -+ ref, err := reference.ParseNormalizedNamed(name) -+ if err != nil { -+ return false -+ } -+ if _, canonical := ref.(reference.Canonical); canonical { -+ return false -+ } -+ return true -+} -+ -+// AnyFlagSet is a checker to indicate there exist flag's length not empty -+// If all flags are empty, will return false -+func AnyFlagSet(flags ...string) bool { -+ for _, flag := range flags { -+ if len(flag) != 0 { -+ return true -+ } -+ } -+ return false -+} --- -2.27.0 - diff --git a/patch/0074-daemon-finish-daemon-save-separated-image.patch b/patch/0074-daemon-finish-daemon-save-separated-image.patch 
deleted file mode 100644 index 68f70c5..0000000 --- a/patch/0074-daemon-finish-daemon-save-separated-image.patch +++ /dev/null @@ -1,995 +0,0 @@ -From 8cf5db787c507ce9d4c78191395b25b2f5e0d253 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Tue, 26 Oct 2021 14:19:27 +0800 -Subject: [PATCH 03/16] daemon:finish daemon save separated image - -reason: support isula-build daemon side save separated image -ABI change(daemon): none -Save process changes: -1. add separate image action at the end of save process(already got tarball) - - input: saved tarball - - output: separated images - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - daemon/save.go | 647 ++++++++++++++++++++++++++++++++++++++++++++++++- - util/cipher.go | 78 ++++++ - util/file.go | 153 ++++++++++++ - 3 files changed, 872 insertions(+), 6 deletions(-) - create mode 100644 util/file.go - -diff --git a/daemon/save.go b/daemon/save.go -index ee706911..ecac5b68 100644 ---- a/daemon/save.go -+++ b/daemon/save.go -@@ -15,11 +15,17 @@ package daemon - - import ( - "context" -+ "encoding/json" -+ "fmt" -+ "io/ioutil" - "os" -+ "path/filepath" - "strings" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" -+ "github.com/containers/storage/pkg/archive" -+ "github.com/docker/docker/pkg/ioutils" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" -@@ -34,6 +40,22 @@ import ( - "isula.org/isula-build/util" - ) - -+const ( -+ manifestDataFile = "manifest.json" -+ manifestFile = "manifest" -+ repositoriesFile = "repositories" -+ baseTarNameSuffix = "_base_image.tar.gz" -+ appTarNameSuffix = "_app_image.tar.gz" -+ libTarNameSuffix = "_lib_image.tar.gz" -+ untarTempDirName = "untar" -+ baseUntarTempDirName = "base_images" -+ appUntarTempDirName = "app_images" -+ libUntarTempDirName = "lib_images" -+ unionTarName = "all.tar" -+ layerTarName = "layer.tar" -+ tarSuffix = ".tar" -+) -+ - type savedImage struct { - exist bool - tags []reference.NamedTagged -@@ -42,18 +64,92 @@ type savedImage struct { - type saveOptions struct { - sysCtx *types.SystemContext - localStore *store.Store -+ logger *logger.Logger -+ logEntry *logrus.Entry - saveID string - format string -+ outputPath string - oriImgList []string - finalImageOrdered []string - finalImageSet map[string]*savedImage -- outputPath string -- logger *logger.Logger -- logEntry *logrus.Entry -+ sep separatorSave - } - --func (b *Backend) getSaveOptions(req *pb.SaveRequest) saveOptions { -- return saveOptions{ -+type separatorSave struct { -+ renameData []renames -+ tmpDir imageTmpDir -+ log *logrus.Entry -+ base string -+ lib string -+ dest string -+ enabled bool -+} -+ -+type renames struct { -+ Name string `json:"name"` -+ Rename string `json:"rename"` -+} -+ -+type imageTmpDir struct { -+ app string -+ base string -+ lib string -+ untar string -+ root string -+} -+ -+type layer struct { -+ all []string -+ base []string -+ lib []string -+ app []string -+} -+ -+type imageInfo struct { -+ layers layer -+ repoTags []string -+ config string -+ name string -+ tag string -+ nameTag string -+ topLayer string -+} -+ -+// imageManifest return image's manifest info -+type imageManifest struct { -+ Config string `json:"Config"` -+ RepoTags []string `json:"RepoTags"` -+ Layers []string `json:"Layers"` -+ // Not shown in the json file -+ HashMap map[string]string `json:"-"` -+} -+ -+type imageLayersMap map[string]string -+ -+type tarballInfo struct { -+ AppTarName string `json:"app"` -+ AppHash string 
`json:"appHash"` -+ AppLayers []string `json:"appLayers"` -+ LibTarName string `json:"lib"` -+ LibHash string `json:"libHash"` -+ LibImageName string `json:"libImageName"` -+ LibLayers []string `json:"libLayers"` -+ BaseTarName string `json:"base"` -+ BaseHash string `json:"baseHash"` -+ BaseImageName string `json:"baseImageName"` -+ BaseLayers []string `json:"baseLayer"` -+} -+ -+func (b *Backend) getSaveOptions(req *pb.SaveRequest) (saveOptions, error) { -+ var sep = separatorSave{ -+ base: req.GetSep().GetBase(), -+ lib: req.GetSep().GetLib(), -+ dest: req.GetSep().GetDest(), -+ log: logrus.WithFields(logrus.Fields{"SaveID": req.GetSaveID()}), -+ enabled: req.GetSep().GetEnabled(), -+ } -+ -+ var opt = saveOptions{ - sysCtx: image.GetSystemContext(), - localStore: b.daemon.localStore, - saveID: req.GetSaveID(), -@@ -64,7 +160,38 @@ func (b *Backend) getSaveOptions(req *pb.SaveRequest) saveOptions { - outputPath: req.GetPath(), - logger: logger.NewCliLogger(constant.CliLogBufferLen), - logEntry: logrus.WithFields(logrus.Fields{"SaveID": req.GetSaveID(), "Format": req.GetFormat()}), -+ sep: sep, -+ } -+ // normal save -+ if !sep.enabled { -+ return opt, nil -+ } -+ -+ // save separated image -+ tmpRoot := filepath.Join(b.daemon.opts.DataRoot, filepath.Join(dataRootTmpDirPrefix, req.GetSaveID())) -+ untar := filepath.Join(tmpRoot, untarTempDirName) -+ appDir := filepath.Join(tmpRoot, appUntarTempDirName) -+ baseDir := filepath.Join(tmpRoot, baseUntarTempDirName) -+ libDir := filepath.Join(tmpRoot, libUntarTempDirName) -+ -+ opt.sep.tmpDir = imageTmpDir{ -+ app: appDir, -+ base: baseDir, -+ lib: libDir, -+ untar: untar, -+ root: tmpRoot, - } -+ opt.outputPath = filepath.Join(untar, unionTarName) -+ renameFile := req.GetSep().GetRename() -+ if len(renameFile) != 0 { -+ var reName []renames -+ if err := util.LoadJSONFile(renameFile, &reName); err != nil { -+ return saveOptions{}, err -+ } -+ opt.sep.renameData = reName -+ } -+ -+ return opt, nil - } - - // Save receives a save request and save the image(s) into tarball -@@ -75,7 +202,10 @@ func (b *Backend) Save(req *pb.SaveRequest, stream pb.Control_SaveServer) error - }).Info("SaveRequest received") - - var err error -- opts := b.getSaveOptions(req) -+ opts, err := b.getSaveOptions(req) -+ if err != nil { -+ return errors.Wrap(err, "process save options failed") -+ } - - if err = checkFormat(&opts); err != nil { - return err -@@ -103,6 +233,11 @@ func (b *Backend) Save(req *pb.SaveRequest, stream pb.Control_SaveServer) error - return err - } - -+ // separatorSave found -+ if opts.sep.enabled { -+ return separateImage(opts) -+ } -+ - return nil - } - -@@ -118,6 +253,9 @@ func exportHandler(ctx context.Context, opts *saveOptions) func() error { - } - }() - -+ if err := os.MkdirAll(filepath.Dir(opts.outputPath), constant.DefaultRootFileMode); err != nil { -+ return err -+ } - for _, imageID := range opts.finalImageOrdered { - copyCtx := *opts.sysCtx - if opts.format == constant.DockerArchiveTransport { -@@ -210,3 +348,500 @@ func filterImageName(opts *saveOptions) error { - - return nil - } -+ -+func getLayerHashFromStorage(store *store.Store, name string) ([]string, error) { -+ if len(name) == 0 { -+ return nil, nil -+ } -+ _, img, err := image.FindImage(store, name) -+ if err != nil { -+ return nil, err -+ } -+ -+ layer, err := store.Layer(img.TopLayer) -+ if err != nil { -+ return nil, errors.Wrapf(err, "failed to get top layer for image %s", name) -+ } -+ -+ var layers []string -+ // add each layer in the layers until reach the root layer 
-+ for layer != nil { -+ fields := strings.Split(layer.UncompressedDigest.String(), ":") -+ if len(fields) != 2 { -+ return nil, errors.Errorf("error format of layer of image %s", name) -+ } -+ layers = append(layers, fields[1]) -+ if layer.Parent == "" { -+ break -+ } -+ layer, err = store.Layer(layer.Parent) -+ if err != nil { -+ return nil, errors.Wrapf(err, "unable to read layer %q", layer.Parent) -+ } -+ } -+ -+ return layers, nil -+} -+ -+// process physic file -+func (s *separatorSave) constructLayerMap() (map[string]string, error) { -+ path := s.tmpDir.untar -+ files, rErr := ioutil.ReadDir(path) -+ if rErr != nil { -+ return nil, rErr -+ } -+ -+ var layerMap = make(map[string]string, len(files)) -+ // process layer's file -+ for _, file := range files { -+ if file.IsDir() { -+ layerFile := filepath.Join(path, file.Name(), layerTarName) -+ oriFile, err := os.Readlink(layerFile) -+ if err != nil { -+ return nil, err -+ } -+ physicFile := filepath.Join(path, file.Name(), oriFile) -+ layerMap[filepath.Base(physicFile)] = filepath.Join(file.Name(), layerTarName) -+ if err := os.Rename(physicFile, layerFile); err != nil { -+ return nil, err -+ } -+ } -+ } -+ -+ return layerMap, nil -+} -+ -+func getLayerHashFromTar(layerMap map[string]string, layer []string) map[string]string { -+ hashMap := make(map[string]string, len(layer)) -+ // first reverse map since it's is unique -+ revMap := make(map[string]string, len(layerMap)) -+ for k, v := range layerMap { -+ revMap[v] = k -+ } -+ for _, l := range layer { -+ if v, ok := revMap[l]; ok { -+ // format is like xxx(hash): xxx/layer.tar -+ hashMap[strings.TrimSuffix(v, tarSuffix)] = l -+ } -+ } -+ -+ return hashMap -+} -+ -+func (s *separatorSave) adjustLayers() ([]imageManifest, error) { -+ s.log.Info("Adjusting layers for saving separated image") -+ -+ layerMap, err := s.constructLayerMap() -+ if err != nil { -+ s.log.Errorf("Process layers failed: %v", err) -+ return nil, err -+ } -+ -+ // process manifest file -+ var man []imageManifest -+ if lErr := util.LoadJSONFile(filepath.Join(s.tmpDir.untar, manifestDataFile), &man); lErr != nil { -+ return nil, lErr -+ } -+ -+ for i, img := range man { -+ layers := make([]string, len(img.Layers)) -+ for i, layer := range img.Layers { -+ layers[i] = layerMap[layer] -+ } -+ man[i].Layers = layers -+ man[i].HashMap = getLayerHashFromTar(layerMap, layers) -+ } -+ buf, err := json.Marshal(&man) -+ if err != nil { -+ return nil, err -+ } -+ if err := ioutils.AtomicWriteFile(manifestFile, buf, constant.DefaultSharedFileMode); err != nil { -+ return nil, err -+ } -+ -+ return man, nil -+} -+ -+func separateImage(opt saveOptions) error { -+ s := &opt.sep -+ s.log.Infof("Start saving separated images %v", opt.oriImgList) -+ var errList []error -+ -+ if err := os.MkdirAll(s.dest, constant.DefaultRootDirMode); err != nil { -+ return err -+ } -+ -+ defer func() { -+ if tErr := os.RemoveAll(s.tmpDir.root); tErr != nil && !os.IsNotExist(tErr) { -+ s.log.Warnf("Removing save tmp directory %q failed: %v", s.tmpDir.root, tErr) -+ } -+ if len(errList) != 0 { -+ if rErr := os.RemoveAll(s.dest); rErr != nil && !os.IsNotExist(rErr) { -+ s.log.Warnf("Removing save dest directory %q failed: %v", s.dest, rErr) -+ } -+ } -+ }() -+ if err := util.UnpackFile(opt.outputPath, s.tmpDir.untar, archive.Gzip, true); err != nil { -+ errList = append(errList, err) -+ return errors.Wrapf(err, "unpack %q failed", opt.outputPath) -+ } -+ manifest, err := s.adjustLayers() -+ if err != nil { -+ errList = append(errList, err) -+ return 
errors.Wrap(err, "adjust layers failed") -+ } -+ -+ imgInfos, err := s.constructImageInfos(manifest, opt.localStore) -+ if err != nil { -+ errList = append(errList, err) -+ return errors.Wrap(err, "process image infos failed") -+ } -+ -+ if err := s.processImageLayers(imgInfos); err != nil { -+ errList = append(errList, err) -+ return err -+ } -+ -+ return nil -+} -+ -+func (s *separatorSave) processImageLayers(imgInfos map[string]imageInfo) error { -+ s.log.Info("Processing image layers") -+ var ( -+ tarballs = make(map[string]tarballInfo) -+ baseImagesMap = make(imageLayersMap, 1) -+ libImagesMap = make(imageLayersMap, 1) -+ appImagesMap = make(imageLayersMap, 1) -+ ) -+ for _, info := range imgInfos { -+ if err := s.clearDirs(true); err != nil { -+ return errors.Wrap(err, "clear tmp dirs failed") -+ } -+ var t tarballInfo -+ // process base -+ if err := info.processBaseImg(s, baseImagesMap, &t); err != nil { -+ return errors.Wrapf(err, "process base images %s failed", info.nameTag) -+ } -+ // process lib -+ if err := info.processLibImg(s, libImagesMap, &t); err != nil { -+ return errors.Wrapf(err, "process lib images %s failed", info.nameTag) -+ } -+ // process app -+ if err := info.processAppImg(s, appImagesMap, &t); err != nil { -+ return errors.Wrapf(err, "process app images %s failed", info.nameTag) -+ } -+ tarballs[info.nameTag] = t -+ } -+ buf, err := json.Marshal(&tarballs) -+ if err != nil { -+ return err -+ } -+ // manifest file -+ manifestFile := filepath.Join(s.dest, manifestFile) -+ if err := ioutils.AtomicWriteFile(manifestFile, buf, constant.DefaultRootFileMode); err != nil { -+ return err -+ } -+ -+ s.log.Info("Save separated image succeed") -+ return nil -+} -+ -+func (s *separatorSave) clearDirs(reCreate bool) error { -+ tmpDir := s.tmpDir -+ dirs := []string{tmpDir.base, tmpDir.app, tmpDir.lib} -+ var mkTmpDirs = func(dirs []string) error { -+ for _, dir := range dirs { -+ if err := os.MkdirAll(dir, constant.DefaultRootDirMode); err != nil { -+ return err -+ } -+ } -+ return nil -+ } -+ -+ var rmTmpDirs = func(dirs []string) error { -+ for _, dir := range dirs { -+ if err := os.RemoveAll(dir); err != nil { -+ return err -+ } -+ } -+ return nil -+ } -+ -+ if err := rmTmpDirs(dirs); err != nil { -+ return err -+ } -+ if reCreate { -+ if err := mkTmpDirs(dirs); err != nil { -+ return err -+ } -+ } -+ return nil -+} -+ -+// processTarName will trim the prefix of image name like example.io/library/myapp:v1 -+// after processed, the name will be myapp_v1_suffix -+// mind: suffix here should not contain path separator -+func (info imageInfo) processTarName(suffix string) string { -+ originNames := strings.Split(info.name, string(os.PathSeparator)) -+ originTags := strings.Split(info.tag, string(os.PathSeparator)) -+ // get the last element of the list, which mast be the right name without prefix -+ name := originNames[len(originNames)-1] -+ tag := originTags[len(originTags)-1] -+ -+ return fmt.Sprintf("%s_%s%s", name, tag, suffix) -+} -+ -+func (info *imageInfo) processBaseImg(sep *separatorSave, baseImagesMap map[string]string, tarball *tarballInfo) error { -+ // process base -+ tarball.BaseImageName = sep.base -+ for _, layerID := range info.layers.base { -+ tarball.BaseLayers = append(tarball.BaseLayers, layerID) -+ if baseImg, ok := baseImagesMap[layerID]; !ok { -+ srcLayerPath := filepath.Join(sep.tmpDir.untar, layerID) -+ destLayerPath := filepath.Join(sep.tmpDir.base, layerID) -+ if err := os.Rename(srcLayerPath, destLayerPath); err != nil { -+ return err -+ } -+ 
baseTarName := info.processTarName(baseTarNameSuffix) -+ baseTarName = sep.rename(baseTarName) -+ baseTarPath := filepath.Join(sep.dest, baseTarName) -+ if err := util.PackFiles(sep.tmpDir.base, baseTarPath, archive.Gzip, true); err != nil { -+ return err -+ } -+ baseImagesMap[layerID] = baseTarPath -+ tarball.BaseTarName = baseTarName -+ digest, err := util.SHA256Sum(baseTarPath) -+ if err != nil { -+ return errors.Wrapf(err, "check sum for new base image %s failed", baseTarName) -+ } -+ tarball.BaseHash = digest -+ } else { -+ tarball.BaseTarName = filepath.Base(baseImg) -+ digest, err := util.SHA256Sum(baseImg) -+ if err != nil { -+ return errors.Wrapf(err, "check sum for reuse base image %s failed", baseImg) -+ } -+ tarball.BaseHash = digest -+ } -+ } -+ -+ return nil -+} -+ -+func (info *imageInfo) processLibImg(sep *separatorSave, libImagesMap map[string]string, tarball *tarballInfo) error { -+ // process lib -+ if info.layers.lib == nil { -+ return nil -+ } -+ -+ tarball.LibImageName = sep.lib -+ for _, layerID := range info.layers.lib { -+ tarball.LibLayers = append(tarball.LibLayers, layerID) -+ if libImg, ok := libImagesMap[layerID]; !ok { -+ srcLayerPath := filepath.Join(sep.tmpDir.untar, layerID) -+ destLayerPath := filepath.Join(sep.tmpDir.lib, layerID) -+ if err := os.Rename(srcLayerPath, destLayerPath); err != nil { -+ return err -+ } -+ libTarName := info.processTarName(libTarNameSuffix) -+ libTarName = sep.rename(libTarName) -+ libTarPath := filepath.Join(sep.dest, libTarName) -+ if err := util.PackFiles(sep.tmpDir.lib, libTarPath, archive.Gzip, true); err != nil { -+ return err -+ } -+ libImagesMap[layerID] = libTarPath -+ tarball.LibTarName = libTarName -+ digest, err := util.SHA256Sum(libTarPath) -+ if err != nil { -+ return errors.Wrapf(err, "check sum for lib image %s failed", sep.lib) -+ } -+ tarball.LibHash = digest -+ } else { -+ tarball.LibTarName = filepath.Base(libImg) -+ digest, err := util.SHA256Sum(libImg) -+ if err != nil { -+ return errors.Wrapf(err, "check sum for lib image %s failed", sep.lib) -+ } -+ tarball.LibHash = digest -+ } -+ } -+ -+ return nil -+} -+ -+func (info *imageInfo) processAppImg(sep *separatorSave, appImagesMap map[string]string, tarball *tarballInfo) error { -+ // process app -+ appTarName := info.processTarName(appTarNameSuffix) -+ appTarName = sep.rename(appTarName) -+ appTarPath := filepath.Join(sep.dest, appTarName) -+ for _, layerID := range info.layers.app { -+ srcLayerPath := filepath.Join(sep.tmpDir.untar, layerID) -+ destLayerPath := filepath.Join(sep.tmpDir.app, layerID) -+ if err := os.Rename(srcLayerPath, destLayerPath); err != nil { -+ if appImg, ok := appImagesMap[layerID]; ok { -+ return errors.Errorf("lib layers %s already saved in %s for image %s", -+ layerID, appImg, info.nameTag) -+ } -+ } -+ appImagesMap[layerID] = appTarPath -+ tarball.AppLayers = append(tarball.AppLayers, layerID) -+ } -+ // create config file -+ if err := info.createManifestFile(sep); err != nil { -+ return err -+ } -+ if err := info.createRepositoriesFile(sep); err != nil { -+ return err -+ } -+ -+ srcConfigPath := filepath.Join(sep.tmpDir.untar, info.config) -+ destConfigPath := filepath.Join(sep.tmpDir.app, info.config) -+ if err := os.Rename(srcConfigPath, destConfigPath); err != nil { -+ return err -+ } -+ -+ if err := util.PackFiles(sep.tmpDir.app, appTarPath, archive.Gzip, true); err != nil { -+ return err -+ } -+ tarball.AppTarName = appTarName -+ digest, err := util.SHA256Sum(appTarPath) -+ if err != nil { -+ return errors.Wrapf(err, 
"check sum for app image %s failed", info.nameTag) -+ } -+ tarball.AppHash = digest -+ -+ return nil -+} -+ -+func (info imageInfo) createRepositoriesFile(sep *separatorSave) error { -+ // create repositories -+ type repoItem map[string]string -+ repo := make(map[string]repoItem, 1) -+ item := make(repoItem, 1) -+ if _, ok := item[info.tag]; !ok { -+ item[info.tag] = info.topLayer -+ } -+ repo[info.name] = item -+ buf, err := json.Marshal(repo) -+ if err != nil { -+ return err -+ } -+ repositoryFile := filepath.Join(sep.tmpDir.app, repositoriesFile) -+ if err := ioutils.AtomicWriteFile(repositoryFile, buf, constant.DefaultRootFileMode); err != nil { -+ return err -+ } -+ return nil -+} -+ -+func (info imageInfo) createManifestFile(sep *separatorSave) error { -+ // create manifest.json -+ var s = imageManifest{ -+ Config: info.config, -+ Layers: info.layers.all, -+ RepoTags: info.repoTags, -+ } -+ var m []imageManifest -+ m = append(m, s) -+ buf, err := json.Marshal(&m) -+ if err != nil { -+ return err -+ } -+ data := filepath.Join(sep.tmpDir.app, manifestDataFile) -+ if err := ioutils.AtomicWriteFile(data, buf, constant.DefaultRootFileMode); err != nil { -+ return err -+ } -+ return nil -+} -+ -+func getLayersID(layer []string) []string { -+ var after = make([]string, len(layer)) -+ for i, v := range layer { -+ after[i] = strings.Split(v, "/")[0] -+ } -+ return after -+} -+ -+func (s *separatorSave) constructSingleImgInfo(mani imageManifest, store *store.Store) (imageInfo, error) { -+ var libLayers, appLayers []string -+ imageRepoFields := strings.Split(mani.RepoTags[0], ":") -+ imageLayers := getLayersID(mani.Layers) -+ -+ libs, bases, err := s.checkLayersHash(mani.HashMap, store) -+ if err != nil { -+ return imageInfo{}, errors.Wrap(err, "compare layers failed") -+ } -+ baseLayers := imageLayers[0:len(bases)] -+ if len(libs) != 0 { -+ libLayers = imageLayers[len(bases):len(libs)] -+ appLayers = imageLayers[len(libs):] -+ } else { -+ libLayers = nil -+ appLayers = imageLayers[len(bases):] -+ } -+ -+ return imageInfo{ -+ config: mani.Config, -+ repoTags: mani.RepoTags, -+ nameTag: mani.RepoTags[0], -+ name: strings.Join(imageRepoFields[0:len(imageRepoFields)-1], ":"), -+ tag: imageRepoFields[len(imageRepoFields)-1], -+ layers: layer{app: appLayers, lib: libLayers, base: baseLayers, all: mani.Layers}, -+ topLayer: imageLayers[len(imageLayers)-1], -+ }, nil -+} -+ -+func (s *separatorSave) checkLayersHash(layerHashMap map[string]string, store *store.Store) ([]string, []string, error) { -+ libHash, err := getLayerHashFromStorage(store, s.lib) -+ if err != nil { -+ return nil, nil, errors.Wrapf(err, "get lib image %s layers failed", s.lib) -+ } -+ baseHash, err := getLayerHashFromStorage(store, s.base) -+ if err != nil { -+ return nil, nil, errors.Wrapf(err, "get base image %s layers failed", s.base) -+ } -+ if len(libHash) >= len(layerHashMap) || len(baseHash) >= len(layerHashMap) { -+ return nil, nil, errors.Errorf("number of base or lib layers is equal or greater than saved app layers") -+ } -+ -+ for _, l := range libHash { -+ if _, ok := layerHashMap[l]; !ok { -+ return nil, nil, errors.Errorf("dismatch checksum for lib image %s", s.lib) -+ } -+ } -+ for _, b := range baseHash { -+ if _, ok := layerHashMap[b]; !ok { -+ return nil, nil, errors.Errorf("dismatch checksum for base image %s", s.base) -+ } -+ } -+ -+ return libHash, baseHash, nil -+} -+ -+func (s *separatorSave) constructImageInfos(manifest []imageManifest, store *store.Store) (map[string]imageInfo, error) { -+ 
s.log.Info("Constructing image info") -+ -+ var imgInfos = make(map[string]imageInfo, 1) -+ for _, mani := range manifest { -+ imgInfo, err := s.constructSingleImgInfo(mani, store) -+ if err != nil { -+ s.log.Errorf("Constructing image info failed: %v", err) -+ return nil, errors.Wrap(err, "construct image info failed") -+ } -+ if _, ok := imgInfos[imgInfo.nameTag]; !ok { -+ imgInfos[imgInfo.nameTag] = imgInfo -+ } -+ } -+ return imgInfos, nil -+} -+ -+func (s *separatorSave) rename(name string) string { -+ if len(s.renameData) != 0 { -+ s.log.Info("Renaming image tarballs") -+ for _, item := range s.renameData { -+ if item.Name == name { -+ return item.Rename -+ } -+ } -+ } -+ return name -+} -diff --git a/util/cipher.go b/util/cipher.go -index b2aea2a9..d92705c3 100644 ---- a/util/cipher.go -+++ b/util/cipher.go -@@ -19,9 +19,11 @@ import ( - "crypto/cipher" - "crypto/rand" - "crypto/rsa" -+ "crypto/sha256" - "crypto/x509" - "encoding/hex" - "encoding/pem" -+ "fmt" - "hash" - "io" - "io/ioutil" -@@ -229,3 +231,79 @@ func ReadPublicKey(path string) (rsa.PublicKey, error) { - - return *key, nil - } -+ -+func hashFile(path string) (string, error) { -+ cleanPath := filepath.Clean(path) -+ if len(cleanPath) == 0 { -+ return "", errors.New("failed to hash empty path") -+ } -+ if f, err := os.Stat(cleanPath); err != nil { -+ return "", errors.Errorf("failed to stat file %q", cleanPath) -+ } else if f.IsDir() { -+ return "", errors.New("failed to hash directory") -+ } -+ -+ file, err := ioutil.ReadFile(cleanPath) // nolint:gosec -+ if err != nil { -+ return "", errors.Wrapf(err, "hash file failed") -+ } -+ -+ return fmt.Sprintf("%x", sha256.Sum256(file)), nil -+} -+ -+func hashDir(path string) (string, error) { -+ var checkSum string -+ if err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { -+ cleanPath := filepath.Clean(path) -+ if err != nil { -+ return err -+ } -+ if !info.Mode().IsRegular() { -+ return nil -+ } -+ if !info.IsDir() { -+ f, err := ioutil.ReadFile(cleanPath) // nolint:gosec -+ if err != nil { -+ return err -+ } -+ fileHash := fmt.Sprintf("%x", sha256.Sum256(f)) -+ checkSum = fmt.Sprintf("%s%s", checkSum, fileHash) -+ } -+ return nil -+ }); err != nil { -+ return "", err -+ } -+ -+ return fmt.Sprintf("%x", sha256.Sum256([]byte(checkSum))), nil -+} -+ -+// SHA256Sum will calculate sha256 checksum for path(file or directory) -+// When calculate directory, each file of folder will be calculated and -+// the checksum will be concatenated to next checksum until every file -+// counted, the result will be used for final checksum calculation -+func SHA256Sum(path string) (string, error) { -+ path = filepath.Clean(path) -+ f, err := os.Stat(path) -+ if err != nil { -+ return "", err -+ } -+ if f.IsDir() { -+ return hashDir(path) -+ } -+ -+ return hashFile(path) -+} -+ -+// CheckSum will calculate the sha256sum for path and compare it with -+// the target, if not match, return error -+func CheckSum(path, target string) error { -+ digest, err := SHA256Sum(path) -+ if err != nil { -+ return err -+ } -+ if digest != target { -+ return errors.Errorf("check sum for path %s failed, got %s, want %s", -+ path, digest, target) -+ } -+ return nil -+} -diff --git a/util/file.go b/util/file.go -new file mode 100644 -index 00000000..cd4a75d5 ---- /dev/null -+++ b/util/file.go -@@ -0,0 +1,153 @@ -+// Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+// isula-build licensed under the Mulan PSL v2. 
-+// You can use this software according to the terms and conditions of the Mulan PSL v2. -+// You may obtain a copy of Mulan PSL v2 at: -+// http://license.coscl.org.cn/MulanPSL2 -+// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+// PURPOSE. -+// See the Mulan PSL v2 for more details. -+// Author: Xiang Li -+// Create: 2021-08-24 -+// Description: file manipulation related common functions -+ -+package util -+ -+import ( -+ "encoding/json" -+ "io" -+ "io/ioutil" -+ "os" -+ "path/filepath" -+ "time" -+ -+ "github.com/containers/storage/pkg/archive" -+ "github.com/pkg/errors" -+) -+ -+const ( -+ fileMaxSize = 10 * 1024 * 1024 // 10MB -+) -+ -+// ReadSmallFile read small file less than 10MB -+func ReadSmallFile(path string) ([]byte, error) { -+ st, err := os.Lstat(path) -+ if err != nil { -+ return nil, err -+ } -+ if st.Size() > fileMaxSize { -+ return nil, errors.Errorf("file %q too big", path) -+ } -+ return ioutil.ReadFile(path) // nolint: gosec -+} -+ -+// LoadJSONFile load json files and store it into v -+func LoadJSONFile(file string, v interface{}) error { -+ f, err := ReadSmallFile(file) -+ if err != nil { -+ return err -+ } -+ return json.Unmarshal(f, v) -+} -+ -+// ChangeDirModifyTime changes modify time of directory -+func ChangeDirModifyTime(dir string) error { -+ fs, rErr := ioutil.ReadDir(dir) -+ if rErr != nil { -+ return rErr -+ } -+ for _, f := range fs { -+ src := filepath.Join(dir, f.Name()) -+ if err := ChangeFileModifyTime(src); err != nil { -+ return err -+ } -+ if f.IsDir() { -+ if err := ChangeDirModifyTime(src); err != nil { -+ return err -+ } -+ } -+ } -+ return nil -+} -+ -+// ChangeFileModifyTime changes modify time of file by fixing time at 2017-01-01 00:00:00 -+func ChangeFileModifyTime(path string) error { -+ mtime := time.Date(2017, time.January, 0, 0, 0, 0, 0, time.UTC) -+ atime := time.Date(2017, time.January, 0, 0, 0, 0, 0, time.UTC) -+ if _, err := os.Lstat(path); err != nil { -+ return err -+ } -+ if err := os.Chtimes(path, atime, mtime); err != nil { -+ return err -+ } -+ return nil -+} -+ -+// PackFiles will pack files in "src" directory to "dest" file -+// by using different compression method defined by "com" -+// the files' modify time attribute will be set to a fix time "2017-01-01 00:00:00" -+// if set "modifyTime" to true -+func PackFiles(src, dest string, com archive.Compression, modifyTime bool) (err error) { -+ if modifyTime { -+ if err = ChangeDirModifyTime(src); err != nil { -+ return err -+ } -+ } -+ -+ reader, err := archive.Tar(src, com) -+ if err != nil { -+ return err -+ } -+ -+ f, err := os.Create(dest) -+ if err != nil { -+ return err -+ } -+ -+ defer func() { -+ cErr := f.Close() -+ if cErr != nil && err == nil { -+ err = cErr -+ } -+ }() -+ -+ if _, err = io.Copy(f, reader); err != nil { -+ return err -+ } -+ -+ return nil -+} -+ -+// UnpackFile will unpack "src" file to "dest" directory -+// by using different compression method defined by "com" -+// The src file will be remove if set "rm" to true -+func UnpackFile(src, dest string, com archive.Compression, rm bool) (err error) { -+ cleanPath := filepath.Clean(src) -+ f, err := os.Open(cleanPath) // nolint:gosec -+ if err != nil { -+ return errors.Wrapf(err, "unpack: open %q failed", src) -+ } -+ -+ defer func() { -+ cErr := f.Close() -+ if cErr != nil && err == nil { -+ err = cErr -+ } -+ }() -+ -+ if err = archive.Untar(f, dest, 
&archive.TarOptions{Compression: com}); err != nil { -+ return errors.Wrapf(err, "unpack file %q failed", src) -+ } -+ -+ if err = ChangeDirModifyTime(dest); err != nil { -+ return errors.Wrapf(err, "change modify time for directory %q failed", dest) -+ } -+ -+ if rm { -+ if err = os.RemoveAll(src); err != nil { -+ return errors.Errorf("unpack: remove %q failed: %v ", src, err) -+ } -+ } -+ -+ return nil -+} --- -2.27.0 - diff --git a/patch/0075-cli-finish-client-load-separated-image.patch b/patch/0075-cli-finish-client-load-separated-image.patch deleted file mode 100644 index bfa546e..0000000 --- a/patch/0075-cli-finish-client-load-separated-image.patch +++ /dev/null @@ -1,274 +0,0 @@ -From 5749a92be53a3e8a135b4f7e59e8fd6d470fbd55 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Tue, 26 Oct 2021 14:20:07 +0800 -Subject: [PATCH 05/16] cli:finish client load separated image - -reason: support isula-build client side process info for load separated -image -ABI change:(client) -- --input: name of app images when load separated images -- --dir: path to separated images' tarball directory -- --base: base image tarball path of separated images -- --lib: lib image tarball path of separated images -- --no-check: skip sha256 check sum for legacy separated images loading - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - cmd/cli/load.go | 113 ++++++++++++++++++++++++++++++++++++++++--- - cmd/cli/load_test.go | 50 +++++++++++++++++-- - cmd/cli/mock.go | 10 ++++ - 3 files changed, 160 insertions(+), 13 deletions(-) - -diff --git a/cmd/cli/load.go b/cmd/cli/load.go -index 16e90a26..2a9df772 100644 ---- a/cmd/cli/load.go -+++ b/cmd/cli/load.go -@@ -25,18 +25,32 @@ import ( - "github.com/pkg/errors" - "github.com/spf13/cobra" - -+ constant "isula.org/isula-build" - pb "isula.org/isula-build/api/services" - "isula.org/isula-build/util" - ) - -+type separatorLoadOption struct { -+ app string -+ base string -+ lib string -+ dir string -+ skipCheck bool -+ enabled bool -+} -+ - type loadOptions struct { -- path string -+ path string -+ loadID string -+ sep separatorLoadOption - } - - var loadOpts loadOptions - - const ( -- loadExample = `isula-build ctr-img load -i busybox.tar` -+ loadExample = `isula-build ctr-img load -i busybox.tar -+isula-build ctr-img load -i app:latest -d /home/Images -+isula-build ctr-img load -i app:latest -d /home/Images -b /home/Images/base.tar.gz -l /home/Images/lib.tar.gz` - ) - - // NewLoadCmd returns image load command -@@ -49,12 +63,20 @@ func NewLoadCmd() *cobra.Command { - RunE: loadCommand, - } - -- loadCmd.PersistentFlags().StringVarP(&loadOpts.path, "input", "i", "", "Path to local tarball") -+ loadCmd.PersistentFlags().StringVarP(&loadOpts.path, "input", "i", "", "Path to local tarball(or app image name when load separated images)") -+ loadCmd.PersistentFlags().StringVarP(&loadOpts.sep.dir, "dir", "d", "", "Path to separated image tarballs directory") -+ loadCmd.PersistentFlags().StringVarP(&loadOpts.sep.base, "base", "b", "", "Base image tarball path of separated images") -+ loadCmd.PersistentFlags().StringVarP(&loadOpts.sep.lib, "lib", "l", "", "Library image tarball path of separated images") -+ loadCmd.PersistentFlags().BoolVarP(&loadOpts.sep.skipCheck, "no-check", "", false, "Skip sha256 check sum for legacy separated images loading") - - return loadCmd - } - - func loadCommand(cmd *cobra.Command, args []string) error { -+ if err := loadOpts.checkLoadOpts(); err != nil { -+ return errors.Wrapf(err, "check load options failed") -+ } -+ - ctx := 
context.Background() - cli, err := NewClient(ctx) - if err != nil { -@@ -65,14 +87,20 @@ func loadCommand(cmd *cobra.Command, args []string) error { - } - - func runLoad(ctx context.Context, cli Cli) error { -- var err error -- -- if loadOpts.path, err = resolveLoadPath(loadOpts.path); err != nil { -- return err -+ loadOpts.loadID = util.GenerateNonCryptoID()[:constant.DefaultIDLen] -+ sep := &pb.SeparatorLoad{ -+ App: loadOpts.sep.app, -+ Dir: loadOpts.sep.dir, -+ Base: loadOpts.sep.base, -+ Lib: loadOpts.sep.lib, -+ SkipCheck: loadOpts.sep.skipCheck, -+ Enabled: loadOpts.sep.enabled, - } - - resp, err := cli.Client().Load(ctx, &pb.LoadRequest{ -- Path: loadOpts.path, -+ Path: loadOpts.path, -+ LoadID: loadOpts.loadID, -+ Sep: sep, - }) - if err != nil { - return err -@@ -114,3 +142,72 @@ func resolveLoadPath(path string) (string, error) { - - return path, nil - } -+ -+func (opt *loadOptions) checkLoadOpts() error { -+ // normal load -+ if !opt.sep.isEnabled() { -+ path, err := resolveLoadPath(opt.path) -+ if err != nil { -+ return err -+ } -+ opt.path = path -+ -+ return nil -+ } -+ -+ // load separated image -+ opt.sep.enabled = true -+ if len(opt.path) == 0 { -+ return errors.New("app image should not be empty") -+ } -+ -+ // Use opt.path as app image name when operating separated images -+ // this can be mark as a switch for handling separated images -+ opt.sep.app = opt.path -+ -+ if err := opt.sep.check(); err != nil { -+ return err -+ } -+ -+ return nil -+} -+ -+func (sep *separatorLoadOption) isEnabled() bool { -+ return util.AnyFlagSet(sep.dir, sep.base, sep.lib, sep.app) -+} -+ -+func (sep *separatorLoadOption) check() error { -+ pwd, err := os.Getwd() -+ if err != nil { -+ return errors.New("get current path failed") -+ } -+ if !util.IsValidImageName(sep.app) { -+ return errors.Errorf("invalid image name: %s", sep.app) -+ } -+ -+ if len(sep.base) != 0 { -+ path, err := resolveLoadPath(sep.base) -+ if err != nil { -+ return errors.Wrap(err, "resolve base tarball path failed") -+ } -+ sep.base = path -+ } -+ if len(sep.lib) != 0 { -+ path, err := resolveLoadPath(sep.lib) -+ if err != nil { -+ return errors.Wrap(err, "resolve lib tarball path failed") -+ } -+ sep.lib = path -+ } -+ if len(sep.dir) == 0 { -+ return errors.New("image tarball directory should not be empty") -+ } -+ if !filepath.IsAbs(sep.dir) { -+ sep.dir = util.MakeAbsolute(sep.dir, pwd) -+ } -+ if !util.IsExist(sep.dir) { -+ return errors.Errorf("image tarball directory %s is not exist", sep.dir) -+ } -+ -+ return nil -+} -diff --git a/cmd/cli/load_test.go b/cmd/cli/load_test.go -index 9c753e23..b7bf2a57 100644 ---- a/cmd/cli/load_test.go -+++ b/cmd/cli/load_test.go -@@ -15,19 +15,59 @@ package main - - import ( - "context" -+ "io/ioutil" - "path/filepath" - "testing" - - "gotest.tools/v3/assert" - "gotest.tools/v3/fs" -+ constant "isula.org/isula-build" - ) - - func TestLoadCmd(t *testing.T) { -- cmd := NewLoadCmd() -- err := cmd.Execute() -- assert.Equal(t, err != nil, true) -- err = loadCommand(cmd, nil) -- assert.ErrorContains(t, err, "isula_build") -+ tmpDir := fs.NewFile(t, t.Name()) -+ err := ioutil.WriteFile(tmpDir.Path(), []byte("This is test file"), constant.DefaultSharedFileMode) -+ assert.NilError(t, err) -+ defer tmpDir.Remove() -+ -+ type testcase struct { -+ name string -+ path string -+ errString string -+ args []string -+ wantErr bool -+ sep separatorLoadOption -+ } -+ // For normal cases, default err is "invalid socket path: unix:///var/run/isula_build.sock". 
-+ // As daemon is not running as we run unit test. -+ var testcases = []testcase{ -+ { -+ name: "TC1 - normal case", -+ path: tmpDir.Path(), -+ errString: "isula_build.sock", -+ wantErr: true, -+ }, -+ } -+ -+ for _, tc := range testcases { -+ t.Run(tc.name, func(t *testing.T) { -+ loadCmd := NewLoadCmd() -+ loadOpts = loadOptions{ -+ path: tc.path, -+ sep: tc.sep, -+ } -+ err := loadCmd.Execute() -+ assert.Equal(t, err != nil, true) -+ -+ err = loadCommand(loadCmd, tc.args) -+ if tc.wantErr { -+ assert.ErrorContains(t, err, tc.errString) -+ } -+ if !tc.wantErr { -+ assert.NilError(t, err) -+ } -+ }) -+ } - } - - func TestRunLoad(t *testing.T) { -diff --git a/cmd/cli/mock.go b/cmd/cli/mock.go -index 2ae07d56..142c87fa 100644 ---- a/cmd/cli/mock.go -+++ b/cmd/cli/mock.go -@@ -318,6 +318,16 @@ func (f *mockDaemon) importImage(_ context.Context, opts ...grpc.CallOption) (pb - - func (f *mockDaemon) load(_ context.Context, in *pb.LoadRequest, opts ...grpc.CallOption) (pb.Control_LoadClient, error) { - f.loadReq = in -+ path := f.loadReq.Path -+ sep := f.loadReq.Sep -+ if !sep.Enabled { -+ if path == "" { -+ return &mockLoadClient{}, errors.Errorf("tarball path should not be empty") -+ } -+ _, err := resolveLoadPath(path) -+ return &mockLoadClient{}, err -+ } -+ - return &mockLoadClient{}, nil - } - --- -2.27.0 - diff --git a/patch/0076-daemon-finish-daemon-load-separated-image.patch b/patch/0076-daemon-finish-daemon-load-separated-image.patch deleted file mode 100644 index 5bbf1d4..0000000 --- a/patch/0076-daemon-finish-daemon-load-separated-image.patch +++ /dev/null @@ -1,389 +0,0 @@ -From 6545a2222419954045cf4b80cc9f03f918e568af Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Tue, 26 Oct 2021 14:21:02 +0800 -Subject: [PATCH] daemon:finish daemon load separated image - -reason: support isula-build daemon side load separated image -ABI change(daemon): none -Load process changes: -1. 
construct image tarball at the beginning of load process - - input: separated images - - output: none - - addition: new images in local storages - -Signed-off-by: DCCooper <1866858@gmail.com> -Signed-off-by: lixiang ---- - daemon/load.go | 306 +++++++++++++++++++++++++++++++++++++++++++++++-- - 1 file changed, 294 insertions(+), 12 deletions(-) - -diff --git a/daemon/load.go b/daemon/load.go -index 2fb8e27d..41690abc 100644 ---- a/daemon/load.go -+++ b/daemon/load.go -@@ -14,11 +14,16 @@ - package daemon - - import ( -+ "io/ioutil" -+ "os" -+ "path/filepath" -+ - "github.com/containers/image/v5/docker/tarfile" - ociarchive "github.com/containers/image/v5/oci/archive" - "github.com/containers/image/v5/transports/alltransports" - "github.com/containers/image/v5/types" - "github.com/containers/storage" -+ "github.com/containers/storage/pkg/archive" - securejoin "github.com/cyphar/filepath-securejoin" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -@@ -33,30 +38,108 @@ import ( - "isula.org/isula-build/util" - ) - -+const ( -+ tmpBaseDirName = "base" -+ tmpAppDirName = "app" -+ tmpLibDirName = "lib" -+ unionCompressedTarName = "all.tar.gz" -+) -+ -+type loadImageTmpDir struct { -+ app string -+ base string -+ lib string -+ root string -+} -+ -+type separatorLoad struct { -+ log *logrus.Entry -+ tmpDir loadImageTmpDir -+ info tarballInfo -+ appName string -+ basePath string -+ appPath string -+ libPath string -+ dir string -+ skipCheck bool -+ enabled bool -+} -+ - type loadOptions struct { -- path string -- format string -+ path string -+ format string -+ logEntry *logrus.Entry -+ sep separatorLoad - } - --func (b *Backend) getLoadOptions(req *pb.LoadRequest) loadOptions { -- return loadOptions{ -+func (b *Backend) getLoadOptions(req *pb.LoadRequest) (loadOptions, error) { -+ var opt = loadOptions{ - path: req.GetPath(), -+ sep: separatorLoad{ -+ appName: req.GetSep().GetApp(), -+ basePath: req.GetSep().GetBase(), -+ libPath: req.GetSep().GetLib(), -+ dir: req.GetSep().GetDir(), -+ skipCheck: req.GetSep().GetSkipCheck(), -+ enabled: req.GetSep().GetEnabled(), -+ }, -+ logEntry: logrus.WithFields(logrus.Fields{"LoadID": req.GetLoadID()}), - } -+ -+ // normal loadOptions -+ if !opt.sep.enabled { -+ if err := util.CheckLoadFile(opt.path); err != nil { -+ return loadOptions{}, err -+ } -+ return opt, nil -+ } -+ -+ // load separated images -+ // log is used for sep methods -+ opt.sep.log = opt.logEntry -+ tmpRoot := filepath.Join(b.daemon.opts.DataRoot, filepath.Join(dataRootTmpDirPrefix, req.GetLoadID())) -+ opt.sep.tmpDir.root = tmpRoot -+ opt.sep.tmpDir.base = filepath.Join(tmpRoot, tmpBaseDirName) -+ opt.sep.tmpDir.app = filepath.Join(tmpRoot, tmpAppDirName) -+ opt.sep.tmpDir.lib = filepath.Join(tmpRoot, tmpLibDirName) -+ -+ // check image name and add "latest" tag if not present -+ _, appImgName, err := image.GetNamedTaggedReference(opt.sep.appName) -+ if err != nil { -+ return loadOptions{}, err -+ } -+ opt.sep.appName = appImgName -+ -+ return opt, nil - } - - // Load loads the image - func (b *Backend) Load(req *pb.LoadRequest, stream pb.Control_LoadServer) error { -- logrus.Info("LoadRequest received") -+ logrus.WithFields(logrus.Fields{ -+ "LoadID": req.GetLoadID(), -+ }).Info("LoadRequest received") - - var ( - si *storage.Image - repoTags [][]string -- err error - ) -- opts := b.getLoadOptions(req) -+ opts, err := b.getLoadOptions(req) -+ if err != nil { -+ return errors.Wrap(err, "process load options failed") -+ } -+ -+ defer func() { -+ if 
tErr := os.RemoveAll(opts.sep.tmpDir.root); tErr != nil { -+ opts.logEntry.Warnf("Removing load tmp directory %q failed: %v", opts.sep.tmpDir.root, tErr) -+ } -+ }() - -- if cErr := util.CheckLoadFile(req.Path); cErr != nil { -- return cErr -+ // construct separated images -+ if opts.sep.enabled { -+ if lErr := loadSeparatedImage(&opts); lErr != nil { -+ opts.logEntry.Errorf("Load separated image for %s failed: %v", opts.sep.appName, lErr) -+ return lErr -+ } - } - - repoTags, err = tryToParseImageFormatFromTarball(b.daemon.opts.DataRoot, &opts) -@@ -149,8 +232,13 @@ func getDockerRepoTagFromImageTar(systemContext *types.SystemContext, path strin - // tmp dir will be removed after NewSourceFromFileWithContext - tarfileSource, err := tarfile.NewSourceFromFileWithContext(systemContext, path) - if err != nil { -- return nil, errors.Wrapf(err, "failed to get the source of loading tar file") -+ return nil, errors.Wrap(err, "failed to get the source of loading tar file") - } -+ defer func() { -+ if cErr := tarfileSource.Close(); cErr != nil { -+ logrus.Warnf("tar file source close failed: %v", cErr) -+ } -+ }() - - topLevelImageManifest, err := tarfileSource.LoadTarManifest() - if err != nil || len(topLevelImageManifest) == 0 { -@@ -172,12 +260,12 @@ func getOCIRepoTagFromImageTar(systemContext *types.SystemContext, path string) - - srcRef, err := alltransports.ParseImageName(exporter.FormatTransport(constant.OCIArchiveTransport, path)) - if err != nil { -- return nil, errors.Wrapf(err, "failed to parse image name of oci image format") -+ return nil, errors.Wrap(err, "failed to parse image name of oci image format") - } - - tarManifest, err := ociarchive.LoadManifestDescriptorWithContext(systemContext, srcRef) - if err != nil { -- return nil, errors.Wrapf(err, "failed to load manifest descriptor of oci image format") -+ return nil, errors.Wrap(err, "failed to load manifest descriptor of oci image format") - } - - // For now, we only support load single image in archive file -@@ -187,3 +275,197 @@ func getOCIRepoTagFromImageTar(systemContext *types.SystemContext, path string) - - return [][]string{{}}, nil - } -+ -+func loadSeparatedImage(opt *loadOptions) error { -+ s := &opt.sep -+ s.log.Infof("Starting load separated image %s", s.appName) -+ -+ // load manifest file to get tarball info -+ if err := s.getTarballInfo(); err != nil { -+ return errors.Wrap(err, "failed to get tarball info") -+ } -+ if err := s.constructTarballInfo(); err != nil { -+ return err -+ } -+ // checksum for image tarballs -+ if err := s.tarballCheckSum(); err != nil { -+ return err -+ } -+ // process image tarballs and get final constructed image tarball -+ tarPath, err := s.processTarballs() -+ if err != nil { -+ return err -+ } -+ opt.path = tarPath -+ -+ return nil -+} -+ -+func (s *separatorLoad) getTarballInfo() error { -+ manifest, err := securejoin.SecureJoin(s.dir, manifestFile) -+ if err != nil { -+ return errors.Wrap(err, "join manifest file path failed") -+ } -+ -+ var t = make(map[string]tarballInfo) -+ if err = util.LoadJSONFile(manifest, &t); err != nil { -+ return errors.Wrap(err, "load manifest file failed") -+ } -+ -+ tarball, ok := t[s.appName] -+ if !ok { -+ return errors.Errorf("failed to find app image %s", s.appName) -+ } -+ s.info = tarball -+ -+ return nil -+} -+ -+func (s *separatorLoad) constructTarballInfo() (err error) { -+ s.log.Infof("construct image tarball info for %s", s.appName) -+ // fill up path for separator -+ // this case should not happened since client side already check this flag 
-+ if len(s.appName) == 0 { -+ return errors.New("app image name should not be empty") -+ } -+ s.appPath, err = securejoin.SecureJoin(s.dir, s.info.AppTarName) -+ if err != nil { -+ return err -+ } -+ -+ if len(s.basePath) == 0 { -+ if len(s.info.BaseTarName) == 0 { -+ return errors.Errorf("base image %s tarball can not be empty", s.info.BaseImageName) -+ } -+ s.log.Info("Base image path is empty, use path from manifest") -+ s.basePath, err = securejoin.SecureJoin(s.dir, s.info.BaseTarName) -+ if err != nil { -+ return err -+ } -+ } -+ if len(s.libPath) == 0 && len(s.info.LibTarName) != 0 { -+ s.log.Info("Lib image path is empty, use path from manifest") -+ s.libPath, err = securejoin.SecureJoin(s.dir, s.info.LibTarName) -+ if err != nil { -+ return err -+ } -+ } -+ -+ return nil -+} -+ -+func (s *separatorLoad) tarballCheckSum() error { -+ if s.skipCheck { -+ s.log.Info("Skip checksum for tarballs") -+ return nil -+ } -+ -+ // app image tarball can not be empty -+ if len(s.appPath) == 0 { -+ return errors.New("app image tarball path can not be empty") -+ } -+ if err := util.CheckSum(s.appPath, s.info.AppHash); err != nil { -+ return errors.Wrapf(err, "check sum for file %q failed", s.appPath) -+ } -+ -+ // base image tarball can not be empty -+ if len(s.basePath) == 0 { -+ return errors.New("base image tarball path can not be empty") -+ } -+ if err := util.CheckSum(s.basePath, s.info.BaseHash); err != nil { -+ return errors.Wrapf(err, "check sum for file %q failed", s.basePath) -+ } -+ -+ // lib image may be empty image -+ if len(s.libPath) != 0 { -+ if err := util.CheckSum(s.libPath, s.info.LibHash); err != nil { -+ return errors.Wrapf(err, "check sum for file %q failed", s.libPath) -+ } -+ } -+ -+ return nil -+} -+ -+func (s *separatorLoad) processTarballs() (string, error) { -+ if err := s.unpackTarballs(); err != nil { -+ return "", err -+ } -+ -+ if err := s.reconstructImage(); err != nil { -+ return "", err -+ } -+ -+ // pack app image to tarball -+ tarPath := filepath.Join(s.tmpDir.root, unionCompressedTarName) -+ if err := util.PackFiles(s.tmpDir.base, tarPath, archive.Gzip, true); err != nil { -+ return "", err -+ } -+ -+ return tarPath, nil -+} -+ -+func (s *separatorLoad) unpackTarballs() error { -+ if err := s.makeTempDir(); err != nil { -+ return errors.Wrap(err, "failed to make temporary directories") -+ } -+ -+ // unpack base first and the later images will be moved here -+ if err := util.UnpackFile(s.basePath, s.tmpDir.base, archive.Gzip, false); err != nil { -+ return errors.Wrapf(err, "unpack base tarball %q failed", s.basePath) -+ } -+ -+ if err := util.UnpackFile(s.appPath, s.tmpDir.app, archive.Gzip, false); err != nil { -+ return errors.Wrapf(err, "unpack app tarball %q failed", s.appPath) -+ } -+ -+ if len(s.libPath) != 0 { -+ if err := util.UnpackFile(s.libPath, s.tmpDir.lib, archive.Gzip, false); err != nil { -+ return errors.Wrapf(err, "unpack lib tarball %q failed", s.libPath) -+ } -+ } -+ -+ return nil -+} -+ -+func (s *separatorLoad) reconstructImage() error { -+ files, err := ioutil.ReadDir(s.tmpDir.app) -+ if err != nil { -+ return err -+ } -+ -+ for _, f := range files { -+ src := filepath.Join(s.tmpDir.app, f.Name()) -+ dest := filepath.Join(s.tmpDir.base, f.Name()) -+ if err := os.Rename(src, dest); err != nil { -+ return errors.Wrapf(err, "reconstruct app file %q failed", s.info.AppTarName) -+ } -+ } -+ -+ if len(s.libPath) != 0 { -+ files, err := ioutil.ReadDir(s.tmpDir.lib) -+ if err != nil { -+ return err -+ } -+ -+ for _, f := range files { -+ src := 
filepath.Join(s.tmpDir.lib, f.Name()) -+ dest := filepath.Join(s.tmpDir.base, f.Name()) -+ if err := os.Rename(src, dest); err != nil { -+ return errors.Wrapf(err, "reconstruct lib file %q failed", s.info.LibTarName) -+ } -+ } -+ } -+ -+ return nil -+} -+ -+func (s *separatorLoad) makeTempDir() error { -+ dirs := []string{s.tmpDir.root, s.tmpDir.app, s.tmpDir.base, s.tmpDir.lib} -+ for _, dir := range dirs { -+ if err := os.MkdirAll(dir, constant.DefaultRootDirMode); err != nil { -+ return err -+ } -+ } -+ -+ return nil -+} --- -2.27.0 - diff --git a/patch/0077-test-optimize-save-client-options-and-add-unit-test.patch b/patch/0077-test-optimize-save-client-options-and-add-unit-test.patch deleted file mode 100644 index 2b65137..0000000 --- a/patch/0077-test-optimize-save-client-options-and-add-unit-test.patch +++ /dev/null @@ -1,440 +0,0 @@ -From 6e321766a0b4ace2211c9d39cfce58bf4627e63f Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Wed, 27 Oct 2021 21:32:12 +0800 -Subject: [PATCH 04/16] test: optimize save client options and add unit test - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - cmd/cli/save.go | 84 ++++++++-------- - cmd/cli/save_test.go | 232 ++++++++++++++++++++++++++++++++++++++++++- - 2 files changed, 270 insertions(+), 46 deletions(-) - -diff --git a/cmd/cli/save.go b/cmd/cli/save.go -index 4d22798a..599d394d 100644 ---- a/cmd/cli/save.go -+++ b/cmd/cli/save.go -@@ -18,7 +18,6 @@ import ( - "fmt" - "io" - "os" -- "path/filepath" - "strings" - - "github.com/pkg/errors" -@@ -51,21 +50,21 @@ const ( - saveExample = `isula-build ctr-img save busybox:latest -o busybox.tar - isula-build ctr-img save 21c3e96ac411 -o myimage.tar - isula-build ctr-img save busybox:latest alpine:3.9 -o all.tar --isula-build ctr-img save app:latest app1:latest -d Images -+isula-build ctr-img save app:latest -b busybox:latest -d Images - isula-build ctr-img save app:latest app1:latest -d Images -b busybox:latest -l lib:latest -r rename.json` - ) - - // NewSaveCmd cmd for container image saving - func NewSaveCmd() *cobra.Command { - saveCmd := &cobra.Command{ -- Use: "save IMAGE [IMAGE...] [FLAGS]", -+ Use: "save IMAGE [IMAGE...] 
FLAGS", - Short: "Save image to tarball", - Example: saveExample, - RunE: saveCommand, - } - - saveCmd.PersistentFlags().StringVarP(&saveOpts.path, "output", "o", "", "Path to save the tarball") -- saveCmd.PersistentFlags().StringVarP(&saveOpts.sep.destPath, "dest", "d", "Images", "Destination file directory to store separated images") -+ saveCmd.PersistentFlags().StringVarP(&saveOpts.sep.destPath, "dest", "d", "", "Destination file directory to store separated images") - saveCmd.PersistentFlags().StringVarP(&saveOpts.sep.baseImgName, "base", "b", "", "Base image name of separated images") - saveCmd.PersistentFlags().StringVarP(&saveOpts.sep.libImageName, "lib", "l", "", "Lib image name of separated images") - saveCmd.PersistentFlags().StringVarP(&saveOpts.sep.renameFile, "rename", "r", "", "Rename json file path of separated images") -@@ -95,12 +94,16 @@ func saveCommand(cmd *cobra.Command, args []string) error { - } - - func (sep *separatorSaveOption) check(pwd string) error { -- if len(sep.baseImgName) != 0 { -- if !util.IsValidImageName(sep.baseImgName) { -- return errors.Errorf("invalid base image name %s", sep.baseImgName) -- } -+ if len(sep.baseImgName) == 0 { -+ return errors.New("base image name(-b) must be provided") -+ } -+ if !util.IsValidImageName(sep.baseImgName) { -+ return errors.Errorf("invalid base image name %s", sep.baseImgName) - } - if len(sep.libImageName) != 0 { -+ if sep.libImageName == sep.baseImgName { -+ return errors.New("base and lib images are the same") -+ } - if !util.IsValidImageName(sep.libImageName) { - return errors.Errorf("invalid lib image name %s", sep.libImageName) - } -@@ -108,16 +111,12 @@ func (sep *separatorSaveOption) check(pwd string) error { - if len(sep.destPath) == 0 { - sep.destPath = "Images" - } -- if !filepath.IsAbs(sep.destPath) { -- sep.destPath = util.MakeAbsolute(sep.destPath, pwd) -- } -+ sep.destPath = util.MakeAbsolute(sep.destPath, pwd) - if util.IsExist(sep.destPath) { -- return errors.Errorf("output file already exist: %q, try to remove existing tarball or rename output file", sep.destPath) -+ return errors.Errorf("dest path already exist: %q, try to remove or rename it", sep.destPath) - } - if len(sep.renameFile) != 0 { -- if !filepath.IsAbs(sep.renameFile) { -- sep.renameFile = util.MakeAbsolute(sep.renameFile, pwd) -- } -+ sep.renameFile = util.MakeAbsolute(sep.renameFile, pwd) - } - - return nil -@@ -136,39 +135,36 @@ func (opt *saveOptions) checkSaveOpts(args []string) error { - return errors.New("get current path failed") - } - -- // normal save -- if !opt.sep.isEnabled() { -- // only check oci format when doing normal save operation -- if opt.format == constant.OCITransport && len(args) >= 2 { -- return errors.New("oci image format now only supports saving single image") -+ // separator save -+ if opt.sep.isEnabled() { -+ if len(opt.path) != 0 { -+ return errors.New("conflict flags between -o and [-b -l -r -d]") - } -- if err := util.CheckImageFormat(opt.format); err != nil { -+ // separate image only support docker image spec -+ opt.format = constant.DockerTransport -+ if err := opt.sep.check(pwd); err != nil { - return err - } -- if len(opt.path) == 0 { -- return errors.New("output path should not be empty") -- } -- if !filepath.IsAbs(opt.path) { -- opt.path = util.MakeAbsolute(opt.path, pwd) -- } -- if util.IsExist(opt.path) { -- return errors.Errorf("output file already exist: %q, try to remove existing tarball or rename output file", opt.path) -- } -+ opt.sep.enabled = true -+ - return nil - } - -- // separator 
save -- opt.sep.enabled = true -- if len(opt.path) != 0 { -- return errors.New("conflict options between -o and [-b -l -r]") -+ // normal save -+ // only check oci format when doing normal save operation -+ if len(opt.path) == 0 { -+ return errors.New("output path(-o) should not be empty") - } -- // separate image only support docker image spec -- opt.format = constant.DockerTransport -- -- if err := opt.sep.check(pwd); err != nil { -+ if opt.format == constant.OCITransport && len(args) >= 2 { -+ return errors.New("oci image format now only supports saving single image") -+ } -+ if err := util.CheckImageFormat(opt.format); err != nil { - return err - } -- -+ opt.path = util.MakeAbsolute(opt.path, pwd) -+ if util.IsExist(opt.path) { -+ return errors.Errorf("output file already exist: %q, try to remove existing tarball or rename output file", opt.path) -+ } - return nil - } - -@@ -177,10 +173,10 @@ func runSave(ctx context.Context, cli Cli, args []string) error { - saveOpts.images = args - - sep := &pb.SeparatorSave{ -- Base: saveOpts.sep.baseImgName, -- Lib: saveOpts.sep.libImageName, -- Rename: saveOpts.sep.renameFile, -- Dest: saveOpts.sep.destPath, -+ Base: saveOpts.sep.baseImgName, -+ Lib: saveOpts.sep.libImageName, -+ Rename: saveOpts.sep.renameFile, -+ Dest: saveOpts.sep.destPath, - Enabled: saveOpts.sep.enabled, - } - -@@ -212,5 +208,5 @@ func runSave(ctx context.Context, cli Cli, args []string) error { - } - - func (sep *separatorSaveOption) isEnabled() bool { -- return util.AnyFlagSet(sep.baseImgName, sep.libImageName, sep.renameFile) -+ return util.AnyFlagSet(sep.baseImgName, sep.libImageName, sep.renameFile, sep.destPath) - } -diff --git a/cmd/cli/save_test.go b/cmd/cli/save_test.go -index 3fe6bf81..72f6ded3 100644 ---- a/cmd/cli/save_test.go -+++ b/cmd/cli/save_test.go -@@ -16,10 +16,13 @@ package main - import ( - "context" - "fmt" -+ "os" -+ "path/filepath" - "testing" - - "gotest.tools/v3/assert" - "gotest.tools/v3/fs" -+ constant "isula.org/isula-build" - ) - - func TestSaveCommand(t *testing.T) { -@@ -38,7 +41,7 @@ func TestSaveCommand(t *testing.T) { - wantErr bool - } - -- // For normal cases, default err is "invalid socket path: unix:///var/run/isula_build.sock". -+ // For normal cases, default err is "invalid socket path: unix:///var/run/isula_build.sock". - // As daemon is not running as we run unit test. 
- var testcases = []testcase{ - { -@@ -86,7 +89,7 @@ func TestSaveCommand(t *testing.T) { - path: "", - args: []string{"testImage"}, - wantErr: true, -- errString: "output path should not be empty", -+ errString: "output path(-o) should not be empty", - format: "docker", - }, - { -@@ -194,3 +197,228 @@ func TestRunSave(t *testing.T) { - }) - } - } -+ -+func TestCheckSaveOpts(t *testing.T) { -+ pwd, err := os.Getwd() -+ assert.NilError(t, err) -+ existDirPath := filepath.Join(pwd, "DirAlreadyExist") -+ existFilePath := filepath.Join(pwd, "FileAlreadExist") -+ err = os.Mkdir(existDirPath, constant.DefaultRootDirMode) -+ assert.NilError(t, err) -+ _, err = os.Create(existFilePath) -+ assert.NilError(t, err) -+ defer os.Remove(existDirPath) -+ defer os.Remove(existFilePath) -+ -+ type fields struct { -+ images []string -+ sep separatorSaveOption -+ path string -+ saveID string -+ format string -+ } -+ type args struct { -+ args []string -+ } -+ tests := []struct { -+ name string -+ fields fields -+ args args -+ wantErr bool -+ }{ -+ { -+ name: "TC-normal save", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ path: "test.tar", -+ format: constant.DockerTransport, -+ }, -+ }, -+ { -+ name: "TC-normal save with empty args", -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-normal save with path has colon in it", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ path: "invalid:path.tar", -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-normal save without path", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-normal save with oci format", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ path: "test.tar", -+ format: constant.OCITransport, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-normal save with invalid format", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ path: "test.tar", -+ format: "invalidFormat", -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-normal save with path already exist", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ path: existFilePath, -+ format: constant.DockerTransport, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated save", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ format: constant.DockerTransport, -+ sep: separatorSaveOption{ -+ baseImgName: "base", -+ libImageName: "lib", -+ renameFile: "rename.json", -+ destPath: "Images", -+ }, -+ }, -+ }, -+ { -+ name: "TC-separated save with -o flag", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ path: "test.tar", -+ format: constant.DockerTransport, -+ sep: separatorSaveOption{ -+ baseImgName: "base", -+ libImageName: "lib", -+ renameFile: "rename.json", -+ destPath: "Images", -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated save without -b flag", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ format: 
constant.DockerTransport, -+ sep: separatorSaveOption{ -+ libImageName: "lib", -+ renameFile: "rename.json", -+ destPath: "Images", -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated save invalid base image name", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ format: constant.DockerTransport, -+ sep: separatorSaveOption{ -+ baseImgName: "in:valid:base:name", -+ libImageName: "lib", -+ renameFile: "rename.json", -+ destPath: "Images", -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated save invalid lib image name", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ format: constant.DockerTransport, -+ sep: separatorSaveOption{ -+ baseImgName: "base", -+ libImageName: "in:valid:lib:name", -+ renameFile: "rename.json", -+ destPath: "Images", -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated save without dest option", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ format: constant.DockerTransport, -+ sep: separatorSaveOption{ -+ baseImgName: "base", -+ libImageName: "lib", -+ renameFile: "rename.json", -+ }, -+ }, -+ }, -+ { -+ name: "TC-separated save with dest already exist", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ format: constant.DockerTransport, -+ sep: separatorSaveOption{ -+ baseImgName: "base", -+ libImageName: "lib", -+ renameFile: "rename.json", -+ destPath: existDirPath, -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated save with same base and lib image", -+ args: args{[]string{"app:latest", "app1:latest"}}, -+ fields: fields{ -+ images: []string{"app:latest", "app1:latest"}, -+ format: constant.DockerTransport, -+ sep: separatorSaveOption{ -+ baseImgName: "same:image", -+ libImageName: "same:image", -+ renameFile: "rename.json", -+ destPath: existDirPath, -+ }, -+ }, -+ wantErr: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ opt := &saveOptions{ -+ images: tt.fields.images, -+ sep: tt.fields.sep, -+ path: tt.fields.path, -+ saveID: tt.fields.saveID, -+ format: tt.fields.format, -+ } -+ if err := opt.checkSaveOpts(tt.args.args); (err != nil) != tt.wantErr { -+ t.Errorf("saveOptions.checkSaveOpts() error = %v, wantErr %v", err, tt.wantErr) -+ } -+ }) -+ } -+} --- -2.27.0 - diff --git a/patch/0078-test-optimize-load-client-options-and-add-unit-test.patch b/patch/0078-test-optimize-load-client-options-and-add-unit-test.patch deleted file mode 100644 index 7d6b607..0000000 --- a/patch/0078-test-optimize-load-client-options-and-add-unit-test.patch +++ /dev/null @@ -1,409 +0,0 @@ -From 2f8f5aa8c8444e9d9c39eba2c060e4e9fa4089bc Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Thu, 28 Oct 2021 15:03:04 +0800 -Subject: [PATCH 06/16] test: optimize load client options and add unit test - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - cmd/cli/load.go | 77 ++++++++-------- - cmd/cli/load_test.go | 209 +++++++++++++++++++++++++++++++++++++++++++ - cmd/cli/mock.go | 7 +- - 3 files changed, 252 insertions(+), 41 deletions(-) - -diff --git a/cmd/cli/load.go b/cmd/cli/load.go -index 2a9df772..cf142592 100644 ---- a/cmd/cli/load.go -+++ b/cmd/cli/load.go -@@ -20,7 +20,6 @@ import ( - "fmt" - "io" - "os" -- "path/filepath" - - "github.com/pkg/errors" - 
"github.com/spf13/cobra" -@@ -56,7 +55,7 @@ isula-build ctr-img load -i app:latest -d /home/Images -b /home/Images/base.tar. - // NewLoadCmd returns image load command - func NewLoadCmd() *cobra.Command { - loadCmd := &cobra.Command{ -- Use: "load [FLAGS]", -+ Use: "load FLAGS", - Short: "Load images", - Example: loadExample, - Args: util.NoArgs, -@@ -122,20 +121,13 @@ func runLoad(ctx context.Context, cli Cli) error { - return err - } - --func resolveLoadPath(path string) (string, error) { -+func resolveLoadPath(path, pwd string) (string, error) { - // check input - if path == "" { - return "", errors.New("tarball path should not be empty") - } - -- if !filepath.IsAbs(path) { -- pwd, err := os.Getwd() -- if err != nil { -- return "", errors.Wrap(err, "get current path failed while loading image") -- } -- path = util.MakeAbsolute(path, pwd) -- } -- -+ path = util.MakeAbsolute(path, pwd) - if err := util.CheckLoadFile(path); err != nil { - return "", err - } -@@ -144,30 +136,35 @@ func resolveLoadPath(path string) (string, error) { - } - - func (opt *loadOptions) checkLoadOpts() error { -- // normal load -- if !opt.sep.isEnabled() { -- path, err := resolveLoadPath(opt.path) -- if err != nil { -- return err -- } -- opt.path = path -- -- return nil -+ pwd, err := os.Getwd() -+ if err != nil { -+ return errors.New("get current path failed") - } - - // load separated image -- opt.sep.enabled = true -- if len(opt.path) == 0 { -- return errors.New("app image should not be empty") -- } -+ if opt.sep.isEnabled() { -+ // Use opt.path as app image name when operating separated images -+ // this can be mark as a switch for handling separated images -+ opt.sep.app = opt.path -+ -+ if len(opt.sep.app) == 0 { -+ return errors.New("app image name(-i) should not be empty") -+ } -+ -+ if cErr := opt.sep.check(pwd); cErr != nil { -+ return cErr -+ } -+ opt.sep.enabled = true - -- // Use opt.path as app image name when operating separated images -- // this can be mark as a switch for handling separated images -- opt.sep.app = opt.path -+ return nil -+ } - -- if err := opt.sep.check(); err != nil { -+ // normal load -+ path, err := resolveLoadPath(opt.path, pwd) -+ if err != nil { - return err - } -+ opt.path = path - - return nil - } -@@ -176,35 +173,35 @@ func (sep *separatorLoadOption) isEnabled() bool { - return util.AnyFlagSet(sep.dir, sep.base, sep.lib, sep.app) - } - --func (sep *separatorLoadOption) check() error { -- pwd, err := os.Getwd() -- if err != nil { -- return errors.New("get current path failed") -+func (sep *separatorLoadOption) check(pwd string) error { -+ if len(sep.dir) == 0 { -+ return errors.New("image tarball directory should not be empty") - } -+ -+ if sep.base == sep.lib { -+ return errors.New("base and lib tarballs are the same") -+ } -+ - if !util.IsValidImageName(sep.app) { - return errors.Errorf("invalid image name: %s", sep.app) - } - - if len(sep.base) != 0 { -- path, err := resolveLoadPath(sep.base) -+ path, err := resolveLoadPath(sep.base, pwd) - if err != nil { - return errors.Wrap(err, "resolve base tarball path failed") - } - sep.base = path - } - if len(sep.lib) != 0 { -- path, err := resolveLoadPath(sep.lib) -+ path, err := resolveLoadPath(sep.lib, pwd) - if err != nil { - return errors.Wrap(err, "resolve lib tarball path failed") - } - sep.lib = path - } -- if len(sep.dir) == 0 { -- return errors.New("image tarball directory should not be empty") -- } -- if !filepath.IsAbs(sep.dir) { -- sep.dir = util.MakeAbsolute(sep.dir, pwd) -- } -+ -+ sep.dir = 
util.MakeAbsolute(sep.dir, pwd) - if !util.IsExist(sep.dir) { - return errors.Errorf("image tarball directory %s is not exist", sep.dir) - } -diff --git a/cmd/cli/load_test.go b/cmd/cli/load_test.go -index b7bf2a57..0bad4cbd 100644 ---- a/cmd/cli/load_test.go -+++ b/cmd/cli/load_test.go -@@ -16,6 +16,7 @@ package main - import ( - "context" - "io/ioutil" -+ "os" - "path/filepath" - "testing" - -@@ -121,3 +122,211 @@ func TestRunLoad(t *testing.T) { - }) - } - } -+ -+func TestResolveLoadPath(t *testing.T) { -+ dir := fs.NewDir(t, t.Name()) -+ fileWithContent := fs.NewFile(t, filepath.Join(t.Name(), "test.tar")) -+ ioutil.WriteFile(fileWithContent.Path(), []byte("This is test file"), constant.DefaultRootFileMode) -+ emptyFile := fs.NewFile(t, filepath.Join(t.Name(), "empty.tar")) -+ -+ defer dir.Remove() -+ defer fileWithContent.Remove() -+ defer emptyFile.Remove() -+ -+ type args struct { -+ path string -+ pwd string -+ } -+ tests := []struct { -+ name string -+ args args -+ want string -+ wantErr bool -+ }{ -+ { -+ name: "TC-normal load path", -+ args: args{ -+ path: fileWithContent.Path(), -+ pwd: dir.Path(), -+ }, -+ want: fileWithContent.Path(), -+ }, -+ { -+ name: "TC-empty load path", -+ args: args{ -+ pwd: dir.Path(), -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-empty load file", -+ args: args{ -+ path: emptyFile.Path(), -+ pwd: dir.Path(), -+ }, -+ wantErr: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ got, err := resolveLoadPath(tt.args.path, tt.args.pwd) -+ if (err != nil) != tt.wantErr { -+ t.Errorf("resolveLoadPath() error = %v, wantErr %v", err, tt.wantErr) -+ return -+ } -+ if got != tt.want { -+ t.Errorf("resolveLoadPath() = %v, want %v", got, tt.want) -+ } -+ }) -+ } -+} -+ -+func TestCheckLoadOpts(t *testing.T) { -+ root := fs.NewDir(t, t.Name()) -+ defer root.Remove() -+ emptyFile, err := os.Create(filepath.Join(root.Path(), "empty.tar")) -+ assert.NilError(t, err) -+ fileWithContent, err := os.Create(filepath.Join(root.Path(), "test.tar")) -+ assert.NilError(t, err) -+ ioutil.WriteFile(fileWithContent.Name(), []byte("This is test file"), constant.DefaultRootFileMode) -+ baseFile, err := os.Create(filepath.Join(root.Path(), "base.tar")) -+ assert.NilError(t, err) -+ ioutil.WriteFile(baseFile.Name(), []byte("This is base file"), constant.DefaultRootFileMode) -+ libFile, err := os.Create(filepath.Join(root.Path(), "lib.tar")) -+ ioutil.WriteFile(libFile.Name(), []byte("This is lib file"), constant.DefaultRootFileMode) -+ -+ type fields struct { -+ path string -+ loadID string -+ sep separatorLoadOption -+ } -+ tests := []struct { -+ name string -+ fields fields -+ wantErr bool -+ }{ -+ { -+ name: "TC-normal load options", -+ fields: fields{ -+ path: fileWithContent.Name(), -+ }, -+ }, -+ { -+ name: "TC-empty load path", -+ wantErr: true, -+ }, -+ { -+ name: "TC-empty load file", -+ fields: fields{ -+ path: emptyFile.Name(), -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated load", -+ fields: fields{ -+ path: "app:latest", -+ sep: separatorLoadOption{ -+ dir: root.Path(), -+ app: "app:latest", -+ base: baseFile.Name(), -+ lib: libFile.Name(), -+ }, -+ }, -+ }, -+ { -+ name: "TC-separated load with empty app name", -+ fields: fields{ -+ sep: separatorLoadOption{ -+ dir: root.Path(), -+ base: baseFile.Name(), -+ lib: libFile.Name(), -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated load with empty dir", -+ fields: fields{ -+ path: "app:latest", -+ sep: separatorLoadOption{ -+ base: baseFile.Name(), -+ lib: 
libFile.Name(), -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated load with invalid app name", -+ fields: fields{ -+ path: "invalid:app:name", -+ sep: separatorLoadOption{ -+ dir: root.Path(), -+ base: baseFile.Name(), -+ lib: libFile.Name(), -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated load with empty base tarball", -+ fields: fields{ -+ path: "app:latest", -+ sep: separatorLoadOption{ -+ dir: root.Path(), -+ base: emptyFile.Name(), -+ lib: libFile.Name(), -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated load with empty lib tarball", -+ fields: fields{ -+ path: "app:latest", -+ sep: separatorLoadOption{ -+ dir: root.Path(), -+ base: baseFile.Name(), -+ lib: emptyFile.Name(), -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated load with same base and lib tarball", -+ fields: fields{ -+ path: "app:latest", -+ sep: separatorLoadOption{ -+ dir: root.Path(), -+ base: fileWithContent.Name(), -+ lib: fileWithContent.Name(), -+ }, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-separated load with dir not exist", -+ fields: fields{ -+ path: "app:latest", -+ sep: separatorLoadOption{ -+ dir: "path not exist", -+ base: baseFile.Name(), -+ lib: libFile.Name(), -+ }, -+ }, -+ wantErr: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ opt := &loadOptions{ -+ path: tt.fields.path, -+ loadID: tt.fields.loadID, -+ sep: tt.fields.sep, -+ } -+ if err := opt.checkLoadOpts(); (err != nil) != tt.wantErr { -+ t.Errorf("loadOptions.checkLoadOpts() error = %v, wantErr %v", err, tt.wantErr) -+ } -+ }) -+ } -+} -diff --git a/cmd/cli/mock.go b/cmd/cli/mock.go -index 142c87fa..23a8a031 100644 ---- a/cmd/cli/mock.go -+++ b/cmd/cli/mock.go -@@ -16,6 +16,7 @@ package main - import ( - "context" - "io" -+ "os" - "testing" - - "github.com/gogo/protobuf/types" -@@ -324,7 +325,11 @@ func (f *mockDaemon) load(_ context.Context, in *pb.LoadRequest, opts ...grpc.Ca - if path == "" { - return &mockLoadClient{}, errors.Errorf("tarball path should not be empty") - } -- _, err := resolveLoadPath(path) -+ pwd, err := os.Getwd() -+ if err != nil { -+ return &mockLoadClient{}, err -+ } -+ _, err = resolveLoadPath(path, pwd) - return &mockLoadClient{}, err - } - --- -2.27.0 - diff --git a/patch/0079-bugfix-fix-when-load-separated-image-error-return.patch b/patch/0079-bugfix-fix-when-load-separated-image-error-return.patch deleted file mode 100644 index 9155ec7..0000000 --- a/patch/0079-bugfix-fix-when-load-separated-image-error-return.patch +++ /dev/null @@ -1,203 +0,0 @@ -From c5fe173afd31636bf014dac31f6e601d91e1ae53 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Sat, 30 Oct 2021 10:12:40 +0800 -Subject: [PATCH 12/16] bugfix: fix when load separated image error return - -reason: if base and lib dir are both not provided, daemon -side will read the info from "manifest" file in the dest dir -automatically, so no error return here - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - cmd/cli/load.go | 4 +-- - cmd/cli/load_test.go | 59 +++++++++++++++++++++++++++++++++----------- - 2 files changed, 47 insertions(+), 16 deletions(-) - -diff --git a/cmd/cli/load.go b/cmd/cli/load.go -index cf142592..44fefdd2 100644 ---- a/cmd/cli/load.go -+++ b/cmd/cli/load.go -@@ -178,7 +178,7 @@ func (sep *separatorLoadOption) check(pwd string) error { - return errors.New("image tarball directory should not be empty") - } - -- if sep.base == sep.lib { -+ if len(sep.base) != 0 && sep.base == sep.lib { - return errors.New("base and lib 
tarballs are the same") - } - -@@ -203,7 +203,7 @@ func (sep *separatorLoadOption) check(pwd string) error { - - sep.dir = util.MakeAbsolute(sep.dir, pwd) - if !util.IsExist(sep.dir) { -- return errors.Errorf("image tarball directory %s is not exist", sep.dir) -+ return errors.Errorf("image tarball directory %q is not exist", sep.dir) - } - - return nil -diff --git a/cmd/cli/load_test.go b/cmd/cli/load_test.go -index 0bad4cbd..cb8217ce 100644 ---- a/cmd/cli/load_test.go -+++ b/cmd/cli/load_test.go -@@ -15,6 +15,7 @@ package main - - import ( - "context" -+ "fmt" - "io/ioutil" - "os" - "path/filepath" -@@ -22,7 +23,9 @@ import ( - - "gotest.tools/v3/assert" - "gotest.tools/v3/fs" -+ - constant "isula.org/isula-build" -+ "isula.org/isula-build/util" - ) - - func TestLoadCmd(t *testing.T) { -@@ -182,6 +185,8 @@ func TestResolveLoadPath(t *testing.T) { - } - - func TestCheckLoadOpts(t *testing.T) { -+ pwd, err := os.Getwd() -+ assert.NilError(t, err) - root := fs.NewDir(t, t.Name()) - defer root.Remove() - emptyFile, err := os.Create(filepath.Join(root.Path(), "empty.tar")) -@@ -201,9 +206,10 @@ func TestCheckLoadOpts(t *testing.T) { - sep separatorLoadOption - } - tests := []struct { -- name string -- fields fields -- wantErr bool -+ name string -+ fields fields -+ wantErr bool -+ errMessage string - }{ - { - name: "TC-normal load options", -@@ -212,15 +218,17 @@ func TestCheckLoadOpts(t *testing.T) { - }, - }, - { -- name: "TC-empty load path", -- wantErr: true, -+ name: "TC-empty load path", -+ wantErr: true, -+ errMessage: "tarball path should not be empty", - }, - { - name: "TC-empty load file", - fields: fields{ - path: emptyFile.Name(), - }, -- wantErr: true, -+ wantErr: true, -+ errMessage: "loading file is empty", - }, - { - name: "TC-separated load", -@@ -243,7 +251,8 @@ func TestCheckLoadOpts(t *testing.T) { - lib: libFile.Name(), - }, - }, -- wantErr: true, -+ wantErr: true, -+ errMessage: "app image name(-i) should not be empty", - }, - { - name: "TC-separated load with empty dir", -@@ -254,7 +263,8 @@ func TestCheckLoadOpts(t *testing.T) { - lib: libFile.Name(), - }, - }, -- wantErr: true, -+ wantErr: true, -+ errMessage: "image tarball directory should not be empty", - }, - { - name: "TC-separated load with invalid app name", -@@ -266,7 +276,8 @@ func TestCheckLoadOpts(t *testing.T) { - lib: libFile.Name(), - }, - }, -- wantErr: true, -+ wantErr: true, -+ errMessage: fmt.Sprintf("invalid image name: %s", "invalid:app:name"), - }, - { - name: "TC-separated load with empty base tarball", -@@ -278,7 +289,8 @@ func TestCheckLoadOpts(t *testing.T) { - lib: libFile.Name(), - }, - }, -- wantErr: true, -+ wantErr: true, -+ errMessage: "resolve base tarball path failed: loading file is empty", - }, - { - name: "TC-separated load with empty lib tarball", -@@ -290,7 +302,8 @@ func TestCheckLoadOpts(t *testing.T) { - lib: emptyFile.Name(), - }, - }, -- wantErr: true, -+ wantErr: true, -+ errMessage: "resolve lib tarball path failed: loading file is empty", - }, - { - name: "TC-separated load with same base and lib tarball", -@@ -302,7 +315,8 @@ func TestCheckLoadOpts(t *testing.T) { - lib: fileWithContent.Name(), - }, - }, -- wantErr: true, -+ wantErr: true, -+ errMessage: "base and lib tarballs are the same", - }, - { - name: "TC-separated load with dir not exist", -@@ -314,7 +328,20 @@ func TestCheckLoadOpts(t *testing.T) { - lib: libFile.Name(), - }, - }, -- wantErr: true, -+ wantErr: true, -+ errMessage: fmt.Sprintf("image tarball directory %q is not exist", util.MakeAbsolute("path not 
exist", pwd)), -+ }, -+ { -+ // if base and lib dir are both not provided, daemon side will read -+ // the info from "manifest" file in the dest dir automatically -+ // so no error return here -+ name: "TC-base and lib dir both not provided", -+ fields: fields{ -+ path: "app:latest", -+ sep: separatorLoadOption{ -+ dir: root.Path(), -+ }, -+ }, - }, - } - for _, tt := range tests { -@@ -324,9 +351,13 @@ func TestCheckLoadOpts(t *testing.T) { - loadID: tt.fields.loadID, - sep: tt.fields.sep, - } -- if err := opt.checkLoadOpts(); (err != nil) != tt.wantErr { -+ err := opt.checkLoadOpts() -+ if (err != nil) != tt.wantErr { - t.Errorf("loadOptions.checkLoadOpts() error = %v, wantErr %v", err, tt.wantErr) - } -+ if err != nil && err.Error() != tt.errMessage { -+ t.Errorf("loadOptions.checkLoadOpts() error = %v, wantErr %v", err, tt.errMessage) -+ } - }) - } - } --- -2.27.0 - diff --git a/patch/0080-util-add-unit-test-for-file.go.patch b/patch/0080-util-add-unit-test-for-file.go.patch deleted file mode 100644 index 9bd3cfd..0000000 --- a/patch/0080-util-add-unit-test-for-file.go.patch +++ /dev/null @@ -1,674 +0,0 @@ -From d578f50d5ec200a7af83186b282a22cceb927f1b Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Thu, 28 Oct 2021 22:41:18 +0800 -Subject: [PATCH 08/16] util: add unit test for file.go - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - util/file.go | 42 +++- - util/file_test.go | 547 ++++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 578 insertions(+), 11 deletions(-) - create mode 100644 util/file_test.go - -diff --git a/util/file.go b/util/file.go -index cd4a75d5..e0353898 100644 ---- a/util/file.go -+++ b/util/file.go -@@ -29,12 +29,26 @@ const ( - fileMaxSize = 10 * 1024 * 1024 // 10MB - ) - -+var ( -+ modifyTime = time.Date(2017, time.January, 0, 0, 0, 0, 0, time.UTC) -+ accessTime = time.Date(2017, time.January, 0, 0, 0, 0, 0, time.UTC) -+) -+ - // ReadSmallFile read small file less than 10MB - func ReadSmallFile(path string) ([]byte, error) { - st, err := os.Lstat(path) - if err != nil { - return nil, err - } -+ -+ if !st.Mode().IsRegular() { -+ return nil, errors.Errorf("loading file %s should be a regular file", st.Name()) -+ } -+ -+ if st.Size() == 0 { -+ return nil, errors.New("loading file is empty") -+ } -+ - if st.Size() > fileMaxSize { - return nil, errors.Errorf("file %q too big", path) - } -@@ -51,18 +65,18 @@ func LoadJSONFile(file string, v interface{}) error { - } - - // ChangeDirModifyTime changes modify time of directory --func ChangeDirModifyTime(dir string) error { -+func ChangeDirModifyTime(dir string, accessTime, modifyTime time.Time) error { - fs, rErr := ioutil.ReadDir(dir) - if rErr != nil { - return rErr - } - for _, f := range fs { - src := filepath.Join(dir, f.Name()) -- if err := ChangeFileModifyTime(src); err != nil { -+ if err := ChangeFileModifyTime(src, accessTime, modifyTime); err != nil { - return err - } - if f.IsDir() { -- if err := ChangeDirModifyTime(src); err != nil { -+ if err := ChangeDirModifyTime(src, accessTime, modifyTime); err != nil { - return err - } - } -@@ -71,13 +85,11 @@ func ChangeDirModifyTime(dir string) error { - } - - // ChangeFileModifyTime changes modify time of file by fixing time at 2017-01-01 00:00:00 --func ChangeFileModifyTime(path string) error { -- mtime := time.Date(2017, time.January, 0, 0, 0, 0, 0, time.UTC) -- atime := time.Date(2017, time.January, 0, 0, 0, 0, 0, time.UTC) -+func ChangeFileModifyTime(path string, accessTime, modifyTime time.Time) error { - if _, err := 
os.Lstat(path); err != nil { - return err - } -- if err := os.Chtimes(path, atime, mtime); err != nil { -+ if err := os.Chtimes(path, accessTime, modifyTime); err != nil { - return err - } - return nil -@@ -87,9 +99,9 @@ func ChangeFileModifyTime(path string) error { - // by using different compression method defined by "com" - // the files' modify time attribute will be set to a fix time "2017-01-01 00:00:00" - // if set "modifyTime" to true --func PackFiles(src, dest string, com archive.Compression, modifyTime bool) (err error) { -- if modifyTime { -- if err = ChangeDirModifyTime(src); err != nil { -+func PackFiles(src, dest string, com archive.Compression, needModifyTime bool) (err error) { -+ if needModifyTime { -+ if err = ChangeDirModifyTime(src, accessTime, modifyTime); err != nil { - return err - } - } -@@ -122,6 +134,14 @@ func PackFiles(src, dest string, com archive.Compression, modifyTime bool) (err - // by using different compression method defined by "com" - // The src file will be remove if set "rm" to true - func UnpackFile(src, dest string, com archive.Compression, rm bool) (err error) { -+ if len(dest) == 0 { -+ return errors.New("unpack: dest path should not be empty") -+ } -+ d, err := os.Stat(dest) -+ if err != nil || !d.IsDir() { -+ return errors.Wrapf(err, "unpack: invalid dest path") -+ } -+ - cleanPath := filepath.Clean(src) - f, err := os.Open(cleanPath) // nolint:gosec - if err != nil { -@@ -139,7 +159,7 @@ func UnpackFile(src, dest string, com archive.Compression, rm bool) (err error) - return errors.Wrapf(err, "unpack file %q failed", src) - } - -- if err = ChangeDirModifyTime(dest); err != nil { -+ if err = ChangeDirModifyTime(dest, modifyTime, accessTime); err != nil { - return errors.Wrapf(err, "change modify time for directory %q failed", dest) - } - -diff --git a/util/file_test.go b/util/file_test.go -new file mode 100644 -index 00000000..09aed41d ---- /dev/null -+++ b/util/file_test.go -@@ -0,0 +1,547 @@ -+// Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+// isula-build licensed under the Mulan PSL v2. -+// You can use this software according to the terms and conditions of the Mulan PSL v2. -+// You may obtain a copy of Mulan PSL v2 at: -+// http://license.coscl.org.cn/MulanPSL2 -+// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+// PURPOSE. -+// See the Mulan PSL v2 for more details. 
-+// Author: Xiang Li -+// Create: 2021-08-24 -+// Description: file manipulation related common functions -+ -+package util -+ -+import ( -+ "encoding/json" -+ "io/ioutil" -+ "os" -+ "path/filepath" -+ "reflect" -+ "testing" -+ "time" -+ -+ "github.com/containers/storage/pkg/archive" -+ "gotest.tools/v3/assert" -+ "gotest.tools/v3/fs" -+ constant "isula.org/isula-build" -+) -+ -+func TestReadSmallFile(t *testing.T) { -+ smallFile := fs.NewFile(t, t.Name()) -+ defer smallFile.Remove() -+ err := ioutil.WriteFile(smallFile.Path(), []byte("small file"), constant.DefaultRootFileMode) -+ assert.NilError(t, err) -+ -+ root := fs.NewDir(t, t.Name()) -+ defer root.Remove() -+ -+ bigFile := filepath.Join(root.Path(), "bigFile") -+ f, err := os.Create(bigFile) -+ assert.NilError(t, err) -+ defer os.Remove(f.Name()) -+ err = f.Truncate(fileMaxSize + 1) -+ assert.NilError(t, err) -+ -+ emptyFile := fs.NewFile(t, t.Name()) -+ defer emptyFile.Remove() -+ -+ type args struct { -+ path string -+ } -+ tests := []struct { -+ name string -+ args args -+ want []byte -+ wantErr bool -+ }{ -+ { -+ name: "TC-normal read", -+ args: args{path: smallFile.Path()}, -+ want: []byte("small file"), -+ }, -+ { -+ name: "TC-not exist path", -+ wantErr: true, -+ }, -+ { -+ name: "TC-file too big", -+ args: args{path: bigFile}, -+ wantErr: true, -+ }, -+ { -+ name: "TC-empty file", -+ args: args{path: emptyFile.Path()}, -+ wantErr: true, -+ }, -+ { -+ name: "TC-invalid file", -+ args: args{path: "/dev/cdrom"}, -+ wantErr: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ got, err := ReadSmallFile(tt.args.path) -+ if (err != nil) != tt.wantErr { -+ t.Errorf("ReadSmallFile() error = %v, wantErr %v", err, tt.wantErr) -+ return -+ } -+ if !reflect.DeepEqual(got, tt.want) { -+ t.Errorf("ReadSmallFile() = %v, want %v", got, tt.want) -+ } -+ }) -+ } -+} -+ -+func TestLoadJSONFile(t *testing.T) { -+ type rename struct { -+ Name string `json:"name"` -+ Rename string `json:"rename"` -+ } -+ type args struct { -+ file string -+ v rename -+ } -+ -+ smallJSONFile := fs.NewFile(t, t.Name()) -+ defer smallJSONFile.Remove() -+ validData := rename{ -+ Name: "origin name", -+ Rename: "modified name", -+ } -+ b, err := json.Marshal(validData) -+ assert.NilError(t, err) -+ ioutil.WriteFile(smallJSONFile.Path(), b, constant.DefaultRootFileMode) -+ -+ tests := []struct { -+ name string -+ args args -+ wantKey string -+ wantValue string -+ wantErr bool -+ }{ -+ { -+ name: "TC-normal json file", -+ args: args{ -+ file: smallJSONFile.Path(), -+ v: rename{}, -+ }, -+ wantKey: "origin name", -+ wantValue: "modified name", -+ }, -+ { -+ name: "TC-json file not exist", -+ wantErr: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if err := LoadJSONFile(tt.args.file, &tt.args.v); (err != nil) != tt.wantErr { -+ t.Errorf("LoadJSONFile() error = %v, wantErr %v", err, tt.wantErr) -+ } -+ if err == nil { -+ assert.Equal(t, tt.args.v.Name, tt.wantKey) -+ assert.Equal(t, tt.args.v.Rename, tt.wantValue) -+ } -+ }) -+ } -+} -+ -+func TestChangeFileModifyTime(t *testing.T) { -+ normalFile := fs.NewFile(t, t.Name()) -+ defer normalFile.Remove() -+ -+ pwd, err := os.Getwd() -+ assert.NilError(t, err) -+ immutableFile := filepath.Join(pwd, "immutableFile") -+ _, err = os.Create(immutableFile) -+ defer os.Remove(immutableFile) -+ -+ type args struct { -+ path string -+ mtime time.Time -+ atime time.Time -+ } -+ tests := []struct { -+ name string -+ args args -+ wantErr bool -+ needHook 
bool -+ preHookFun func(t *testing.T) -+ postHookFun func(t *testing.T) -+ }{ -+ { -+ name: "TC-change file modify time", -+ args: args{ -+ path: immutableFile, -+ mtime: modifyTime, -+ atime: accessTime, -+ }, -+ }, -+ { -+ name: "TC-file path empty", -+ wantErr: true, -+ }, -+ { -+ name: "TC-lack of permession", -+ args: args{ -+ path: immutableFile, -+ atime: accessTime, -+ mtime: modifyTime, -+ }, -+ needHook: true, -+ preHookFun: func(t *testing.T) { Immutable(immutableFile, true) }, -+ postHookFun: func(t *testing.T) { Immutable(immutableFile, false) }, -+ wantErr: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if tt.needHook { -+ tt.preHookFun(t) -+ } -+ err := ChangeFileModifyTime(tt.args.path, tt.args.atime, tt.args.mtime) -+ if tt.needHook { -+ defer tt.postHookFun(t) -+ } -+ if (err != nil) != tt.wantErr { -+ t.Errorf("ChangeFileModifyTime() error = %v, wantErr %v", err, tt.wantErr) -+ } -+ if err == nil { -+ f, err := os.Stat(tt.args.path) -+ assert.NilError(t, err) -+ assert.Equal(t, true, f.ModTime().Equal(modifyTime)) -+ } -+ }) -+ } -+} -+ -+type tempDirs struct { -+ root string -+ subDir1 string -+ subDir11 string -+ file1 string -+ file11 string -+} -+ -+func createDirs(t *testing.T) tempDirs { -+ pwd, err := os.Getwd() -+ assert.NilError(t, err) -+ -+ root := filepath.Join(pwd, t.Name()) -+ assert.NilError(t, os.Mkdir(root, constant.DefaultRootDirMode)) -+ -+ rootSubDir1 := filepath.Join(root, "rootSubDir1") -+ assert.NilError(t, os.Mkdir(rootSubDir1, constant.DefaultRootDirMode)) -+ -+ rootSubDir11 := filepath.Join(rootSubDir1, "rootSubDir11") -+ assert.NilError(t, os.Mkdir(rootSubDir11, constant.DefaultRootDirMode)) -+ -+ file1 := filepath.Join(rootSubDir1, "file1") -+ _, err = os.Create(file1) -+ assert.NilError(t, err) -+ -+ file11 := filepath.Join(rootSubDir11, "file11") -+ _, err = os.Create(file11) -+ assert.NilError(t, err) -+ -+ return tempDirs{ -+ root: root, -+ subDir1: rootSubDir1, -+ subDir11: rootSubDir11, -+ file1: file1, -+ file11: file11, -+ } -+} -+ -+func (tmp *tempDirs) removeAll(t *testing.T) { -+ assert.NilError(t, os.RemoveAll(tmp.root)) -+ assert.NilError(t, os.RemoveAll(tmp.subDir1)) -+ assert.NilError(t, os.RemoveAll(tmp.subDir11)) -+ assert.NilError(t, os.RemoveAll(tmp.file1)) -+ assert.NilError(t, os.RemoveAll(tmp.file11)) -+} -+ -+func TestChangeDirModifyTime(t *testing.T) { -+ tempDirs := createDirs(t) -+ defer tempDirs.removeAll(t) -+ root := tempDirs.root -+ -+ type args struct { -+ dir string -+ mtime time.Time -+ atime time.Time -+ } -+ tests := []struct { -+ name string -+ args args -+ wantErr bool -+ needPreHook bool -+ needPostHook bool -+ preWalkFun func(path string, info os.FileInfo, err error) error -+ postWalkFun func(path string, info os.FileInfo, err error) error -+ }{ -+ { -+ name: "TC-normal case modify directory", -+ args: args{ -+ dir: root, -+ mtime: modifyTime, -+ atime: accessTime, -+ }, -+ needPostHook: true, -+ postWalkFun: func(path string, info os.FileInfo, err error) error { -+ assert.Assert(t, true, info.ModTime().Equal(modifyTime)) -+ return nil -+ }, -+ }, -+ { -+ name: "TC-empty path", -+ wantErr: true, -+ }, -+ { -+ name: "TC-lack of permission", -+ args: args{ -+ dir: root, -+ mtime: modifyTime, -+ atime: accessTime, -+ }, -+ wantErr: true, -+ needPreHook: true, -+ needPostHook: true, -+ preWalkFun: func(path string, info os.FileInfo, err error) error { -+ if !info.IsDir() { -+ Immutable(path, true) -+ } -+ return nil -+ }, -+ postWalkFun: func(path string, info 
os.FileInfo, err error) error { -+ if !info.IsDir() { -+ Immutable(path, false) -+ } -+ return nil -+ }, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if tt.needPreHook { -+ wErr := filepath.Walk(tt.args.dir, tt.preWalkFun) -+ assert.NilError(t, wErr) -+ } -+ err := ChangeDirModifyTime(tt.args.dir, tt.args.mtime, tt.args.atime) -+ if (err != nil) != tt.wantErr { -+ t.Errorf("ChangeDirModifyTime() error = %v, wantErr %v", err, tt.wantErr) -+ } -+ if tt.needPostHook { -+ wErr := filepath.Walk(tt.args.dir, tt.postWalkFun) -+ assert.NilError(t, wErr) -+ } -+ }) -+ } -+} -+ -+func TestPackFiles(t *testing.T) { -+ dirs := createDirs(t) -+ defer dirs.removeAll(t) -+ dest := fs.NewFile(t, t.Name()) -+ defer dest.Remove() -+ -+ type args struct { -+ src string -+ dest string -+ com archive.Compression -+ needModifyTime bool -+ } -+ tests := []struct { -+ name string -+ args args -+ wantErr bool -+ needPreHook bool -+ needPostHook bool -+ preWalkFun func(path string, info os.FileInfo, err error) error -+ postWalkFun func(path string, info os.FileInfo, err error) error -+ }{ -+ { -+ name: "TC-normal pack", -+ args: args{ -+ src: dirs.root, -+ dest: dest.Path(), -+ com: archive.Gzip, -+ needModifyTime: true, -+ }, -+ }, -+ { -+ name: "TC-empty dest", -+ args: args{ -+ src: dirs.root, -+ com: archive.Gzip, -+ needModifyTime: true, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-invalid compression", -+ args: args{ -+ src: dirs.root, -+ dest: dest.Path(), -+ com: archive.Compression(-1), -+ needModifyTime: true, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "TC-lack of permission", -+ args: args{ -+ src: dirs.root, -+ dest: dest.Path(), -+ com: archive.Gzip, -+ needModifyTime: true, -+ }, -+ wantErr: true, -+ needPreHook: true, -+ needPostHook: true, -+ preWalkFun: func(path string, info os.FileInfo, err error) error { -+ if !info.IsDir() { -+ Immutable(path, true) -+ } -+ return nil -+ }, -+ postWalkFun: func(path string, info os.FileInfo, err error) error { -+ if !info.IsDir() { -+ Immutable(path, false) -+ } -+ return nil -+ }, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if tt.needPreHook { -+ wErr := filepath.Walk(tt.args.src, tt.preWalkFun) -+ assert.NilError(t, wErr) -+ } -+ if err := PackFiles(tt.args.src, tt.args.dest, tt.args.com, tt.args.needModifyTime); (err != nil) != tt.wantErr { -+ t.Errorf("PackFiles() error = %v, wantErr %v", err, tt.wantErr) -+ } -+ if tt.needPostHook { -+ wErr := filepath.Walk(tt.args.src, tt.postWalkFun) -+ assert.NilError(t, wErr) -+ } -+ }) -+ } -+} -+ -+func TestUnpackFile(t *testing.T) { -+ folderToBePacked := createDirs(t) -+ defer folderToBePacked.removeAll(t) -+ pwd, err := os.Getwd() -+ assert.NilError(t, err) -+ -+ tarName := filepath.Join(pwd, "test.tar") -+ assert.NilError(t, PackFiles(folderToBePacked.root, tarName, archive.Gzip, true)) -+ defer os.RemoveAll(tarName) -+ -+ invalidTar := filepath.Join(pwd, "invalid.tar") -+ err = ioutil.WriteFile(invalidTar, []byte("invalid tar"), constant.DefaultRootFileMode) -+ assert.NilError(t, err) -+ defer os.RemoveAll(invalidTar) -+ -+ unpackDest := filepath.Join(pwd, "unpack") -+ assert.NilError(t, os.MkdirAll(unpackDest, constant.DefaultRootDirMode)) -+ defer os.RemoveAll(unpackDest) -+ -+ type args struct { -+ src string -+ dest string -+ com archive.Compression -+ rm bool -+ } -+ tests := []struct { -+ name string -+ args args -+ needPreHook bool -+ needPostHook bool -+ wantErr bool -+ }{ -+ { -+ name: "normal unpack file", -+ args: args{ 
-+ src: tarName, -+ dest: unpackDest, -+ com: archive.Gzip, -+ rm: true, -+ }, -+ }, -+ { -+ name: "empty unpack destation path", -+ args: args{ -+ src: tarName, -+ com: archive.Gzip, -+ rm: false, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "unpack src path not exist", -+ args: args{ -+ src: "path not exist", -+ dest: unpackDest, -+ com: archive.Gzip, -+ rm: false, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "unpack destation path not exist", -+ args: args{ -+ src: tarName, -+ dest: "path not exist", -+ com: archive.Gzip, -+ rm: false, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "invalid tarball", -+ args: args{ -+ src: invalidTar, -+ dest: unpackDest, -+ com: archive.Gzip, -+ rm: false, -+ }, -+ wantErr: true, -+ }, -+ { -+ name: "no permission for src", -+ args: args{ -+ src: tarName, -+ dest: unpackDest, -+ com: archive.Gzip, -+ rm: true, -+ }, -+ wantErr: true, -+ needPreHook: true, -+ needPostHook: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if tt.needPreHook { -+ assert.NilError(t, Immutable(tt.args.src, true)) -+ } -+ err := UnpackFile(tt.args.src, tt.args.dest, tt.args.com, tt.args.rm) -+ if (err != nil) != tt.wantErr { -+ t.Errorf("UnpackFile() error = %v, wantErr %v", err, tt.wantErr) -+ } -+ if tt.needPostHook { -+ assert.NilError(t, Immutable(tt.args.src, false)) -+ } -+ if tt.args.rm && err == nil { -+ tarName := filepath.Join(pwd, "test.tar") -+ assert.NilError(t, PackFiles(folderToBePacked.root, tarName, archive.Gzip, true)) -+ } -+ }) -+ } -+} --- -2.27.0 - diff --git a/patch/0081-test-cleancode-test-for-better-experience.patch b/patch/0081-test-cleancode-test-for-better-experience.patch deleted file mode 100644 index 278c787..0000000 --- a/patch/0081-test-cleancode-test-for-better-experience.patch +++ /dev/null @@ -1,391 +0,0 @@ -From ed8d2d30e7d298fa05395a79cc3502240d9c0721 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Mon, 1 Nov 2021 16:24:14 +0800 -Subject: [PATCH 13/16] test:cleancode test for better experience - -change: -1. shellcheck fix for scripts in used hack -2. use busyobx instead of openeuler base image to speed up test -3. add test-unit-cover, test-sdv-cover, test-cover for project to - generate coverage files - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - Makefile | 18 ++++ - hack/all_coverage.sh | 26 ++++++ - hack/merge_coverage.sh | 55 +++++++++++ - hack/sdv_coverage.sh | 104 +++++++++++++++++++++ - hack/unit_test.sh | 82 +++++++++++----- - tests/src/integration_test_set_new_root.sh | 7 +- - 6 files changed, 268 insertions(+), 24 deletions(-) - create mode 100755 hack/all_coverage.sh - create mode 100644 hack/merge_coverage.sh - create mode 100755 hack/sdv_coverage.sh - -diff --git a/Makefile b/Makefile -index d41a9fdb..73482a41 100644 ---- a/Makefile -+++ b/Makefile -@@ -112,6 +112,24 @@ test-integration: ## Test integration case - @./tests/test.sh integration - @echo "Integration test done!" - -+.PHONY: test-unit-cover -+test-unit-cover: ## Test unit case and generate coverage -+ @echo "Unit test cover starting..." -+ @./hack/unit_test.sh cover -+ @echo "Unit test cover done!" -+ -+.PHONY: test-sdv-cover -+test-sdv-cover: ## Test integration case and generate coverage -+ @echo "Integration test cover starting..." -+ @./hack/sdv_coverage.sh -+ @echo "Integration test cover done!" -+ -+.PHONY: test-cover -+test-cover: test-sdv-cover test-unit-cover ## Test both unit and sdv case and generate unity coverage -+ @echo "Test cover starting..." 
-+ @./hack/all_coverage.sh -+ @echo "Test cover done!" -+ - ##@ Development - - .PHONY: build-image -diff --git a/hack/all_coverage.sh b/hack/all_coverage.sh -new file mode 100755 -index 00000000..9f9eb5ff ---- /dev/null -+++ b/hack/all_coverage.sh -@@ -0,0 +1,26 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. -+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. -+# Author: Xiang Li -+# Create: 2020-03-01 -+# Description: shell script for all coverage -+# Note: use this file by typing make test-cover -+# Do not run this script directly -+ -+SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" || exit; pwd) -+# shellcheck disable=SC1091 -+source "${SCRIPT_DIR}"/merge_coverage.sh -+ -+unit_coverage=${PWD}/cover_unit_test_all.out -+sdv_coverage=${PWD}/cover_sdv_test_all.out -+output_file=${PWD}/cover_test_all -+ -+merge_cover "${output_file}" "${sdv_coverage}" "${unit_coverage}" -diff --git a/hack/merge_coverage.sh b/hack/merge_coverage.sh -new file mode 100644 -index 00000000..6e529a34 ---- /dev/null -+++ b/hack/merge_coverage.sh -@@ -0,0 +1,55 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. -+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. -+# Author: Xiang Li -+# Create: 2021-11-01 -+# Description: merge coverage from input coverage files -+# Note: Do not run this script directly -+ -+# Usage: merge_cover outputfile file1 file2 ... 
fileN -+# Input: first: outputfile name -+# remaining: coverage files -+function merge_cover() { -+ output_file_name="$1" -+ input_coverages=( "${@:2}" ) -+ -+ output_coverage_file=${output_file_name}.out -+ output_html_file=${output_file_name}.html -+ output_merge_cover=${output_file_name}.merge -+ grep -r -h -v "^mode:" "${input_coverages[@]}" | sort > "$output_merge_cover" -+ current="" -+ count=0 -+ echo "mode: set" > "$output_coverage_file" -+ # read the cover report from merge_cover, convert it, write to final coverage -+ while read -r line; do -+ block=$(echo "$line" | cut -d ' ' -f1-2) -+ num=$(echo "$line" | cut -d ' ' -f3) -+ if [ "$current" == "" ]; then -+ current=$block -+ count=$num -+ elif [ "$block" == "$current" ]; then -+ count=$((count + num)) -+ else -+ # if the sorted two lines are not in the same code block, write the statics result of last code block to the final coverage -+ echo "$current" $count >> "${output_coverage_file}" -+ current=$block -+ count=$num -+ fi -+ done < "$output_merge_cover" -+ rm -rf "${output_merge_cover}" -+ -+ # merge the results of last line to the final coverage -+ if [ "$current" != "" ]; then -+ echo "$current" "$count" >> "${output_coverage_file}" -+ fi -+ -+ go tool cover -html="${output_coverage_file}" -o "$output_html_file" -+} -diff --git a/hack/sdv_coverage.sh b/hack/sdv_coverage.sh -new file mode 100755 -index 00000000..874d9373 ---- /dev/null -+++ b/hack/sdv_coverage.sh -@@ -0,0 +1,104 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. -+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. 
-+# Author: Xiang Li -+# Create: 2020-03-01 -+# Description: shell script for coverage -+# Note: use this file by typing make test-sdv-cover or make test-cover -+# Do not run this script directly -+ -+project_root=${PWD} -+vendor_name="isula.org" -+project_name="isula-build" -+main_relative_path="cmd/daemon" -+exclude_pattern="gopkgs|api/services" -+go_test_mod_method="-mod=vendor" -+go_test_count_method="-count=1" -+go_test_cover_method="-covermode=set" -+main_pkg="${vendor_name}/${project_name}/${main_relative_path}" -+main_test_file=${project_root}/${main_relative_path}/main_test.go -+main_file=${project_root}/${main_relative_path}/main.go -+coverage_file=${project_root}/cover_sdv_test_all.out -+coverage_html=${project_root}/cover_sdv_test_all.html -+coverage_log=${project_root}/cover_sdv_test_all.log -+main_test_binary_file=${project_root}/main.test -+ -+function precheck() { -+ if pgrep isula-builder > /dev/null 2>&1; then -+ echo "isula-builder is already running, please stop it first" -+ exit 1 -+ fi -+} -+ -+function modify_main_test() { -+ # first backup file -+ cp "${main_file}" "${main_file}".bk -+ cp "${main_test_file}" "${main_test_file}".bk -+ # delete Args field for main.go -+ local comment_pattern="Args: util.NoArgs" -+ sed -i "/$comment_pattern/s/^#*/\/\/ /" "${main_file}" -+ # add new line for main_test.go -+ code_snippet="func TestMain(t *testing.T) { main() }" -+ echo "$code_snippet" >> "${main_test_file}" -+} -+ -+function recover_main_test() { -+ mv "${main_file}".bk "${main_file}" -+ mv "${main_test_file}".bk "${main_test_file}" -+} -+ -+function build_main_test_binary() { -+ pkgs=$(go list ${go_test_mod_method} "${project_root}"/... | grep -Ev ${exclude_pattern} | tr "\r\n" ",") -+ go test -coverpkg="${pkgs}" ${main_pkg} ${go_test_mod_method} ${go_test_cover_method} ${go_test_count_method} -c -o="${main_test_binary_file}" -+} -+ -+function run_main_test_binary() { -+ ${main_test_binary_file} -test.coverprofile="${coverage_file}" > "${coverage_log}" 2>&1 & -+ main_test_pid=$! -+ for _ in $(seq 1 10); do -+ if isula-build info > /dev/null 2>&1; then -+ break -+ else -+ sleep 1 -+ fi -+ done -+} -+ -+function run_coverage_test() { -+ # do cover tests -+ echo "sdv coverage test" -+ # cover_test_xxx -+ # cover_test_xxx -+ # cover_test_xxx -+ # cover_test_xxx -+} -+ -+function finish_coverage_test() { -+ kill -15 $main_test_pid -+} -+ -+function generate_coverage() { -+ go tool cover -html="${coverage_file}" -o="${coverage_html}" -+} -+ -+function cleanup() { -+ rm "$main_test_binary_file" -+} -+ -+precheck -+modify_main_test -+build_main_test_binary -+recover_main_test -+run_main_test_binary -+run_coverage_test -+finish_coverage_test -+generate_coverage -+cleanup -diff --git a/hack/unit_test.sh b/hack/unit_test.sh -index a94a2d38..94a44a95 100755 ---- a/hack/unit_test.sh -+++ b/hack/unit_test.sh -@@ -12,32 +12,72 @@ - # Author: iSula Team - # Create: 2020-07-11 - # Description: go test script -+# Note: use this file by typing make unit-test or make unit-test-cover -+# Do not run this script directly -+ -+SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" || exit; pwd) -+# shellcheck disable=SC1091 -+source "${SCRIPT_DIR}"/merge_coverage.sh - - export GO111MODULE=on -+run_coverage=$1 -+covers_folder=${PWD}/covers -+testlog=${PWD}"/unit_test_log" -+exclude_pattern="gopkgs|api/services" -+go_test_mod_method="-mod=vendor" -+go_test_count_method="-count=1" -+go_test_timeout_flag="-timeout=300s" -+go_test_race_flag="-race" - --TEST_ARGS="" --if [ ! 
-z "${TEST_REG}" ]; then -- TEST_ARGS+=" -args TEST_REG=${TEST_REG}" --fi --if [ ! -z "${SKIP_REG}" ]; then -- TEST_ARGS+=" -args SKIP_REG=${SKIP_REG}" --fi --echo "Testing with args ${TEST_ARGS}" -+function precheck() { -+ if pgrep isula-builder > /dev/null 2>&1; then -+ echo "isula-builder is already running, please stop it first" -+ exit 1 -+ fi -+} - --testlog=${PWD}"/unit_test_log" --rm -f "${testlog}" --touch "${testlog}" --golist=$(go list ./... | grep -v gopkgs) --for path in ${golist}; do -- echo "Start to test: ${path}" -- # TEST_ARGS is " -args SKIP_REG=foo", so no double quote for it -- go test -race -mod=vendor -cover -count=1 -timeout 300s -v "${path}" ${TEST_ARGS} >> "${testlog}" -- cat "${testlog}" | grep -E -- "--- FAIL:|^FAIL" -- if [ $? -eq 0 ]; then -+function run_unit_test() { -+ TEST_ARGS="" -+ if [ -n "${TEST_REG}" ]; then -+ TEST_ARGS+=" -args TEST_REG=${TEST_REG}" -+ fi -+ if [ -n "${SKIP_REG}" ]; then -+ TEST_ARGS+=" -args SKIP_REG=${SKIP_REG}" -+ fi -+ echo "Testing with args ${TEST_ARGS}" -+ -+ rm -f "${testlog}" -+ if [[ -n $run_coverage ]]; then -+ mkdir -p "${covers_folder}" -+ fi -+ for package in $(go list ${go_test_mod_method} ./... | grep -Ev ${exclude_pattern}); do -+ echo "Start to test: ${package}" -+ if [[ -n $run_coverage ]]; then -+ coverprofile_file="${covers_folder}/$(echo "$package" | tr / -).cover" -+ coverprofile_flag="-coverprofile=${coverprofile_file}" -+ go_test_covermode_flag="-covermode=set" -+ go_test_race_flag="" -+ fi -+ # TEST_ARGS is " -args SKIP_REG=foo", so no double quote for it -+ # shellcheck disable=SC2086 -+ go test -v ${go_test_race_flag} ${go_test_mod_method} ${coverprofile_flag} ${go_test_covermode_flag} -coverpkg=${package} ${go_test_count_method} ${go_test_timeout_flag} "${package}" ${TEST_ARGS} >> "${testlog}" -+ done -+ -+ if grep -E -- "--- FAIL:|^FAIL" "${testlog}"; then - echo "Testing failed... 
Please check ${testlog}" -- exit 1 - fi - tail -n 1 "${testlog}" --done - --rm -f "${testlog}" -+ rm -f "${testlog}" -+} -+ -+function generate_unit_test_coverage() { -+ if [[ -n ${run_coverage} ]]; then -+ merge_cover "cover_unit_test_all" "${covers_folder}" -+ rm -rf "${covers_folder}" -+ fi -+} -+ -+precheck -+run_unit_test -+generate_unit_test_coverage -diff --git a/tests/src/integration_test_set_new_root.sh b/tests/src/integration_test_set_new_root.sh -index bb11a080..ae8d436b 100644 ---- a/tests/src/integration_test_set_new_root.sh -+++ b/tests/src/integration_test_set_new_root.sh -@@ -12,6 +12,7 @@ - # Author: Weizheng Xing - # Create: 2021-05-29 - # Description: test set new run and data root in configuration.toml -+# History: 2021-8-18 Xiang Li use busyobx instead of openeuler base image to speed up test - - top_dir=$(git rev-parse --show-toplevel) - # shellcheck disable=SC1091 -@@ -20,7 +21,7 @@ source "$top_dir"/tests/lib/common.sh - run_root="/var/run/new-isula-build" - data_root="/var/lib/new-isula-build" - config_file="/etc/isula-build/configuration.toml" --base_image="hub.oepkgs.net/openeuler/openeuler:21.03" -+image="hub.oepkgs.net/openeuler/busybox:latest" - - function clean() - { -@@ -47,10 +48,10 @@ function pre_test() - function do_test() - { - tree_node_befor=$(tree -L 3 $data_root | wc -l) -- run_with_debug "isula-build ctr-img pull $base_image" -+ run_with_debug "isula-build ctr-img pull $image" - tree_node_after=$(tree -L 3 $data_root | wc -l) - -- if [ $((tree_node_after - tree_node_befor)) -eq 8 ] && run_with_debug "isula-build ctr-img rm $base_image"; then -+ if [ $((tree_node_after - tree_node_befor)) -eq 8 ] && run_with_debug "isula-build ctr-img rm $image"; then - echo "PASS" - else - echo "Sets of run and data root are not effective" --- -2.27.0 - diff --git a/patch/0082-test-optimize-scripts-in-hack.patch b/patch/0082-test-optimize-scripts-in-hack.patch deleted file mode 100644 index b0b0e6c..0000000 --- a/patch/0082-test-optimize-scripts-in-hack.patch +++ /dev/null @@ -1,253 +0,0 @@ -From 5d3a9a0f2e5510e68040d252190070925ee89fd0 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Mon, 1 Nov 2021 23:37:44 +0800 -Subject: [PATCH 14/16] test: optimize scripts in hack - -reason: -1. add framework for integration tests -2. shellcheck for scripts - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - Makefile | 10 ++--- - hack/all_coverage.sh | 4 +- - ...dv_coverage.sh => integration_coverage.sh} | 42 ++++++++++++------- - hack/merge_coverage.sh | 30 ++++++------- - hack/unit_test.sh | 10 ++--- - 5 files changed, 53 insertions(+), 43 deletions(-) - rename hack/{sdv_coverage.sh => integration_coverage.sh} (63%) - -diff --git a/Makefile b/Makefile -index 73482a41..c5384e07 100644 ---- a/Makefile -+++ b/Makefile -@@ -42,7 +42,7 @@ endif - ##@ Help - .PHONY: help - help: ## Display the help info -- @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) -+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-25s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) - - ##@ Build - -@@ -118,14 +118,14 @@ test-unit-cover: ## Test unit case and generate coverage - @./hack/unit_test.sh cover - @echo "Unit test cover done!" 
- --.PHONY: test-sdv-cover --test-sdv-cover: ## Test integration case and generate coverage -+.PHONY: test-integration-cover -+test-integration-cover: ## Test integration case and generate coverage - @echo "Integration test cover starting..." -- @./hack/sdv_coverage.sh -+ @./hack/integration_coverage.sh - @echo "Integration test cover done!" - - .PHONY: test-cover --test-cover: test-sdv-cover test-unit-cover ## Test both unit and sdv case and generate unity coverage -+test-cover: test-integration-cover test-unit-cover ## Test both unit and integration case and generate unity coverage - @echo "Test cover starting..." - @./hack/all_coverage.sh - @echo "Test cover done!" -diff --git a/hack/all_coverage.sh b/hack/all_coverage.sh -index 9f9eb5ff..0f23e9d4 100755 ---- a/hack/all_coverage.sh -+++ b/hack/all_coverage.sh -@@ -20,7 +20,7 @@ SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" || exit; pwd) - source "${SCRIPT_DIR}"/merge_coverage.sh - - unit_coverage=${PWD}/cover_unit_test_all.out --sdv_coverage=${PWD}/cover_sdv_test_all.out -+integration_coverage=${PWD}/cover_integration_test_all.out - output_file=${PWD}/cover_test_all - --merge_cover "${output_file}" "${sdv_coverage}" "${unit_coverage}" -+merge_cover "${output_file}" "${integration_coverage}" "${unit_coverage}" -diff --git a/hack/sdv_coverage.sh b/hack/integration_coverage.sh -similarity index 63% -rename from hack/sdv_coverage.sh -rename to hack/integration_coverage.sh -index 874d9373..7462c545 100755 ---- a/hack/sdv_coverage.sh -+++ b/hack/integration_coverage.sh -@@ -12,7 +12,7 @@ - # Author: Xiang Li - # Create: 2020-03-01 - # Description: shell script for coverage --# Note: use this file by typing make test-sdv-cover or make test-cover -+# Note: use this file by typing make test-integration-cover or make test-cover - # Do not run this script directly - - project_root=${PWD} -@@ -26,9 +26,10 @@ go_test_cover_method="-covermode=set" - main_pkg="${vendor_name}/${project_name}/${main_relative_path}" - main_test_file=${project_root}/${main_relative_path}/main_test.go - main_file=${project_root}/${main_relative_path}/main.go --coverage_file=${project_root}/cover_sdv_test_all.out --coverage_html=${project_root}/cover_sdv_test_all.html --coverage_log=${project_root}/cover_sdv_test_all.log -+coverage_file=${project_root}/cover_integration_test_all.out -+coverage_html=${project_root}/cover_integration_test_all.html -+coverage_daemon_log=${project_root}/cover_integration_test_all_daemon.log -+coverage_client_log=${project_root}/cover_integration_test_all_client.log - main_test_binary_file=${project_root}/main.test - - function precheck() { -@@ -44,10 +45,10 @@ function modify_main_test() { - cp "${main_test_file}" "${main_test_file}".bk - # delete Args field for main.go - local comment_pattern="Args: util.NoArgs" -- sed -i "/$comment_pattern/s/^#*/\/\/ /" "${main_file}" -+ sed -i "/${comment_pattern}/s/^#*/\/\/ /" "${main_file}" - # add new line for main_test.go - code_snippet="func TestMain(t *testing.T) { main() }" -- echo "$code_snippet" >> "${main_test_file}" -+ echo "${code_snippet}" >> "${main_test_file}" - } - - function recover_main_test() { -@@ -56,12 +57,12 @@ function recover_main_test() { - } - - function build_main_test_binary() { -- pkgs=$(go list ${go_test_mod_method} "${project_root}"/... 
| grep -Ev ${exclude_pattern} | tr "\r\n" ",") -- go test -coverpkg="${pkgs}" ${main_pkg} ${go_test_mod_method} ${go_test_cover_method} ${go_test_count_method} -c -o="${main_test_binary_file}" -+ pkgs=$(go list "${go_test_mod_method}" "${project_root}"/... | grep -Ev "${exclude_pattern}" | tr "\r\n" ",") -+ go test -coverpkg="${pkgs}" "${main_pkg}" "${go_test_mod_method}" "${go_test_cover_method}" "${go_test_count_method}" -c -o="${main_test_binary_file}" > /dev/null 2>&1 - } - - function run_main_test_binary() { -- ${main_test_binary_file} -test.coverprofile="${coverage_file}" > "${coverage_log}" 2>&1 & -+ ${main_test_binary_file} -test.coverprofile="${coverage_file}" > "${coverage_daemon_log}" 2>&1 & - main_test_pid=$! - for _ in $(seq 1 10); do - if isula-build info > /dev/null 2>&1; then -@@ -74,15 +75,22 @@ function run_main_test_binary() { - - function run_coverage_test() { - # do cover tests -- echo "sdv coverage test" -- # cover_test_xxx -- # cover_test_xxx -- # cover_test_xxx -- # cover_test_xxx -+ while IFS= read -r testfile; do -+ printf "%-60s" "test $(basename "${testfile}"): " -+ echo -e "\n$(basename "${testfile}"):" >> "${coverage_client_log}" -+ if ! bash "${testfile}" >> "${coverage_client_log}" 2>&1; then -+ echo "FAIL" -+ return_code=1 -+ else -+ echo "PASS" -+ fi -+ done < <(find "${project_root}"/tests/src -maxdepth 1 -name "cover_test_*" -type f -print) -+ # shellcheck disable=SC2248 -+ return ${return_code} - } - - function finish_coverage_test() { -- kill -15 $main_test_pid -+ kill -15 "${main_test_pid}" - } - - function generate_coverage() { -@@ -90,7 +98,7 @@ function generate_coverage() { - } - - function cleanup() { -- rm "$main_test_binary_file" -+ rm "${main_test_binary_file}" - } - - precheck -@@ -102,3 +110,5 @@ run_coverage_test - finish_coverage_test - generate_coverage - cleanup -+# shellcheck disable=SC2248 -+exit ${return_code} -diff --git a/hack/merge_coverage.sh b/hack/merge_coverage.sh -index 6e529a34..f043dfaf 100644 ---- a/hack/merge_coverage.sh -+++ b/hack/merge_coverage.sh -@@ -24,32 +24,32 @@ function merge_cover() { - output_coverage_file=${output_file_name}.out - output_html_file=${output_file_name}.html - output_merge_cover=${output_file_name}.merge -- grep -r -h -v "^mode:" "${input_coverages[@]}" | sort > "$output_merge_cover" -+ grep -r -h -v "^mode:" "${input_coverages[@]}" | sort > "${output_merge_cover}" - current="" - count=0 -- echo "mode: set" > "$output_coverage_file" -+ echo "mode: set" > "${output_coverage_file}" - # read the cover report from merge_cover, convert it, write to final coverage - while read -r line; do -- block=$(echo "$line" | cut -d ' ' -f1-2) -- num=$(echo "$line" | cut -d ' ' -f3) -- if [ "$current" == "" ]; then -- current=$block -- count=$num -- elif [ "$block" == "$current" ]; then -+ block=$(echo "${line}" | cut -d ' ' -f1-2) -+ num=$(echo "${line}" | cut -d ' ' -f3) -+ if [ "${current}" == "" ]; then -+ current=${block} -+ count=${num} -+ elif [ "${block}" == "${current}" ]; then - count=$((count + num)) - else - # if the sorted two lines are not in the same code block, write the statics result of last code block to the final coverage -- echo "$current" $count >> "${output_coverage_file}" -- current=$block -- count=$num -+ echo "${current} ${count}" >> "${output_coverage_file}" -+ current=${block} -+ count=${num} - fi -- done < "$output_merge_cover" -+ done < "${output_merge_cover}" - rm -rf "${output_merge_cover}" - - # merge the results of last line to the final coverage -- if [ "$current" != "" ]; then 
-- echo "$current" "$count" >> "${output_coverage_file}" -+ if [ "${current}" != "" ]; then -+ echo "${current} ${count}" >> "${output_coverage_file}" - fi - -- go tool cover -html="${output_coverage_file}" -o "$output_html_file" -+ go tool cover -html="${output_coverage_file}" -o "${output_html_file}" - } -diff --git a/hack/unit_test.sh b/hack/unit_test.sh -index 94a44a95..161feb6b 100755 ---- a/hack/unit_test.sh -+++ b/hack/unit_test.sh -@@ -47,20 +47,20 @@ function run_unit_test() { - echo "Testing with args ${TEST_ARGS}" - - rm -f "${testlog}" -- if [[ -n $run_coverage ]]; then -+ if [[ -n ${run_coverage} ]]; then - mkdir -p "${covers_folder}" - fi -- for package in $(go list ${go_test_mod_method} ./... | grep -Ev ${exclude_pattern}); do -+ for package in $(go list "${go_test_mod_method}" ./... | grep -Ev "${exclude_pattern}"); do - echo "Start to test: ${package}" -- if [[ -n $run_coverage ]]; then -- coverprofile_file="${covers_folder}/$(echo "$package" | tr / -).cover" -+ if [[ -n ${run_coverage} ]]; then -+ coverprofile_file="${covers_folder}/$(echo "${package}" | tr / -).cover" - coverprofile_flag="-coverprofile=${coverprofile_file}" - go_test_covermode_flag="-covermode=set" - go_test_race_flag="" - fi - # TEST_ARGS is " -args SKIP_REG=foo", so no double quote for it - # shellcheck disable=SC2086 -- go test -v ${go_test_race_flag} ${go_test_mod_method} ${coverprofile_flag} ${go_test_covermode_flag} -coverpkg=${package} ${go_test_count_method} ${go_test_timeout_flag} "${package}" ${TEST_ARGS} >> "${testlog}" -+ go test -v ${go_test_race_flag} "${go_test_mod_method}" ${coverprofile_flag} "${go_test_covermode_flag}" -coverpkg=${package} "${go_test_count_method}" "${go_test_timeout_flag}" "${package}" ${TEST_ARGS} >> "${testlog}" - done - - if grep -E -- "--- FAIL:|^FAIL" "${testlog}"; then --- -2.27.0 - diff --git a/patch/0083-test-add-common-function-for-testing-separated-image.patch b/patch/0083-test-add-common-function-for-testing-separated-image.patch deleted file mode 100644 index 50c7340..0000000 --- a/patch/0083-test-add-common-function-for-testing-separated-image.patch +++ /dev/null @@ -1,124 +0,0 @@ -From 08ebd389b5e3bb5104035c36891f8add75e18f57 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Mon, 1 Nov 2021 23:37:56 +0800 -Subject: [PATCH 15/16] test: add common function for testing separated image - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - tests/lib/separator.sh | 104 +++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 104 insertions(+) - create mode 100644 tests/lib/separator.sh - -diff --git a/tests/lib/separator.sh b/tests/lib/separator.sh -new file mode 100644 -index 00000000..ad05eb55 ---- /dev/null -+++ b/tests/lib/separator.sh -@@ -0,0 +1,104 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. -+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. 
-+# Author: Xiang Li -+# Create: 2021-11-01 -+# Description: common function for save/load separated image -+ -+exit_flag=0 -+ -+# $1: from image name -+# $2: build image name -+# $3: layers number -+# $4: Dockerfile path -+function touch_dockerfile() { -+ cat > "$4" << EOF -+FROM $1 -+MAINTAINER DCCooper -+EOF -+ for i in $(seq "$3"); do -+ echo "RUN echo \"This is $2 layer ${i}: ${RANDOM}\" > line.${i}" >> "$4" -+ done -+} -+ -+# $1: from image name -+# $2: build image name -+# $3: layers number -+# $4: Dockerfile path -+function touch_bad_dockerfile() { -+ cat > "$4" << EOF -+FROM $1 -+MAINTAINER DCCooper -+EOF -+ for i in $(seq "$3"); do -+ echo "RUN echo \"This is $2 layer ${i}: ${RANDOM}\"" >> "$4" -+ done -+} -+ -+# $1: image name -+# $2: context dir -+function build_image() { -+ isula-build ctr-img build -t "$1" "$2" -+} -+ -+function touch_rename_json() { -+ cat > "$1" << EOF -+[ -+ { -+ "name": "app1_latest_app_image.tar.gz", -+ "rename": "app1.tar.gz" -+ }, -+ { -+ "name": "app2_latest_app_image.tar.gz", -+ "rename": "app2.tar.gz" -+ }, -+ { -+ "name": "app1_latest_base_image.tar.gz", -+ "rename": "base1.tar.gz" -+ }, -+ { -+ "name": "app2_latest_base_image.tar.gz", -+ "rename": "base2.tar.gz" -+ }, -+ { -+ "name": "app1_latest_lib_image.tar.gz", -+ "rename": "lib1.tar.gz" -+ }, -+ { -+ "name": "app2_latest_lib_image.tar.gz", -+ "rename": "lib2.tar.gz" -+ } -+] -+EOF -+} -+ -+function touch_bad_rename_json() { -+ touch_rename_json "$1" -+ sed -i '2d' "$1" -+} -+ -+function check_result_equal() { -+ if [[ $1 -eq $2 ]]; then -+ return 0 -+ else -+ ((exit_flag++)) -+ return 1 -+ fi -+} -+ -+function check_result_not_equal() { -+ if [[ $1 -ne $2 ]]; then -+ return 0 -+ else -+ ((exit_flag++)) -+ return 1 -+ fi -+} --- -2.27.0 - diff --git a/patch/0084-test-add-integration-tests-for-saving-and-loading-se.patch b/patch/0084-test-add-integration-tests-for-saving-and-loading-se.patch deleted file mode 100644 index 00b419d..0000000 --- a/patch/0084-test-add-integration-tests-for-saving-and-loading-se.patch +++ /dev/null @@ -1,523 +0,0 @@ -From b3e96588a3e236cec8ec5e62a1fb884cf2eabc80 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Mon, 1 Nov 2021 23:38:06 +0800 -Subject: [PATCH 16/16] test: add integration tests for saving and loading - separated image - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - .../cover_test_load_separated_image_failed.sh | 145 ++++++++++++++++++ - ...cover_test_load_separated_image_success.sh | 89 +++++++++++ - .../cover_test_save_separated_image_failed.sh | 107 +++++++++++++ - ...cover_test_save_separated_image_success.sh | 54 +++++++ - ...r_test_save_separated_image_with_rename.sh | 75 +++++++++ - 5 files changed, 470 insertions(+) - create mode 100644 tests/src/cover_test_load_separated_image_failed.sh - create mode 100644 tests/src/cover_test_load_separated_image_success.sh - create mode 100644 tests/src/cover_test_save_separated_image_failed.sh - create mode 100644 tests/src/cover_test_save_separated_image_success.sh - create mode 100644 tests/src/cover_test_save_separated_image_with_rename.sh - -diff --git a/tests/src/cover_test_load_separated_image_failed.sh b/tests/src/cover_test_load_separated_image_failed.sh -new file mode 100644 -index 00000000..26590d0c ---- /dev/null -+++ b/tests/src/cover_test_load_separated_image_failed.sh -@@ -0,0 +1,145 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. 
-+# You can use this software according to the terms and conditions of the Mulan PSL v2. -+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. -+# Author: Xiang Li -+# Create: 2021-11-01 -+# Description: cover test for load separated image -+ -+test_name=${BASH_SOURCE[0]} -+workspace=/tmp/${test_name}.$(date +%s) -+mkdir -p "${workspace}" -+dockerfile=${workspace}/Dockerfile -+tarball_dir=${workspace}/Images -+rename_json=${workspace}/rename.json -+top_dir=$(git rev-parse --show-toplevel) -+# shellcheck disable=SC1091 -+source "${top_dir}"/tests/lib/separator.sh -+ -+function pre_run() { -+ base_image_name="hub.oepkgs.net/library/busybox:latest" -+ lib_image_name="lib:latest" -+ app1_image_name="app1:latest" -+ app2_image_name="app2:latest" -+ lib_layer_number=5 -+ app1_layer_number=4 -+ app2_layer_number=3 -+ touch_dockerfile "${base_image_name}" "${lib_image_name}" "${lib_layer_number}" "${dockerfile}" -+ build_image "${lib_image_name}" "${workspace}" -+ touch_dockerfile "${lib_image_name}" "${app1_image_name}" "${app1_layer_number}" "${dockerfile}" -+ build_image "${app1_image_name}" "${workspace}" -+ touch_dockerfile "${lib_image_name}" "${app2_image_name}" "${app2_layer_number}" "${dockerfile}" -+ build_image "${app2_image_name}" "${workspace}" -+ touch_rename_json "${rename_json}" -+ isula-build ctr-img save -b "${base_image_name}" -l "${lib_image_name}" -d "${tarball_dir}" "${app1_image_name}" "${app2_image_name}" -r "${rename_json}" -+ check_result_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+# empty -d flag and missing -b -+function test_run1() { -+ isula-build ctr-img load -l "${tarball_dir}"/base1.tar.gz -i "${app1_image_name}" -+ check_result_not_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+# empty -d flag and missing -l -+function test_run2() { -+ isula-build ctr-img load -b "${tarball_dir}"/base1.tar.gz -i "${app1_image_name}" -+ check_result_not_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+# empty -d, -b, -l flag -+function test_run3() { -+ isula-build ctr-img load -i "${app1_image_name}" -+ check_result_not_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+# use lib as base image tarball -+function test_run4() { -+ isula-build ctr-img load -d "${tarball_dir}" -b "${tarball_dir}"/lib1.tar.gz -i "${app1_image_name}" -+ check_result_not_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+# missing app tarball -+function test_run5() { -+ mv "${tarball_dir}"/app1.tar.gz "${workspace}" -+ isula-build ctr-img load -d "${tarball_dir}" -l "${tarball_dir}"/lib1.tar.gz -i "${app1_image_name}" -+ check_result_not_equal $? 0 -+ mv "${workspace}"/app1.tar.gz "${tarball_dir}" -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+# lib tarball not exist -+function test_run6() { -+ isula-build ctr-img load -d "${tarball_dir}" -l not_exist_lib.tar -i "${app1_image_name}" -+ check_result_not_equal $? 
0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+# base tarball not exist -+function test_run7() { -+ isula-build ctr-img load -d "${tarball_dir}" -b not_exist_base.tar -i "${app1_image_name}" -+ check_result_not_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+# invalid base tarball -+function test_run8() { -+ invalid_tarball=${workspace}/base1.tar -+ echo "invalid base tarball" >> "${invalid_tarball}" -+ isula-build ctr-img load -d "${tarball_dir}" -b "${invalid_tarball}" -i "${app1_image_name}" -+ check_result_not_equal $? 0 -+ rm -rf "${invalid_tarball}" -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+# invalid lib tarball -+function test_run9() { -+ invalid_tarball=${workspace}/lib1.tar -+ echo "invalid lib tarball" >> "${invalid_tarball}" -+ isula-build ctr-img load -d "${tarball_dir}" -l "${invalid_tarball}" -i "${app1_image_name}" -+ check_result_not_equal $? 0 -+ rm -rf "${invalid_tarball}" -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+# manifest file corruption -+function test_run10() { -+ cp "${tarball_dir}"/manifest "${tarball_dir}"/manifest.bk -+ sed -i "1d" "${tarball_dir}"/manifest -+ isula-build ctr-img load -d "${tarball_dir}" -d "${tarball_dir}" -i "${app1_image_name}" -+ check_result_not_equal $? 0 -+ mv "${tarball_dir}"/manifest.bk "${tarball_dir}"/manifest -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+function cleanup() { -+ rm -rf "${workspace}" -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+pre_run -+test_run1 -+test_run2 -+test_run3 -+test_run4 -+test_run5 -+test_run6 -+test_run7 -+test_run8 -+test_run9 -+test_run10 -+cleanup -+# shellcheck disable=SC2154 -+exit "${exit_flag}" -diff --git a/tests/src/cover_test_load_separated_image_success.sh b/tests/src/cover_test_load_separated_image_success.sh -new file mode 100644 -index 00000000..266b3eba ---- /dev/null -+++ b/tests/src/cover_test_load_separated_image_success.sh -@@ -0,0 +1,89 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. -+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. 
-+# Author: Xiang Li -+# Create: 2021-11-01 -+# Description: cover test for load separated image -+ -+test_name=${BASH_SOURCE[0]} -+workspace=/tmp/${test_name}.$(date +%s) -+mkdir -p "${workspace}" -+dockerfile=${workspace}/Dockerfile -+tarball_dir=${workspace}/Images -+rename_json=${workspace}/rename.json -+top_dir=$(git rev-parse --show-toplevel) -+# shellcheck disable=SC1091 -+source "${top_dir}"/tests/lib/separator.sh -+ -+function pre_run() { -+ base_image_name="hub.oepkgs.net/library/busybox:latest" -+ lib_image_name="lib:latest" -+ app1_image_name="app1:latest" -+ app2_image_name="app2:latest" -+ lib_layer_number=5 -+ app1_layer_number=4 -+ app2_layer_number=3 -+ touch_dockerfile "${base_image_name}" "${lib_image_name}" "${lib_layer_number}" "${dockerfile}" -+ build_image "${lib_image_name}" "${workspace}" -+ touch_dockerfile "${lib_image_name}" "${app1_image_name}" "${app1_layer_number}" "${dockerfile}" -+ build_image "${app1_image_name}" "${workspace}" -+ touch_dockerfile "${lib_image_name}" "${app2_image_name}" "${app2_layer_number}" "${dockerfile}" -+ build_image "${app2_image_name}" "${workspace}" -+ touch_rename_json "${rename_json}" -+ isula-build ctr-img save -b "${base_image_name}" -l "${lib_image_name}" -d "${tarball_dir}" "${app1_image_name}" "${app2_image_name}" -r "${rename_json}" -+ check_result_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+function test_run1() { -+ isula-build ctr-img load -d "${tarball_dir}" -b "${tarball_dir}"/base1.tar.gz -l "${tarball_dir}"/lib1.tar.gz -i "${app1_image_name}" -+ check_result_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+function test_run2() { -+ isula-build ctr-img load -d "${tarball_dir}" -b "${tarball_dir}"/base1.tar.gz -i "${app1_image_name}" -+ check_result_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+function test_run3() { -+ isula-build ctr-img load -d "${tarball_dir}" -l "${tarball_dir}"/lib1.tar.gz -i "${app1_image_name}" -+ check_result_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+function test_run4() { -+ isula-build ctr-img load -d "${tarball_dir}" -i "${app1_image_name}" -+ check_result_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+function test_run5() { -+ isula-build ctr-img load -d "${tarball_dir}" -i "${app1_image_name}" --no-check -+ check_result_equal $? 0 -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+function cleanup() { -+ rm -rf "${workspace}" -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+pre_run -+test_run1 -+test_run2 -+test_run3 -+test_run4 -+test_run5 -+cleanup -+# shellcheck disable=SC2154 -+exit "${exit_flag}" -diff --git a/tests/src/cover_test_save_separated_image_failed.sh b/tests/src/cover_test_save_separated_image_failed.sh -new file mode 100644 -index 00000000..c64dcf5d ---- /dev/null -+++ b/tests/src/cover_test_save_separated_image_failed.sh -@@ -0,0 +1,107 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. 
-+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. -+# Author: Xiang Li -+# Create: 2021-11-01 -+# Description: cover test for save separated image -+ -+test_name=${BASH_SOURCE[0]} -+workspace=/tmp/${test_name}.$(date +%s) -+mkdir -p "${workspace}" -+dockerfile=${workspace}/Dockerfile -+top_dir=$(git rev-parse --show-toplevel) -+# shellcheck disable=SC1091 -+source "${top_dir}"/tests/lib/separator.sh -+ -+function pre_run() { -+ base_image_name="hub.oepkgs.net/library/busybox:latest" -+ bad_lib_image_name="lib:bad" -+ bad_app1_image_name="app1:bad" -+ bad_app2_image_name="app2:bad" -+ lib_image_name="lib:latest" -+ app1_image_name="app1:latest" -+ app2_image_name="app2:latest" -+ bad_lib_layer_number=5 -+ bad_app1_layer_number=4 -+ bad_app2_layer_number=3 -+ lib_layer_number=5 -+ app1_layer_number=6 -+ app2_layer_number=7 -+ -+ # build bad dockerfile -+ touch_bad_dockerfile "${base_image_name}" "${bad_lib_image_name}" "${bad_lib_layer_number}" "${dockerfile}" -+ build_image "${bad_lib_image_name}" "${workspace}" -+ touch_bad_dockerfile "${bad_lib_image_name}" "${bad_app1_image_name}" "${bad_app1_layer_number}" "${dockerfile}" -+ build_image "${bad_app1_image_name}" "${workspace}" -+ touch_bad_dockerfile "${bad_lib_image_name}" "${bad_app2_image_name}" "${bad_app2_layer_number}" "${dockerfile}" -+ build_image "${bad_app2_image_name}" "${workspace}" -+ -+ # build normal dockerfile -+ touch_dockerfile "${base_image_name}" "${lib_image_name}" "${lib_layer_number}" "${dockerfile}" -+ build_image "${lib_image_name}" "${workspace}" -+ touch_dockerfile "${lib_image_name}" "${app1_image_name}" "${app1_layer_number}" "${dockerfile}" -+ build_image "${app1_image_name}" "${workspace}" -+ touch_dockerfile "${lib_image_name}" "${app2_image_name}" "${app2_layer_number}" "${dockerfile}" -+ build_image "${app2_image_name}" "${workspace}" -+} -+ -+function test_run1() { -+ isula-build ctr-img save -b "${base_image_name}" -l "${bad_lib_image_name}" -d "${workspace}"/Images "${bad_app1_image_name}" "${bad_app2_image_name}" -+ check_result_not_equal $? 0 -+ rm -rf "${workspace}"/Images -+} -+ -+function test_run2() { -+ isula-build ctr-img save -b "invalid:base" -l "${bad_lib_image_name}" -d "${workspace}"/Images "${bad_app1_image_name}" "${bad_app2_image_name}" -+ check_result_not_equal $? 0 -+ rm -rf "${workspace}"/Images -+} -+ -+function test_run3() { -+ isula-build ctr-img save -b "${base_image_name}" -l "livalid:lib" -d "${workspace}"/Images "${bad_app1_image_name}" "${bad_app2_image_name}" -+ check_result_not_equal $? 0 -+ rm -rf "${workspace}"/Images -+} -+ -+function test_run4() { -+ isula-build ctr-img save -b "${base_image_name}" -l "${bad_lib_image_name}" -d "${workspace}"/Images "invalid:app" "${bad_app2_image_name}" -+ check_result_not_equal $? 0 -+ rm -rf "${workspace}"/Images -+} -+ -+function test_run5() { -+ isula-build ctr-img save -b "${base_image_name}" -l "${bad_lib_image_name}" -d "${workspace}"/Images "${app1_image_name}" "${app2_image_name}" -+ check_result_not_equal $? 
0 -+ rm -rf "${workspace}"/Images -+} -+ -+function test_run6() { -+ isula-build ctr-img save -b "${base_image_name}" -l "${lib_image_name}" -d "${workspace}"/Images "${bad_app1_image_name}" "${bad_app2_image_name}" -+ check_result_not_equal $? 0 -+ rm -rf "${workspace}"/Images -+} -+ -+function cleanup() { -+ rm -rf "${workspace}" -+ isula-build ctr-img rm "${bad_lib_image_name}" "${bad_app1_image_name}" "${bad_app2_image_name}" "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+ isula-build ctr-img rm -p -+} -+ -+pre_run -+test_run1 -+test_run2 -+test_run3 -+test_run4 -+test_run5 -+test_run6 -+cleanup -+# shellcheck disable=SC2154 -+exit "${exit_flag}" -diff --git a/tests/src/cover_test_save_separated_image_success.sh b/tests/src/cover_test_save_separated_image_success.sh -new file mode 100644 -index 00000000..2095bd33 ---- /dev/null -+++ b/tests/src/cover_test_save_separated_image_success.sh -@@ -0,0 +1,54 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. -+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. -+# Author: Xiang Li -+# Create: 2021-11-01 -+# Description: cover test for save separated image -+ -+test_name=${BASH_SOURCE[0]} -+workspace=/tmp/${test_name}.$(date +%s) -+mkdir -p "${workspace}" -+dockerfile=${workspace}/Dockerfile -+top_dir=$(git rev-parse --show-toplevel) -+# shellcheck disable=SC1091 -+source "${top_dir}"/tests/lib/separator.sh -+ -+function pre_run() { -+ base_image_name="hub.oepkgs.net/library/busybox:latest" -+ lib_image_name="lib:latest" -+ app1_image_name="app1:latest" -+ app2_image_name="app2:latest" -+ lib_layer_number=5 -+ app1_layer_number=4 -+ app2_layer_number=3 -+ touch_dockerfile "${base_image_name}" "${lib_image_name}" "${lib_layer_number}" "${dockerfile}" -+ build_image "${lib_image_name}" "${workspace}" -+ touch_dockerfile "${lib_image_name}" "${app1_image_name}" "${app1_layer_number}" "${dockerfile}" -+ build_image "${app1_image_name}" "${workspace}" -+ touch_dockerfile "${lib_image_name}" "${app2_image_name}" "${app2_layer_number}" "${dockerfile}" -+ build_image "${app2_image_name}" "${workspace}" -+} -+ -+function test_run() { -+ isula-build ctr-img save -b "${base_image_name}" -l "${lib_image_name}" -d "${workspace}"/Images "${app1_image_name}" "${app2_image_name}" -+ check_result_equal $? 0 -+} -+ -+function cleanup() { -+ rm -rf "${workspace}" -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+pre_run -+test_run -+cleanup -+# shellcheck disable=SC2154 -+exit "${exit_flag}" -diff --git a/tests/src/cover_test_save_separated_image_with_rename.sh b/tests/src/cover_test_save_separated_image_with_rename.sh -new file mode 100644 -index 00000000..28904757 ---- /dev/null -+++ b/tests/src/cover_test_save_separated_image_with_rename.sh -@@ -0,0 +1,75 @@ -+#!/bin/bash -+ -+# Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+# isula-build licensed under the Mulan PSL v2. -+# You can use this software according to the terms and conditions of the Mulan PSL v2. 
-+# You may obtain a copy of Mulan PSL v2 at: -+# http://license.coscl.org.cn/MulanPSL2 -+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+# PURPOSE. -+# See the Mulan PSL v2 for more details. -+# Author: Xiang Li -+# Create: 2021-11-01 -+# Description: cover test for save separated image -+ -+test_name=${BASH_SOURCE[0]} -+workspace=/tmp/${test_name}.$(date +%s) -+mkdir -p "${workspace}" -+dockerfile=${workspace}/Dockerfile -+rename_json=${workspace}/rename.json -+invalid_rename_json=${workspace}/invalid.json -+none_exist_rename_json=${workspace}/none_exist.json -+top_dir=$(git rev-parse --show-toplevel) -+# shellcheck disable=SC1091 -+source "${top_dir}"/tests/lib/separator.sh -+ -+function pre_run() { -+ base_image_name="hub.oepkgs.net/library/busybox:latest" -+ lib_image_name="lib:latest" -+ app1_image_name="app1:latest" -+ app2_image_name="app2:latest" -+ lib_layer_number=5 -+ app1_layer_number=4 -+ app2_layer_number=3 -+ touch_dockerfile "${base_image_name}" "${lib_image_name}" "${lib_layer_number}" "${dockerfile}" -+ build_image "${lib_image_name}" "${workspace}" -+ touch_dockerfile "${lib_image_name}" "${app1_image_name}" "${app1_layer_number}" "${dockerfile}" -+ build_image "${app1_image_name}" "${workspace}" -+ touch_dockerfile "${lib_image_name}" "${app2_image_name}" "${app2_layer_number}" "${dockerfile}" -+ build_image "${app2_image_name}" "${workspace}" -+} -+ -+function test_run1() { -+ touch_rename_json "${rename_json}" -+ isula-build ctr-img save -b "${base_image_name}" -l "${lib_image_name}" -d "${workspace}"/Images -r "${rename_json}" "${app1_image_name}" "${app2_image_name}" -+ check_result_equal $? 0 -+ rm -rf "${workspace}"/Images -+} -+ -+function test_run2() { -+ touch_bad_rename_json "${invalid_rename_json}" -+ isula-build ctr-img save -b "${base_image_name}" -l "${lib_image_name}" -d "${workspace}"/Images -r "${invalid_rename_json}" "${app1_image_name}" "${app2_image_name}" -+ check_result_not_equal $? 0 -+ rm -rf "${workspace}"/Images -+} -+ -+function test_run3() { -+ isula-build ctr-img save -b "${base_image_name}" -l "${lib_image_name}" -d "${workspace}"/Images -r "${none_exist_rename_json}" "${app1_image_name}" "${app2_image_name}" -+ check_result_not_equal $? 
0 -+ rm -rf "${workspace}"/Images -+} -+ -+function cleanup() { -+ rm -rf "${workspace}" -+ isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" -+} -+ -+ -+pre_run -+test_run1 -+test_run2 -+test_run3 -+cleanup -+# shellcheck disable=SC2154 -+exit "${exit_flag}" --- -2.27.0 - diff --git a/patch/0085-util-add-unit-test-for-increment-util-functions.patch b/patch/0085-util-add-unit-test-for-increment-util-functions.patch deleted file mode 100644 index f71b164..0000000 --- a/patch/0085-util-add-unit-test-for-increment-util-functions.patch +++ /dev/null @@ -1,463 +0,0 @@ -From 133e789d445905f5d94a6c8cc3459b3729fb7335 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Thu, 28 Oct 2021 18:55:24 +0800 -Subject: [PATCH 07/16] util: add unit test for increment util functions - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - util/cipher.go | 6 +- - util/cipher_test.go | 237 ++++++++++++++++++++++++++++++++++++++++++++ - util/common.go | 3 +- - util/common_test.go | 131 ++++++++++++++++++++++++ - 4 files changed, 373 insertions(+), 4 deletions(-) - -diff --git a/util/cipher.go b/util/cipher.go -index d92705c3..ce47b71e 100644 ---- a/util/cipher.go -+++ b/util/cipher.go -@@ -234,9 +234,6 @@ func ReadPublicKey(path string) (rsa.PublicKey, error) { - - func hashFile(path string) (string, error) { - cleanPath := filepath.Clean(path) -- if len(cleanPath) == 0 { -- return "", errors.New("failed to hash empty path") -- } - if f, err := os.Stat(cleanPath); err != nil { - return "", errors.Errorf("failed to stat file %q", cleanPath) - } else if f.IsDir() { -@@ -282,6 +279,9 @@ func hashDir(path string) (string, error) { - // the checksum will be concatenated to next checksum until every file - // counted, the result will be used for final checksum calculation - func SHA256Sum(path string) (string, error) { -+ if len(path) == 0 { -+ return "", errors.New("failed to hash empty path") -+ } - path = filepath.Clean(path) - f, err := os.Stat(path) - if err != nil { -diff --git a/util/cipher_test.go b/util/cipher_test.go -index 1c0d21c9..bab6dfe3 100644 ---- a/util/cipher_test.go -+++ b/util/cipher_test.go -@@ -19,12 +19,15 @@ import ( - "crypto/sha256" - "crypto/sha512" - "hash" -+ "io/ioutil" -+ "os" - "path/filepath" - "strings" - "testing" - - "gotest.tools/v3/assert" - "gotest.tools/v3/fs" -+ constant "isula.org/isula-build" - ) - - const ( -@@ -216,3 +219,237 @@ func benchmarkGenerateRSAKey(scale int, b *testing.B) { - func BenchmarkGenerateRSAKey2048(b *testing.B) { benchmarkGenerateRSAKey(2048, b) } - func BenchmarkGenerateRSAKey3072(b *testing.B) { benchmarkGenerateRSAKey(3072, b) } - func BenchmarkGenerateRSAKey4096(b *testing.B) { benchmarkGenerateRSAKey(4096, b) } -+ -+func TestHashFile(t *testing.T) { -+ emptyFile := fs.NewFile(t, t.Name()) -+ defer emptyFile.Remove() -+ fileWithContent := fs.NewFile(t, t.Name()) -+ err := ioutil.WriteFile(fileWithContent.Path(), []byte("hello"), constant.DefaultRootFileMode) -+ assert.NilError(t, err) -+ defer fileWithContent.Remove() -+ dir := fs.NewDir(t, t.Name()) -+ defer dir.Remove() -+ -+ type args struct { -+ path string -+ } -+ tests := []struct { -+ name string -+ args args -+ want string -+ wantErr bool -+ }{ -+ { -+ name: "TC-hash empty file", -+ args: args{path: emptyFile.Path()}, -+ // empty file sha256sum always is -+ want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", -+ }, -+ { -+ name: "TC-hash file with content", -+ args: args{path: fileWithContent.Path()}, -+ want: 
"2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", -+ }, -+ { -+ name: "TC-hash file with empty path", -+ wantErr: true, -+ }, -+ { -+ name: "TC-hash file with invalid path", -+ args: args{path: "path not exist"}, -+ wantErr: true, -+ }, -+ { -+ name: "TC-hash file with directory path", -+ args: args{path: dir.Path()}, -+ wantErr: true, -+ }, -+ { -+ name: "TC-hash file with special device", -+ args: args{path: "/dev/cdrom"}, -+ wantErr: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ got, err := hashFile(tt.args.path) -+ if (err != nil) != tt.wantErr { -+ t.Errorf("hashFile() error = %v, wantErr %v", err, tt.wantErr) -+ return -+ } -+ if got != tt.want { -+ t.Errorf("hashFile() = %v, want %v", got, tt.want) -+ } -+ }) -+ } -+} -+ -+func TestHashDir(t *testing.T) { -+ root := fs.NewDir(t, t.Name()) -+ defer root.Remove() -+ -+ rootSub1 := root.Join("sub1") -+ os.MkdirAll(rootSub1, constant.DefaultRootDirMode) -+ defer os.RemoveAll(rootSub1) -+ rootSub1File := filepath.Join(rootSub1, "rootSub1File") -+ ioutil.WriteFile(rootSub1File, []byte("hello1"), constant.DefaultRootFileMode) -+ defer os.RemoveAll(rootSub1File) -+ -+ rootSub11 := filepath.Join(rootSub1, "sub11") -+ os.MkdirAll(rootSub11, constant.DefaultRootDirMode) -+ defer os.RemoveAll(rootSub11) -+ rootSub11File := filepath.Join(rootSub11, "rootSub11File") -+ ioutil.WriteFile(rootSub11File, []byte("hello11"), constant.DefaultRootFileMode) -+ defer os.RemoveAll(rootSub11File) -+ -+ emptyDir := fs.NewDir(t, t.Name()) -+ defer emptyDir.Remove() -+ emptyFile := root.Join("empty.tar") -+ _, err := os.Create(emptyFile) -+ assert.NilError(t, err) -+ defer os.RemoveAll(emptyFile) -+ -+ type args struct { -+ path string -+ } -+ tests := []struct { -+ name string -+ args args -+ want string -+ wantErr bool -+ }{ -+ { -+ name: "TC-hash empty dir", -+ args: args{path: emptyDir.Path()}, -+ want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", -+ }, -+ { -+ name: "TC-hash not exist dir", -+ args: args{path: "path not exist"}, -+ wantErr: true, -+ }, -+ { -+ name: "TC-hash multiple dirs", -+ args: args{path: root.Path()}, -+ want: "bdaaa88766b974876a14d85620b5a26795735c332445783a3a067e0052a59478", -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ got, err := hashDir(tt.args.path) -+ if (err != nil) != tt.wantErr { -+ t.Errorf("hashDir() error = %v, wantErr %v", err, tt.wantErr) -+ return -+ } -+ if got != tt.want { -+ t.Errorf("hashDir() = %v, want %v", got, tt.want) -+ } -+ }) -+ } -+} -+ -+func TestSHA256Sum(t *testing.T) { -+ root := fs.NewDir(t, t.Name()) -+ defer root.Remove() -+ -+ rootSub1 := root.Join("sub1") -+ os.MkdirAll(rootSub1, constant.DefaultRootDirMode) -+ defer os.RemoveAll(rootSub1) -+ rootSub1File := filepath.Join(rootSub1, "rootSub1File") -+ ioutil.WriteFile(rootSub1File, []byte("hello1"), constant.DefaultRootFileMode) -+ defer os.RemoveAll(rootSub1File) -+ -+ emptyDir := fs.NewDir(t, t.Name()) -+ defer emptyDir.Remove() -+ emptyFile := root.Join("empty.tar") -+ _, err := os.Create(emptyFile) -+ assert.NilError(t, err) -+ defer os.RemoveAll(emptyFile) -+ -+ type args struct { -+ path string -+ } -+ tests := []struct { -+ name string -+ args args -+ want string -+ wantErr bool -+ }{ -+ { -+ name: "TC-for dir", -+ args: args{path: root.Path()}, -+ want: "6a29015d578de92eabad6b20b3e3c0d4df521b03728cb4ee5667b15742154646", -+ }, -+ { -+ name: "TC-for file only", -+ args: args{path: emptyFile}, -+ want: 
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", -+ }, -+ { -+ name: "TC-for invalid file", -+ args: args{path: "/dev/cdrom"}, -+ wantErr: true, -+ }, -+ { -+ name: "TC-for path not exist", -+ args: args{path: "path not exist"}, -+ wantErr: true, -+ }, -+ { -+ name: "TC-for empty path", -+ wantErr: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ got, err := SHA256Sum(tt.args.path) -+ if (err != nil) != tt.wantErr { -+ t.Errorf("SHA256Sum() error = %v, wantErr %v", err, tt.wantErr) -+ return -+ } -+ if got != tt.want { -+ t.Errorf("SHA256Sum() = %v, want %v", got, tt.want) -+ } -+ }) -+ } -+} -+ -+func TestCheckSum(t *testing.T) { -+ emptyFile := fs.NewFile(t, t.Name()) -+ defer emptyFile.Remove() -+ -+ type args struct { -+ path string -+ target string -+ } -+ tests := []struct { -+ name string -+ args args -+ wantErr bool -+ }{ -+ { -+ name: "TC-normal case", -+ args: args{ -+ path: emptyFile.Path(), -+ target: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", -+ }, -+ }, -+ { -+ name: "TC-check sum failed", -+ args: args{path: emptyFile.Path(), target: "wrong"}, -+ wantErr: true, -+ }, -+ { -+ name: "TC-empty path", -+ args: args{target: "wrong"}, -+ wantErr: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if err := CheckSum(tt.args.path, tt.args.target); (err != nil) != tt.wantErr { -+ t.Errorf("CheckSum() error = %v, wantErr %v", err, tt.wantErr) -+ } -+ }) -+ } -+} -diff --git a/util/common.go b/util/common.go -index 4782b2ec..ff85da9c 100644 ---- a/util/common.go -+++ b/util/common.go -@@ -192,7 +192,8 @@ func IsValidImageName(name string) bool { - if err != nil { - return false - } -- if _, canonical := ref.(reference.Canonical); canonical { -+ -+ if _, isDigest := ref.(reference.Canonical); isDigest { - return false - } - return true -diff --git a/util/common_test.go b/util/common_test.go -index ed9edf6e..9831971a 100644 ---- a/util/common_test.go -+++ b/util/common_test.go -@@ -14,11 +14,14 @@ - package util - - import ( -+ "io/ioutil" -+ "os" - "path/filepath" - "testing" - - "gotest.tools/v3/assert" - "gotest.tools/v3/fs" -+ constant "isula.org/isula-build" - ) - - func TestCheckFileSize(t *testing.T) { -@@ -179,3 +182,131 @@ func TestParseServer(t *testing.T) { - }) - } - } -+ -+func TestIsValidImageName(t *testing.T) { -+ type args struct { -+ name string -+ } -+ tests := []struct { -+ name string -+ args args -+ want bool -+ }{ -+ { -+ name: "TC-valid image name", -+ args: args{name: "app:latest"}, -+ want: true, -+ }, -+ { -+ name: "TC-valid image name with domain", -+ args: args{name: "localhost:5000/app:latest"}, -+ want: true, -+ }, -+ { -+ name: "TC-invalid image name", -+ args: args{name: "app:latest:v1"}, -+ want: false, -+ }, -+ { -+ name: "TC-invalid image name with canonical format", -+ args: args{name: "alpine:3.2@sha256:a187dde48cd289ac374ad8539930628314bc581a481cdb41409c9289419ddb72"}, -+ want: false, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if got := IsValidImageName(tt.args.name); got != tt.want { -+ t.Errorf("IsValidImageName() = %v, want %v", got, tt.want) -+ } -+ }) -+ } -+} -+ -+func TestAnyFlagSet(t *testing.T) { -+ type args struct { -+ flags []string -+ } -+ tests := []struct { -+ name string -+ args args -+ want bool -+ }{ -+ { -+ name: "TC-some flag set", -+ args: args{flags: []string{"flag1", "flag2"}}, -+ want: true, -+ }, -+ { -+ name: "TC-none flag set", -+ args: args{flags: []string{}}, -+ 
want: false, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if got := AnyFlagSet(tt.args.flags...); got != tt.want { -+ t.Errorf("AnyFlagSet() = %v, want %v", got, tt.want) -+ } -+ }) -+ } -+} -+ -+func TestCheckLoadFile(t *testing.T) { -+ loadFile := fs.NewFile(t, t.Name()) -+ defer loadFile.Remove() -+ err := ioutil.WriteFile(loadFile.Path(), []byte("hello"), constant.DefaultRootFileMode) -+ assert.NilError(t, err) -+ -+ emptyFile := fs.NewFile(t, t.Name()) -+ defer emptyFile.Remove() -+ -+ root := fs.NewDir(t, t.Name()) -+ defer root.Remove() -+ -+ bigFile := filepath.Join(root.Path(), "bigFile") -+ f, err := os.Create(bigFile) -+ assert.NilError(t, err) -+ defer os.Remove(f.Name()) -+ err = f.Truncate(maxLoadFileSize + 1) -+ assert.NilError(t, err) -+ -+ type args struct { -+ path string -+ } -+ tests := []struct { -+ name string -+ args args -+ wantErr bool -+ }{ -+ { -+ name: "TC-normal load file", -+ args: args{path: loadFile.Path()}, -+ }, -+ { -+ name: "TC-load file not exist", -+ wantErr: true, -+ }, -+ { -+ name: "TC-empty load file", -+ args: args{path: emptyFile.Path()}, -+ wantErr: true, -+ }, -+ { -+ name: "TC-invalid load file", -+ args: args{path: "/dev/cdrom"}, -+ wantErr: true, -+ }, -+ { -+ name: "TC-load file too big", -+ args: args{path: bigFile}, -+ wantErr: true, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if err := CheckLoadFile(tt.args.path); (err != nil) != tt.wantErr { -+ t.Errorf("CheckLoadFile() error = %v, wantErr %v", err, tt.wantErr) -+ } -+ }) -+ } -+} --- -2.27.0 - diff --git a/patch/0086-bugfix-fix-random-sequence-for-saving-separated-imag.patch b/patch/0086-bugfix-fix-random-sequence-for-saving-separated-imag.patch deleted file mode 100644 index b271e4e..0000000 --- a/patch/0086-bugfix-fix-random-sequence-for-saving-separated-imag.patch +++ /dev/null @@ -1,43 +0,0 @@ -From d6c302a3d5563286614c59a442f4cd65a8351ce2 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Tue, 2 Nov 2021 20:54:24 +0800 -Subject: [PATCH 1/2] bugfix: fix random sequence for saving separated image - tarball - -reason: sort the map key and read the key in alohabetical orger - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - daemon/save.go | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -diff --git a/daemon/save.go b/daemon/save.go -index ecac5b6..9ad4e03 100644 ---- a/daemon/save.go -+++ b/daemon/save.go -@@ -20,6 +20,7 @@ import ( - "io/ioutil" - "os" - "path/filepath" -+ "sort" - "strings" - - "github.com/containers/image/v5/docker/reference" -@@ -513,7 +514,13 @@ func (s *separatorSave) processImageLayers(imgInfos map[string]imageInfo) error - libImagesMap = make(imageLayersMap, 1) - appImagesMap = make(imageLayersMap, 1) - ) -- for _, info := range imgInfos { -+ var sortedKey []string -+ for k := range imgInfos { -+ sortedKey = append(sortedKey, k) -+ } -+ sort.Strings(sortedKey) -+ for _, k := range sortedKey { -+ info := imgInfos[k] - if err := s.clearDirs(true); err != nil { - return errors.Wrap(err, "clear tmp dirs failed") - } --- -1.8.3.1 - diff --git a/patch/0087-bugfix-optimize-function-IsExist.patch b/patch/0087-bugfix-optimize-function-IsExist.patch deleted file mode 100644 index 5c15c45..0000000 --- a/patch/0087-bugfix-optimize-function-IsExist.patch +++ /dev/null @@ -1,443 +0,0 @@ -From 6866f2e7f80ac9d8decf0e34a34de31df17c25aa Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Tue, 2 Nov 2021 20:59:35 +0800 -Subject: [PATCH 2/2] bugfix: 
optimize function IsExist - -reason: IsExist should return two value: -1. err: if err is not nil, which means the - input path is not valid, so the caller - should just return -2. true/false: this boolean value indicate the - path is exist or not, the value only valid - when no err occured - -also add testcase for filepath.go file - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - cmd/cli/build.go | 4 +- - cmd/cli/load.go | 4 +- - cmd/cli/save.go | 8 +- - cmd/daemon/main.go | 20 +++- - util/cipher.go | 4 +- - util/filepath.go | 18 ++-- - util/filepath_test.go | 248 ++++++++++++++++++++++++++++++++++++++++++++++++++ - 7 files changed, 289 insertions(+), 17 deletions(-) - create mode 100644 util/filepath_test.go - -diff --git a/cmd/cli/build.go b/cmd/cli/build.go -index 3d9f549..b0f7765 100644 ---- a/cmd/cli/build.go -+++ b/cmd/cli/build.go -@@ -235,7 +235,9 @@ func checkAbsPath(path string) (string, error) { - } - path = util.MakeAbsolute(path, pwd) - } -- if util.IsExist(path) { -+ if exist, err := util.IsExist(path); err != nil { -+ return "", err -+ } else if exist { - return "", errors.Errorf("output file already exist: %q, try to remove existing tarball or rename output file", path) - } - -diff --git a/cmd/cli/load.go b/cmd/cli/load.go -index 44fefdd..90d189a 100644 ---- a/cmd/cli/load.go -+++ b/cmd/cli/load.go -@@ -202,7 +202,9 @@ func (sep *separatorLoadOption) check(pwd string) error { - } - - sep.dir = util.MakeAbsolute(sep.dir, pwd) -- if !util.IsExist(sep.dir) { -+ if exist, err := util.IsExist(sep.dir); err != nil { -+ return errors.Wrap(err, "resolve dir path failed") -+ } else if !exist { - return errors.Errorf("image tarball directory %q is not exist", sep.dir) - } - -diff --git a/cmd/cli/save.go b/cmd/cli/save.go -index 599d394..5a63e02 100644 ---- a/cmd/cli/save.go -+++ b/cmd/cli/save.go -@@ -112,7 +112,9 @@ func (sep *separatorSaveOption) check(pwd string) error { - sep.destPath = "Images" - } - sep.destPath = util.MakeAbsolute(sep.destPath, pwd) -- if util.IsExist(sep.destPath) { -+ if exist, err := util.IsExist(sep.destPath); err != nil { -+ return errors.Wrap(err, "check dest path failed") -+ } else if exist { - return errors.Errorf("dest path already exist: %q, try to remove or rename it", sep.destPath) - } - if len(sep.renameFile) != 0 { -@@ -162,7 +164,9 @@ func (opt *saveOptions) checkSaveOpts(args []string) error { - return err - } - opt.path = util.MakeAbsolute(opt.path, pwd) -- if util.IsExist(opt.path) { -+ if exist, err := util.IsExist(opt.path); err != nil { -+ return errors.Wrap(err, "check output path failed") -+ } else if exist { - return errors.Errorf("output file already exist: %q, try to remove existing tarball or rename output file", opt.path) - } - return nil -diff --git a/cmd/daemon/main.go b/cmd/daemon/main.go -index 4fd5356..3665f6b 100644 ---- a/cmd/daemon/main.go -+++ b/cmd/daemon/main.go -@@ -341,7 +341,9 @@ func setupWorkingDirectories() error { - return errors.Errorf("%q not an absolute dir, the \"dataroot\" and \"runroot\" must be an absolute path", dir) - } - -- if !util.IsExist(dir) { -+ if exist, err := util.IsExist(dir); err != nil { -+ return err -+ } else if !exist { - if err := os.MkdirAll(dir, constant.DefaultRootDirMode); err != nil { - return errors.Wrapf(err, "create directory for %q failed", dir) - } -@@ -363,7 +365,9 @@ func setupWorkingDirectories() error { - - func checkAndValidateConfig(cmd *cobra.Command) error { - // check if configuration.toml file exists, merge config if exists -- if 
!util.IsExist(constant.ConfigurationPath) { -+ if exist, err := util.IsExist(constant.ConfigurationPath); err != nil { -+ return err -+ } else if !exist { - logrus.Warnf("Main config file missing, the default configuration is used") - } else { - conf, err := loadConfig(constant.ConfigurationPath) -@@ -378,14 +382,18 @@ func checkAndValidateConfig(cmd *cobra.Command) error { - } - - // file policy.json must be exist -- if !util.IsExist(constant.SignaturePolicyPath) { -+ if exist, err := util.IsExist(constant.SignaturePolicyPath); err != nil { -+ return err -+ } else if !exist { - return errors.Errorf("policy config file %v is not exist", constant.SignaturePolicyPath) - } - - // check all config files - confFiles := []string{constant.RegistryConfigPath, constant.SignaturePolicyPath, constant.StorageConfigPath} - for _, file := range confFiles { -- if util.IsExist(file) { -+ if exist, err := util.IsExist(file); err != nil { -+ return err -+ } else if exist { - fi, err := os.Stat(file) - if err != nil { - return errors.Wrapf(err, "stat file %q failed", file) -@@ -402,7 +410,9 @@ func checkAndValidateConfig(cmd *cobra.Command) error { - } - - // if storage config file exists, merge storage config -- if util.IsExist(constant.StorageConfigPath) { -+ if exist, err := util.IsExist(constant.StorageConfigPath); err != nil { -+ return err -+ } else if exist { - return mergeStorageConfig(cmd) - } - -diff --git a/util/cipher.go b/util/cipher.go -index ce47b71..ecbbc47 100644 ---- a/util/cipher.go -+++ b/util/cipher.go -@@ -185,7 +185,9 @@ func DecryptRSA(data string, key *rsa.PrivateKey, h crypto.Hash) (string, error) - - // GenRSAPublicKeyFile store public key from rsa key pair into local file - func GenRSAPublicKeyFile(key *rsa.PrivateKey, path string) error { -- if IsExist(path) { -+ if exist, err := IsExist(path); err != nil { -+ return err -+ } else if exist { - if err := os.Remove(path); err != nil { - return errors.Errorf("failed to delete the residual key file: %v", err) - } -diff --git a/util/filepath.go b/util/filepath.go -index 59b22da..a10ed85 100644 ---- a/util/filepath.go -+++ b/util/filepath.go -@@ -56,14 +56,18 @@ func IsDirectory(path string) bool { - return fi.IsDir() - } - --// IsExist returns true if the path exists --func IsExist(path string) bool { -- if _, err := os.Lstat(path); err != nil { -- if os.IsNotExist(err) { -- return false -- } -+// IsExist returns true if the path exists when err is nil -+// and return false if path not exists when err is nil -+// Caller should focus on whether the err is nil or not -+func IsExist(path string) (bool, error) { -+ _, err := os.Lstat(path) -+ if err == nil { -+ return true, nil -+ } -+ if os.IsNotExist(err) { -+ return false, nil - } -- return true -+ return false, err - } - - // IsSymbolFile returns true if the path file is a symbol file -diff --git a/util/filepath_test.go b/util/filepath_test.go -new file mode 100644 -index 0000000..add4545 ---- /dev/null -+++ b/util/filepath_test.go -@@ -0,0 +1,248 @@ -+// Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. -+// isula-build licensed under the Mulan PSL v2. -+// You can use this software according to the terms and conditions of the Mulan PSL v2. -+// You may obtain a copy of Mulan PSL v2 at: -+// http://license.coscl.org.cn/MulanPSL2 -+// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -+// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -+// PURPOSE. 
-+// See the Mulan PSL v2 for more details. -+// Author: Xiang Li -+// Create: 2021-11-02 -+// Description: testcase for filepath related common functions -+ -+package util -+ -+import ( -+ "os" -+ "path/filepath" -+ "strings" -+ "testing" -+ -+ "gotest.tools/v3/assert" -+ constant "isula.org/isula-build" -+) -+ -+func TestIsExist(t *testing.T) { -+ type args struct { -+ path string -+ workingDir string -+ } -+ tests := []struct { -+ name string -+ args args -+ want string -+ isExist bool -+ wantErr bool -+ preHook func(t *testing.T, path string) -+ postHook func(t *testing.T) -+ }{ -+ { -+ name: "TC-filename too long", -+ args: args{ -+ path: strings.Repeat("a", 256), -+ workingDir: "/tmp", -+ }, -+ want: filepath.Join("/tmp", strings.Repeat("a", 256)), -+ isExist: false, -+ wantErr: true, -+ }, -+ { -+ name: "TC-filename valid", -+ args: args{ -+ path: strings.Repeat("a", 255), -+ workingDir: "/tmp", -+ }, -+ want: filepath.Join("/tmp", strings.Repeat("a", 255)), -+ isExist: false, -+ wantErr: false, -+ }, -+ { -+ name: "TC-path too long", -+ args: args{ -+ path: strings.Repeat(strings.Repeat("a", 256)+"/", 16), -+ workingDir: "/tmp", -+ }, -+ want: filepath.Join("/tmp", strings.Repeat(strings.Repeat("a", 256)+"/", 16)) + "/", -+ isExist: false, -+ wantErr: true, -+ }, -+ { -+ name: "TC-path exist", -+ args: args{ -+ path: strings.Repeat(strings.Repeat("a", 255)+"/", 15), -+ workingDir: "/tmp", -+ }, -+ want: filepath.Join("/tmp", strings.Repeat(strings.Repeat("a", 255)+"/", 15)) + "/", -+ isExist: true, -+ wantErr: false, -+ preHook: func(t *testing.T, path string) { -+ err := os.MkdirAll(path, constant.DefaultRootDirMode) -+ assert.NilError(t, err) -+ }, -+ postHook: func(t *testing.T) { -+ err := os.RemoveAll(filepath.Join("/tmp", strings.Repeat("a", 255)+"/")) -+ assert.NilError(t, err) -+ }, -+ }, -+ { -+ name: "TC-path with dot exist", -+ args: args{ -+ path: ".", -+ workingDir: filepath.Join("/tmp", strings.Repeat("./"+strings.Repeat("a", 255)+"/", 15)), -+ }, -+ want: filepath.Join("/tmp", strings.Repeat(strings.Repeat("a", 255)+"/", 15)) + "/", -+ isExist: true, -+ wantErr: false, -+ preHook: func(t *testing.T, path string) { -+ err := os.MkdirAll(path, constant.DefaultRootDirMode) -+ assert.NilError(t, err) -+ }, -+ postHook: func(t *testing.T) { -+ err := os.RemoveAll(filepath.Join("/tmp", strings.Repeat("a", 255)+"/")) -+ assert.NilError(t, err) -+ }, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ got := MakeAbsolute(tt.args.path, tt.args.workingDir) -+ if got != tt.want { -+ t.Errorf("MakeAbsolute() = %v, want %v", got, tt.want) -+ t.Skip() -+ } -+ -+ if tt.preHook != nil { -+ tt.preHook(t, got) -+ } -+ exist, err := IsExist(got) -+ if exist != tt.isExist { -+ t.Errorf("IsExist() = %v, want %v", exist, tt.isExist) -+ } -+ if (err != nil) != tt.wantErr { -+ t.Errorf("IsExist() = %v, want %v", err, tt.wantErr) -+ } -+ if tt.postHook != nil { -+ tt.postHook(t) -+ } -+ }) -+ } -+} -+ -+func TestIsSymbolFile(t *testing.T) { -+ originFile := "/tmp/originFile" -+ symbolFile := "/tmp/symbolFile" -+ noneExistFile := "/tmp/none_exist_file" -+ type args struct { -+ path string -+ } -+ tests := []struct { -+ name string -+ args args -+ want bool -+ preHook func(t *testing.T) -+ postHook func(t *testing.T) -+ }{ -+ { -+ name: "TC-is symbol file", -+ args: args{path: "/tmp/symbolFile"}, -+ want: true, -+ preHook: func(t *testing.T) { -+ _, err := os.Create(originFile) -+ assert.NilError(t, err) -+ assert.NilError(t, os.Symlink(originFile, symbolFile)) 
-+ }, -+ postHook: func(t *testing.T) { -+ assert.NilError(t, os.RemoveAll(originFile)) -+ assert.NilError(t, os.RemoveAll(symbolFile)) -+ }, -+ }, -+ { -+ name: "TC-is normal file", -+ args: args{path: originFile}, -+ want: false, -+ preHook: func(t *testing.T) { -+ _, err := os.Create(originFile) -+ assert.NilError(t, err) -+ }, -+ postHook: func(t *testing.T) { -+ assert.NilError(t, os.RemoveAll(originFile)) -+ }, -+ }, -+ { -+ name: "TC-file not exist", -+ args: args{path: noneExistFile}, -+ want: false, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if tt.preHook != nil { -+ tt.preHook(t) -+ } -+ if got := IsSymbolFile(tt.args.path); got != tt.want { -+ t.Errorf("IsSymbolFile() = %v, want %v", got, tt.want) -+ } -+ if tt.postHook != nil { -+ tt.postHook(t) -+ } -+ }) -+ } -+} -+ -+func TestIsDirectory(t *testing.T) { -+ dirPath := filepath.Join("/tmp", t.Name()) -+ filePath := filepath.Join("/tmp", t.Name()) -+ noneExistFile := "/tmp/none_exist_file" -+ -+ type args struct { -+ path string -+ } -+ tests := []struct { -+ name string -+ args args -+ want bool -+ preHook func(t *testing.T) -+ postHook func(t *testing.T) -+ }{ -+ { -+ name: "TC-is directory", -+ args: args{path: dirPath}, -+ preHook: func(t *testing.T) { -+ assert.NilError(t, os.MkdirAll(dirPath, constant.DefaultRootDirMode)) -+ }, -+ postHook: func(t *testing.T) { -+ assert.NilError(t, os.RemoveAll(dirPath)) -+ }, -+ want: true, -+ }, -+ { -+ name: "TC-is file", -+ args: args{path: dirPath}, -+ preHook: func(t *testing.T) { -+ _, err := os.Create(filePath) -+ assert.NilError(t, err) -+ }, -+ postHook: func(t *testing.T) { -+ assert.NilError(t, os.RemoveAll(filePath)) -+ }, -+ }, -+ { -+ name: "TC-path not exist", -+ args: args{path: noneExistFile}, -+ }, -+ } -+ for _, tt := range tests { -+ t.Run(tt.name, func(t *testing.T) { -+ if tt.preHook != nil { -+ tt.preHook(t) -+ } -+ if got := IsDirectory(tt.args.path); got != tt.want { -+ t.Errorf("IsDirectory() = %v, want %v", got, tt.want) -+ } -+ if tt.postHook != nil { -+ tt.postHook(t) -+ } -+ }) -+ } -+} --- -1.8.3.1 - diff --git a/patch/0088-bugfix-loaded-images-cover-existing-images-name-and-.patch b/patch/0088-bugfix-loaded-images-cover-existing-images-name-and-.patch deleted file mode 100644 index ad75be5..0000000 --- a/patch/0088-bugfix-loaded-images-cover-existing-images-name-and-.patch +++ /dev/null @@ -1,434 +0,0 @@ -From 6efcb2e785e452505894b0e1e589e72487439e17 Mon Sep 17 00:00:00 2001 -From: xingweizheng -Date: Wed, 27 Oct 2021 18:27:11 +0800 -Subject: [PATCH] bugfix: loaded images cover existing images name and tag - ---- - daemon/load.go | 114 +++++++++++++++++++++++++++++++------------- - daemon/load_test.go | 26 +++++----- - image/image.go | 66 ++++++++++++------------- - 3 files changed, 129 insertions(+), 77 deletions(-) - -diff --git a/daemon/load.go b/daemon/load.go -index 41690ab..b6d675b 100644 ---- a/daemon/load.go -+++ b/daemon/load.go -@@ -17,6 +17,8 @@ import ( - "io/ioutil" - "os" - "path/filepath" -+ "context" -+ "strings" - - "github.com/containers/image/v5/docker/tarfile" - ociarchive "github.com/containers/image/v5/oci/archive" -@@ -25,6 +27,7 @@ import ( - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - securejoin "github.com/cyphar/filepath-securejoin" -+ digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -@@ -72,6 +75,12 @@ type loadOptions struct { - sep 
separatorLoad - } - -+type singleImage struct { -+ index int -+ id string -+ nameTag []string -+} -+ - func (b *Backend) getLoadOptions(req *pb.LoadRequest) (loadOptions, error) { - var opt = loadOptions{ - path: req.GetPath(), -@@ -119,10 +128,8 @@ func (b *Backend) Load(req *pb.LoadRequest, stream pb.Control_LoadServer) error - "LoadID": req.GetLoadID(), - }).Info("LoadRequest received") - -- var ( -- si *storage.Image -- repoTags [][]string -- ) -+ var si *storage.Image -+ - opts, err := b.getLoadOptions(req) - if err != nil { - return errors.Wrap(err, "process load options failed") -@@ -142,7 +149,7 @@ func (b *Backend) Load(req *pb.LoadRequest, stream pb.Control_LoadServer) error - } - } - -- repoTags, err = tryToParseImageFormatFromTarball(b.daemon.opts.DataRoot, &opts) -+ imagesInTar, err := tryToParseImageFormatFromTarball(b.daemon.opts.DataRoot, &opts) - if err != nil { - return err - } -@@ -163,24 +170,30 @@ func (b *Backend) Load(req *pb.LoadRequest, stream pb.Control_LoadServer) error - eg.Go(func() error { - defer log.CloseContent() - -- for index, nameAndTag := range repoTags { -+ for _, singleImage := range imagesInTar { - _, si, err = image.ResolveFromImage(&image.PrepareImageOptions{ - Ctx: ctx, - FromImage: exporter.FormatTransport(opts.format, opts.path), -+ ToImage: singleImage.id, - SystemContext: image.GetSystemContext(), - Store: b.daemon.localStore, - Reporter: log, -- ManifestIndex: index, -+ ManifestIndex: singleImage.index, - }) - if err != nil { - return err - } - -- if sErr := b.daemon.localStore.SetNames(si.ID, nameAndTag); sErr != nil { -- return sErr -+ originalNames, err := b.daemon.localStore.Names(si.ID) -+ if err != nil { -+ return err -+ } -+ if err = b.daemon.localStore.SetNames(si.ID, append(originalNames, singleImage.nameTag...)); err != nil { -+ return err - } - - log.Print("Loaded image as %s\n", si.ID) -+ logrus.Infof("Loaded image as %s", si.ID) - } - - return nil -@@ -189,17 +202,11 @@ func (b *Backend) Load(req *pb.LoadRequest, stream pb.Control_LoadServer) error - if wErr := eg.Wait(); wErr != nil { - return wErr - } -- logrus.Infof("Loaded image as %s", si.ID) - - return nil - } - --func tryToParseImageFormatFromTarball(dataRoot string, opts *loadOptions) ([][]string, error) { -- var ( -- allRepoTags [][]string -- err error -- ) -- -+func tryToParseImageFormatFromTarball(dataRoot string, opts *loadOptions) ([]singleImage, error) { - // tmp dir will be removed after NewSourceFromFileWithContext - tmpDir, err := securejoin.SecureJoin(dataRoot, dataRootTmpDirPrefix) - if err != nil { -@@ -208,19 +215,21 @@ func tryToParseImageFormatFromTarball(dataRoot string, opts *loadOptions) ([][]s - systemContext := image.GetSystemContext() - systemContext.BigFilesTemporaryDir = tmpDir - -- allRepoTags, err = getDockerRepoTagFromImageTar(systemContext, opts.path) -+ // try docker format loading -+ imagesInTar, err := getDockerRepoTagFromImageTar(systemContext, opts.path) - if err == nil { - logrus.Infof("Parse image successful with %q format", constant.DockerTransport) - opts.format = constant.DockerArchiveTransport -- return allRepoTags, nil -+ return imagesInTar, nil - } - logrus.Warnf("Try to Parse image of docker format failed with error: %v", err) - -- allRepoTags, err = getOCIRepoTagFromImageTar(systemContext, opts.path) -+ // try oci format loading -+ imagesInTar, err = getOCIRepoTagFromImageTar(systemContext, opts.path) - if err == nil { - logrus.Infof("Parse image successful with %q format", constant.OCITransport) - opts.format = 
constant.OCIArchiveTransport -- return allRepoTags, nil -+ return imagesInTar, nil - } - logrus.Warnf("Try to parse image of oci format failed with error: %v", err) - -@@ -228,7 +237,7 @@ func tryToParseImageFormatFromTarball(dataRoot string, opts *loadOptions) ([][]s - return nil, errors.Wrap(err, "wrong image format detected from local tarball") - } - --func getDockerRepoTagFromImageTar(systemContext *types.SystemContext, path string) ([][]string, error) { -+func getDockerRepoTagFromImageTar(systemContext *types.SystemContext, path string) ([]singleImage, error) { - // tmp dir will be removed after NewSourceFromFileWithContext - tarfileSource, err := tarfile.NewSourceFromFileWithContext(systemContext, path) - if err != nil { -@@ -245,35 +254,74 @@ func getDockerRepoTagFromImageTar(systemContext *types.SystemContext, path strin - return nil, errors.Errorf("failed to get the top level image manifest: %v", err) - } - -- var allRepoTags [][]string -- for _, manifestItem := range topLevelImageManifest { -- allRepoTags = append(allRepoTags, manifestItem.RepoTags) -+ imagesInTar := make([]singleImage, 0, len(topLevelImageManifest)) -+ for i, manifestItem := range topLevelImageManifest { -+ imageID, err := parseConfigID(manifestItem.Config) -+ if err != nil { -+ return nil, err -+ } -+ imagesInTar = append(imagesInTar, singleImage{index: i, id: imageID, nameTag: manifestItem.RepoTags}) - } - -- return allRepoTags, nil -+ return imagesInTar, nil - } - --func getOCIRepoTagFromImageTar(systemContext *types.SystemContext, path string) ([][]string, error) { -- var ( -- err error -- ) -- -+func getOCIRepoTagFromImageTar(systemContext *types.SystemContext, path string) ([]singleImage, error) { - srcRef, err := alltransports.ParseImageName(exporter.FormatTransport(constant.OCIArchiveTransport, path)) - if err != nil { - return nil, errors.Wrap(err, "failed to parse image name of oci image format") - } - -+ imageID, err := getLoadedImageID(srcRef, systemContext) -+ if err != nil { -+ return nil, err -+ } - tarManifest, err := ociarchive.LoadManifestDescriptorWithContext(systemContext, srcRef) - if err != nil { - return nil, errors.Wrap(err, "failed to load manifest descriptor of oci image format") - } - -- // For now, we only support load single image in archive file -+ // For now, we only support loading oci-archive file with one single image - if _, ok := tarManifest.Annotations[imgspecv1.AnnotationRefName]; ok { -- return [][]string{{tarManifest.Annotations[imgspecv1.AnnotationRefName]}}, nil -+ return []singleImage{{0, imageID, []string{tarManifest.Annotations[imgspecv1.AnnotationRefName]}}}, nil -+ } -+ return []singleImage{{0, imageID, []string{}}}, nil -+} -+ -+func parseConfigID(configID string) (string, error) { -+ parts := strings.SplitN(configID, ".", 2) -+ if len(parts) != 2 { -+ return "", errors.New("wrong config info of manifest.json") -+ } -+ -+ configDigest := "sha256:" + digest.Digest(parts[0]) -+ if err := configDigest.Validate(); err != nil { -+ return "", errors.Wrapf(err, "failed to get config info") -+ } -+ -+ return "@" + configDigest.Encoded(), nil -+} -+ -+func getLoadedImageID(imageRef types.ImageReference, systemContext *types.SystemContext) (string, error) { -+ if imageRef == nil || systemContext == nil { -+ return "", errors.New("nil image reference or system context when loading image") -+ } -+ -+ newImage, err := imageRef.NewImage(context.TODO(), systemContext) -+ if err != nil { -+ return "", err -+ } -+ defer func() { -+ if err = newImage.Close(); err != nil { -+ 
logrus.Errorf("failed to close image: %v", err) -+ } -+ }() -+ imageDigest := newImage.ConfigInfo().Digest -+ if err = imageDigest.Validate(); err != nil { -+ return "", errors.Wrap(err, "failed to get config info") - } - -- return [][]string{{}}, nil -+ return "@" + imageDigest.Encoded(), nil - } - - func loadSeparatedImage(opt *loadOptions) error { -diff --git a/daemon/load_test.go b/daemon/load_test.go -index cbcb5d8..860b897 100644 ---- a/daemon/load_test.go -+++ b/daemon/load_test.go -@@ -31,9 +31,9 @@ import ( - ) - - const ( -- loadedTarFile = "load.tar" -+ loadedTarFile = "load.tar" - manifestJSONFile = "manifest.json" -- indexJSONFile = "index.json" -+ indexJSONFile = "index.json" - ) - - var ( -@@ -167,8 +167,10 @@ func TestLoadSingleImage(t *testing.T) { - } - ] - }`, -- format: "oci", -- withTag: true, -+ format: "oci", -+ withTag: true, -+ wantErr: true, -+ errString: "no such file or directory", - }, - { - name: "TC3 normal case load docker tar with no RepoTags", -@@ -197,8 +199,10 @@ func TestLoadSingleImage(t *testing.T) { - } - ] - }`, -- format: "oci", -- withTag: false, -+ format: "oci", -+ withTag: false, -+ wantErr: true, -+ errString: "no such file or directory", - }, - { - name: "TC5 abnormal case load docker tar with wrong manifestJSON", -@@ -217,7 +221,7 @@ func TestLoadSingleImage(t *testing.T) { - format: "docker", - withTag: true, - wantErr: true, -- errString: "error loading index", -+ errString: "no such file or directory", - }, - { - name: "TC6 abnormal case with wrong tar path", -@@ -312,10 +316,10 @@ func TestLoadMultipleImages(t *testing.T) { - path := dir.Join(loadedTarFile) - repoTags, err := tryToParseImageFormatFromTarball(daemon.opts.DataRoot, &loadOptions{path: path}) - assert.NilError(t, err) -- assert.Equal(t, repoTags[0][0], "registry.example.com/sayhello:first") -- assert.Equal(t, repoTags[1][0], "registry.example.com/sayhello:second") -- assert.Equal(t, repoTags[1][1], "registry.example.com/sayhello:third") -- assert.Equal(t, len(repoTags[2]), 0) -+ assert.Equal(t, repoTags[0].nameTag[0], "registry.example.com/sayhello:first") -+ assert.Equal(t, repoTags[1].nameTag[0], "registry.example.com/sayhello:second") -+ assert.Equal(t, repoTags[1].nameTag[1], "registry.example.com/sayhello:third") -+ assert.Equal(t, len(repoTags[2].nameTag), 0) - - req := &pb.LoadRequest{Path: path} - stream := &controlLoadServer{} -diff --git a/image/image.go b/image/image.go -index 5dda185..b24cb41 100644 ---- a/image/image.go -+++ b/image/image.go -@@ -37,7 +37,6 @@ import ( - "github.com/containers/image/v5/transports/alltransports" - "github.com/containers/image/v5/types" - "github.com/containers/storage" -- "github.com/containers/storage/pkg/stringid" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -@@ -55,6 +54,7 @@ type PrepareImageOptions struct { - SystemContext *types.SystemContext - Ctx context.Context - FromImage string -+ ToImage string - Store *store.Store - Reporter io.Writer - ManifestIndex int -@@ -125,20 +125,17 @@ func PullAndGetImageInfo(opt *PrepareImageOptions) (types.ImageReference, *stora - } - - if transport == "" { -- // if the image can be obtained from the local storage by image id, -- // then only one image can be obtained. -+ // if the image can be obtained from the local storage by image id, then only one image can be obtained. 
- if len(candidates) != 1 { - return nil, nil, errors.New("transport is empty and multi or no image be found") - } -- img, err := opt.Store.Image(candidates[0]) -- if err != nil { -- pLog.Errorf("Failed to find the image %q in local store: %v", candidates[0], err) -- return nil, nil, err -- } -- ref, err := is.Transport.ParseStoreReference(opt.Store, img.ID) -- if err != nil { -- return nil, nil, errors.Wrapf(err, "failed to get the ref in store by %q", candidates[0]) -+ -+ ref, img, fErr := FindImage(opt.Store, candidates[0]) -+ if fErr != nil { -+ pLog.Errorf("Failed to find the image %q in local store: %v", candidates[0], fErr) -+ return nil, nil, fErr - } -+ - pLog.Infof("Get image from local store first search by %q", opt.FromImage) - return ref, img, nil - } -@@ -147,25 +144,33 @@ func PullAndGetImageInfo(opt *PrepareImageOptions) (types.ImageReference, *stora - var errPull error - for _, strImage := range candidates { - var ( -- srcRef types.ImageReference -- pErr error -+ srcRef types.ImageReference -+ destImage string - ) - - imageName := exporter.FormatTransport(transport, strImage) -- if transport == constant.DockerArchiveTransport { -- srcRef, pErr = alltransports.ParseImageName(imageName + ":@" + strconv.Itoa(opt.ManifestIndex)) -- } else { -- srcRef, pErr = alltransports.ParseImageName(imageName) -- } -- if pErr != nil { -- pLog.Debugf("Failed to parse the image %q: %v", imageName, pErr) -- continue -- } -- -- destImage, err := getLocalImageNameFromRef(opt.Store, srcRef) -- if err != nil { -- pLog.Debugf("Failed to get local image name for %q: %v", imageName, err) -- continue -+ switch transport { -+ case constant.DockerArchiveTransport: -+ if srcRef, err = alltransports.ParseImageName(imageName + ":@" + strconv.Itoa(opt.ManifestIndex)); err != nil { -+ pLog.Debugf("Failed to parse the image %q with %q transport: %v", imageName, constant.DockerArchiveTransport, err) -+ continue -+ } -+ destImage = opt.ToImage -+ case constant.OCIArchiveTransport: -+ if srcRef, err = alltransports.ParseImageName(imageName); err != nil { -+ pLog.Debugf("Failed to parse the image %q with %q transport: %v", imageName, constant.OCIArchiveTransport, err) -+ continue -+ } -+ destImage = opt.ToImage -+ default: -+ if srcRef, err = alltransports.ParseImageName(imageName); err != nil { -+ pLog.Debugf("Failed to get local image name for %q: %v", imageName, err) -+ continue -+ } -+ if destImage, err = getLocalImageNameFromRef(srcRef); err != nil { -+ pLog.Debugf("Failed to parse store reference for %q: %v", destImage, err) -+ continue -+ } - } - - destRef, err := is.Transport.ParseStoreReference(opt.Store, destImage) -@@ -173,7 +178,6 @@ func PullAndGetImageInfo(opt *PrepareImageOptions) (types.ImageReference, *stora - pLog.Debugf("Failed to parse store reference for %q: %v", destImage, err) - continue - } -- - img, err := is.Transport.GetStoreImage(opt.Store, destRef) - if err == nil { - // find the unique image in local store by name or digest -@@ -246,14 +250,10 @@ func instantiatingImage(ctx context.Context, sc *types.SystemContext, ref types. 
- return baseImg, nil - } - --func getLocalImageNameFromRef(store storage.Store, srcRef types.ImageReference) (string, error) { -+func getLocalImageNameFromRef(srcRef types.ImageReference) (string, error) { - if srcRef == nil { - return "", errors.Errorf("reference to image is empty") - } -- -- if err := exporter.CheckArchiveFormat(srcRef.Transport().Name()); err == nil { -- return stringid.GenerateRandomID() + ":" + stringid.GenerateRandomID(), nil -- } - if srcRef.Transport().Name() != constant.DockerTransport { - return "", errors.Errorf("the %s transport is not supported yet", srcRef.Transport().Name()) - } --- -2.27.0 - diff --git a/patch/0089-isula-build-fix-panic-when-using-image-ID-to-save-se.patch b/patch/0089-isula-build-fix-panic-when-using-image-ID-to-save-se.patch deleted file mode 100644 index 02188a7..0000000 --- a/patch/0089-isula-build-fix-panic-when-using-image-ID-to-save-se.patch +++ /dev/null @@ -1,305 +0,0 @@ -From 2b845fdb3e3c9d23b0fec856bcd5ce8ced868683 Mon Sep 17 00:00:00 2001 -From: DCCooper <1866858@gmail.com> -Date: Thu, 4 Nov 2021 14:38:20 +0800 -Subject: [PATCH] isula-build:fix panic when using image ID to save separated - image - -Signed-off-by: DCCooper <1866858@gmail.com> ---- - daemon/save.go | 120 +++++++++++++-------- - .../src/cover_test_save_separated_image_failed.sh | 28 +++++ - .../src/cover_test_save_separated_image_success.sh | 24 ++++- - 3 files changed, 127 insertions(+), 45 deletions(-) - -diff --git a/daemon/save.go b/daemon/save.go -index 9ad4e03..9c5e563 100644 ---- a/daemon/save.go -+++ b/daemon/save.go -@@ -83,6 +83,7 @@ type separatorSave struct { - base string - lib string - dest string -+ renameFile string - enabled bool - } - -@@ -141,15 +142,7 @@ type tarballInfo struct { - BaseLayers []string `json:"baseLayer"` - } - --func (b *Backend) getSaveOptions(req *pb.SaveRequest) (saveOptions, error) { -- var sep = separatorSave{ -- base: req.GetSep().GetBase(), -- lib: req.GetSep().GetLib(), -- dest: req.GetSep().GetDest(), -- log: logrus.WithFields(logrus.Fields{"SaveID": req.GetSaveID()}), -- enabled: req.GetSep().GetEnabled(), -- } -- -+func (b *Backend) getSaveOptions(req *pb.SaveRequest) saveOptions { - var opt = saveOptions{ - sysCtx: image.GetSystemContext(), - localStore: b.daemon.localStore, -@@ -161,11 +154,19 @@ func (b *Backend) getSaveOptions(req *pb.SaveRequest) (saveOptions, error) { - outputPath: req.GetPath(), - logger: logger.NewCliLogger(constant.CliLogBufferLen), - logEntry: logrus.WithFields(logrus.Fields{"SaveID": req.GetSaveID(), "Format": req.GetFormat()}), -- sep: sep, - } - // normal save -- if !sep.enabled { -- return opt, nil -+ if !req.GetSep().GetEnabled() { -+ return opt -+ } -+ -+ opt.sep = separatorSave{ -+ base: req.GetSep().GetBase(), -+ lib: req.GetSep().GetLib(), -+ dest: req.GetSep().GetDest(), -+ log: logrus.WithFields(logrus.Fields{"SaveID": req.GetSaveID()}), -+ enabled: req.GetSep().GetEnabled(), -+ renameFile: req.GetSep().GetRename(), - } - - // save separated image -@@ -175,44 +176,22 @@ func (b *Backend) getSaveOptions(req *pb.SaveRequest) (saveOptions, error) { - baseDir := filepath.Join(tmpRoot, baseUntarTempDirName) - libDir := filepath.Join(tmpRoot, libUntarTempDirName) - -- opt.sep.tmpDir = imageTmpDir{ -- app: appDir, -- base: baseDir, -- lib: libDir, -- untar: untar, -- root: tmpRoot, -- } -+ opt.sep.tmpDir = imageTmpDir{app: appDir, base: baseDir, lib: libDir, untar: untar, root: tmpRoot} - opt.outputPath = filepath.Join(untar, unionTarName) -- renameFile := req.GetSep().GetRename() -- if 
len(renameFile) != 0 {
--        var reName []renames
--        if err := util.LoadJSONFile(renameFile, &reName); err != nil {
--            return saveOptions{}, err
--        }
--        opt.sep.renameData = reName
--    }
-
--    return opt, nil
-+    return opt
- }
-
- // Save receives a save request and save the image(s) into tarball
--func (b *Backend) Save(req *pb.SaveRequest, stream pb.Control_SaveServer) error {
-+func (b *Backend) Save(req *pb.SaveRequest, stream pb.Control_SaveServer) (err error) {
-     logrus.WithFields(logrus.Fields{
-         "SaveID": req.GetSaveID(),
-         "Format": req.GetFormat(),
-     }).Info("SaveRequest received")
-
--    var err error
--    opts, err := b.getSaveOptions(req)
--    if err != nil {
--        return errors.Wrap(err, "process save options failed")
--    }
--
--    if err = checkFormat(&opts); err != nil {
--        return err
--    }
--    if err = filterImageName(&opts); err != nil {
--        return err
-+    opts := b.getSaveOptions(req)
-+    if err = opts.check(); err != nil {
-+        return errors.Wrap(err, "check save options failed")
-     }
-
-     defer func() {
-@@ -299,7 +278,47 @@ func messageHandler(stream pb.Control_SaveServer, cliLogger *logger.Logger) func
-     }
- }
-
--func checkFormat(opts *saveOptions) error {
-+func (opts *saveOptions) check() error {
-+    if err := opts.checkImageNameIsID(); err != nil {
-+        return err
-+    }
-+    if err := opts.checkFormat(); err != nil {
-+        return err
-+    }
-+    if err := opts.filterImageName(); err != nil {
-+        return err
-+    }
-+    if err := opts.checkRenameFile(); err != nil {
-+        return err
-+    }
-+
-+    return nil
-+}
-+
-+func (opts *saveOptions) checkImageNameIsID() error {
-+    imageNames := opts.oriImgList
-+    if opts.sep.enabled {
-+        if len(opts.sep.base) != 0 {
-+            imageNames = append(imageNames, opts.sep.base)
-+        }
-+        if len(opts.sep.lib) != 0 {
-+            imageNames = append(imageNames, opts.sep.lib)
-+        }
-+    }
-+    for _, name := range imageNames {
-+        _, img, err := image.FindImage(opts.localStore, name)
-+        if err != nil {
-+            return errors.Wrapf(err, "check image name failed when finding image name %q", name)
-+        }
-+        if strings.HasPrefix(img.ID, name) && opts.sep.enabled {
-+            return errors.Errorf("using image ID %q as image name to save separated image is not allowed", name)
-+        }
-+    }
-+
-+    return nil
-+}
-+
-+func (opts *saveOptions) checkFormat() error {
-     switch opts.format {
-     case constant.DockerTransport:
-         opts.format = constant.DockerArchiveTransport
-@@ -312,7 +331,7 @@ func checkFormat(opts *saveOptions) error {
-     return nil
- }
-
--func filterImageName(opts *saveOptions) error {
-+func (opts *saveOptions) filterImageName() error {
-     if opts.format == constant.OCIArchiveTransport {
-         opts.finalImageOrdered = opts.oriImgList
-         return nil
-@@ -350,6 +369,18 @@ func filterImageName(opts *saveOptions) error {
-     return nil
- }
-
-+func (opts *saveOptions) checkRenameFile() error {
-+    if len(opts.sep.renameFile) != 0 {
-+        var reName []renames
-+        if err := util.LoadJSONFile(opts.sep.renameFile, &reName); err != nil {
-+            return errors.Wrap(err, "check rename file failed")
-+        }
-+        opts.sep.renameData = reName
-+    }
-+
-+    return nil
-+}
-+
- func getLayerHashFromStorage(store *store.Store, name string) ([]string, error) {
-     if len(name) == 0 {
-         return nil, nil
-@@ -770,6 +801,11 @@ func getLayersID(layer []string) []string {
-
- func (s *separatorSave) constructSingleImgInfo(mani imageManifest, store *store.Store) (imageInfo, error) {
-     var libLayers, appLayers []string
-+    // image name should not be empty here
-+    if len(mani.RepoTags) == 0 {
-+        return imageInfo{}, errors.New("image name and tag is empty")
-+    }
-+    // if there is more than one repoTag, will use first one as image name
-     imageRepoFields := strings.Split(mani.RepoTags[0], ":")
-     imageLayers := getLayersID(mani.Layers)
-
-diff --git a/tests/src/cover_test_save_separated_image_failed.sh b/tests/src/cover_test_save_separated_image_failed.sh
-index c64dcf5..66db580 100644
---- a/tests/src/cover_test_save_separated_image_failed.sh
-+++ b/tests/src/cover_test_save_separated_image_failed.sh
-@@ -89,6 +89,33 @@ function test_run6() {
-     rm -rf "${workspace}"/Images
- }
-
-+# using image id to save
-+function test_run7() {
-+    base_image_id=$(isula-build ctr-img images ${base_image_name} | tail -n 2 | head -n 1 | awk '{print $3}')
-+    lib_image_id=$(isula-build ctr-img images ${lib_image_name} | tail -n 2 | head -n 1 | awk '{print $3}')
-+    app_image_id=$(isula-build ctr-img images ${app1_image_name} | tail -n 2 | head -n 1 | awk '{print $3}')
-+    app1_image_id=$(isula-build ctr-img images ${app2_image_name} | tail -n 2 | head -n 1 | awk '{print $3}')
-+    # all name is image id
-+    isula-build ctr-img save -b "${base_image_id}" -l "${lib_image_id}" -d "${workspace}"/Images "${app1_image_id}" "${app2_image_id}"
-+    check_result_not_equal $? 0
-+    rm -rf "${workspace}"/Images
-+
-+    # app name is image id
-+    isula-build ctr-img save -b "${base_image_name}" -l "${lib_image_name}" -d "${workspace}"/Images "${app1_image_id}" "${app2_image_name}"
-+    check_result_not_equal $? 0
-+    rm -rf "${workspace}"/Images
-+
-+    # lib name is image id
-+    isula-build ctr-img save -b "${base_image_name}" -l "${lib_image_id}" -d "${workspace}"/Images "${app1_image_name}" "${app2_image_name}"
-+    check_result_not_equal $? 0
-+    rm -rf "${workspace}"/Images
-+
-+    # base name is image id
-+    isula-build ctr-img save -b "${base_image_id}" -l "${lib_image_name}" -d "${workspace}"/Images "${app1_image_name}" "${app2_image_name}"
-+    check_result_not_equal $? 0
-+    rm -rf "${workspace}"/Images
-+}
-+
- function cleanup() {
-     rm -rf "${workspace}"
-     isula-build ctr-img rm "${bad_lib_image_name}" "${bad_app1_image_name}" "${bad_app2_image_name}" "${lib_image_name}" "${app1_image_name}" "${app2_image_name}"
-@@ -102,6 +129,7 @@ test_run3
- test_run4
- test_run5
- test_run6
-+test_run7
- cleanup
- # shellcheck disable=SC2154
- exit "${exit_flag}"
-diff --git a/tests/src/cover_test_save_separated_image_success.sh b/tests/src/cover_test_save_separated_image_success.sh
-index 2095bd3..68d2cae 100644
---- a/tests/src/cover_test_save_separated_image_success.sh
-+++ b/tests/src/cover_test_save_separated_image_success.sh
-@@ -26,6 +26,11 @@ function pre_run() {
-     lib_image_name="lib:latest"
-     app1_image_name="app1:latest"
-     app2_image_name="app2:latest"
-+    base_image_short_name="b:latest"
-+    lib_image_short_name="l:latest"
-+    app1_image_short_name="a:latest"
-+    app2_image_short_name="c:latest"
-+
-     lib_layer_number=5
-     app1_layer_number=4
-     app2_layer_number=3
-@@ -37,18 +42,31 @@ function pre_run() {
-     build_image "${app2_image_name}" "${workspace}"
- }
-
--function test_run() {
-+function test_run1() {
-     isula-build ctr-img save -b "${base_image_name}" -l "${lib_image_name}" -d "${workspace}"/Images "${app1_image_name}" "${app2_image_name}"
-     check_result_equal $? 0
-+    rm -rf "${workspace}"
-+}
-+
-+# use short image name
-+function test_run2() {
-+    isula-build ctr-img tag "${base_image_name}" "${base_image_short_name}"
-+    isula-build ctr-img tag "${lib_image_name}" "${lib_image_short_name}"
-+    isula-build ctr-img tag "${app1_image_name}" "${app1_image_short_name}"
-+    isula-build ctr-img tag "${app2_image_name}" "${app2_image_short_name}"
-+    isula-build ctr-img save -b "${base_image_short_name}" -l "${lib_image_short_name}" -d "${workspace}"/Images "${app1_image_short_name}" "${app2_image_short_name}"
-+    check_result_equal $? 0
-+    rm -rf "${workspace}"
- }
-
- function cleanup() {
-     rm -rf "${workspace}"
--    isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}"
-+    isula-build ctr-img rm "${lib_image_name}" "${app1_image_name}" "${app2_image_name}" "${base_image_short_name}" "${lib_image_short_name}" "${app1_image_short_name}" "${app2_image_short_name}"
- }
-
- pre_run
--test_run
-+test_run1
-+test_run2
- cleanup
- # shellcheck disable=SC2154
- exit "${exit_flag}"
---
-1.8.3.1
-
diff --git a/patch/0090-enhancement-add-log-info-to-show-the-image-layer-num.patch b/patch/0090-enhancement-add-log-info-to-show-the-image-layer-num.patch
deleted file mode 100644
index c2e1262..0000000
--- a/patch/0090-enhancement-add-log-info-to-show-the-image-layer-num.patch
+++ /dev/null
@@ -1,117 +0,0 @@
-From 38bed99e8cb58ad3c7fe0ef386c66c558d16b569 Mon Sep 17 00:00:00 2001
-From: DCCooper <1866858@gmail.com>
-Date: Tue, 9 Nov 2021 19:18:45 +0800
-Subject: [PATCH] enhancement: add log info to show the image layer number
-
-reason: when save separated image, the layer number should be
-printed out into log
-
-Signed-off-by: DCCooper <1866858@gmail.com>
----
- daemon/save.go    | 18 +++++++++++++-----
- hack/unit_test.sh |  6 ++++--
- 2 files changed, 17 insertions(+), 7 deletions(-)
-
-diff --git a/daemon/save.go b/daemon/save.go
-index 9c5e563..f14a485 100644
---- a/daemon/save.go
-+++ b/daemon/save.go
-@@ -139,7 +139,7 @@ type tarballInfo struct {
-     BaseTarName   string   `json:"base"`
-     BaseHash      string   `json:"baseHash"`
-     BaseImageName string   `json:"baseImageName"`
--    BaseLayers    []string `json:"baseLayer"`
-+    BaseLayer     string   `json:"baseLayer"`
- }
-
- func (b *Backend) getSaveOptions(req *pb.SaveRequest) saveOptions {
-@@ -381,7 +381,7 @@ func (opts *saveOptions) checkRenameFile() error {
-     return nil
- }
-
--func getLayerHashFromStorage(store *store.Store, name string) ([]string, error) {
-+func (s *separatorSave) getLayerHashFromStorage(store *store.Store, name string) ([]string, error) {
-     if len(name) == 0 {
-         return nil, nil
-     }
-@@ -632,8 +632,11 @@ func (info imageInfo) processTarName(suffix string) string {
- func (info *imageInfo) processBaseImg(sep *separatorSave, baseImagesMap map[string]string, tarball *tarballInfo) error {
-     // process base
-     tarball.BaseImageName = sep.base
-+    if len(info.layers.base) != 0 {
-+        sep.log.Infof("Base image %s has %d layers", sep.base, len(info.layers.base))
-+        tarball.BaseLayer = info.layers.base[0]
-+    }
-     for _, layerID := range info.layers.base {
--        tarball.BaseLayers = append(tarball.BaseLayers, layerID)
-         if baseImg, ok := baseImagesMap[layerID]; !ok {
-             srcLayerPath := filepath.Join(sep.tmpDir.untar, layerID)
-             destLayerPath := filepath.Join(sep.tmpDir.base, layerID)
-@@ -673,6 +676,7 @@ func (info *imageInfo) processLibImg(sep *separatorSave, libImagesMap map[string
-     }
-
-     tarball.LibImageName = sep.lib
-+    sep.log.Infof("Lib image %s has %d layers", sep.lib, len(info.layers.lib))
-     for _, layerID := range info.layers.lib {
-         tarball.LibLayers = append(tarball.LibLayers, layerID)
-         if libImg, ok := libImagesMap[layerID]; !ok {
-@@ -709,6 +713,7 @@ func (info *imageInfo) processLibImg(sep *separatorSave, libImagesMap map[string
-
- func (info *imageInfo) processAppImg(sep *separatorSave, appImagesMap map[string]string, tarball *tarballInfo) error {
-     // process app
-+    sep.log.Infof("App image %s has %d layers", info.nameTag, len(info.layers.app))
-     appTarName := info.processTarName(appTarNameSuffix)
-     appTarName = sep.rename(appTarName)
-     appTarPath := filepath.Join(sep.dest, appTarName)
-@@ -834,14 +839,17 @@ func (s *separatorSave) constructSingleImgInfo(mani imageManifest, store *store.
- }
-
- func (s *separatorSave) checkLayersHash(layerHashMap map[string]string, store *store.Store) ([]string, []string, error) {
--    libHash, err := getLayerHashFromStorage(store, s.lib)
-+    libHash, err := s.getLayerHashFromStorage(store, s.lib)
-     if err != nil {
-         return nil, nil, errors.Wrapf(err, "get lib image %s layers failed", s.lib)
-     }
--    baseHash, err := getLayerHashFromStorage(store, s.base)
-+    baseHash, err := s.getLayerHashFromStorage(store, s.base)
-     if err != nil {
-         return nil, nil, errors.Wrapf(err, "get base image %s layers failed", s.base)
-     }
-+    if len(baseHash) > 1 {
-+        return nil, nil, errors.Errorf("number of base layers %d more than one", len(baseHash))
-+    }
-     if len(libHash) >= len(layerHashMap) || len(baseHash) >= len(layerHashMap) {
-         return nil, nil, errors.Errorf("number of base or lib layers is equal or greater than saved app layers")
-     }
-diff --git a/hack/unit_test.sh b/hack/unit_test.sh
-index 161feb6..b6a7978 100755
---- a/hack/unit_test.sh
-+++ b/hack/unit_test.sh
-@@ -28,6 +28,8 @@ go_test_mod_method="-mod=vendor"
- go_test_count_method="-count=1"
- go_test_timeout_flag="-timeout=300s"
- go_test_race_flag="-race"
-+go_test_covermode_flag="-covermode=atomic"
-+go_test_coverprofile_flag="-coverprofile=/dev/null"
-
- function precheck() {
-     if pgrep isula-builder > /dev/null 2>&1; then
-@@ -54,13 +56,13 @@ function run_unit_test() {
-         echo "Start to test: ${package}"
-         if [[ -n ${run_coverage} ]]; then
-             coverprofile_file="${covers_folder}/$(echo "${package}" | tr / -).cover"
--            coverprofile_flag="-coverprofile=${coverprofile_file}"
-+            go_test_coverprofile_flag="-coverprofile=${coverprofile_file}"
-             go_test_covermode_flag="-covermode=set"
-             go_test_race_flag=""
-         fi
-         # TEST_ARGS is " -args SKIP_REG=foo", so no double quote for it
-         # shellcheck disable=SC2086
--        go test -v ${go_test_race_flag} "${go_test_mod_method}" ${coverprofile_flag} "${go_test_covermode_flag}" -coverpkg=${package} "${go_test_count_method}" "${go_test_timeout_flag}" "${package}" ${TEST_ARGS} >> "${testlog}"
-+        go test -v "${go_test_race_flag}" "${go_test_mod_method}" "${go_test_coverprofile_flag}" "${go_test_covermode_flag}" -coverpkg=${package} "${go_test_count_method}" "${go_test_timeout_flag}" "${package}" ${TEST_ARGS} >> "${testlog}"
-     done
-
-     if grep -E -- "--- FAIL:|^FAIL" "${testlog}"; then
---
-1.8.3.1
-
diff --git a/patch/0091-add-repo-to-local-image-when-output-transporter-is-d.patch b/patch/0091-add-repo-to-local-image-when-output-transporter-is-d.patch
deleted file mode 100644
index 21b55d3..0000000
--- a/patch/0091-add-repo-to-local-image-when-output-transporter-is-d.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From c39db6aff78c1da4d6004c5ea92058121e706092 Mon Sep 17 00:00:00 2001
-From: xingweizheng
-Date: Wed, 27 Oct 2021 20:13:59 +0800
-Subject: [PATCH] add repo to local image
- when output transporter is docker://
-
----
- builder/dockerfile/builder.go      | 2 +-
- builder/dockerfile/builder_test.go | 4 ++--
- 2 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go
-index df8c6ce..7fff71b 100644
---- a/builder/dockerfile/builder.go
-+++ b/builder/dockerfile/builder.go
-@@ -627,7 +627,7 @@ func parseOutputTag(output string) string {
-         if repo == "" {
-             return ""
-         }
--        tag = repoAndTag[len(repo):]
-+        tag = strings.TrimLeft(repoAndTag, "/")
-     }
-
-     return tag
-diff --git a/builder/dockerfile/builder_test.go b/builder/dockerfile/builder_test.go
-index c2fec6c..20cca43 100644
---- a/builder/dockerfile/builder_test.go
-+++ b/builder/dockerfile/builder_test.go
-@@ -1300,12 +1300,12 @@ func TestParseTag(t *testing.T) {
-         {
-             name:   "docker output",
-             output: "docker://localhost:5000/isula/test:latest",
--            tag:    "isula/test:latest",
-+            tag:    "localhost:5000/isula/test:latest",
-         },
-         {
-             name:   "docker output",
-             output: "docker://localhost:5000/isula/test",
--            tag:    "isula/test",
-+            tag:    "localhost:5000/isula/test",
-         },
-         {
-             name:   "invalid docker output",
---
-1.8.3.1
-
diff --git a/series.conf b/series.conf
index 6cefad0..01bcda6 100644
--- a/series.conf
+++ b/series.conf
@@ -1,56 +1,2 @@
-patch/0013-vendor-change-auth.json-file-mode-from-0700-to-0600.patch
 patch/0030-xattr-support-ima-and-evm.patch
 patch/0033-isula-build-remove-docker-releated-path-for-authenti.patch
-patch/0037-isula-build-fix-goroutine-leak-problem.patch
-patch/0039-bugfix-remove-Healthcheck-field-when-build-from-scra.patch
-patch/0040-vendor-update-tabulate-vendor-to-support-eliminate-s.patch
-patch/0041-enhancement-remove-empty-lines-when-showing-image-li.patch
-patch/0042-fix-some-make-checkall-golangci-lint-flaws.patch
-patch/0043-enhancement-add-go-test-for-RUN-panic-problem.patch
-patch/0044-fix-load-oci-image-panic.patch
-patch/0045-fix-images-command-when-only-give-repository.patch
-patch/0046-check-if-add-default-tag-to-image-name-when-using-pu.patch
-patch/0047-checkAndExpandTag-return-empty-when-tag-is-empty.patch
-patch/0048-trim-space-when-counting-length-of-fields-to-avoid-p.patch
-patch/0049-fix-data-and-run-root-not-effective-when-setting-con.patch
-patch/0050-data-and-run-root-set-unit-test.patch
-patch/0051-bugfix-set-user-s-uid-and-gid-for-containers.patch
-patch/0052-hack-make-isula-build-binary-static.patch
-patch/0053-integration-test-from-new-flaw-of-run-and-data-root-.patch
-patch/0054-isula-build-cleancode-for-errors.Wrap-function.patch
-patch/0055-isula-build-change-isula-build-file-mode.patch
-patch/0056-isula-build-update-documents-about-file-mode.patch
-patch/0057-bugfix-pidofbuilder-do-not-set-when-running-a-new-ba.patch
-patch/0058-shellcheck-fix-of-common.sh.patch
-patch/0059-bugfix-fix-save-multiple-tags-single-image-failed.patch
-patch/0060-add-integration-test-for-saving-one-image-with-multi.patch
-patch/0061-fix-save-single-image-error-when-id-first-with-its-n.patch
-patch/0062-clean-code-staticcheck-fix-of-S1020-S1023-SA9003-S10.patch
-patch/0063-relocation-exporter-package-and-remove-unused-const.patch
-patch/0064-clean-code-tidy-FindImage-function.patch
-patch/0065-clean-code-delete-channel-within-the-same-goroutine.patch
-patch/0067-fix-golangci-lint-warnings.patch
-patch/0068-change-golangci-lint-config-and-remove-redundant-che.patch
-patch/0069-make-add-make-info-for-Makefile.patch
-patch/0070-clean-code-all-latest-tag-checks-take-the-FindImage-.patch
-patch/0071-use-image.GetNamedTaggedReference-instead-of-dockerf.patch
-patch/0072-protocol-define-separator-protocol.patch
-patch/0073-cli-finish-client-save-separated-image.patch
-patch/0074-daemon-finish-daemon-save-separated-image.patch
-patch/0075-cli-finish-client-load-separated-image.patch
-patch/0076-daemon-finish-daemon-load-separated-image.patch
-patch/0077-test-optimize-save-client-options-and-add-unit-test.patch
-patch/0078-test-optimize-load-client-options-and-add-unit-test.patch
-patch/0079-bugfix-fix-when-load-separated-image-error-return.patch
-patch/0080-util-add-unit-test-for-file.go.patch
-patch/0081-test-cleancode-test-for-better-experience.patch
-patch/0082-test-optimize-scripts-in-hack.patch
-patch/0083-test-add-common-function-for-testing-separated-image.patch
-patch/0084-test-add-integration-tests-for-saving-and-loading-se.patch
-patch/0085-util-add-unit-test-for-increment-util-functions.patch
-patch/0086-bugfix-fix-random-sequence-for-saving-separated-imag.patch
-patch/0087-bugfix-optimize-function-IsExist.patch
-patch/0088-bugfix-loaded-images-cover-existing-images-name-and-.patch
-patch/0089-isula-build-fix-panic-when-using-image-ID-to-save-se.patch
-patch/0090-enhancement-add-log-info-to-show-the-image-layer-num.patch
-patch/0091-add-repo-to-local-image-when-output-transporter-is-d.patch
diff --git a/v0.9.5.tar.gz b/v0.9.5.tar.gz
deleted file mode 100644
index 3dd3054..0000000
Binary files a/v0.9.5.tar.gz and /dev/null differ
diff --git a/v0.9.6.tar.gz b/v0.9.6.tar.gz
new file mode 100644
index 0000000..0ba2acc
Binary files /dev/null and b/v0.9.6.tar.gz differ