diff --git a/VERSION-openeuler b/VERSION-openeuler
index 6fa1f2b..58ea706 100644
--- a/VERSION-openeuler
+++ b/VERSION-openeuler
@@ -1 +1 @@
-0.9.6-3
+0.9.6-4
diff --git a/git-commit b/git-commit
index 564803b..1ddb98c 100644
--- a/git-commit
+++ b/git-commit
@@ -1 +1 @@
-2f8e5cc49d62d2f1c1ac161e9f3156b9f927084e
+505d5b5e92ce12031f2c253bc850a2692fb25e7f
diff --git a/isula-build.spec b/isula-build.spec
index 99981c1..8d75140 100644
--- a/isula-build.spec
+++ b/isula-build.spec
@@ -2,7 +2,7 @@ Name: isula-build
 Version: 0.9.6
-Release: 3
+Release: 4
 Summary: A tool to build container images
 License: Mulan PSL V2
 URL: https://gitee.com/openeuler/isula-build
@@ -85,6 +85,12 @@ fi
 /usr/share/bash-completion/completions/isula-build
 %changelog
+* Fri Dec 31 2021 jingxiaolu - 0.9.6-4
+- Type:enhancement
+- CVE:NA
+- SUG:restart
+- DESC:refactor image separator related
+
 * Thu Dec 23 2021 DCCooper <1866858@gmail.com> - 0.9.6-3
 - Type:bugfix
 - CVE:NA
diff --git a/patch/0099-Refactor-refactor-image-separator-related.patch b/patch/0099-Refactor-refactor-image-separator-related.patch
new file mode 100644
index 0000000..c828a1c
--- /dev/null
+++ b/patch/0099-Refactor-refactor-image-separator-related.patch
@@ -0,0 +1,2343 @@
+From c4f400ec1cfaf65d3e83dbd796f1f2b00574ba6e Mon Sep 17 00:00:00 2001
+From: Lu Jingxiao
+Date: Fri, 10 Dec 2021 13:36:26 +0800
+Subject: [PATCH] Refactor: refactor image separator related
+
+Signed-off-by: Lu Jingxiao
+---
+ cmd/cli/save.go | 4 +-
+ cmd/daemon/main.go | 7 +-
+ constant.go | 26 +-
+ daemon/daemon.go | 5 +-
+ daemon/import.go | 4 +-
+ daemon/load.go | 295 ++-------------
+ daemon/load_test.go | 2 +-
+ daemon/save.go | 649 +--------------------------------
+ daemon/separator/image_info.go | 235 ++++++++++++
+ daemon/separator/load.go | 283 ++++++++++++++
+ daemon/separator/save.go | 407 +++++++++++++++++++++
+ daemon/separator/utils.go | 78 ++++
+ 12 files changed, 1074 insertions(+), 921 deletions(-)
+ create mode 100644 daemon/separator/image_info.go
+ create mode 100644 daemon/separator/load.go
+ create mode 100644 daemon/separator/save.go
+ create mode 100644 daemon/separator/utils.go
+
+diff --git a/cmd/cli/save.go b/cmd/cli/save.go
+index 5a63e02..a30681f 100644
+--- a/cmd/cli/save.go
++++ b/cmd/cli/save.go
+@@ -144,8 +144,8 @@ func (opt *saveOptions) checkSaveOpts(args []string) error {
+ }
+ // separate image only support docker image spec
+ opt.format = constant.DockerTransport
+- if err := opt.sep.check(pwd); err != nil {
+- return err
++ if cerr := opt.sep.check(pwd); cerr != nil {
++ return cerr
+ }
+ opt.sep.enabled = true
+
+diff --git a/cmd/daemon/main.go b/cmd/daemon/main.go
+index 3665f6b..3cecbf9 100644
+--- a/cmd/daemon/main.go
++++ b/cmd/daemon/main.go
+@@ -36,10 +36,7 @@ import (
+ "isula.org/isula-build/util"
+ )
+
+-const (
+- lockFileName = "isula-builder.lock"
+- dataRootTmpDirPrefix = "tmp"
+-)
++const lockFileName = "isula-builder.lock"
+
+ var daemonOpts daemon.Options
+
+@@ -331,7 +328,7 @@ func setupWorkingDirectories() error {
+ return errors.Errorf("runroot(%q) and dataroot(%q) must be different paths", daemonOpts.RunRoot, daemonOpts.DataRoot)
+ }
+
+- buildTmpDir, err := securejoin.SecureJoin(daemonOpts.DataRoot, dataRootTmpDirPrefix)
++ buildTmpDir, err := securejoin.SecureJoin(daemonOpts.DataRoot, constant.DataRootTmpDirPrefix)
+ if err != nil {
+ return err
+ }
+diff --git a/constant.go b/constant.go
+index 4d1596a..5af4fe2 100644
+--- a/constant.go
++++ b/constant.go
+@@ -31,14 +31,20 @@ const (
+ RegistryDirPath = ConfigRoot +
"registries.d" + // AuthFilePath is authentication file used for registry connection + AuthFilePath = ConfigRoot + "auth.json" +- // DefaultGRPCAddress is the local unix socket used by isula-builder +- DefaultGRPCAddress = "unix:///var/run/isula_build.sock" +- // UnixPrefix is the prefix used on defined an unix sock +- UnixPrefix = "unix://" ++ // DefaultCertRoot is path of certification used for registry connection ++ DefaultCertRoot = ConfigRoot + "certs.d" ++ + // DefaultDataRoot is the default persistent data root used by isula-builder + DefaultDataRoot = "/var/lib/isula-build" + // DefaultRunRoot is the default run root used by isula-builder + DefaultRunRoot = "/var/run/isula-build" ++ // UnixPrefix is the prefix used on defined an unix sock ++ UnixPrefix = "unix://" ++ // DefaultGRPCAddress is the local unix socket used by isula-builder ++ DefaultGRPCAddress = UnixPrefix + "/var/run/isula_build.sock" ++ // DataRootTmpDirPrefix is the dir for storing temporary items using during images building ++ DataRootTmpDirPrefix = "tmp" ++ + // DefaultSharedDirMode is dir perm mode with higher permission + DefaultSharedDirMode = 0755 + // DefaultSharedFileMode is file perm mode with higher permission +@@ -53,12 +59,14 @@ const ( + DefaultReadOnlyFileMode = 0400 + // DefaultUmask is the working umask of isula-builder as a process, not for users + DefaultUmask = 0022 +- // CliLogBufferLen is log channel buffer size +- CliLogBufferLen = 8 ++ + // HostsFilePath is the path of file hosts + HostsFilePath = "/etc/hosts" + // ResolvFilePath is the path of file resolv.conf + ResolvFilePath = "/etc/resolv.conf" ++ ++ // CliLogBufferLen is log channel buffer size ++ CliLogBufferLen = 8 + // MaxFileSize is the maximum file size allowed, set 1M + MaxFileSize = 1024 * 1024 + // DefaultHTTPTimeout includes the total time of dial, TLS handshake, request, resp headers and body +@@ -67,14 +75,12 @@ const ( + DefaultFailedCode = 1 + // DefaultIDLen is the ID length for image ID and build ID + DefaultIDLen = 12 +- // DefaultCertRoot is path of certification used for registry connection +- DefaultCertRoot = ConfigRoot + "certs.d" ++ + // LayoutTime is the time format used to parse time from a string + LayoutTime = "2006-01-02 15:04:05" + // BuildContainerImageType is the default build type + BuildContainerImageType = "ctr-img" +- // BufferSize is default buffer size for file transportation +- BufferSize = 32 * 1024 ++ + // DockerTransport used to export docker image format images to registry + DockerTransport = "docker" + // DockerArchiveTransport used to export docker image format images to local tarball +diff --git a/daemon/daemon.go b/daemon/daemon.go +index 4e0435a..1847dbb 100644 +--- a/daemon/daemon.go ++++ b/daemon/daemon.go +@@ -28,6 +28,7 @@ import ( + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" + ++ constant "isula.org/isula-build" + pb "isula.org/isula-build/api/services" + "isula.org/isula-build/builder" + "isula.org/isula-build/pkg/gc" +@@ -37,8 +38,6 @@ import ( + "isula.org/isula-build/util" + ) + +-const dataRootTmpDirPrefix = "tmp" +- + // Options carries the options configured to daemon + type Options struct { + Debug bool +@@ -137,7 +136,7 @@ func (d *Daemon) NewBuilder(ctx context.Context, req *pb.BuildRequest) (b builde + runDir string + ) + // buildDir is used to set directory which is used to store tmp data +- buildDir, err = securejoin.SecureJoin(d.opts.DataRoot, filepath.Join(dataRootTmpDirPrefix, req.BuildID)) ++ buildDir, err = securejoin.SecureJoin(d.opts.DataRoot, 
filepath.Join(constant.DataRootTmpDirPrefix, req.BuildID)) + if err != nil { + return nil, err + } +diff --git a/daemon/import.go b/daemon/import.go +index 2da36be..21ab729 100644 +--- a/daemon/import.go ++++ b/daemon/import.go +@@ -87,7 +87,7 @@ func (b *Backend) Import(req *pb.ImportRequest, stream pb.Control_ImportServer) + + log := logger.NewCliLogger(constant.CliLogBufferLen) + imageCopyOptions := image.NewImageCopyOptions(log) +- tmpDir, err = securejoin.SecureJoin(b.daemon.opts.DataRoot, filepath.Join(dataRootTmpDirPrefix, importID)) ++ tmpDir, err = securejoin.SecureJoin(b.daemon.opts.DataRoot, filepath.Join(constant.DataRootTmpDirPrefix, importID)) + if err != nil { + return err + } +@@ -97,7 +97,7 @@ func (b *Backend) Import(req *pb.ImportRequest, stream pb.Control_ImportServer) + } + defer func() { + if rErr := os.RemoveAll(tmpDir); rErr != nil { +- logEntry.Errorf("Failed to remove import temporary dir %q, err: %v", filepath.Join(dataRootTmpDirPrefix, importID), rErr) ++ logEntry.Errorf("Failed to remove import temporary dir %q, err: %v", filepath.Join(constant.DataRootTmpDirPrefix, importID), rErr) + } + }() + +diff --git a/daemon/load.go b/daemon/load.go +index 894159b..1ee025b 100644 +--- a/daemon/load.go ++++ b/daemon/load.go +@@ -15,9 +15,7 @@ package daemon + + import ( + "context" +- "io/ioutil" + "os" +- "path/filepath" + "strings" + + "github.com/containers/image/v5/docker/tarfile" +@@ -25,7 +23,6 @@ import ( + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" + "github.com/containers/storage" +- "github.com/containers/storage/pkg/archive" + securejoin "github.com/cyphar/filepath-securejoin" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +@@ -35,89 +32,47 @@ import ( + + constant "isula.org/isula-build" + pb "isula.org/isula-build/api/services" ++ "isula.org/isula-build/daemon/separator" + "isula.org/isula-build/exporter" + "isula.org/isula-build/image" + "isula.org/isula-build/pkg/logger" + "isula.org/isula-build/util" + ) + +-const ( +- tmpBaseDirName = "base" +- tmpAppDirName = "app" +- tmpLibDirName = "lib" +- unionCompressedTarName = "all.tar.gz" +-) +- +-type loadImageTmpDir struct { +- app string +- base string +- lib string +- root string +-} +- +-type separatorLoad struct { +- log *logrus.Entry +- tmpDir loadImageTmpDir +- info tarballInfo +- appName string +- basePath string +- appPath string +- libPath string +- dir string +- skipCheck bool +- enabled bool +-} +- +-type loadOptions struct { +- logEntry *logrus.Entry +- path string +- format string +- sep separatorLoad +-} +- + type singleImage struct { + index int + id string + nameTag []string + } + +-func (b *Backend) getLoadOptions(req *pb.LoadRequest) (loadOptions, error) { +- var opt = loadOptions{ +- path: req.GetPath(), +- sep: separatorLoad{ +- appName: req.GetSep().GetApp(), +- basePath: req.GetSep().GetBase(), +- libPath: req.GetSep().GetLib(), +- dir: req.GetSep().GetDir(), +- skipCheck: req.GetSep().GetSkipCheck(), +- enabled: req.GetSep().GetEnabled(), +- }, +- logEntry: logrus.WithFields(logrus.Fields{"LoadID": req.GetLoadID()}), ++// LoadOptions stores the options for image loading ++type LoadOptions struct { ++ LogEntry *logrus.Entry ++ path string ++ format string ++ sep separator.Loader ++} ++ ++func (b *Backend) getLoadOptions(req *pb.LoadRequest) (LoadOptions, error) { ++ var err error ++ var opt = LoadOptions{ ++ path: req.GetPath(), ++ LogEntry: 
logrus.WithFields(logrus.Fields{"LoadID": req.GetLoadID()}), + } + +- // normal loadOptions +- if !opt.sep.enabled { +- if err := util.CheckLoadFile(opt.path); err != nil { +- return loadOptions{}, err ++ // normal image loading ++ if !req.GetSep().GetEnabled() { ++ if err = util.CheckLoadFile(opt.path); err != nil { ++ return LoadOptions{}, err + } + return opt, nil + } + +- // load separated images +- // log is used for sep methods +- opt.sep.log = opt.logEntry +- tmpRoot := filepath.Join(b.daemon.opts.DataRoot, filepath.Join(dataRootTmpDirPrefix, req.GetLoadID())) +- opt.sep.tmpDir.root = tmpRoot +- opt.sep.tmpDir.base = filepath.Join(tmpRoot, tmpBaseDirName) +- opt.sep.tmpDir.app = filepath.Join(tmpRoot, tmpAppDirName) +- opt.sep.tmpDir.lib = filepath.Join(tmpRoot, tmpLibDirName) +- +- // check image name and add "latest" tag if not present +- _, appImgName, err := image.GetNamedTaggedReference(opt.sep.appName) ++ // separated images loading ++ opt.sep, err = separator.GetSepLoadOptions(req, opt.LogEntry, b.daemon.opts.DataRoot) + if err != nil { +- return loadOptions{}, err ++ return LoadOptions{}, err + } +- opt.sep.appName = appImgName + + return opt, nil + } +@@ -136,15 +91,16 @@ func (b *Backend) Load(req *pb.LoadRequest, stream pb.Control_LoadServer) error + } + + defer func() { +- if tErr := os.RemoveAll(opts.sep.tmpDir.root); tErr != nil { +- opts.logEntry.Warnf("Removing load tmp directory %q failed: %v", opts.sep.tmpDir.root, tErr) ++ if tErr := os.RemoveAll(opts.sep.TmpDirRoot()); tErr != nil { ++ opts.LogEntry.Warnf("Removing load tmp directory %q failed: %v", opts.sep.TmpDirRoot(), tErr) + } + }() + + // construct separated images +- if opts.sep.enabled { +- if lErr := loadSeparatedImage(&opts); lErr != nil { +- opts.logEntry.Errorf("Load separated image for %s failed: %v", opts.sep.appName, lErr) ++ if opts.sep.Enabled() { ++ var lErr error ++ if opts.path, lErr = opts.sep.LoadSeparatedImage(); lErr != nil { ++ opts.LogEntry.Errorf("Load separated image for %s failed: %v", opts.sep.AppName(), lErr) + return lErr + } + } +@@ -206,9 +162,9 @@ func (b *Backend) Load(req *pb.LoadRequest, stream pb.Control_LoadServer) error + return nil + } + +-func tryToParseImageFormatFromTarball(dataRoot string, opts *loadOptions) ([]singleImage, error) { ++func tryToParseImageFormatFromTarball(dataRoot string, opts *LoadOptions) ([]singleImage, error) { + // tmp dir will be removed after NewSourceFromFileWithContext +- tmpDir, err := securejoin.SecureJoin(dataRoot, dataRootTmpDirPrefix) ++ tmpDir, err := securejoin.SecureJoin(dataRoot, constant.DataRootTmpDirPrefix) + if err != nil { + return nil, err + } +@@ -323,196 +279,3 @@ func getLoadedImageID(imageRef types.ImageReference, systemContext *types.System + + return "@" + imageDigest.Encoded(), nil + } +- +-func loadSeparatedImage(opt *loadOptions) error { +- s := &opt.sep +- s.log.Infof("Starting load separated image %s", s.appName) +- +- // load manifest file to get tarball info +- if err := s.getTarballInfo(); err != nil { +- return errors.Wrap(err, "failed to get tarball info") +- } +- if err := s.constructTarballInfo(); err != nil { +- return err +- } +- // checksum for image tarballs +- if err := s.tarballCheckSum(); err != nil { +- return err +- } +- // process image tarballs and get final constructed image tarball +- tarPath, err := s.processTarballs() +- if err != nil { +- return err +- } +- opt.path = tarPath +- +- return nil +-} +- +-func (s *separatorLoad) getTarballInfo() error { +- manifest, err := 
securejoin.SecureJoin(s.dir, manifestFile) +- if err != nil { +- return errors.Wrap(err, "join manifest file path failed") +- } +- +- var t = make(map[string]tarballInfo, 1) +- if err = util.LoadJSONFile(manifest, &t); err != nil { +- return errors.Wrap(err, "load manifest file failed") +- } +- +- tarball, ok := t[s.appName] +- if !ok { +- return errors.Errorf("failed to find app image %s", s.appName) +- } +- s.info = tarball +- +- return nil +-} +- +-func (s *separatorLoad) constructTarballInfo() (err error) { +- s.log.Infof("Construct image tarball info for %s", s.appName) +- // fill up path for separator +- // this case should not happened since client side already check this flag +- if len(s.appName) == 0 { +- return errors.New("app image name should not be empty") +- } +- s.appPath, err = securejoin.SecureJoin(s.dir, s.info.AppTarName) +- if err != nil { +- return err +- } +- +- if len(s.basePath) == 0 { +- if len(s.info.BaseTarName) == 0 { +- return errors.Errorf("base image %s tarball can not be empty", s.info.BaseImageName) +- } +- s.log.Info("Base image path is empty, use path from manifest") +- s.basePath, err = securejoin.SecureJoin(s.dir, s.info.BaseTarName) +- if err != nil { +- return err +- } +- } +- if len(s.libPath) == 0 && len(s.info.LibTarName) != 0 { +- s.log.Info("Lib image path is empty, use path from manifest") +- s.libPath, err = securejoin.SecureJoin(s.dir, s.info.LibTarName) +- if err != nil { +- return err +- } +- } +- +- return nil +-} +- +-func (s *separatorLoad) tarballCheckSum() error { +- if s.skipCheck { +- s.log.Info("Skip checksum for tarballs") +- return nil +- } +- +- type checkInfo struct { +- path string +- hash string +- str string +- canBeEmpty bool +- } +- checkLen := 3 +- var checkList = make([]checkInfo, 0, checkLen) +- checkList = append(checkList, checkInfo{path: s.basePath, hash: s.info.BaseHash, canBeEmpty: false, str: "base image"}) +- checkList = append(checkList, checkInfo{path: s.libPath, hash: s.info.LibHash, canBeEmpty: true, str: "lib image"}) +- checkList = append(checkList, checkInfo{path: s.appPath, hash: s.info.AppHash, canBeEmpty: false, str: "app image"}) +- for _, p := range checkList { +- if len(p.path) == 0 && !p.canBeEmpty { +- return errors.Errorf("%s tarball path can not be empty", p.str) +- } +- if len(p.path) != 0 { +- if err := util.CheckSum(p.path, p.hash); err != nil { +- return errors.Wrapf(err, "check sum for file %q failed", p.path) +- } +- } +- } +- +- return nil +-} +- +-func (s *separatorLoad) processTarballs() (string, error) { +- if err := s.unpackTarballs(); err != nil { +- return "", err +- } +- +- if err := s.reconstructImage(); err != nil { +- return "", err +- } +- +- // pack app image to tarball +- tarPath := filepath.Join(s.tmpDir.root, unionCompressedTarName) +- if err := util.PackFiles(s.tmpDir.base, tarPath, archive.Gzip, true); err != nil { +- return "", err +- } +- +- return tarPath, nil +-} +- +-func (s *separatorLoad) unpackTarballs() error { +- if err := s.makeTempDir(); err != nil { +- return errors.Wrap(err, "failed to make temporary directories") +- } +- +- type unpackInfo struct{ path, dir, str string } +- unpackLen := 3 +- var unpackList = make([]unpackInfo, 0, unpackLen) +- unpackList = append(unpackList, unpackInfo{path: s.basePath, dir: s.tmpDir.base, str: "base image"}) +- unpackList = append(unpackList, unpackInfo{path: s.appPath, dir: s.tmpDir.app, str: "app image"}) +- unpackList = append(unpackList, unpackInfo{path: s.libPath, dir: s.tmpDir.lib, str: "lib image"}) +- +- for _, p := 
range unpackList { +- if len(p.path) != 0 { +- if err := util.UnpackFile(p.path, p.dir, archive.Gzip, false); err != nil { +- return errors.Wrapf(err, "unpack %s tarball %q failed", p.str, p.path) +- } +- } +- } +- +- return nil +-} +- +-func (s *separatorLoad) reconstructImage() error { +- files, err := ioutil.ReadDir(s.tmpDir.app) +- if err != nil { +- return err +- } +- +- for _, f := range files { +- src := filepath.Join(s.tmpDir.app, f.Name()) +- dest := filepath.Join(s.tmpDir.base, f.Name()) +- if err := os.Rename(src, dest); err != nil { +- return errors.Wrapf(err, "reconstruct app file %q failed", s.info.AppTarName) +- } +- } +- +- if len(s.libPath) != 0 { +- files, err := ioutil.ReadDir(s.tmpDir.lib) +- if err != nil { +- return err +- } +- +- for _, f := range files { +- src := filepath.Join(s.tmpDir.lib, f.Name()) +- dest := filepath.Join(s.tmpDir.base, f.Name()) +- if err := os.Rename(src, dest); err != nil { +- return errors.Wrapf(err, "reconstruct lib file %q failed", s.info.LibTarName) +- } +- } +- } +- +- return nil +-} +- +-func (s *separatorLoad) makeTempDir() error { +- dirs := []string{s.tmpDir.root, s.tmpDir.app, s.tmpDir.base, s.tmpDir.lib} +- for _, dir := range dirs { +- if err := os.MkdirAll(dir, constant.DefaultRootDirMode); err != nil { +- return err +- } +- } +- +- return nil +-} +diff --git a/daemon/load_test.go b/daemon/load_test.go +index 860b897..5e1a42b 100644 +--- a/daemon/load_test.go ++++ b/daemon/load_test.go +@@ -314,7 +314,7 @@ func TestLoadMultipleImages(t *testing.T) { + defer clean(dir) + + path := dir.Join(loadedTarFile) +- repoTags, err := tryToParseImageFormatFromTarball(daemon.opts.DataRoot, &loadOptions{path: path}) ++ repoTags, err := tryToParseImageFormatFromTarball(daemon.opts.DataRoot, &LoadOptions{path: path}) + assert.NilError(t, err) + assert.Equal(t, repoTags[0].nameTag[0], "registry.example.com/sayhello:first") + assert.Equal(t, repoTags[1].nameTag[0], "registry.example.com/sayhello:second") +diff --git a/daemon/save.go b/daemon/save.go +index 7a110bd..708fab3 100644 +--- a/daemon/save.go ++++ b/daemon/save.go +@@ -15,24 +15,19 @@ package daemon + + import ( + "context" +- "encoding/json" +- "fmt" +- "io/ioutil" + "os" + "path/filepath" +- "sort" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" +- "github.com/containers/storage/pkg/archive" +- "github.com/docker/docker/pkg/ioutils" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" + + constant "isula.org/isula-build" + pb "isula.org/isula-build/api/services" ++ "isula.org/isula-build/daemon/separator" + "isula.org/isula-build/exporter" + savedocker "isula.org/isula-build/exporter/docker/archive" + "isula.org/isula-build/image" +@@ -41,28 +36,13 @@ import ( + "isula.org/isula-build/util" + ) + +-const ( +- manifestDataFile = "manifest.json" +- manifestFile = "manifest" +- repositoriesFile = "repositories" +- baseTarNameSuffix = "_base_image.tar.gz" +- appTarNameSuffix = "_app_image.tar.gz" +- libTarNameSuffix = "_lib_image.tar.gz" +- untarTempDirName = "untar" +- baseUntarTempDirName = "base_images" +- appUntarTempDirName = "app_images" +- libUntarTempDirName = "lib_images" +- unionTarName = "all.tar" +- layerTarName = "layer.tar" +- tarSuffix = ".tar" +-) +- + type savedImage struct { + exist bool + tags []reference.NamedTagged + } + +-type saveOptions struct { ++// SaveOptions stores the options for saving images ++type SaveOptions struct { + sysCtx *types.SystemContext + localStore 
*store.Store + logger *logger.Logger +@@ -73,77 +53,11 @@ type saveOptions struct { + oriImgList []string + finalImageOrdered []string + finalImageSet map[string]*savedImage +- sep separatorSave +-} +- +-type separatorSave struct { +- log *logrus.Entry +- renameData []renames +- tmpDir imageTmpDir +- base string +- lib string +- dest string +- renameFile string +- enabled bool +-} +- +-type renames struct { +- Name string `json:"name"` +- Rename string `json:"rename"` +-} +- +-type imageTmpDir struct { +- app string +- base string +- lib string +- untar string +- root string +-} +- +-type layer struct { +- all []string +- base []string +- lib []string +- app []string +-} +- +-type imageInfo struct { +- layers layer +- repoTags []string +- config string +- name string +- tag string +- nameTag string +- topLayer string +-} +- +-// imageManifest return image's manifest info +-type imageManifest struct { +- Config string `json:"Config"` +- RepoTags []string `json:"RepoTags"` +- Layers []string `json:"Layers"` +- // Not shown in the json file +- HashMap map[string]string `json:"-"` ++ sep separator.Saver + } + +-type imageLayersMap map[string]string +- +-type tarballInfo struct { +- AppTarName string `json:"app"` +- AppHash string `json:"appHash"` +- AppLayers []string `json:"appLayers"` +- LibTarName string `json:"lib"` +- LibHash string `json:"libHash"` +- LibImageName string `json:"libImageName"` +- LibLayers []string `json:"libLayers"` +- BaseTarName string `json:"base"` +- BaseHash string `json:"baseHash"` +- BaseImageName string `json:"baseImageName"` +- BaseLayer string `json:"baseLayer"` +-} +- +-func (b *Backend) getSaveOptions(req *pb.SaveRequest) saveOptions { +- var opt = saveOptions{ ++func (b *Backend) getSaveOptions(req *pb.SaveRequest) SaveOptions { ++ var opt = SaveOptions{ + sysCtx: image.GetSystemContext(), + localStore: b.daemon.localStore, + saveID: req.GetSaveID(), +@@ -160,24 +74,7 @@ func (b *Backend) getSaveOptions(req *pb.SaveRequest) saveOptions { + return opt + } + +- opt.sep = separatorSave{ +- base: req.GetSep().GetBase(), +- lib: req.GetSep().GetLib(), +- dest: req.GetSep().GetDest(), +- log: logrus.WithFields(logrus.Fields{"SaveID": req.GetSaveID()}), +- enabled: req.GetSep().GetEnabled(), +- renameFile: req.GetSep().GetRename(), +- } +- +- // save separated image +- tmpRoot := filepath.Join(b.daemon.opts.DataRoot, filepath.Join(dataRootTmpDirPrefix, req.GetSaveID())) +- untar := filepath.Join(tmpRoot, untarTempDirName) +- appDir := filepath.Join(tmpRoot, appUntarTempDirName) +- baseDir := filepath.Join(tmpRoot, baseUntarTempDirName) +- libDir := filepath.Join(tmpRoot, libUntarTempDirName) +- +- opt.sep.tmpDir = imageTmpDir{app: appDir, base: baseDir, lib: libDir, untar: untar, root: tmpRoot} +- opt.outputPath = filepath.Join(untar, unionTarName) ++ opt.sep, opt.outputPath = separator.GetSepSaveOptions(req, opt.logEntry, b.daemon.opts.DataRoot) + + return opt + } +@@ -213,15 +110,14 @@ func (b *Backend) Save(req *pb.SaveRequest, stream pb.Control_SaveServer) (err e + return err + } + +- // separatorSave found +- if opts.sep.enabled { +- return separateImage(opts) ++ if opts.sep.Enabled() { ++ return opts.sep.SeparateImage(opts.localStore, opts.oriImgList, opts.outputPath) + } + + return nil + } + +-func exportHandler(ctx context.Context, opts *saveOptions) func() error { ++func exportHandler(ctx context.Context, opts *SaveOptions) func() error { + return func() error { + defer func() { + opts.logger.CloseContent() +@@ -278,7 +174,7 @@ func messageHandler(stream 
pb.Control_SaveServer, cliLogger *logger.Logger) func + } + } + +-func (opts *saveOptions) manage() error { ++func (opts *SaveOptions) manage() error { + if err := opts.checkImageNameIsID(); err != nil { + return err + } +@@ -288,29 +184,22 @@ func (opts *saveOptions) manage() error { + if err := opts.filterImageName(); err != nil { + return err + } +- if err := opts.loadRenameFile(); err != nil { ++ if err := opts.sep.LoadRenameFile(); err != nil { + return err + } + + return nil + } + +-func (opts *saveOptions) checkImageNameIsID() error { ++func (opts *SaveOptions) checkImageNameIsID() error { + imageNames := opts.oriImgList +- if opts.sep.enabled { +- if len(opts.sep.base) != 0 { +- imageNames = append(imageNames, opts.sep.base) +- } +- if len(opts.sep.lib) != 0 { +- imageNames = append(imageNames, opts.sep.lib) +- } +- } ++ imageNames = append(imageNames, opts.sep.ImageNames()...) + for _, name := range imageNames { + _, img, err := image.FindImage(opts.localStore, name) + if err != nil { + return errors.Wrapf(err, "check image name failed when finding image name %q", name) + } +- if strings.HasPrefix(img.ID, name) && opts.sep.enabled { ++ if strings.HasPrefix(img.ID, name) && opts.sep.Enabled() { + return errors.Errorf("using image ID %q as image name to save separated image is not allowed", name) + } + } +@@ -318,7 +207,7 @@ func (opts *saveOptions) checkImageNameIsID() error { + return nil + } + +-func (opts *saveOptions) setFormat() error { ++func (opts *SaveOptions) setFormat() error { + switch opts.format { + case constant.DockerTransport: + opts.format = constant.DockerArchiveTransport +@@ -331,7 +220,7 @@ func (opts *saveOptions) setFormat() error { + return nil + } + +-func (opts *saveOptions) filterImageName() error { ++func (opts *SaveOptions) filterImageName() error { + if opts.format == constant.OCIArchiveTransport { + opts.finalImageOrdered = opts.oriImgList + return nil +@@ -367,507 +256,3 @@ func (opts *saveOptions) filterImageName() error { + + return nil + } +- +-func (opts *saveOptions) loadRenameFile() error { +- if len(opts.sep.renameFile) != 0 { +- var reName []renames +- if err := util.LoadJSONFile(opts.sep.renameFile, &reName); err != nil { +- return errors.Wrap(err, "check rename file failed") +- } +- opts.sep.renameData = reName +- } +- +- return nil +-} +- +-func (s *separatorSave) getLayerHashFromStorage(store *store.Store, name string) ([]string, error) { +- if len(name) == 0 { +- return nil, nil +- } +- _, img, err := image.FindImage(store, name) +- if err != nil { +- return nil, err +- } +- +- layer, err := store.Layer(img.TopLayer) +- if err != nil { +- return nil, errors.Wrapf(err, "failed to get top layer for image %s", name) +- } +- +- var layers []string +- // add each layer in the layers until reach the root layer +- for layer != nil { +- fields := strings.Split(layer.UncompressedDigest.String(), ":") +- if len(fields) != 2 { +- return nil, errors.Errorf("error format of layer of image %s", name) +- } +- layers = append(layers, fields[1]) +- if layer.Parent == "" { +- break +- } +- layer, err = store.Layer(layer.Parent) +- if err != nil { +- return nil, errors.Wrapf(err, "unable to read layer %q", layer.Parent) +- } +- } +- +- return layers, nil +-} +- +-// process physic file +-func (s *separatorSave) constructLayerMap() (map[string]string, error) { +- path := s.tmpDir.untar +- files, rErr := ioutil.ReadDir(path) +- if rErr != nil { +- return nil, rErr +- } +- +- var layerMap = make(map[string]string, len(files)) +- // process layer's file +- for 
_, file := range files { +- if file.IsDir() { +- layerFile := filepath.Join(path, file.Name(), layerTarName) +- oriFile, err := os.Readlink(layerFile) +- if err != nil { +- return nil, err +- } +- physicFile := filepath.Join(path, file.Name(), oriFile) +- layerMap[filepath.Base(physicFile)] = filepath.Join(file.Name(), layerTarName) +- if err := os.Rename(physicFile, layerFile); err != nil { +- return nil, err +- } +- } +- } +- +- return layerMap, nil +-} +- +-func getLayerHashFromTar(layerMap map[string]string, layer []string) map[string]string { +- hashMap := make(map[string]string, len(layer)) +- // first reverse map since it's is unique +- revMap := make(map[string]string, len(layerMap)) +- for k, v := range layerMap { +- revMap[v] = k +- } +- for _, l := range layer { +- if v, ok := revMap[l]; ok { +- // format is like xxx(hash): xxx/layer.tar +- hashMap[strings.TrimSuffix(v, tarSuffix)] = l +- } +- } +- +- return hashMap +-} +- +-func (s *separatorSave) adjustLayers() ([]imageManifest, error) { +- s.log.Info("Adjusting layers for saving separated image") +- +- layerMap, err := s.constructLayerMap() +- if err != nil { +- s.log.Errorf("Process layers failed: %v", err) +- return nil, err +- } +- +- // process manifest file +- var man []imageManifest +- if lErr := util.LoadJSONFile(filepath.Join(s.tmpDir.untar, manifestDataFile), &man); lErr != nil { +- return nil, lErr +- } +- +- for i, img := range man { +- layers := make([]string, len(img.Layers)) +- for i, layer := range img.Layers { +- layers[i] = layerMap[layer] +- } +- man[i].Layers = layers +- man[i].HashMap = getLayerHashFromTar(layerMap, layers) +- } +- buf, err := json.Marshal(&man) +- if err != nil { +- return nil, err +- } +- if err := ioutils.AtomicWriteFile(manifestFile, buf, constant.DefaultSharedFileMode); err != nil { +- return nil, err +- } +- +- return man, nil +-} +- +-func separateImage(opt saveOptions) (err error) { +- s := &opt.sep +- s.log.Infof("Start saving separated images %v", opt.oriImgList) +- +- if err = os.MkdirAll(s.dest, constant.DefaultRootDirMode); err != nil { +- return err +- } +- +- defer func() { +- if tErr := os.RemoveAll(s.tmpDir.root); tErr != nil && !os.IsNotExist(tErr) { +- s.log.Warnf("Removing save tmp directory %q failed: %v", s.tmpDir.root, tErr) +- } +- if err != nil { +- if rErr := os.RemoveAll(s.dest); rErr != nil && !os.IsNotExist(rErr) { +- s.log.Warnf("Removing save dest directory %q failed: %v", s.dest, rErr) +- } +- } +- }() +- if err = util.UnpackFile(opt.outputPath, s.tmpDir.untar, archive.Gzip, true); err != nil { +- return errors.Wrapf(err, "unpack %q failed", opt.outputPath) +- } +- manifest, aErr := s.adjustLayers() +- if aErr != nil { +- return errors.Wrap(aErr, "adjust layers failed") +- } +- +- imgInfos, cErr := s.constructImageInfos(manifest, opt.localStore) +- if cErr != nil { +- return errors.Wrap(cErr, "process image infos failed") +- } +- +- if err = s.processImageLayers(imgInfos); err != nil { +- return err +- } +- +- return nil +-} +- +-func (s *separatorSave) processImageLayers(imgInfos map[string]imageInfo) error { +- s.log.Info("Processing image layers") +- var ( +- tarballs = make(map[string]tarballInfo) +- baseImagesMap = make(imageLayersMap, 1) +- libImagesMap = make(imageLayersMap, 1) +- appImagesMap = make(imageLayersMap, 1) +- ) +- var sortedKey []string +- for k := range imgInfos { +- sortedKey = append(sortedKey, k) +- } +- sort.Strings(sortedKey) +- for _, k := range sortedKey { +- info := imgInfos[k] +- if err := s.clearTempDirs(); err != nil { +- 
return errors.Wrap(err, "clear tmp dirs failed") +- } +- var t tarballInfo +- // process base +- if err := info.processBaseImg(s, baseImagesMap, &t); err != nil { +- return errors.Wrapf(err, "process base images %s failed", info.nameTag) +- } +- // process lib +- if err := info.processLibImg(s, libImagesMap, &t); err != nil { +- return errors.Wrapf(err, "process lib images %s failed", info.nameTag) +- } +- // process app +- if err := info.processAppImg(s, appImagesMap, &t); err != nil { +- return errors.Wrapf(err, "process app images %s failed", info.nameTag) +- } +- tarballs[info.nameTag] = t +- } +- buf, err := json.Marshal(&tarballs) +- if err != nil { +- return err +- } +- // manifest file +- manifestFile := filepath.Join(s.dest, manifestFile) +- if err := ioutils.AtomicWriteFile(manifestFile, buf, constant.DefaultRootFileMode); err != nil { +- return err +- } +- +- s.log.Info("Save separated image succeed") +- return nil +-} +- +-func (s *separatorSave) clearTempDirs() error { +- dirs := []string{s.tmpDir.base, s.tmpDir.app, s.tmpDir.lib} +- for _, dir := range dirs { +- if err := os.RemoveAll(dir); err != nil { +- return err +- } +- if err := os.MkdirAll(dir, constant.DefaultRootDirMode); err != nil { +- return err +- } +- } +- return nil +-} +- +-// processTarName will trim the prefix of image name like example.io/library/myapp:v1 +-// after processed, the name will be myapp_v1_suffix +-// mind: suffix here should not contain path separator +-func (info imageInfo) processTarName(suffix string) string { +- originNames := strings.Split(info.name, string(os.PathSeparator)) +- originTags := strings.Split(info.tag, string(os.PathSeparator)) +- // get the last element of the list, which mast be the right name without prefix +- name := originNames[len(originNames)-1] +- tag := originTags[len(originTags)-1] +- +- return fmt.Sprintf("%s_%s%s", name, tag, suffix) +-} +- +-func (info *imageInfo) processBaseImg(sep *separatorSave, baseImagesMap map[string]string, tarball *tarballInfo) error { +- // process base +- tarball.BaseImageName = sep.base +- if len(info.layers.base) != 0 { +- sep.log.Infof("Base image %s has %d layers", sep.base, len(info.layers.base)) +- tarball.BaseLayer = info.layers.base[0] +- } +- for _, layerID := range info.layers.base { +- if baseImg, ok := baseImagesMap[layerID]; !ok { +- srcLayerPath := filepath.Join(sep.tmpDir.untar, layerID) +- destLayerPath := filepath.Join(sep.tmpDir.base, layerID) +- if err := os.Rename(srcLayerPath, destLayerPath); err != nil { +- return err +- } +- baseTarName := info.processTarName(baseTarNameSuffix) +- baseTarName = sep.rename(baseTarName) +- baseTarPath := filepath.Join(sep.dest, baseTarName) +- if err := util.PackFiles(sep.tmpDir.base, baseTarPath, archive.Gzip, true); err != nil { +- return err +- } +- baseImagesMap[layerID] = baseTarPath +- tarball.BaseTarName = baseTarName +- digest, err := util.SHA256Sum(baseTarPath) +- if err != nil { +- return errors.Wrapf(err, "check sum for new base image %s failed", baseTarName) +- } +- tarball.BaseHash = digest +- } else { +- tarball.BaseTarName = filepath.Base(baseImg) +- digest, err := util.SHA256Sum(baseImg) +- if err != nil { +- return errors.Wrapf(err, "check sum for reuse base image %s failed", baseImg) +- } +- tarball.BaseHash = digest +- } +- } +- +- return nil +-} +- +-func (info *imageInfo) processLibImg(sep *separatorSave, libImagesMap map[string]string, tarball *tarballInfo) error { +- // process lib +- if info.layers.lib == nil { +- return nil +- } +- +- tarball.LibImageName = 
sep.lib +- sep.log.Infof("Lib image %s has %d layers", sep.lib, len(info.layers.lib)) +- for _, layerID := range info.layers.lib { +- tarball.LibLayers = append(tarball.LibLayers, layerID) +- if libImg, ok := libImagesMap[layerID]; !ok { +- srcLayerPath := filepath.Join(sep.tmpDir.untar, layerID) +- destLayerPath := filepath.Join(sep.tmpDir.lib, layerID) +- if err := os.Rename(srcLayerPath, destLayerPath); err != nil { +- return err +- } +- libTarName := info.processTarName(libTarNameSuffix) +- libTarName = sep.rename(libTarName) +- libTarPath := filepath.Join(sep.dest, libTarName) +- if err := util.PackFiles(sep.tmpDir.lib, libTarPath, archive.Gzip, true); err != nil { +- return err +- } +- libImagesMap[layerID] = libTarPath +- tarball.LibTarName = libTarName +- digest, err := util.SHA256Sum(libTarPath) +- if err != nil { +- return errors.Wrapf(err, "check sum for lib image %s failed", sep.lib) +- } +- tarball.LibHash = digest +- } else { +- tarball.LibTarName = filepath.Base(libImg) +- digest, err := util.SHA256Sum(libImg) +- if err != nil { +- return errors.Wrapf(err, "check sum for lib image %s failed", sep.lib) +- } +- tarball.LibHash = digest +- } +- } +- +- return nil +-} +- +-func (info *imageInfo) processAppImg(sep *separatorSave, appImagesMap map[string]string, tarball *tarballInfo) error { +- // process app +- sep.log.Infof("App image %s has %d layers", info.nameTag, len(info.layers.app)) +- appTarName := info.processTarName(appTarNameSuffix) +- appTarName = sep.rename(appTarName) +- appTarPath := filepath.Join(sep.dest, appTarName) +- for _, layerID := range info.layers.app { +- srcLayerPath := filepath.Join(sep.tmpDir.untar, layerID) +- destLayerPath := filepath.Join(sep.tmpDir.app, layerID) +- if err := os.Rename(srcLayerPath, destLayerPath); err != nil { +- if appImg, ok := appImagesMap[layerID]; ok { +- return errors.Errorf("lib layers %s already saved in %s for image %s", +- layerID, appImg, info.nameTag) +- } +- } +- appImagesMap[layerID] = appTarPath +- tarball.AppLayers = append(tarball.AppLayers, layerID) +- } +- // create config file +- if err := info.createManifestFile(sep); err != nil { +- return err +- } +- if err := info.createRepositoriesFile(sep); err != nil { +- return err +- } +- +- srcConfigPath := filepath.Join(sep.tmpDir.untar, info.config) +- destConfigPath := filepath.Join(sep.tmpDir.app, info.config) +- if err := os.Rename(srcConfigPath, destConfigPath); err != nil { +- return err +- } +- +- if err := util.PackFiles(sep.tmpDir.app, appTarPath, archive.Gzip, true); err != nil { +- return err +- } +- tarball.AppTarName = appTarName +- digest, err := util.SHA256Sum(appTarPath) +- if err != nil { +- return errors.Wrapf(err, "check sum for app image %s failed", info.nameTag) +- } +- tarball.AppHash = digest +- +- return nil +-} +- +-func (info imageInfo) createRepositoriesFile(sep *separatorSave) error { +- // create repositories +- type repoItem map[string]string +- repo := make(map[string]repoItem, 1) +- item := make(repoItem, 1) +- if _, ok := item[info.tag]; !ok { +- item[info.tag] = info.topLayer +- } +- repo[info.name] = item +- buf, err := json.Marshal(repo) +- if err != nil { +- return err +- } +- repositoryFile := filepath.Join(sep.tmpDir.app, repositoriesFile) +- if err := ioutils.AtomicWriteFile(repositoryFile, buf, constant.DefaultRootFileMode); err != nil { +- return err +- } +- return nil +-} +- +-func (info imageInfo) createManifestFile(sep *separatorSave) error { +- // create manifest.json +- var s = imageManifest{ +- Config: info.config, +- 
Layers: info.layers.all, +- RepoTags: info.repoTags, +- } +- var m []imageManifest +- m = append(m, s) +- buf, err := json.Marshal(&m) +- if err != nil { +- return err +- } +- data := filepath.Join(sep.tmpDir.app, manifestDataFile) +- if err := ioutils.AtomicWriteFile(data, buf, constant.DefaultRootFileMode); err != nil { +- return err +- } +- return nil +-} +- +-func getLayersID(layer []string) []string { +- var after = make([]string, len(layer)) +- for i, v := range layer { +- after[i] = strings.Split(v, "/")[0] +- } +- return after +-} +- +-func (s *separatorSave) constructSingleImgInfo(mani imageManifest, store *store.Store) (imageInfo, error) { +- var libLayers, appLayers []string +- // image name should not be empty here +- if len(mani.RepoTags) == 0 { +- return imageInfo{}, errors.New("image name and tag is empty") +- } +- // if there is more than one repoTag, will use first one as image name +- imageRepoFields := strings.Split(mani.RepoTags[0], ":") +- imageLayers := getLayersID(mani.Layers) +- +- libs, bases, err := s.checkLayersHash(mani.HashMap, store) +- if err != nil { +- return imageInfo{}, errors.Wrap(err, "compare layers failed") +- } +- baseLayers := imageLayers[0:len(bases)] +- if len(libs) != 0 { +- libLayers = imageLayers[len(bases):len(libs)] +- appLayers = imageLayers[len(libs):] +- } else { +- libLayers = nil +- appLayers = imageLayers[len(bases):] +- } +- +- return imageInfo{ +- config: mani.Config, +- repoTags: mani.RepoTags, +- nameTag: mani.RepoTags[0], +- name: strings.Join(imageRepoFields[0:len(imageRepoFields)-1], ":"), +- tag: imageRepoFields[len(imageRepoFields)-1], +- layers: layer{app: appLayers, lib: libLayers, base: baseLayers, all: mani.Layers}, +- topLayer: imageLayers[len(imageLayers)-1], +- }, nil +-} +- +-func (s *separatorSave) checkLayersHash(layerHashMap map[string]string, store *store.Store) ([]string, []string, error) { +- libHash, err := s.getLayerHashFromStorage(store, s.lib) +- if err != nil { +- return nil, nil, errors.Wrapf(err, "get lib image %s layers failed", s.lib) +- } +- baseHash, err := s.getLayerHashFromStorage(store, s.base) +- if err != nil { +- return nil, nil, errors.Wrapf(err, "get base image %s layers failed", s.base) +- } +- if len(baseHash) > 1 { +- return nil, nil, errors.Errorf("number of base layers %d more than one", len(baseHash)) +- } +- if len(libHash) >= len(layerHashMap) || len(baseHash) >= len(layerHashMap) { +- return nil, nil, errors.Errorf("number of base or lib layers is equal or greater than saved app layers") +- } +- +- for _, l := range libHash { +- if _, ok := layerHashMap[l]; !ok { +- return nil, nil, errors.Errorf("dismatch checksum for lib image %s", s.lib) +- } +- } +- for _, b := range baseHash { +- if _, ok := layerHashMap[b]; !ok { +- return nil, nil, errors.Errorf("dismatch checksum for base image %s", s.base) +- } +- } +- +- return libHash, baseHash, nil +-} +- +-func (s *separatorSave) constructImageInfos(manifest []imageManifest, store *store.Store) (map[string]imageInfo, error) { +- s.log.Info("Constructing image info") +- +- var imgInfos = make(map[string]imageInfo, 1) +- for _, mani := range manifest { +- imgInfo, err := s.constructSingleImgInfo(mani, store) +- if err != nil { +- s.log.Errorf("Constructing image info failed: %v", err) +- return nil, errors.Wrap(err, "construct image info failed") +- } +- if _, ok := imgInfos[imgInfo.nameTag]; !ok { +- imgInfos[imgInfo.nameTag] = imgInfo +- } +- } +- return imgInfos, nil +-} +- +-func (s *separatorSave) rename(name string) string { +- if 
len(s.renameData) != 0 { +- s.log.Info("Renaming image tarballs") +- for _, item := range s.renameData { +- if item.Name == name { +- return item.Rename +- } +- } +- } +- return name +-} +diff --git a/daemon/separator/image_info.go b/daemon/separator/image_info.go +new file mode 100644 +index 0000000..53bcd7a +--- /dev/null ++++ b/daemon/separator/image_info.go +@@ -0,0 +1,235 @@ ++// Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. ++// isula-build licensed under the Mulan PSL v2. ++// You can use this software according to the terms and conditions of the Mulan PSL v2. ++// You may obtain a copy of Mulan PSL v2 at: ++// http://license.coscl.org.cn/MulanPSL2 ++// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++// PURPOSE. ++// See the Mulan PSL v2 for more details. ++// Author: Xiang Li ++// Create: 2021-12-09 ++// Description: This file is handling image info for image separator ++ ++package separator ++ ++import ( ++ "encoding/json" ++ "fmt" ++ "os" ++ "path/filepath" ++ "strings" ++ ++ "github.com/containers/storage/pkg/archive" ++ "github.com/docker/docker/pkg/ioutils" ++ "github.com/pkg/errors" ++ ++ constant "isula.org/isula-build" ++ "isula.org/isula-build/util" ++) ++ ++type layer struct { ++ all []string ++ base []string ++ lib []string ++ app []string ++} ++ ++type imageInfo struct { ++ layers layer ++ repoTags []string ++ config string ++ name string ++ tag string ++ nameTag string ++ topLayer string ++} ++ ++// processTarName will trim the prefix of image name like example.io/library/myapp:v1 ++// after processed, the name will be myapp_v1_suffix ++// mind: suffix here should not contain path separator ++func (info *imageInfo) processTarName(suffix string) string { ++ originNames := strings.Split(info.name, string(os.PathSeparator)) ++ originTags := strings.Split(info.tag, string(os.PathSeparator)) ++ // get the last element of the list, which mast be the right name without prefix ++ name := originNames[len(originNames)-1] ++ tag := originTags[len(originTags)-1] ++ ++ return fmt.Sprintf("%s_%s%s", name, tag, suffix) ++} ++ ++func (info *imageInfo) processBaseImg(sep *Saver, baseImagesMap map[string]string, tarball *tarballInfo) error { ++ // process base ++ tarball.BaseImageName = sep.base ++ if len(info.layers.base) != 0 { ++ sep.log.Infof("Base image %s has %d layers", sep.base, len(info.layers.base)) ++ tarball.BaseLayer = info.layers.base[0] ++ } ++ for _, layerID := range info.layers.base { ++ if baseImg, ok := baseImagesMap[layerID]; !ok { ++ srcLayerPath := filepath.Join(sep.tmpDir.untar, layerID) ++ destLayerPath := filepath.Join(sep.tmpDir.base, layerID) ++ if err := os.Rename(srcLayerPath, destLayerPath); err != nil { ++ return err ++ } ++ baseTarName := info.processTarName(baseTarNameSuffix) ++ baseTarName = sep.getRename(baseTarName) ++ baseTarPath := filepath.Join(sep.dest, baseTarName) ++ if err := util.PackFiles(sep.tmpDir.base, baseTarPath, archive.Gzip, true); err != nil { ++ return err ++ } ++ baseImagesMap[layerID] = baseTarPath ++ tarball.BaseTarName = baseTarName ++ digest, err := util.SHA256Sum(baseTarPath) ++ if err != nil { ++ return errors.Wrapf(err, "check sum for new base image %s failed", baseTarName) ++ } ++ tarball.BaseHash = digest ++ } else { ++ tarball.BaseTarName = filepath.Base(baseImg) ++ digest, err := util.SHA256Sum(baseImg) ++ if err != nil { ++ return 
errors.Wrapf(err, "check sum for reuse base image %s failed", baseImg) ++ } ++ tarball.BaseHash = digest ++ } ++ } ++ ++ return nil ++} ++ ++func (info *imageInfo) processLibImg(sep *Saver, libImagesMap map[string]string, tarball *tarballInfo) error { ++ // process lib ++ if info.layers.lib == nil { ++ return nil ++ } ++ ++ tarball.LibImageName = sep.lib ++ sep.log.Infof("Lib image %s has %d layers", sep.lib, len(info.layers.lib)) ++ for _, layerID := range info.layers.lib { ++ tarball.LibLayers = append(tarball.LibLayers, layerID) ++ if libImg, ok := libImagesMap[layerID]; !ok { ++ srcLayerPath := filepath.Join(sep.tmpDir.untar, layerID) ++ destLayerPath := filepath.Join(sep.tmpDir.lib, layerID) ++ if err := os.Rename(srcLayerPath, destLayerPath); err != nil { ++ return err ++ } ++ libTarName := info.processTarName(libTarNameSuffix) ++ libTarName = sep.getRename(libTarName) ++ libTarPath := filepath.Join(sep.dest, libTarName) ++ if err := util.PackFiles(sep.tmpDir.lib, libTarPath, archive.Gzip, true); err != nil { ++ return err ++ } ++ libImagesMap[layerID] = libTarPath ++ tarball.LibTarName = libTarName ++ digest, err := util.SHA256Sum(libTarPath) ++ if err != nil { ++ return errors.Wrapf(err, "check sum for lib image %s failed", sep.lib) ++ } ++ tarball.LibHash = digest ++ } else { ++ tarball.LibTarName = filepath.Base(libImg) ++ digest, err := util.SHA256Sum(libImg) ++ if err != nil { ++ return errors.Wrapf(err, "check sum for lib image %s failed", sep.lib) ++ } ++ tarball.LibHash = digest ++ } ++ } ++ ++ return nil ++} ++ ++func (info *imageInfo) processAppImg(sep *Saver, appImagesMap map[string]string, tarball *tarballInfo) error { ++ // process app ++ sep.log.Infof("App image %s has %d layers", info.nameTag, len(info.layers.app)) ++ appTarName := info.processTarName(appTarNameSuffix) ++ appTarName = sep.getRename(appTarName) ++ appTarPath := filepath.Join(sep.dest, appTarName) ++ for _, layerID := range info.layers.app { ++ srcLayerPath := filepath.Join(sep.tmpDir.untar, layerID) ++ destLayerPath := filepath.Join(sep.tmpDir.app, layerID) ++ if err := os.Rename(srcLayerPath, destLayerPath); err != nil { ++ if appImg, ok := appImagesMap[layerID]; ok { ++ return errors.Errorf("lib layers %s already saved in %s for image %s", ++ layerID, appImg, info.nameTag) ++ } ++ } ++ appImagesMap[layerID] = appTarPath ++ tarball.AppLayers = append(tarball.AppLayers, layerID) ++ } ++ // create config file ++ if err := info.createManifestFile(sep); err != nil { ++ return err ++ } ++ if err := info.createRepositoriesFile(sep); err != nil { ++ return err ++ } ++ ++ srcConfigPath := filepath.Join(sep.tmpDir.untar, info.config) ++ destConfigPath := filepath.Join(sep.tmpDir.app, info.config) ++ if err := os.Rename(srcConfigPath, destConfigPath); err != nil { ++ return err ++ } ++ ++ if err := util.PackFiles(sep.tmpDir.app, appTarPath, archive.Gzip, true); err != nil { ++ return err ++ } ++ tarball.AppTarName = appTarName ++ digest, err := util.SHA256Sum(appTarPath) ++ if err != nil { ++ return errors.Wrapf(err, "check sum for app image %s failed", info.nameTag) ++ } ++ tarball.AppHash = digest ++ ++ return nil ++} ++ ++func (info *imageInfo) createRepositoriesFile(sep *Saver) error { ++ // create repositories ++ type repoItem map[string]string ++ repo := make(map[string]repoItem, 1) ++ item := make(repoItem, 1) ++ if _, ok := item[info.tag]; !ok { ++ item[info.tag] = info.topLayer ++ } ++ repo[info.name] = item ++ buf, err := json.Marshal(repo) ++ if err != nil { ++ return err ++ } ++ repositoryFile := 
filepath.Join(sep.tmpDir.app, repositoriesFile) ++ if err := ioutils.AtomicWriteFile(repositoryFile, buf, constant.DefaultRootFileMode); err != nil { ++ return err ++ } ++ return nil ++} ++ ++// imageManifest return image's manifest info ++type imageManifest struct { ++ Config string `json:"Config"` ++ RepoTags []string `json:"RepoTags"` ++ Layers []string `json:"Layers"` ++ // Not shown in the json file ++ HashMap map[string]string `json:"-"` ++} ++ ++func (info *imageInfo) createManifestFile(sep *Saver) error { ++ // create manifest.json ++ var s = imageManifest{ ++ Config: info.config, ++ Layers: info.layers.all, ++ RepoTags: info.repoTags, ++ } ++ var m []imageManifest ++ m = append(m, s) ++ buf, err := json.Marshal(&m) ++ if err != nil { ++ return err ++ } ++ data := filepath.Join(sep.tmpDir.app, manifestDataFile) ++ if err := ioutils.AtomicWriteFile(data, buf, constant.DefaultRootFileMode); err != nil { ++ return err ++ } ++ return nil ++} +diff --git a/daemon/separator/load.go b/daemon/separator/load.go +new file mode 100644 +index 0000000..d02dccf +--- /dev/null ++++ b/daemon/separator/load.go +@@ -0,0 +1,283 @@ ++// Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. ++// isula-build licensed under the Mulan PSL v2. ++// You can use this software according to the terms and conditions of the Mulan PSL v2. ++// You may obtain a copy of Mulan PSL v2 at: ++// http://license.coscl.org.cn/MulanPSL2 ++// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++// PURPOSE. ++// See the Mulan PSL v2 for more details. ++// Author: Xiang Li ++// Create: 2021-12-09 ++// Description: This file is handling "load" part for image separator at server side ++ ++package separator ++ ++import ( ++ "io/ioutil" ++ "os" ++ "path/filepath" ++ ++ "github.com/containers/storage/pkg/archive" ++ filepath_securejoin "github.com/cyphar/filepath-securejoin" ++ "github.com/pkg/errors" ++ "github.com/sirupsen/logrus" ++ ++ constant "isula.org/isula-build" ++ pb "isula.org/isula-build/api/services" ++ "isula.org/isula-build/image" ++ "isula.org/isula-build/util" ++) ++ ++type loadImageTmpDir struct { ++ app string ++ base string ++ lib string ++ root string ++} ++ ++// Loader the main instance for loading separated images ++type Loader struct { ++ log *logrus.Entry ++ tmpDir loadImageTmpDir ++ info tarballInfo ++ appName string ++ basePath string ++ appPath string ++ libPath string ++ dir string ++ skipCheck bool ++ enabled bool ++} ++ ++// GetSepLoadOptions returns Loader instance from LoadRequest ++func GetSepLoadOptions(req *pb.LoadRequest, logEntry *logrus.Entry, dataRoot string) (Loader, error) { ++ var tmpRoot = filepath.Join(dataRoot, filepath.Join(constant.DataRootTmpDirPrefix, req.GetLoadID())) ++ var sep = Loader{ ++ appName: req.GetSep().GetApp(), ++ basePath: req.GetSep().GetBase(), ++ libPath: req.GetSep().GetLib(), ++ dir: req.GetSep().GetDir(), ++ log: logEntry, ++ tmpDir: loadImageTmpDir{ ++ root: tmpRoot, ++ base: filepath.Join(tmpRoot, tmpBaseDirName), ++ lib: filepath.Join(tmpRoot, tmpLibDirName), ++ app: filepath.Join(tmpRoot, tmpAppDirName), ++ }, ++ skipCheck: req.GetSep().GetSkipCheck(), ++ enabled: req.GetSep().GetEnabled(), ++ } ++ ++ // check image name and add "latest" tag if not present ++ _, appImgName, err := image.GetNamedTaggedReference(sep.appName) ++ if err != nil { ++ return Loader{}, err ++ } ++ if len(appImgName) == 0 
{ ++ return Loader{}, errors.New("app image name should not be empty") ++ } ++ sep.appName = appImgName ++ return sep, nil ++} ++ ++// LoadSeparatedImage the main method of Loader, tries to load the separated images, and returns the path of the ++// reconstructed image tarball for later handling ++func (l *Loader) LoadSeparatedImage() (string, error) { ++ l.log.Infof("Starting load separated image %s", l.appName) ++ ++ // load manifest file to get tarball info ++ if err := l.getTarballInfo(); err != nil { ++ return "", errors.Wrap(err, "failed to get tarball info") ++ } ++ if err := l.constructLayerPath(); err != nil { ++ return "", err ++ } ++ // checksum for image tarballs ++ if err := l.tarballCheckSum(); err != nil { ++ return "", err ++ } ++ // process image tarballs and get final constructed image tarball ++ return l.processTarballs() ++} ++ ++func (l *Loader) getTarballInfo() error { ++ manifest, err := filepath_securejoin.SecureJoin(l.dir, manifestFile) ++ if err != nil { ++ return errors.Wrap(err, "join manifest file path failed") ++ } ++ ++ var t = make(map[string]tarballInfo, 1) ++ if err = util.LoadJSONFile(manifest, &t); err != nil { ++ return errors.Wrap(err, "load manifest file failed") ++ } ++ ++ tarball, ok := t[l.appName] ++ if !ok { ++ return errors.Errorf("failed to find app image %s", l.appName) ++ } ++ if len(tarball.AppTarName) == 0 { ++ return errors.Errorf("app image %s tarball can not be empty", tarball.AppTarName) ++ } ++ if len(tarball.BaseTarName) == 0 { ++ return errors.Errorf("base image %s tarball can not be empty", tarball.BaseImageName) ++ } ++ l.info = tarball ++ ++ return nil ++} ++ ++func (l *Loader) joinPath(path, tarName, str string, canBeEmpty bool) (string, error) { ++ if len(path) != 0 { ++ return path, nil ++ } ++ l.log.Infof("%s image path is empty, use path from manifest", str) ++ return filepath_securejoin.SecureJoin(l.dir, tarName) ++} ++ ++func (l *Loader) constructLayerPath() error { ++ l.log.Infof("Construct image layer pathes for %s\n", l.appName) ++ ++ var err error ++ if l.basePath, err = l.joinPath(l.basePath, l.info.BaseTarName, "Base", false); err != nil { ++ return err ++ } ++ if l.libPath, err = l.joinPath(l.libPath, l.info.LibTarName, "Lib", true); err != nil { ++ return err ++ } ++ if l.appPath, err = l.joinPath(l.appPath, l.info.AppTarName, "App", false); err != nil { ++ return err ++ } ++ ++ return nil ++} ++ ++func (l *Loader) tarballCheckSum() error { ++ if l.skipCheck { ++ l.log.Info("Skip checksum for tarballs") ++ return nil ++ } ++ ++ type checkInfo struct { ++ path string ++ hash string ++ str string ++ canBeEmpty bool ++ } ++ checkLen := 3 ++ var checkList = make([]checkInfo, 0, checkLen) ++ checkList = append(checkList, checkInfo{path: l.basePath, hash: l.info.BaseHash, canBeEmpty: false, str: "base"}) ++ checkList = append(checkList, checkInfo{path: l.libPath, hash: l.info.LibHash, canBeEmpty: true, str: "lib"}) ++ checkList = append(checkList, checkInfo{path: l.appPath, hash: l.info.AppHash, canBeEmpty: false, str: "app"}) ++ for _, p := range checkList { ++ if len(p.path) == 0 && !p.canBeEmpty { ++ return errors.Errorf("%s image tarball path can not be empty", p.str) ++ } ++ if len(p.path) != 0 { ++ if err := util.CheckSum(p.path, p.hash); err != nil { ++ return errors.Wrapf(err, "check sum for file %q failed", p.path) ++ } ++ } ++ } ++ ++ return nil ++} ++ ++func (l *Loader) processTarballs() (string, error) { ++ if err := l.unpackTarballs(); err != nil { ++ return "", err ++ } ++ ++ if err := l.reconstructImage(); 
err != nil { ++ return "", err ++ } ++ ++ // pack app image to tarball ++ tarPath := filepath.Join(l.tmpDir.root, unionCompressedTarName) ++ if err := util.PackFiles(l.tmpDir.base, tarPath, archive.Gzip, true); err != nil { ++ return "", err ++ } ++ ++ return tarPath, nil ++} ++ ++func (l *Loader) unpackTarballs() error { ++ if err := l.makeTempDir(); err != nil { ++ return errors.Wrap(err, "failed to make temporary directories") ++ } ++ ++ type unpackInfo struct{ path, dir, str string } ++ unpackLen := 3 ++ var unpackList = make([]unpackInfo, 0, unpackLen) ++ unpackList = append(unpackList, unpackInfo{path: l.basePath, dir: l.tmpDir.base, str: "base"}) ++ unpackList = append(unpackList, unpackInfo{path: l.libPath, dir: l.tmpDir.lib, str: "lib"}) ++ unpackList = append(unpackList, unpackInfo{path: l.appPath, dir: l.tmpDir.app, str: "app"}) ++ ++ for _, p := range unpackList { ++ if len(p.path) != 0 { ++ if err := util.UnpackFile(p.path, p.dir, archive.Gzip, false); err != nil { ++ return errors.Wrapf(err, "unpack %s image tarball %q failed", p.str, p.path) ++ } ++ } ++ } ++ ++ return nil ++} ++ ++func (l *Loader) reconstructImage() error { ++ files, err := ioutil.ReadDir(l.tmpDir.app) ++ if err != nil { ++ return err ++ } ++ ++ for _, f := range files { ++ src := filepath.Join(l.tmpDir.app, f.Name()) ++ dest := filepath.Join(l.tmpDir.base, f.Name()) ++ if err := os.Rename(src, dest); err != nil { ++ return errors.Wrapf(err, "reconstruct app file %q failed", l.info.AppTarName) ++ } ++ } ++ ++ if len(l.libPath) != 0 { ++ files, err := ioutil.ReadDir(l.tmpDir.lib) ++ if err != nil { ++ return err ++ } ++ ++ for _, f := range files { ++ src := filepath.Join(l.tmpDir.lib, f.Name()) ++ dest := filepath.Join(l.tmpDir.base, f.Name()) ++ if err := os.Rename(src, dest); err != nil { ++ return errors.Wrapf(err, "reconstruct lib file %q failed", l.info.LibTarName) ++ } ++ } ++ } ++ ++ return nil ++} ++ ++func (l *Loader) makeTempDir() error { ++ dirs := []string{l.tmpDir.root, l.tmpDir.app, l.tmpDir.base, l.tmpDir.lib} ++ for _, dir := range dirs { ++ if err := os.MkdirAll(dir, constant.DefaultRootDirMode); err != nil { ++ return err ++ } ++ } ++ ++ return nil ++} ++ ++// AppName returns the AppName of Loader ++func (l *Loader) AppName() string { ++ return l.appName ++} ++ ++// TmpDirRoot returns the tmpDir.root of Loader ++func (l *Loader) TmpDirRoot() string { ++ return l.tmpDir.root ++} ++ ++// Enabled returns whether separated-image feature is enabled ++func (l *Loader) Enabled() bool { ++ return l.enabled ++} +diff --git a/daemon/separator/save.go b/daemon/separator/save.go +new file mode 100644 +index 0000000..a455335 +--- /dev/null ++++ b/daemon/separator/save.go +@@ -0,0 +1,407 @@ ++// Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. ++// isula-build licensed under the Mulan PSL v2. ++// You can use this software according to the terms and conditions of the Mulan PSL v2. ++// You may obtain a copy of Mulan PSL v2 at: ++// http://license.coscl.org.cn/MulanPSL2 ++// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++// PURPOSE. ++// See the Mulan PSL v2 for more details. 
++// Author: Xiang Li ++// Create: 2021-12-09 ++// Description: This file is handling "save" part for image separator at server side ++ ++package separator ++ ++import ( ++ "encoding/json" ++ "io/ioutil" ++ "os" ++ "path/filepath" ++ "sort" ++ "strings" ++ ++ "github.com/containers/storage/pkg/archive" ++ "github.com/docker/docker/pkg/ioutils" ++ "github.com/pkg/errors" ++ "github.com/sirupsen/logrus" ++ ++ constant "isula.org/isula-build" ++ pb "isula.org/isula-build/api/services" ++ "isula.org/isula-build/image" ++ "isula.org/isula-build/store" ++ "isula.org/isula-build/util" ++) ++ ++// Saver the main instance for saving separated images ++type Saver struct { ++ log *logrus.Entry ++ renameData []renames ++ tmpDir imageTmpDir ++ base string ++ lib string ++ dest string ++ renameFile string ++ enabled bool ++} ++ ++type renames struct { ++ OriName string `json:"name"` ++ NewName string `json:"rename"` ++} ++ ++type imageTmpDir struct { ++ app string ++ base string ++ lib string ++ untar string ++ root string ++} ++ ++type imageLayersMap map[string]string ++ ++// GetSepSaveOptions returns Save instance from SaveRequest ++func GetSepSaveOptions(req *pb.SaveRequest, logEntry *logrus.Entry, dataRoot string) (Saver, string) { ++ var tmpRoot = filepath.Join(dataRoot, filepath.Join(constant.DataRootTmpDirPrefix, req.GetSaveID())) ++ var sep = Saver{ ++ base: req.GetSep().GetBase(), ++ lib: req.GetSep().GetLib(), ++ dest: req.GetSep().GetDest(), ++ log: logEntry, ++ enabled: req.GetSep().GetEnabled(), ++ renameFile: req.GetSep().GetRename(), ++ ++ tmpDir: imageTmpDir{ ++ root: tmpRoot, ++ untar: filepath.Join(tmpRoot, untarTempDirName), ++ base: filepath.Join(tmpRoot, baseUntarTempDirName), ++ lib: filepath.Join(tmpRoot, libUntarTempDirName), ++ app: filepath.Join(tmpRoot, appUntarTempDirName), ++ }, ++ } ++ ++ return sep, filepath.Join(sep.tmpDir.untar, unionTarName) ++} ++ ++// SeparateImage the main method of Saver, tries to separated the listed images to pieces ++func (s *Saver) SeparateImage(localStore *store.Store, oriImgList []string, outputPath string) (err error) { ++ s.log.Infof("Start saving separated images %v", oriImgList) ++ ++ if err = os.MkdirAll(s.dest, constant.DefaultRootDirMode); err != nil { ++ return err ++ } ++ ++ defer func() { ++ if tErr := os.RemoveAll(s.tmpDir.root); tErr != nil && !os.IsNotExist(tErr) { ++ s.log.Warnf("Removing save tmp directory %q failed: %v", s.tmpDir.root, tErr) ++ } ++ if err != nil { ++ if rErr := os.RemoveAll(s.dest); rErr != nil && !os.IsNotExist(rErr) { ++ s.log.Warnf("Removing save dest directory %q failed: %v", s.dest, rErr) ++ } ++ } ++ }() ++ if err = util.UnpackFile(outputPath, s.tmpDir.untar, archive.Gzip, true); err != nil { ++ return errors.Wrapf(err, "unpack %q failed", outputPath) ++ } ++ manifest, aErr := s.adjustLayers() ++ if aErr != nil { ++ return errors.Wrap(aErr, "adjust layers failed") ++ } ++ ++ imgInfos, cErr := s.constructImageInfos(manifest, localStore) ++ if cErr != nil { ++ return errors.Wrap(cErr, "process image infos failed") ++ } ++ ++ if err = s.processImageLayers(imgInfos); err != nil { ++ return err ++ } ++ ++ return nil ++} ++ ++func (s *Saver) getLayerHashFromStorage(store *store.Store, name string) ([]string, error) { ++ if len(name) == 0 { ++ return nil, nil ++ } ++ _, img, err := image.FindImage(store, name) ++ if err != nil { ++ return nil, err ++ } ++ ++ layer, err := store.Layer(img.TopLayer) ++ if err != nil { ++ return nil, errors.Wrapf(err, "failed to get top layer for image %s", name) ++ } ++ ++ var 
layers []string ++ // add each layer in the layers until reach the root layer ++ for layer != nil { ++ fields := strings.Split(layer.UncompressedDigest.String(), ":") ++ if len(fields) != 2 { ++ return nil, errors.Errorf("error format of layer of image %s", name) ++ } ++ layers = append(layers, fields[1]) ++ if layer.Parent == "" { ++ break ++ } ++ layer, err = store.Layer(layer.Parent) ++ if err != nil { ++ return nil, errors.Wrapf(err, "unable to read layer %q", layer.Parent) ++ } ++ } ++ ++ return layers, nil ++} ++ ++// process physic file ++func (s *Saver) constructLayerMap() (map[string]string, error) { ++ path := s.tmpDir.untar ++ files, rErr := ioutil.ReadDir(path) ++ if rErr != nil { ++ return nil, rErr ++ } ++ ++ var layerMap = make(map[string]string, len(files)) ++ // process layer's file ++ for _, file := range files { ++ if file.IsDir() { ++ layerFile := filepath.Join(path, file.Name(), layerTarName) ++ oriFile, err := os.Readlink(layerFile) ++ if err != nil { ++ return nil, err ++ } ++ physicFile := filepath.Join(path, file.Name(), oriFile) ++ layerMap[filepath.Base(physicFile)] = filepath.Join(file.Name(), layerTarName) ++ if err := os.Rename(physicFile, layerFile); err != nil { ++ return nil, err ++ } ++ } ++ } ++ ++ return layerMap, nil ++} ++ ++func (s *Saver) adjustLayers() ([]imageManifest, error) { ++ s.log.Info("Adjusting layers for saving separated image") ++ ++ layerMap, err := s.constructLayerMap() ++ if err != nil { ++ s.log.Errorf("Process layers failed: %v", err) ++ return nil, err ++ } ++ ++ // process manifest file ++ var man []imageManifest ++ if lErr := util.LoadJSONFile(filepath.Join(s.tmpDir.untar, manifestDataFile), &man); lErr != nil { ++ return nil, lErr ++ } ++ ++ for i, img := range man { ++ layers := make([]string, len(img.Layers)) ++ for i, layer := range img.Layers { ++ layers[i] = layerMap[layer] ++ } ++ man[i].Layers = layers ++ man[i].HashMap = getLayerHashFromTar(layerMap, layers) ++ } ++ buf, err := json.Marshal(&man) ++ if err != nil { ++ return nil, err ++ } ++ if err := ioutils.AtomicWriteFile(manifestFile, buf, constant.DefaultSharedFileMode); err != nil { ++ return nil, err ++ } ++ ++ return man, nil ++} ++ ++func (s *Saver) processImageLayers(imgInfos map[string]imageInfo) error { ++ s.log.Info("Processing image layers") ++ var ( ++ tarballs = make(map[string]tarballInfo) ++ baseImagesMap = make(imageLayersMap, 1) ++ libImagesMap = make(imageLayersMap, 1) ++ appImagesMap = make(imageLayersMap, 1) ++ ) ++ var sortedKey []string ++ for k := range imgInfos { ++ sortedKey = append(sortedKey, k) ++ } ++ sort.Strings(sortedKey) ++ for _, k := range sortedKey { ++ info := imgInfos[k] ++ if err := s.clearTempDirs(); err != nil { ++ return errors.Wrap(err, "clear tmp dirs failed") ++ } ++ var t tarballInfo ++ // process base ++ if err := info.processBaseImg(s, baseImagesMap, &t); err != nil { ++ return errors.Wrapf(err, "process base images %s failed", info.nameTag) ++ } ++ // process lib ++ if err := info.processLibImg(s, libImagesMap, &t); err != nil { ++ return errors.Wrapf(err, "process lib images %s failed", info.nameTag) ++ } ++ // process app ++ if err := info.processAppImg(s, appImagesMap, &t); err != nil { ++ return errors.Wrapf(err, "process app images %s failed", info.nameTag) ++ } ++ tarballs[info.nameTag] = t ++ } ++ buf, err := json.Marshal(&tarballs) ++ if err != nil { ++ return err ++ } ++ // manifest file ++ manifestFile := filepath.Join(s.dest, manifestFile) ++ if err := ioutils.AtomicWriteFile(manifestFile, buf, 
constant.DefaultRootFileMode); err != nil { ++ return err ++ } ++ ++ s.log.Info("Save separated image succeed") ++ return nil ++} ++ ++func (s *Saver) clearTempDirs() error { ++ dirs := []string{s.tmpDir.base, s.tmpDir.app, s.tmpDir.lib} ++ for _, dir := range dirs { ++ if err := os.RemoveAll(dir); err != nil { ++ return err ++ } ++ if err := os.MkdirAll(dir, constant.DefaultRootDirMode); err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++// ImageNames returns the images names of Saver ++func (s *Saver) ImageNames() []string { ++ var names = make([]string, 0, 2) ++ if !s.enabled { ++ return []string{} ++ } ++ if len(s.base) != 0 { ++ names = append(names, s.base) ++ } ++ if len(s.lib) != 0 { ++ names = append(names, s.lib) ++ } ++ return names ++} ++ ++func (s *Saver) constructSingleImgInfo(mani imageManifest, store *store.Store) (imageInfo, error) { ++ var libLayers, appLayers []string ++ // image name should not be empty here ++ if len(mani.RepoTags) == 0 { ++ return imageInfo{}, errors.New("image name and tag is empty") ++ } ++ // if there is more than one repoTag, will use first one as image name ++ imageRepoFields := strings.Split(mani.RepoTags[0], ":") ++ imageLayers := getLayersID(mani.Layers) ++ ++ libs, bases, err := s.checkLayersHash(mani.HashMap, store) ++ if err != nil { ++ return imageInfo{}, errors.Wrap(err, "compare layers failed") ++ } ++ baseLayers := imageLayers[0:len(bases)] ++ if len(libs) != 0 { ++ libLayers = imageLayers[len(bases):len(libs)] ++ appLayers = imageLayers[len(libs):] ++ } else { ++ libLayers = nil ++ appLayers = imageLayers[len(bases):] ++ } ++ ++ return imageInfo{ ++ config: mani.Config, ++ repoTags: mani.RepoTags, ++ nameTag: mani.RepoTags[0], ++ name: strings.Join(imageRepoFields[0:len(imageRepoFields)-1], ":"), ++ tag: imageRepoFields[len(imageRepoFields)-1], ++ layers: layer{app: appLayers, lib: libLayers, base: baseLayers, all: mani.Layers}, ++ topLayer: imageLayers[len(imageLayers)-1], ++ }, nil ++} ++ ++func (s *Saver) checkLayersHash(layerHashMap map[string]string, store *store.Store) ([]string, []string, error) { ++ libHash, err := s.getLayerHashFromStorage(store, s.lib) ++ if err != nil { ++ return nil, nil, errors.Wrapf(err, "get lib image %s layers failed", s.lib) ++ } ++ baseHash, err := s.getLayerHashFromStorage(store, s.base) ++ if err != nil { ++ return nil, nil, errors.Wrapf(err, "get base image %s layers failed", s.base) ++ } ++ if len(baseHash) > 1 { ++ return nil, nil, errors.Errorf("number of base layers %d more than one", len(baseHash)) ++ } ++ if len(libHash) >= len(layerHashMap) || len(baseHash) >= len(layerHashMap) { ++ return nil, nil, errors.Errorf("number of base or lib layers is equal or greater than saved app layers") ++ } ++ ++ for _, l := range libHash { ++ if _, ok := layerHashMap[l]; !ok { ++ return nil, nil, errors.Errorf("dismatch checksum for lib image %s", s.lib) ++ } ++ } ++ for _, b := range baseHash { ++ if _, ok := layerHashMap[b]; !ok { ++ return nil, nil, errors.Errorf("dismatch checksum for base image %s", s.base) ++ } ++ } ++ ++ return libHash, baseHash, nil ++} ++ ++func (s *Saver) constructImageInfos(manifest []imageManifest, store *store.Store) (map[string]imageInfo, error) { ++ s.log.Info("Constructing image info") ++ ++ var imgInfos = make(map[string]imageInfo, 1) ++ for _, mani := range manifest { ++ imgInfo, err := s.constructSingleImgInfo(mani, store) ++ if err != nil { ++ s.log.Errorf("Constructing image info failed: %v", err) ++ return nil, errors.Wrap(err, "construct image info failed") 
++ } ++ if _, ok := imgInfos[imgInfo.nameTag]; !ok { ++ imgInfos[imgInfo.nameTag] = imgInfo ++ } ++ } ++ return imgInfos, nil ++} ++ ++// LoadRenameFile Saver tries to load the specified rename json ++func (s *Saver) LoadRenameFile() error { ++ if len(s.renameFile) == 0 { ++ return nil ++ } ++ ++ var reName []renames ++ if err := util.LoadJSONFile(s.renameFile, &reName); err != nil { ++ return errors.Wrap(err, "check rename file failed") ++ } ++ s.renameData = reName ++ return nil ++} ++ ++func (s *Saver) getRename(name string) string { ++ if len(s.renameData) == 0 { ++ return name ++ } ++ ++ for _, item := range s.renameData { ++ if item.OriName == name { ++ s.log.Infof("Renaming image tarballs for %s to %s\n", name, item.NewName) ++ return item.NewName ++ } ++ } ++ return name ++} ++ ++// Enabled returns whether separated-image feature is enabled ++func (s *Saver) Enabled() bool { ++ return s.enabled ++} +diff --git a/daemon/separator/utils.go b/daemon/separator/utils.go +new file mode 100644 +index 0000000..fb05f58 +--- /dev/null ++++ b/daemon/separator/utils.go +@@ -0,0 +1,78 @@ ++// Copyright (c) Huawei Technologies Co., Ltd. 2020. All rights reserved. ++// isula-build licensed under the Mulan PSL v2. ++// You can use this software according to the terms and conditions of the Mulan PSL v2. ++// You may obtain a copy of Mulan PSL v2 at: ++// http://license.coscl.org.cn/MulanPSL2 ++// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++// PURPOSE. ++// See the Mulan PSL v2 for more details. ++// Author: Xiang Li ++// Create: 2021-12-09 ++// Description: This file is utils for image separator ++ ++package separator ++ ++import "strings" ++ ++const ( ++ manifestDataFile = "manifest.json" ++ manifestFile = "manifest" ++ repositoriesFile = "repositories" ++ ++ tmpBaseDirName = "base" ++ tmpAppDirName = "app" ++ tmpLibDirName = "lib" ++ baseUntarTempDirName = "base_images" ++ appUntarTempDirName = "app_images" ++ libUntarTempDirName = "lib_images" ++ baseTarNameSuffix = "_base_image.tar.gz" ++ appTarNameSuffix = "_app_image.tar.gz" ++ libTarNameSuffix = "_lib_image.tar.gz" ++ ++ unionTarName = "all.tar" ++ unionCompressedTarName = "all.tar.gz" ++ ++ untarTempDirName = "untar" ++ layerTarName = "layer.tar" ++ tarSuffix = ".tar" ++) ++ ++type tarballInfo struct { ++ AppTarName string `json:"app"` ++ AppHash string `json:"appHash"` ++ AppLayers []string `json:"appLayers"` ++ LibTarName string `json:"lib"` ++ LibHash string `json:"libHash"` ++ LibImageName string `json:"libImageName"` ++ LibLayers []string `json:"libLayers"` ++ BaseTarName string `json:"base"` ++ BaseHash string `json:"baseHash"` ++ BaseImageName string `json:"baseImageName"` ++ BaseLayer string `json:"baseLayer"` ++} ++ ++func getLayerHashFromTar(layerMap map[string]string, layer []string) map[string]string { ++ hashMap := make(map[string]string, len(layer)) ++ // first reverse map since it's is unique ++ revMap := make(map[string]string, len(layerMap)) ++ for k, v := range layerMap { ++ revMap[v] = k ++ } ++ for _, l := range layer { ++ if v, ok := revMap[l]; ok { ++ // format is like xxx(hash): xxx/layer.tar ++ hashMap[strings.TrimSuffix(v, tarSuffix)] = l ++ } ++ } ++ ++ return hashMap ++} ++ ++func getLayersID(layer []string) []string { ++ var after = make([]string, len(layer)) ++ for i, v := range layer { ++ after[i] = strings.Split(v, "/")[0] ++ } ++ return after ++} +-- 
+2.31.0.windows.1 + diff --git a/series.conf b/series.conf index 11c99e1..125a78e 100644 --- a/series.conf +++ b/series.conf @@ -7,3 +7,4 @@ patch/0095-tests-fix-testcase-TestPrepareFromImage.patch patch/0096-fix-some-little-mistakes-in-manual_zh.md.patch patch/0097-fix-the-message-is-not-rational-when-not-appoint-Doc.patch patch/0098-utils-remove-unused-PBKDF2-and-AES-related.patch +patch/0099-Refactor-refactor-image-separator-related.patch
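
Note for reviewers: the hunks in patch 0099 above expose the new daemon/separator package through a small API surface — GetSepSaveOptions builds a Saver that splits a saved union tarball into base/lib/app tarballs plus a manifest, and Loader verifies and reassembles those tarballs via LoadSeparatedImage. Below is a minimal sketch of how a daemon-side caller might drive that surface, using only the exported names visible in the patch. The import path, the surrounding wiring (request, store, log entry, an already-constructed Loader), and the assumption that the regular save step has written the union tarball first are illustrative, not part of this patch.

// Sketch only: exercises the exported API added by daemon/separator
// in patch 0099. Import path and caller wiring are assumed.
package sketch

import (
	"os"

	"github.com/sirupsen/logrus"

	pb "isula.org/isula-build/api/services"
	"isula.org/isula-build/daemon/separator"
	"isula.org/isula-build/store"
)

// saveSeparated splits the images referenced by req into base/lib/app
// tarballs under the request's destination directory.
func saveSeparated(req *pb.SaveRequest, log *logrus.Entry, dataRoot string,
	localStore *store.Store, imgList []string) error {
	sep, unionTarPath := separator.GetSepSaveOptions(req, log, dataRoot)
	if !sep.Enabled() {
		// Plain save path; no separation requested.
		return nil
	}
	// Optional rename rules ("name" -> "rename") for the emitted tarballs.
	if err := sep.LoadRenameFile(); err != nil {
		return err
	}
	// Base/lib reference images must be saved alongside the app images so
	// their layers are present in the union tarball that gets separated.
	imgList = append(imgList, sep.ImageNames()...)
	// Assumption: the regular save pipeline has already written the combined
	// image tarball to unionTarPath before separation starts.
	return sep.SeparateImage(localStore, imgList, unionTarPath)
}

// loadSeparated reassembles a separated app image and returns the path of
// the reconstructed tarball for the regular load pipeline to consume.
func loadSeparated(l *separator.Loader) (string, error) {
	if !l.Enabled() {
		return "", nil
	}
	tarPath, err := l.LoadSeparatedImage()
	if err != nil {
		// The loader's temporary directory is cleaned up by the caller
		// once loading has finished or failed.
		_ = os.RemoveAll(l.TmpDirRoot())
		return "", err
	}
	return tarPath, nil
}

The split between Saver (server-side save) and Loader (server-side load) mirrors the manifest written by processImageLayers: each app entry records its own tarball plus the base/lib tarballs and their hashes, which is exactly what getTarballInfo and tarballCheckSum consume on the load side.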