vendor: update buildkit to master@ae9d0f5
Signed-off-by: Justin Chadwell <me@jedevc.com>
1 changed file: vendor/github.com/moby/buildkit/client/solve.go (generated, vendored), 184 lines changed
diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go
--- a/vendor/github.com/moby/buildkit/client/solve.go
+++ b/vendor/github.com/moby/buildkit/client/solve.go
@@ -2,6 +2,7 @@ package client

 import (
 	"context"
+	"encoding/base64"
 	"encoding/json"
 	"io"
 	"os"
@@ -14,6 +15,7 @@ import (
 	controlapi "github.com/moby/buildkit/api/services/control"
 	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/client/ociindex"
+	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
 	sessioncontent "github.com/moby/buildkit/session/content"
@@ -124,6 +126,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 		ex = opt.Exports[0]
 	}
 
+	indicesToUpdate := []string{}
+
 	if !opt.SessionPreInitialized {
 		if len(syncedDirs) > 0 {
 			s.Allow(filesync.NewFSSyncProvider(syncedDirs))
@@ -133,49 +137,64 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 			s.Allow(a)
 		}
 
+		contentStores := map[string]content.Store{}
+		for key, store := range cacheOpt.contentStores {
+			contentStores[key] = store
+		}
+		for key, store := range opt.OCIStores {
+			key2 := "oci:" + key
+			if _, ok := contentStores[key2]; ok {
+				return nil, errors.Errorf("oci store key %q already exists", key)
+			}
+			contentStores[key2] = store
+		}
+
+		var supportFile bool
+		var supportDir bool
 		switch ex.Type {
 		case ExporterLocal:
-			if ex.Output != nil {
-				return nil, errors.New("output file writer is not supported by local exporter")
-			}
-			if ex.OutputDir == "" {
-				return nil, errors.New("output directory is required for local exporter")
-			}
-			s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
-		case ExporterOCI, ExporterDocker, ExporterTar:
-			if ex.OutputDir != "" {
-				return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
-			}
+			supportDir = true
+		case ExporterTar:
+			supportFile = true
+		case ExporterOCI, ExporterDocker:
+			supportDir = ex.OutputDir != ""
+			supportFile = ex.Output != nil
+		}
+
+		if supportFile && supportDir {
+			return nil, errors.Errorf("both file and directory output is not supported by %s exporter", ex.Type)
+		}
+		if !supportFile && ex.Output != nil {
+			return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
+		}
+		if !supportDir && ex.OutputDir != "" {
+			return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
+		}
+
+		if supportFile {
 			if ex.Output == nil {
 				return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
 			}
 			s.Allow(filesync.NewFSSyncTarget(ex.Output))
-		default:
-			if ex.Output != nil {
-				return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
-			}
-			if ex.OutputDir != "" {
-				return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
-			}
 		}
-
-		// this is a new map that contains both cacheOpt stores and OCILayout stores
-		contentStores := make(map[string]content.Store, len(cacheOpt.contentStores)+len(opt.OCIStores))
-		// copy over the stores references from cacheOpt
-		for key, store := range cacheOpt.contentStores {
-			contentStores[key] = store
-		}
-		// copy over the stores references from ociLayout opts
-		for key, store := range opt.OCIStores {
-			// conflicts are not allowed
-			if _, ok := contentStores[key]; ok {
-				// we probably should check if the store is identical, but given that
-				// https://pkg.go.dev/github.com/containerd/containerd/content#Store
-				// is just an interface, composing 4 others, that is rather hard to do.
-				// For a future iteration.
-				return nil, errors.Errorf("contentStore key %s exists in both cache and OCI layouts", key)
-			}
-			contentStores[key] = store
-		}
+		if supportDir {
+			if ex.OutputDir == "" {
+				return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
+			}
+			switch ex.Type {
+			case ExporterOCI, ExporterDocker:
+				if err := os.MkdirAll(ex.OutputDir, 0755); err != nil {
+					return nil, err
+				}
+				cs, err := contentlocal.NewStore(ex.OutputDir)
+				if err != nil {
+					return nil, err
+				}
+				contentStores["export"] = cs
+				indicesToUpdate = append(indicesToUpdate, filepath.Join(ex.OutputDir, "index.json"))
+			default:
+				s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
+			}
+		}
 
 		if len(contentStores) > 0 {
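Not part of the diff: a minimal caller-side sketch of what the new supportDir path enables, assuming the buildkit client API at this commit (the directory path is illustrative). The OCI and Docker exporters, which the old switch rejected when given a directory, now turn OutputDir into a local content store and queue its index.json in indicesToUpdate.

	package example

	import "github.com/moby/buildkit/client"

	// ociDirExport sketches an export the old validation rejected: the OCI
	// exporter with a directory output. With the supportDir branch above, the
	// directory is materialized as an OCI layout and its index.json is
	// updated after the solve completes.
	func ociDirExport(dir string) client.ExportEntry {
		return client.ExportEntry{
			Type:      client.ExporterOCI,
			OutputDir: dir, // previously only an Output writer was accepted here
		}
	}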
@@ -352,10 +371,29 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 			}
 		}
 	}
+	if manifestDescDt := res.ExporterResponse[exptypes.ExporterImageDescriptorKey]; manifestDescDt != "" {
+		manifestDescDt, err := base64.StdEncoding.DecodeString(manifestDescDt)
+		if err != nil {
+			return nil, err
+		}
+		var manifestDesc ocispecs.Descriptor
+		if err = json.Unmarshal([]byte(manifestDescDt), &manifestDesc); err != nil {
+			return nil, err
+		}
+		for _, indexJSONPath := range indicesToUpdate {
+			tag := "latest"
+			if t, ok := res.ExporterResponse["image.name"]; ok {
+				tag = t
+			}
+			if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil {
+				return nil, err
+			}
+		}
+	}
 	return res, nil
 }
 
-func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
+func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesync.StaticDirSource, error) {
 	for _, d := range localDirs {
 		fi, err := os.Stat(d)
 		if err != nil {
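The block added above decodes the manifest descriptor that the exporter reports back. A sketch of just the decode step, as the code above does it: the value under exptypes.ExporterImageDescriptorKey is a base64-encoded JSON OCI descriptor.

	package example

	import (
		"encoding/base64"
		"encoding/json"

		ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	)

	// decodeImageDescriptor mirrors the decode added above: base64-decode the
	// exporter response value, then unmarshal it into an OCI descriptor.
	func decodeImageDescriptor(encoded string) (ocispecs.Descriptor, error) {
		var desc ocispecs.Descriptor
		dt, err := base64.StdEncoding.DecodeString(encoded)
		if err != nil {
			return desc, err
		}
		err = json.Unmarshal(dt, &desc)
		return desc, err
	}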
@@ -371,10 +409,10 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
 		return fsutil.MapResultKeep
 	}
 
-	dirs := make([]filesync.SyncedDir, 0, len(localDirs))
+	dirs := make(filesync.StaticDirSource, len(localDirs))
 	if def == nil {
 		for name, d := range localDirs {
-			dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
+			dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
 		}
 	} else {
 		for _, dt := range def.Def {
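prepareSyncedDirs now returns filesync.StaticDirSource, which (as the make() and map assignments above indicate) is a map keyed by the local name, so SyncedDir no longer carries a Name field. A sketch of the conversion; the callback signature is inferred from how resetUIDAndGID is used here.

	package example

	import (
		"github.com/moby/buildkit/session/filesync"
		"github.com/tonistiigi/fsutil"
		fstypes "github.com/tonistiigi/fsutil/types"
	)

	// staticDirSource shows the new shape: the former SyncedDir.Name becomes
	// the map key, which rules out duplicate names by construction.
	func staticDirSource(localDirs map[string]string, m func(string, *fstypes.Stat) fsutil.MapResult) filesync.StaticDirSource {
		dirs := make(filesync.StaticDirSource, len(localDirs))
		for name, d := range localDirs {
			dirs[name] = filesync.SyncedDir{Dir: d, Map: m}
		}
		return dirs
	}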
@@ -389,7 +427,7 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
 			if !ok {
 				return nil, errors.Errorf("local directory %s not enabled", name)
 			}
-			dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
+			dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
 		}
 	}
 }
@@ -416,14 +454,10 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
 	var (
 		cacheExports []*controlapi.CacheOptionsEntry
 		cacheImports []*controlapi.CacheOptionsEntry
-		// legacy API is used for registry caches, because the daemon might not support the new API
-		legacyExportRef  string
-		legacyImportRefs []string
 	)
 	contentStores := make(map[string]content.Store)
 	indicesToUpdate := make(map[string]string) // key: index.JSON file name, value: tag
 	frontendAttrs := make(map[string]string)
-	legacyExportAttrs := make(map[string]string)
 	for _, ex := range opt.CacheExports {
 		if ex.Type == "local" {
 			csDir := ex.Attrs["dest"]
@@ -438,26 +472,27 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
 				return nil, err
 			}
 			contentStores["local:"+csDir] = cs
+
+			tag := "latest"
+			if t, ok := ex.Attrs["tag"]; ok {
+				tag = t
+			}
 			// TODO(AkihiroSuda): support custom index JSON path and tag
 			indexJSONPath := filepath.Join(csDir, "index.json")
-			indicesToUpdate[indexJSONPath] = "latest"
+			indicesToUpdate[indexJSONPath] = tag
 		}
-		if ex.Type == "registry" && legacyExportRef == "" {
-			legacyExportRef = ex.Attrs["ref"]
-			for k, v := range ex.Attrs {
-				if k != "ref" {
-					legacyExportAttrs[k] = v
-				}
+		if ex.Type == "registry" {
+			regRef := ex.Attrs["ref"]
+			if regRef == "" {
+				return nil, errors.New("registry cache exporter requires ref")
 			}
-		} else {
-			cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
-				Type:  ex.Type,
-				Attrs: ex.Attrs,
-			})
 		}
+		cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
+			Type:  ex.Type,
+			Attrs: ex.Attrs,
+		})
 	}
 	for _, im := range opt.CacheImports {
-		attrs := im.Attrs
 		if im.Type == "local" {
 			csDir := im.Attrs["src"]
 			if csDir == "" {
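On the export side, the legacy registry-only fields are gone: every exporter goes through CacheOptionsEntry, a registry exporter must carry ref, and a local exporter may now choose the index.json tag. A caller-side sketch with the attribute names used in the diff; the helper itself is hypothetical.

	package example

	import "github.com/moby/buildkit/client"

	// cacheExportEntries sketches the two export flavors touched above: a
	// local export whose index.json entry gets a custom tag (previously
	// hardcoded to "latest"), and a registry export that must specify ref.
	func cacheExportEntries(dir, tag, ref string) []client.CacheOptionsEntry {
		return []client.CacheOptionsEntry{
			{Type: "local", Attrs: map[string]string{"dest": dir, "tag": tag}},
			{Type: "registry", Attrs: map[string]string{"ref": ref}},
		}
	}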
@@ -469,40 +504,40 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
 				continue
 			}
 			// if digest is not specified, load from "latest" tag
-			if attrs["digest"] == "" {
+			if im.Attrs["digest"] == "" {
 				idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json"))
 				if err != nil {
 					bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error())
 					continue
 				}
 				for _, m := range idx.Manifests {
-					if (m.Annotations[ocispecs.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispecs.AnnotationRefName] == attrs["tag"]) {
-						attrs["digest"] = string(m.Digest)
+					tag := "latest"
+					if t, ok := im.Attrs["tag"]; ok {
+						tag = t
+					}
+					if m.Annotations[ocispecs.AnnotationRefName] == tag {
+						im.Attrs["digest"] = string(m.Digest)
 						break
 					}
 				}
-				if attrs["digest"] == "" {
+				if im.Attrs["digest"] == "" {
 					return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json")
 				}
 			}
 			contentStores["local:"+csDir] = cs
 		}
 		if im.Type == "registry" {
-			legacyImportRef := attrs["ref"]
-			legacyImportRefs = append(legacyImportRefs, legacyImportRef)
-		} else {
-			cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
-				Type:  im.Type,
-				Attrs: attrs,
-			})
+			regRef := im.Attrs["ref"]
+			if regRef == "" {
+				return nil, errors.New("registry cache importer requires ref")
+			}
 		}
+		cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
+			Type:  im.Type,
+			Attrs: im.Attrs,
+		})
 	}
 	if opt.Frontend != "" || isGateway {
-		// use legacy API for registry importers, because the frontend might not support the new API
-		if len(legacyImportRefs) > 0 {
-			frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",")
-		}
-		// use new API for other importers
 		if len(cacheImports) > 0 {
 			s, err := json.Marshal(cacheImports)
 			if err != nil {
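The import side mirrors this: registry imports now always use the CacheOptionsEntry API (the legacy cache-from frontend attribute built from legacyImportRefs is dropped), and a local import can resolve its digest from a custom tag in index.json. A caller-side sketch under the same assumptions as above.

	package example

	import "github.com/moby/buildkit/client"

	// cacheImportEntries sketches the import counterparts: the local import's
	// digest is looked up under "tag" (default "latest") in index.json, and a
	// registry import must carry ref.
	func cacheImportEntries(dir, tag, ref string) []client.CacheOptionsEntry {
		return []client.CacheOptionsEntry{
			{Type: "local", Attrs: map[string]string{"src": dir, "tag": tag}},
			{Type: "registry", Attrs: map[string]string{"ref": ref}},
		}
	}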
@@ -513,11 +548,6 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
 	}
 	res := cacheOptions{
 		options: controlapi.CacheOptions{
-			// old API (for registry caches, planned to be removed in early 2019)
-			ExportRefDeprecated:   legacyExportRef,
-			ExportAttrsDeprecated: legacyExportAttrs,
-			ImportRefsDeprecated:  legacyImportRefs,
-			// new API
 			Exports: cacheExports,
 			Imports: cacheImports,
 		},