mutualize builder logic
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
@@ -9,6 +9,7 @@ import (
"github.com/containerd/containerd/platforms"
"github.com/docker/buildx/bake"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/progress"
@@ -90,20 +91,27 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
}
}()

var dis []build.DriverInfo
var nodes []builder.Node
var files []bake.File
var inp *bake.Input

// instance only needed for reading remote bake files or building
if url != "" || !in.printOnly {
dis, err = getInstanceOrDefault(ctx, dockerCli, in.builder, contextPathHash)
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithContextPathHash(contextPathHash),
)
if err != nil {
return err
}
nodes, err = b.LoadNodes(ctx, false)
if err != nil {
return err
}
}

if url != "" {
files, inp, err = bake.ReadRemoteFiles(ctx, dis, url, in.files, printer)
files, inp, err = bake.ReadRemoteFiles(ctx, nodes, url, in.files, printer)
} else {
files, err = bake.ReadLocalFiles(in.files)
}
@@ -147,7 +155,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
return nil
}

resp, err := build.Build(ctx, dis, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
resp, err := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
if err != nil {
return wrapBuildError(err, true)
}
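Note: the hunks above replace bake's per-command getInstanceOrDefault lookup with the shared builder package. A minimal sketch of the resulting resolution pattern, assuming only the calls visible in this diff (builder.New, builder.WithName, builder.WithContextPathHash, LoadNodes); the helper name resolveNodes is hypothetical, and flag parsing and the progress printer are omitted:

package commands

import (
	"context"

	"github.com/docker/buildx/builder"
	"github.com/docker/cli/cli/command"
)

// resolveNodes is a hypothetical helper condensing the pattern above: resolve
// a builder by name (and context path hash), then load its nodes. It stands
// in for the removed getInstanceOrDefault call.
func resolveNodes(ctx context.Context, dockerCli command.Cli, name, contextPathHash string) ([]builder.Node, error) {
	b, err := builder.New(dockerCli,
		builder.WithName(name),
		builder.WithContextPathHash(contextPathHash),
	)
	if err != nil {
		return nil, err
	}
	// The boolean appears to control whether extra node/driver data is
	// loaded: false here and in build, true in create/inspect/ls.
	return b.LoadNodes(ctx, false)
}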
@@ -16,6 +16,7 @@ import (

"github.com/containerd/console"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/monitor"
"github.com/docker/buildx/util/buildflags"
"github.com/docker/buildx/util/confutil"
@@ -238,7 +239,19 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
contextPathHash = in.contextPath
}

imageID, res, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile, in.invoke != "")
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithContextPathHash(contextPathHash),
)
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
}

imageID, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, in.invoke != "")
err = wrapBuildError(err, false)
if err != nil {
return err
@@ -255,7 +268,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
return errors.Errorf("failed to configure terminal: %v", err)
}
err = monitor.RunMonitor(ctx, cfg, func(ctx context.Context) (*build.ResultContext, error) {
_, rr, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile, true)
_, rr, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, true)
return rr, err
}, io.NopCloser(os.Stdin), nopCloser{os.Stdout}, nopCloser{os.Stderr})
if err != nil {
@@ -276,12 +289,7 @@ type nopCloser struct {

func (c nopCloser) Close() error { return nil }

func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string, allowNoOutput bool) (imageID string, res *build.ResultContext, err error) {
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
if err != nil {
return "", nil, err
}

func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progressMode string, metadataFile string, allowNoOutput bool) (imageID string, res *build.ResultContext, err error) {
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()

@@ -292,7 +300,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]bu

var mu sync.Mutex
var idx int
resp, err := build.BuildWithResultHandler(ctx, dis, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer, func(driverIndex int, gotRes *build.ResultContext) {
resp, err := build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer, func(driverIndex int, gotRes *build.ResultContext) {
mu.Lock()
defer mu.Unlock()
if res == nil || driverIndex < idx {
@@ -10,7 +10,9 @@ import (
"strings"
"time"

"github.com/docker/buildx/builder"
"github.com/docker/buildx/driver"
remoteutil "github.com/docker/buildx/driver/remote/util"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/cobrautil"
@@ -18,6 +20,7 @@ import (
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
dopts "github.com/docker/cli/opts"
"github.com/google/shlex"
"github.com/moby/buildkit/util/appcontext"
"github.com/pkg/errors"
@@ -235,17 +238,26 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
return err
}

ngi := &nginfo{ng: ng}
b, err := builder.New(dockerCli,
builder.WithName(ng.Name),
builder.WithStore(txn),
builder.WithSkippedValidation(),
)
if err != nil {
return err
}

timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()

if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
nodes, err := b.LoadNodes(timeoutCtx, true)
if err != nil {
return err
}
for _, info := range ngi.drivers {
if err := info.di.Err; err != nil {
err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, info.di.Name, err)

for _, node := range nodes {
if err := node.Err; err != nil {
err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, node.Name, err)
var err2 error
if ngOriginal == nil {
err2 = txn.Remove(ng.Name)
@@ -270,7 +282,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
}

if in.bootstrap {
if _, err = boot(ctx, ngi); err != nil {
if _, err = b.Boot(ctx); err != nil {
return err
}
}
@@ -341,3 +353,27 @@ func csvToMap(in []string) (map[string]string, error) {
}
return m, nil
}

// validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
dem, err := dockerutil.GetDockerEndpoint(dockerCli, ep)
if err == nil && dem != nil {
if ep == "default" {
return dem.Host, nil
}
return ep, nil
}
h, err := dopts.ParseHost(true, ep)
if err != nil {
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
}
return h, nil
}

// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
func validateBuildkitEndpoint(ep string) (string, error) {
if err := remoteutil.IsValidEndpoint(ep); err != nil {
return "", err
}
return ep, nil
}
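Note: a condensed sketch of the create flow introduced above, assuming the builder API exactly as it appears in these hunks; bootstrapNewBuilder is a hypothetical helper, and the rollback path (txn.Remove when ngOriginal == nil) is omitted:

package commands

import (
	"context"
	"time"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/store"
	"github.com/docker/cli/cli/command"
	"github.com/pkg/errors"
)

// bootstrapNewBuilder is a hypothetical helper: bind the new builder to the
// store transaction, load node data, surface per-node errors, then
// optionally boot it, as runCreate does above.
func bootstrapNewBuilder(ctx context.Context, dockerCli command.Cli, txn *store.Txn, ng *store.NodeGroup, bootstrap bool) error {
	b, err := builder.New(dockerCli,
		builder.WithName(ng.Name),
		builder.WithStore(txn),
		builder.WithSkippedValidation(),
	)
	if err != nil {
		return err
	}

	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()

	nodes, err := b.LoadNodes(timeoutCtx, true) // true: also load driver data for each node
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if node.Err != nil {
			return errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, node.Name, node.Err)
		}
	}
	if bootstrap {
		if _, err := b.Boot(ctx); err != nil { // Boot replaces the old boot(ctx, ngi) helper
			return err
		}
	}
	return nil
}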
@@ -8,7 +8,7 @@ import (
"text/tabwriter"
"time"

"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
@@ -33,25 +33,29 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
return err
}

dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}

for _, di := range dis {
if di.Err != nil {
return di.Err
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}

out := make([][]*client.UsageInfo, len(dis))
out := make([][]*client.UsageInfo, len(nodes))

eg, ctx := errgroup.WithContext(ctx)
for i, di := range dis {
func(i int, di build.DriverInfo) {
for i, node := range nodes {
func(i int, node builder.Node) {
eg.Go(func() error {
if di.Driver != nil {
c, err := di.Driver.Client(ctx)
if node.Driver != nil {
c, err := node.Driver.Client(ctx)
if err != nil {
return err
}
@@ -64,7 +68,7 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
}
return nil
})
}(i, di)
}(i, node)
}

if err := eg.Wait(); err != nil {
@@ -7,8 +7,7 @@ import (
"os"
"strings"

"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
@@ -113,27 +112,11 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {

ctx := appcontext.Context()

txn, release, err := storeutil.GetStore(dockerCli)
b, err := builder.New(dockerCli, builder.WithName(in.builder))
if err != nil {
return err
}
defer release()

var ng *store.NodeGroup

if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
} else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
}

imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
imageopt, err := b.ImageOpt()
if err != nil {
return err
}
@@ -1,8 +1,7 @@
package commands

import (
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/cli-docs-tool/annotation"
"github.com/docker/cli/cli"
@@ -25,27 +24,11 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
return errors.Errorf("format and raw cannot be used together")
}

txn, release, err := storeutil.GetStore(dockerCli)
b, err := builder.New(dockerCli, builder.WithName(in.builder))
if err != nil {
return err
}
defer release()

var ng *store.NodeGroup

if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
} else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
}

imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
imageopt, err := b.ImageOpt()
if err != nil {
return err
}
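Note: both imagetools commands now resolve registry options through the builder instead of walking the store. A sketch under the assumption that b.ImageOpt() returns an imagetools.Opt like the storeutil.GetImageConfig call it replaces (the return type is not shown in this diff); newImagetoolsOpt is a hypothetical helper:

package commands

import (
	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/util/imagetools"
	"github.com/docker/cli/cli/command"
)

// newImagetoolsOpt is a hypothetical helper: resolve the image/registry
// options for the named builder (presumably the current one when name is
// empty). The imagetools.Opt return type is an assumption; the diff only
// shows imageopt, err := b.ImageOpt().
func newImagetoolsOpt(dockerCli command.Cli, name string) (imagetools.Opt, error) {
	b, err := builder.New(dockerCli, builder.WithName(name))
	if err != nil {
		return imagetools.Opt{}, err
	}
	return b.ImageOpt()
}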
@@ -8,8 +8,7 @@ import (
"text/tabwriter"
"time"

"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/platformutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
@@ -25,71 +24,43 @@ type inspectOptions struct {
func runInspect(dockerCli command.Cli, in inspectOptions) error {
ctx := appcontext.Context()

txn, release, err := storeutil.GetStore(dockerCli)
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithSkippedValidation(),
)
if err != nil {
return err
}
defer release()

var ng *store.NodeGroup

if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
} else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
}

if ng == nil {
ng = &store.NodeGroup{
Name: "default",
Nodes: []store.Node{{
Name: "default",
Endpoint: "default",
}},
}
}

ngi := &nginfo{ng: ng}

timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()

err = loadNodeGroupData(timeoutCtx, dockerCli, ngi)

var bootNgi *nginfo
nodes, err := b.LoadNodes(timeoutCtx, true)
if in.bootstrap {
var ok bool
ok, err = boot(ctx, ngi)
ok, err = b.Boot(ctx)
if err != nil {
return err
}
bootNgi = ngi
if ok {
ngi = &nginfo{ng: ng}
err = loadNodeGroupData(ctx, dockerCli, ngi)
nodes, err = b.LoadNodes(timeoutCtx, true)
}
}

w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "Name:\t%s\n", ngi.ng.Name)
fmt.Fprintf(w, "Driver:\t%s\n", ngi.ng.Driver)
fmt.Fprintf(w, "Name:\t%s\n", b.Name)
fmt.Fprintf(w, "Driver:\t%s\n", b.Driver)

if err != nil {
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
} else if ngi.err != nil {
fmt.Fprintf(w, "Error:\t%s\n", ngi.err.Error())
} else if b.Err() != nil {
fmt.Fprintf(w, "Error:\t%s\n", b.Err().Error())
}
if err == nil {
fmt.Fprintln(w, "")
fmt.Fprintln(w, "Nodes:")

for i, n := range ngi.ng.Nodes {
for i, n := range nodes {
if i != 0 {
fmt.Fprintln(w, "")
}
@@ -104,21 +75,17 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
fmt.Fprintf(w, "Driver Options:\t%s\n", strings.Join(driverOpts, " "))
}

if err := ngi.drivers[i].di.Err; err != nil {
if err := n.Err; err != nil {
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
} else if err := ngi.drivers[i].err; err != nil {
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
} else if bootNgi != nil && len(bootNgi.drivers) > i && bootNgi.drivers[i].err != nil {
fmt.Fprintf(w, "Error:\t%s\n", bootNgi.drivers[i].err.Error())
} else {
fmt.Fprintf(w, "Status:\t%s\n", ngi.drivers[i].info.Status)
fmt.Fprintf(w, "Status:\t%s\n", nodes[i].DriverInfo.Status)
if len(n.Flags) > 0 {
fmt.Fprintf(w, "Flags:\t%s\n", strings.Join(n.Flags, " "))
}
if ngi.drivers[i].version != "" {
fmt.Fprintf(w, "Buildkit:\t%s\n", ngi.drivers[i].version)
if nodes[i].Version != "" {
fmt.Fprintf(w, "Buildkit:\t%s\n", nodes[i].Version)
}
fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Platforms, ngi.drivers[i].platforms), ", "))
fmt.Fprintf(w, "Platforms:\t%s\n", strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
}
}
}
commands/ls.go (115 changed lines)
@@ -4,12 +4,11 @@ import (
"context"
"fmt"
"io"
"sort"
"strings"
"text/tabwriter"
"time"

"github.com/docker/buildx/store"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/cobrautil"
"github.com/docker/buildx/util/platformutil"
@@ -32,52 +31,24 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
}
defer release()

ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
current, err := storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}

builders, err := builder.GetBuilders(dockerCli, txn)
if err != nil {
return err
}

timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()

ll, err := txn.List()
if err != nil {
return err
}

builders := make([]*nginfo, len(ll))
for i, ng := range ll {
builders[i] = &nginfo{ng: ng}
}

contexts, err := dockerCli.ContextStore().List()
if err != nil {
return err
}
sort.Slice(contexts, func(i, j int) bool {
return contexts[i].Name < contexts[j].Name
})
for _, c := range contexts {
ngi := &nginfo{ng: &store.NodeGroup{
Name: c.Name,
Nodes: []store.Node{{
Name: c.Name,
Endpoint: c.Name,
}},
}}
// if a context has the same name as an instance from the store, do not
// add it to the builders list. An instance from the store takes
// precedence over context builders.
if hasNodeGroup(builders, ngi) {
continue
}
builders = append(builders, ngi)
}

eg, _ := errgroup.WithContext(ctx)

eg, _ := errgroup.WithContext(timeoutCtx)
for _, b := range builders {
func(b *nginfo) {
func(b *builder.Builder) {
eg.Go(func() error {
err = loadNodeGroupData(ctx, dockerCli, b)
if b.err == nil && err != nil {
b.err = err
}
_, _ = b.LoadNodes(timeoutCtx, true)
return nil
})
}(b)
@@ -87,29 +58,15 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
return err
}

currentName := "default"
current, err := storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
if current != nil {
currentName = current.Name
if current.Name == "default" {
currentName = current.Nodes[0].Endpoint
}
}

w := tabwriter.NewWriter(dockerCli.Out(), 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tBUILDKIT\tPLATFORMS\n")

currentSet := false
printErr := false
for _, b := range builders {
if !currentSet && b.ng.Name == currentName {
b.ng.Name += " *"
currentSet = true
if current.Name == b.Name {
b.Name += " *"
}
if ok := printngi(w, b); !ok {
if ok := printBuilder(w, b); !ok {
printErr = true
}
}
@@ -119,19 +76,12 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
if printErr {
_, _ = fmt.Fprintf(dockerCli.Err(), "\n")
for _, b := range builders {
if b.err != nil {
_, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.ng.Name, strings.TrimSpace(b.err.Error()))
if b.Err() != nil {
_, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.Name, strings.TrimSpace(b.Err().Error()))
} else {
for idx, n := range b.ng.Nodes {
d := b.drivers[idx]
var nodeErr string
if d.err != nil {
nodeErr = d.err.Error()
} else if d.di.Err != nil {
nodeErr = d.di.Err.Error()
}
if nodeErr != "" {
_, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.ng.Name, n.Name, strings.TrimSpace(nodeErr))
for _, d := range b.Nodes() {
if d.Err != nil {
_, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.Name, d.Name, strings.TrimSpace(d.Err.Error()))
}
}
}
@@ -141,26 +91,25 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
return nil
}

func printngi(w io.Writer, ngi *nginfo) (ok bool) {
func printBuilder(w io.Writer, b *builder.Builder) (ok bool) {
ok = true
var err string
if ngi.err != nil {
if b.Err() != nil {
ok = false
err = "error"
}
fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", ngi.ng.Name, ngi.ng.Driver, err)
if ngi.err == nil {
for idx, n := range ngi.ng.Nodes {
d := ngi.drivers[idx]
fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", b.Name, b.Driver, err)
if b.Err() == nil {
for _, n := range b.Nodes() {
var status string
if d.info != nil {
status = d.info.Status.String()
if n.DriverInfo != nil {
status = n.DriverInfo.Status.String()
}
if d.err != nil || d.di.Err != nil {
if n.Err != nil {
ok = false
fmt.Fprintf(w, " %s\t%s\t%s\t\t\n", n.Name, n.Endpoint, "error")
} else {
fmt.Fprintf(w, " %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, d.version, strings.Join(platformutil.FormatInGroups(n.Platforms, d.platforms), ", "))
fmt.Fprintf(w, " %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, n.Version, strings.Join(platformutil.FormatInGroups(n.Node.Platforms, n.Platforms), ", "))
}
}
}
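Note: a condensed sketch of the new ls flow, assuming builder.GetBuilders and LoadNodes behave as used above; loadAllBuilders is a hypothetical helper, and the tabwriter output is omitted:

package commands

import (
	"context"
	"time"

	"github.com/docker/buildx/builder"
	"github.com/docker/buildx/store"
	"github.com/docker/cli/cli/command"
	"golang.org/x/sync/errgroup"
)

// loadAllBuilders is a hypothetical condensation of runLs above: enumerate
// builders via builder.GetBuilders, then load node data for each one
// concurrently. Load failures stay on the builder/node (b.Err(), node.Err)
// and are reported later, so one broken builder does not abort the listing.
func loadAllBuilders(ctx context.Context, dockerCli command.Cli, txn *store.Txn) ([]*builder.Builder, error) {
	builders, err := builder.GetBuilders(dockerCli, txn)
	if err != nil {
		return nil, err
	}

	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()

	eg, _ := errgroup.WithContext(timeoutCtx)
	for _, b := range builders {
		func(b *builder.Builder) {
			eg.Go(func() error {
				// Errors are captured on the builder rather than returned.
				_, _ = b.LoadNodes(timeoutCtx, true)
				return nil
			})
		}(b)
	}
	if err := eg.Wait(); err != nil {
		return nil, err
	}
	return builders, nil
}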
@@ -7,7 +7,7 @@ import (
"text/tabwriter"
"time"

"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
@@ -54,14 +54,18 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
return nil
}

dis, err := getInstanceOrDefault(ctx, dockerCli, opts.builder, "")
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}

for _, di := range dis {
if di.Err != nil {
return di.Err
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}

@@ -90,11 +94,11 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
}()

eg, ctx := errgroup.WithContext(ctx)
for _, di := range dis {
func(di build.DriverInfo) {
for _, node := range nodes {
func(node builder.Node) {
eg.Go(func() error {
if di.Driver != nil {
c, err := di.Driver.Client(ctx)
if node.Driver != nil {
c, err := node.Driver.Client(ctx)
if err != nil {
return err
}
@@ -109,7 +113,7 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
}
return nil
})
}(di)
}(node)
}

if err := eg.Wait(); err != nil {
@@ -5,6 +5,7 @@ import (
"fmt"
"time"

"github.com/docker/buildx/builder"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/cli/cli"
@@ -44,41 +45,33 @@ func runRm(dockerCli command.Cli, in rmOptions) error {
return rmAllInactive(ctx, txn, dockerCli, in)
}

var ng *store.NodeGroup
if in.builder != "" {
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
} else {
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return err
}
}
if ng == nil {
return nil
}

ctxbuilders, err := dockerCli.ContextStore().List()
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithStore(txn),
builder.WithSkippedValidation(),
)
if err != nil {
return err
}
for _, cb := range ctxbuilders {
if ng.Driver == "docker" && len(ng.Nodes) == 1 && ng.Nodes[0].Endpoint == cb.Name {
return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb.Name)
}

nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
}

err1 := rm(ctx, dockerCli, in, ng)
if err := txn.Remove(ng.Name); err != nil {
if cb := b.ContextName(); cb != "" {
return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
}

err1 := rm(ctx, nodes, in)
if err := txn.Remove(b.Name); err != nil {
return err
}
if err1 != nil {
return err1
}

_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", ng.Name)
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.Name)
return nil
}

@@ -110,61 +103,56 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
return cmd
}

func rm(ctx context.Context, dockerCli command.Cli, in rmOptions, ng *store.NodeGroup) error {
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
if err != nil {
return err
}
for _, di := range dis {
if di.Driver == nil {
func rm(ctx context.Context, nodes []builder.Node, in rmOptions) (err error) {
for _, node := range nodes {
if node.Driver == nil {
continue
}
// Do not stop the buildkitd daemon when --keep-daemon is provided
if !in.keepDaemon {
if err := di.Driver.Stop(ctx, true); err != nil {
if err := node.Driver.Stop(ctx, true); err != nil {
return err
}
}
if err := di.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
if err := node.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
return err
}
if di.Err != nil {
err = di.Err
if node.Err != nil {
err = node.Err
}
}
return err
}

func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, in rmOptions) error {
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()

ll, err := txn.List()
builders, err := builder.GetBuilders(dockerCli, txn)
if err != nil {
return err
}

builders := make([]*nginfo, len(ll))
for i, ng := range ll {
builders[i] = &nginfo{ng: ng}
}
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()

eg, _ := errgroup.WithContext(ctx)
eg, _ := errgroup.WithContext(timeoutCtx)
for _, b := range builders {
func(b *nginfo) {
func(b *builder.Builder) {
eg.Go(func() error {
if err := loadNodeGroupData(ctx, dockerCli, b); err != nil {
return errors.Wrapf(err, "cannot load %s", b.ng.Name)
nodes, err := b.LoadNodes(timeoutCtx, true)
if err != nil {
return errors.Wrapf(err, "cannot load %s", b.Name)
}
if b.ng.Dynamic {
if cb := b.ContextName(); cb != "" {
return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
}
if b.Dynamic {
return nil
}
if b.inactive() {
rmerr := rm(ctx, dockerCli, in, b.ng)
if err := txn.Remove(b.ng.Name); err != nil {
if b.Inactive() {
rmerr := rm(ctx, nodes, in)
if err := txn.Remove(b.Name); err != nil {
return err
}
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.ng.Name)
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.Name)
return rmerr
}
return nil
@@ -3,8 +3,7 @@ package commands
import (
"context"

"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/builder"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/appcontext"
@@ -18,32 +17,19 @@ type stopOptions struct {
func runStop(dockerCli command.Cli, in stopOptions) error {
ctx := appcontext.Context()

txn, release, err := storeutil.GetStore(dockerCli)
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithSkippedValidation(),
)
if err != nil {
return err
}
defer release()

if in.builder != "" {
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
if err := stop(ctx, dockerCli, ng); err != nil {
return err
}
return nil
}

ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
}
if ng != nil {
return stop(ctx, dockerCli, ng)
}

return stopCurrent(ctx, dockerCli)
return stop(ctx, nodes)
}

func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
@@ -65,37 +51,15 @@ func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
return cmd
}

func stop(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup) error {
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
if err != nil {
return err
}
for _, di := range dis {
if di.Driver != nil {
if err := di.Driver.Stop(ctx, true); err != nil {
func stop(ctx context.Context, nodes []builder.Node) (err error) {
for _, node := range nodes {
if node.Driver != nil {
if err := node.Driver.Stop(ctx, true); err != nil {
return err
}
}
if di.Err != nil {
err = di.Err
}
}
return err
}

func stopCurrent(ctx context.Context, dockerCli command.Cli) error {
dis, err := getDefaultDrivers(ctx, dockerCli, false, "")
if err != nil {
return err
}
for _, di := range dis {
if di.Driver != nil {
if err := di.Driver.Stop(ctx, true); err != nil {
return err
}
}
if di.Err != nil {
err = di.Err
if node.Err != nil {
err = node.Err
}
}
return err
commands/util.go (410 changed lines)
@@ -1,410 +0,0 @@
package commands

import (
"context"
"os"

"github.com/docker/buildx/build"
"github.com/docker/buildx/driver"
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
remoteutil "github.com/docker/buildx/driver/remote/util"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
dopts "github.com/docker/cli/opts"
"github.com/moby/buildkit/util/grpcerrors"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
)

// validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
dem, err := dockerutil.GetDockerEndpoint(dockerCli, ep)
if err == nil && dem != nil {
if ep == "default" {
return dem.Host, nil
}
return ep, nil
}
h, err := dopts.ParseHost(true, ep)
if err != nil {
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
}
return h, nil
}

// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
func validateBuildkitEndpoint(ep string) (string, error) {
if err := remoteutil.IsValidEndpoint(ep); err != nil {
return "", err
}
return ep, nil
}

// driversForNodeGroup returns drivers for a nodegroup instance
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
eg, _ := errgroup.WithContext(ctx)

dis := make([]build.DriverInfo, len(ng.Nodes))

var f driver.Factory
if ng.Driver != "" {
var err error
f, err = driver.GetFactory(ng.Driver, true)
if err != nil {
return nil, err
}
} else {
// empty driver means nodegroup was implicitly created as a default
// driver for a docker context and allows falling back to a
// docker-container driver for older daemon that doesn't support
// buildkit (< 18.06).
ep := ng.Nodes[0].Endpoint
dockerapi, err := dockerutil.NewClientAPI(dockerCli, ep)
if err != nil {
return nil, err
}
// check if endpoint is healthy is needed to determine the driver type.
// if this fails then can't continue with driver selection.
if _, err = dockerapi.Ping(ctx); err != nil {
return nil, err
}
f, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false)
if err != nil {
return nil, err
}
ng.Driver = f.Name()
}
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
if err != nil {
return nil, err
}

for i, n := range ng.Nodes {
func(i int, n store.Node) {
eg.Go(func() error {
di := build.DriverInfo{
Name: n.Name,
Platform: n.Platforms,
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
}
defer func() {
dis[i] = di
}()

dockerapi, err := dockerutil.NewClientAPI(dockerCli, n.Endpoint)
if err != nil {
di.Err = err
return nil
}

contextStore := dockerCli.ContextStore()

var kcc driver.KubeClientConfig
kcc, err = ctxkube.ConfigFromEndpoint(n.Endpoint, contextStore)
if err != nil {
// err is returned if n.Endpoint is non-context name like "unix:///var/run/docker.sock".
// try again with name="default".
// FIXME: n should retain real context name.
kcc, err = ctxkube.ConfigFromEndpoint("default", contextStore)
if err != nil {
logrus.Error(err)
}
}

tryToUseKubeConfigInCluster := false
if kcc == nil {
tryToUseKubeConfigInCluster = true
} else {
if _, err := kcc.ClientConfig(); err != nil {
tryToUseKubeConfigInCluster = true
}
}
if tryToUseKubeConfigInCluster {
kccInCluster := driver.KubeClientConfigInCluster{}
if _, err := kccInCluster.ClientConfig(); err == nil {
logrus.Debug("using kube config in cluster")
kcc = kccInCluster
}
}

d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, contextPathHash)
if err != nil {
di.Err = err
return nil
}
di.Driver = d
di.ImageOpt = imageopt
return nil
})
}(i, n)
}

if err := eg.Wait(); err != nil {
return nil, err
}

return dis, nil
}

func getInstanceOrDefault(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
var defaultOnly bool

if instance == "default" && instance != dockerCli.CurrentContext() {
return nil, errors.Errorf("use `docker --context=default buildx` to switch to default context")
}
if instance == "default" || instance == dockerCli.CurrentContext() {
instance = ""
defaultOnly = true
}
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == instance {
return nil, errors.Errorf("use `docker --context=%s buildx` to switch to context %s", instance, instance)
}
}

if instance != "" {
return getInstanceByName(ctx, dockerCli, instance, contextPathHash)
}
return getDefaultDrivers(ctx, dockerCli, defaultOnly, contextPathHash)
}

func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return nil, err
}
defer release()

ng, err := txn.NodeGroupByName(instance)
if err != nil {
return nil, err
}
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
}

// getDefaultDrivers returns drivers based on current cli config
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return nil, err
}
defer release()

if !defaultOnly {
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
if err != nil {
return nil, err
}

if ng != nil {
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
}
}

imageopt, err := storeutil.GetImageConfig(dockerCli, nil)
if err != nil {
return nil, err
}

d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, "", dockerCli.Client(), imageopt.Auth, nil, nil, nil, nil, nil, contextPathHash)
if err != nil {
return nil, err
}
return []build.DriverInfo{
{
Name: "default",
Driver: d,
ImageOpt: imageopt,
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
},
}, nil
}

func loadInfoData(ctx context.Context, d *dinfo) error {
if d.di.Driver == nil {
return nil
}
info, err := d.di.Driver.Info(ctx)
if err != nil {
return err
}
d.info = info
if info.Status == driver.Running {
c, err := d.di.Driver.Client(ctx)
if err != nil {
return err
}
workers, err := c.ListWorkers(ctx)
if err != nil {
return errors.Wrap(err, "listing workers")
}
for _, w := range workers {
d.platforms = append(d.platforms, w.Platforms...)
}
d.platforms = platformutil.Dedupe(d.platforms)
inf, err := c.Info(ctx)
if err != nil {
if st, ok := grpcerrors.AsGRPCStatus(err); ok && st.Code() == codes.Unimplemented {
d.version, err = d.di.Driver.Version(ctx)
if err != nil {
return errors.Wrap(err, "getting version")
}
}
} else {
d.version = inf.BuildkitVersion.Version
}
}
return nil
}

func loadNodeGroupData(ctx context.Context, dockerCli command.Cli, ngi *nginfo) error {
eg, _ := errgroup.WithContext(ctx)

dis, err := driversForNodeGroup(ctx, dockerCli, ngi.ng, "")
if err != nil {
return err
}
ngi.drivers = make([]dinfo, len(dis))
for i, di := range dis {
d := di
ngi.drivers[i].di = &d
func(d *dinfo) {
eg.Go(func() error {
if err := loadInfoData(ctx, d); err != nil {
d.err = err
}
return nil
})
}(&ngi.drivers[i])
}

if eg.Wait(); err != nil {
return err
}

kubernetesDriverCount := 0

for _, di := range ngi.drivers {
if di.info != nil && len(di.info.DynamicNodes) > 0 {
kubernetesDriverCount++
}
}

isAllKubernetesDrivers := len(ngi.drivers) == kubernetesDriverCount

if isAllKubernetesDrivers {
var drivers []dinfo
var dynamicNodes []store.Node

for _, di := range ngi.drivers {
// dynamic nodes are used in Kubernetes driver.
// Kubernetes pods are dynamically mapped to BuildKit Nodes.
if di.info != nil && len(di.info.DynamicNodes) > 0 {
for i := 0; i < len(di.info.DynamicNodes); i++ {
// all []dinfo share *build.DriverInfo and *driver.Info
diClone := di
if pl := di.info.DynamicNodes[i].Platforms; len(pl) > 0 {
diClone.platforms = pl
}
drivers = append(drivers, di)
}
dynamicNodes = append(dynamicNodes, di.info.DynamicNodes...)
}
}

// not append (remove the static nodes in the store)
ngi.ng.Nodes = dynamicNodes
ngi.drivers = drivers
ngi.ng.Dynamic = true
}

return nil
}

func hasNodeGroup(list []*nginfo, ngi *nginfo) bool {
for _, l := range list {
if ngi.ng.Name == l.ng.Name {
return true
}
}
return false
}

type dinfo struct {
di *build.DriverInfo
info *driver.Info
platforms []specs.Platform
version string
err error
}

type nginfo struct {
ng *store.NodeGroup
drivers []dinfo
err error
}

// inactive checks if all nodes are inactive for this builder
func (n *nginfo) inactive() bool {
for idx := range n.ng.Nodes {
d := n.drivers[idx]
if d.info != nil && d.info.Status == driver.Running {
return false
}
}
return true
}

func boot(ctx context.Context, ngi *nginfo) (bool, error) {
toBoot := make([]int, 0, len(ngi.drivers))
for i, d := range ngi.drivers {
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
continue
}
if d.info.Status != driver.Running {
toBoot = append(toBoot, i)
}
}
if len(toBoot) == 0 {
return false, nil
}

printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progress.PrinterModeAuto)
if err != nil {
return false, err
}

baseCtx := ctx
eg, _ := errgroup.WithContext(ctx)
for _, idx := range toBoot {
func(idx int) {
eg.Go(func() error {
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
_, err := driver.Boot(ctx, baseCtx, ngi.drivers[idx].di.Driver, pw)
if err != nil {
ngi.drivers[idx].err = err
}
return nil
})
}(idx)
}

err = eg.Wait()
err1 := printer.Wait()
if err == nil {
err = err1
}

return true, err
}