Merge pull request #1737 from jedevc/improved-controller-progress
controller: refactor progress api
Commit: 75ed3e296b
@@ -927,7 +927,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 			}
 			results.Set(resultKey(dp.driverIndex, k), res)
 			if resultHandleFunc != nil {
-				resultCtx, err := NewResultContext(cc, so, res)
+				resultCtx, err := NewResultContext(ctx, cc, so, res)
 				if err == nil {
 					resultHandleFunc(dp.driverIndex, resultCtx)
 				} else {
@@ -19,8 +19,7 @@ import (
 	"github.com/sirupsen/logrus"
 )

-func NewResultContext(c *client.Client, solveOpt client.SolveOpt, res *gateway.Result) (*ResultContext, error) {
-	ctx := context.Background()
+func NewResultContext(ctx context.Context, c *client.Client, solveOpt client.SolveOpt, res *gateway.Result) (*ResultContext, error) {
 	def, err := getDefinition(ctx, res)
 	if err != nil {
 		return nil, err
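These first two hunks thread the caller's context.Context into NewResultContext instead of minting a fresh context.Background() inside, so cancelling the surrounding build also cancels result loading. A minimal self-contained sketch of why that matters (loadResult is a hypothetical stand-in for work like getDefinition, not buildx code):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// loadResult honors ctx, so the caller's cancellation or deadline stops it.
func loadResult(ctx context.Context) error {
	select {
	case <-time.After(time.Second): // pretend work
		return nil
	case <-ctx.Done(): // caller cancelled or timed out
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	// With a threaded ctx the call aborts promptly; a function that builds
	// its own context.Background() would run to completion regardless.
	if err := loadResult(ctx); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("cancelled as expected:", err)
	}
}
```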
@@ -17,7 +17,6 @@ import (
 	"github.com/docker/buildx/util/tracing"
 	"github.com/docker/cli/cli/command"
 	"github.com/moby/buildkit/util/appcontext"
-	"github.com/moby/buildkit/util/progress/progressui"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -118,7 +117,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions, cFlags com
 	}

 	printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, cFlags.progress,
-		progressui.WithDesc(progressTextDesc, progressConsoleDesc),
+		progress.WithDesc(progressTextDesc, progressConsoleDesc),
 	)
 	if err != nil {
 		return err
@@ -1,6 +1,7 @@
 package commands

 import (
+	"bytes"
 	"context"
 	"encoding/base64"
 	"encoding/csv"
@@ -15,6 +16,7 @@ import (

 	"github.com/containerd/console"
 	"github.com/docker/buildx/build"
+	"github.com/docker/buildx/builder"
 	"github.com/docker/buildx/controller"
 	cbuild "github.com/docker/buildx/controller/build"
 	"github.com/docker/buildx/controller/control"
@@ -35,8 +37,10 @@ import (
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
+	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/util/appcontext"
 	"github.com/moby/buildkit/util/grpcerrors"
+	"github.com/morikuni/aec"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
@@ -88,7 +92,7 @@ type buildOptions struct {
 	control.ControlOptions
 }

-func (o *buildOptions) toControllerOptions() (controllerapi.BuildOptions, error) {
+func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error) {
 	var err error
 	opts := controllerapi.BuildOptions{
 		Allow: o.allow,
@@ -130,43 +134,43 @@ func (o *buildOptions) toControllerOptions() (controllerapi.BuildOptions, error)
 	}
 	opts.Attests, err = buildflags.ParseAttests(inAttests)
 	if err != nil {
-		return controllerapi.BuildOptions{}, err
+		return nil, err
 	}

 	opts.NamedContexts, err = buildflags.ParseContextNames(o.contexts)
 	if err != nil {
-		return controllerapi.BuildOptions{}, err
+		return nil, err
 	}

 	opts.Exports, err = buildflags.ParseExports(o.outputs)
 	if err != nil {
-		return controllerapi.BuildOptions{}, err
+		return nil, err
 	}
 	for _, e := range opts.Exports {
 		if (e.Type == client.ExporterLocal || e.Type == client.ExporterTar) && o.imageIDFile != "" {
-			return controllerapi.BuildOptions{}, errors.Errorf("local and tar exporters are incompatible with image ID file")
+			return nil, errors.Errorf("local and tar exporters are incompatible with image ID file")
 		}
 	}

 	opts.CacheFrom, err = buildflags.ParseCacheEntry(o.cacheFrom)
 	if err != nil {
-		return controllerapi.BuildOptions{}, err
+		return nil, err
 	}
 	opts.CacheTo, err = buildflags.ParseCacheEntry(o.cacheTo)
 	if err != nil {
-		return controllerapi.BuildOptions{}, err
+		return nil, err
 	}

 	opts.Secrets, err = buildflags.ParseSecretSpecs(o.secrets)
 	if err != nil {
-		return controllerapi.BuildOptions{}, err
+		return nil, err
 	}
 	opts.SSH, err = buildflags.ParseSSHSpecs(o.ssh)
 	if err != nil {
-		return controllerapi.BuildOptions{}, err
+		return nil, err
 	}

-	return opts, nil
+	return &opts, nil
 }

 func (o *buildOptions) toProgress() (string, error) {
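toControllerOptions now returns *controllerapi.BuildOptions, so every error path is a uniform `return nil, err` instead of constructing a throwaway zero-valued struct. A generic sketch of the pattern (Options is a stand-in type, not buildx's):

```go
package main

import "fmt"

type Options struct{ Tags []string }

// parse returns a pointer: error paths stay short, and callers can't
// accidentally use a half-filled zero value on failure.
func parse(tags []string) (*Options, error) {
	if len(tags) == 0 {
		return nil, fmt.Errorf("no tags given")
	}
	return &Options{Tags: tags}, nil
}

func main() {
	if _, err := parse(nil); err != nil {
		fmt.Println(err)
	}
}
```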
@@ -185,9 +189,8 @@ func (o *buildOptions) toProgress() (string, error) {
 	return o.progress, nil
 }

-func runBuild(dockerCli command.Cli, in buildOptions) error {
+func runBuild(dockerCli command.Cli, options buildOptions) (err error) {
 	ctx := appcontext.Context()

 	ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
 	if err != nil {
 		return err
@@ -196,38 +199,177 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {
 		end(err)
 	}()

-	opts, err := in.toControllerOptions()
-	if err != nil {
-		return err
-	}
-	progress, err := in.toProgress()
-	if err != nil {
-		return err
-	}
-
-	// Avoid leaving a stale file if we eventually fail
-	if in.imageIDFile != "" {
-		if err := os.Remove(in.imageIDFile); err != nil && !os.IsNotExist(err) {
-			return errors.Wrap(err, "removing image ID file")
-		}
-	}
-	resp, _, err := cbuild.RunBuild(ctx, dockerCli, opts, os.Stdin, progress, nil, false)
+	// Avoid leaving a stale file if we eventually fail
+	if options.imageIDFile != "" {
+		if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) {
+			return errors.Wrap(err, "removing image ID file")
+		}
+	}
+
+	contextPathHash := options.contextPath
+	if absContextPath, err := filepath.Abs(contextPathHash); err == nil {
+		contextPathHash = absContextPath
+	}
+	b, err := builder.New(dockerCli,
+		builder.WithName(options.builder),
+		builder.WithContextPathHash(contextPathHash),
+	)
 	if err != nil {
 		return err
 	}
-	if in.quiet {
+
+	ctx2, cancel := context.WithCancel(context.TODO())
+	defer cancel()
+	progressMode, err := options.toProgress()
+	if err != nil {
+		return err
+	}
+	var printer *progress.Printer
+	printer, err = progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode,
+		progress.WithDesc(
+			fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver),
+			fmt.Sprintf("%s:%s", b.Driver, b.Name),
+		),
+		progress.WithOnClose(func() {
+			printWarnings(os.Stderr, printer.Warnings(), progressMode)
+		}),
+	)
+	if err != nil {
+		return err
+	}
+
+	var resp *client.SolveResponse
+	var retErr error
+	if isExperimental() {
+		resp, retErr = runControllerBuild(ctx, dockerCli, options, printer)
+	} else {
+		resp, retErr = runBasicBuild(ctx, dockerCli, options, printer)
+	}
+
+	if err := printer.Wait(); retErr == nil {
+		retErr = err
+	}
+	if retErr != nil {
+		return retErr
+	}
+
+	if options.quiet {
 		fmt.Println(resp.ExporterResponse[exptypes.ExporterImageDigestKey])
 	}
-	if in.imageIDFile != "" {
+	if options.imageIDFile != "" {
 		dgst := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
 		if v, ok := resp.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok {
 			dgst = v
 		}
-		return os.WriteFile(in.imageIDFile, []byte(dgst), 0644)
+		return os.WriteFile(options.imageIDFile, []byte(dgst), 0644)
 	}

 	return nil
 }

+func runBasicBuild(ctx context.Context, dockerCli command.Cli, options buildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
+	opts, err := options.toControllerOptions()
+	if err != nil {
+		return nil, err
+	}
+
+	resp, _, err := cbuild.RunBuild(ctx, dockerCli, *opts, os.Stdin, printer, false)
+	return resp, err
+}
+
+func runControllerBuild(ctx context.Context, dockerCli command.Cli, options buildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
+	if options.invoke != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
+		// stdin must be usable for monitor
+		return nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
+	}
+
+	c, err := controller.NewController(ctx, options.ControlOptions, dockerCli, printer)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := c.Close(); err != nil {
+			logrus.Warnf("failed to close server connection %v", err)
+		}
+	}()
+
+	// Start build
+	opts, err := options.toControllerOptions()
+	if err != nil {
+		return nil, err
+	}
+
+	// NOTE: buildx server has the current working directory different from the client
+	// so we need to resolve paths to abosolute ones in the client.
+	opts, err = resolvePaths(opts)
+	if err != nil {
+		return nil, err
+	}
+
+	var ref string
+	var retErr error
+	var resp *client.SolveResponse
+	f := ioset.NewSingleForwarder()
+	f.SetReader(os.Stdin)
+	if !options.noBuild {
+		pr, pw := io.Pipe()
+		f.SetWriter(pw, func() io.WriteCloser {
+			pw.Close() // propagate EOF
+			logrus.Debug("propagating stdin close")
+			return nil
+		})
+
+		ref, resp, err = c.Build(ctx, *opts, pr, printer)
+		if err != nil {
+			var be *controllererrors.BuildError
+			if errors.As(err, &be) {
+				ref = be.Ref
+				retErr = err
+				// We can proceed to monitor
+			} else {
+				return nil, errors.Wrapf(err, "failed to build")
+			}
+		}
+
+		if err := pw.Close(); err != nil {
+			logrus.Debug("failed to close stdin pipe writer")
+		}
+		if err := pr.Close(); err != nil {
+			logrus.Debug("failed to close stdin pipe reader")
+		}
+	}
+
+	// post-build operations
+	if options.invoke != nil && options.invoke.needsMonitor(retErr) {
+		pr2, pw2 := io.Pipe()
+		f.SetWriter(pw2, func() io.WriteCloser {
+			pw2.Close() // propagate EOF
+			return nil
+		})
+		con := console.Current()
+		if err := con.SetRaw(); err != nil {
+			if err := c.Disconnect(ctx, ref); err != nil {
+				logrus.Warnf("disconnect error: %v", err)
+			}
+			return nil, errors.Errorf("failed to configure terminal: %v", err)
+		}
+		err = monitor.RunMonitor(ctx, ref, opts, options.invoke.InvokeConfig, c, pr2, os.Stdout, os.Stderr, printer)
+		con.Reset()
+		if err := pw2.Close(); err != nil {
+			logrus.Debug("failed to close monitor stdin pipe reader")
+		}
+		if err != nil {
+			logrus.Warnf("failed to run monitor: %v", err)
+		}
+	} else {
+		if err := c.Disconnect(ctx, ref); err != nil {
+			logrus.Warnf("disconnect error: %v", err)
+		}
+	}
+
+	return resp, retErr
+}
+
 func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 	options := buildOptions{}
 	cFlags := &commonFlags{}
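This is the heart of the refactor: runBuild now owns the progress printer, creating it once up front, handing it to either runBasicBuild or the experimental runControllerBuild, and only then draining the display, preferring the build error over the printer error. A stripped-down sketch of that control flow (printer and the run functions are stand-ins, not the buildx types):

```go
package main

import "fmt"

type printer struct{ waitErr error }

func (p *printer) Wait() error { return p.waitErr }

func runBasic(p *printer) error      { fmt.Println("basic build"); return nil }
func runController(p *printer) error { fmt.Println("controller build"); return nil }

func runBuild(experimental bool) error {
	p := &printer{} // created once, shared by both paths
	var retErr error
	if experimental {
		retErr = runController(p)
	} else {
		retErr = runBasic(p)
	}
	// Always drain the display; report its error only if the build succeeded.
	if err := p.Wait(); retErr == nil {
		retErr = err
	}
	return retErr
}

func main() { _ = runBuild(false) }
```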
@@ -252,16 +394,14 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 			}
 			options.progress = cFlags.progress
 			cmd.Flags().VisitAll(checkWarnedFlags)
-			if isExperimental() {
-				if invokeFlag != "" {
-					invokeConfig, err := parseInvokeConfig(invokeFlag)
-					if err != nil {
-						return err
-					}
-					options.invoke = &invokeConfig
-					options.noBuild = invokeFlag == "debug-shell"
+
+			if invokeFlag != "" {
+				invoke, err := parseInvokeConfig(invokeFlag)
+				if err != nil {
+					return err
 				}
-				return launchControllerAndRunBuild(dockerCli, options)
+				options.invoke = &invoke
+				options.noBuild = invokeFlag == "debug-shell"
 			}
 			return runBuild(dockerCli, options)
 		},
@@ -494,123 +634,6 @@ func updateLastActivity(dockerCli command.Cli, ng *store.NodeGroup) error {
 	return txn.UpdateLastActivity(ng)
 }

-func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) error {
-	ctx := context.TODO()
-
-	if options.invoke != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
-		// stdin must be usable for monitor
-		return errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
-	}
-
-	c, err := controller.NewController(ctx, options.ControlOptions, dockerCli)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		if err := c.Close(); err != nil {
-			logrus.Warnf("failed to close server connection %v", err)
-		}
-	}()
-
-	// Start build
-	opts, err := options.toControllerOptions()
-	if err != nil {
-		return err
-	}
-	progress, err := options.toProgress()
-	if err != nil {
-		return err
-	}
-
-	// NOTE: buildx server has the current working directory different from the client
-	// so we need to resolve paths to abosolute ones in the client.
-	optsP, err := resolvePaths(&opts)
-	if err != nil {
-		return err
-	}
-	opts = *optsP
-
-	var ref string
-	var retErr error
-	f := ioset.NewSingleForwarder()
-	f.SetReader(os.Stdin)
-	if !options.noBuild {
-		pr, pw := io.Pipe()
-		f.SetWriter(pw, func() io.WriteCloser {
-			pw.Close() // propagate EOF
-			logrus.Debug("propagating stdin close")
-			return nil
-		})
-
-		// Avoid leaving a stale file if we eventually fail
-		if options.imageIDFile != "" {
-			if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) {
-				return errors.Wrap(err, "removing image ID file")
-			}
-		}
-
-		var resp *client.SolveResponse
-		ref, resp, err = c.Build(ctx, opts, pr, os.Stdout, os.Stderr, progress)
-		if err != nil {
-			var be *controllererrors.BuildError
-			if errors.As(err, &be) {
-				ref = be.Ref
-				retErr = err
-				// We can proceed to monitor
-			} else {
-				return errors.Wrapf(err, "failed to build")
-			}
-		}
-		if err := pw.Close(); err != nil {
-			logrus.Debug("failed to close stdin pipe writer")
-		}
-		if err := pr.Close(); err != nil {
-			logrus.Debug("failed to close stdin pipe reader")
-		}
-
-		if options.quiet {
-			fmt.Println(resp.ExporterResponse[exptypes.ExporterImageDigestKey])
-		}
-		if options.imageIDFile != "" {
-			dgst := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
-			if v, ok := resp.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok {
-				dgst = v
-			}
-			return os.WriteFile(options.imageIDFile, []byte(dgst), 0644)
-		}
-
-	}
-
-	// post-build operations
-	if options.invoke != nil && options.invoke.needsMonitor(retErr) {
-		pr2, pw2 := io.Pipe()
-		f.SetWriter(pw2, func() io.WriteCloser {
-			pw2.Close() // propagate EOF
-			return nil
-		})
-		con := console.Current()
-		if err := con.SetRaw(); err != nil {
-			if err := c.Disconnect(ctx, ref); err != nil {
-				logrus.Warnf("disconnect error: %v", err)
-			}
-			return errors.Errorf("failed to configure terminal: %v", err)
-		}
-		err = monitor.RunMonitor(ctx, ref, &opts, options.invoke.InvokeConfig, c, progress, pr2, os.Stdout, os.Stderr)
-		con.Reset()
-		if err := pw2.Close(); err != nil {
-			logrus.Debug("failed to close monitor stdin pipe reader")
-		}
-		if err != nil {
-			logrus.Warnf("failed to run monitor: %v", err)
-		}
-	} else {
-		if err := c.Disconnect(ctx, ref); err != nil {
-			logrus.Warnf("disconnect error: %v", err)
-		}
-	}
-	return nil
-}
-
 type invokeConfig struct {
 	controllerapi.InvokeConfig
 	invokeFlag string
@@ -881,3 +904,43 @@ func resolvePaths(options *controllerapi.BuildOptions) (_ *controllerapi.BuildOp

 	return options, nil
 }
+
+func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
+	if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
+		return
+	}
+	fmt.Fprintf(w, "\n ")
+	sb := &bytes.Buffer{}
+	if len(warnings) == 1 {
+		fmt.Fprintf(sb, "1 warning found")
+	} else {
+		fmt.Fprintf(sb, "%d warnings found", len(warnings))
+	}
+	if logrus.GetLevel() < logrus.DebugLevel {
+		fmt.Fprintf(sb, " (use --debug to expand)")
+	}
+	fmt.Fprintf(sb, ":\n")
+	fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
+
+	for _, warn := range warnings {
+		fmt.Fprintf(w, " - %s\n", warn.Short)
+		if logrus.GetLevel() < logrus.DebugLevel {
+			continue
+		}
+		for _, d := range warn.Detail {
+			fmt.Fprintf(w, "%s\n", d)
+		}
+		if warn.URL != "" {
+			fmt.Fprintf(w, "More info: %s\n", warn.URL)
+		}
+		if warn.SourceInfo != nil && warn.Range != nil {
+			src := errdefs.Source{
+				Info:   warn.SourceInfo,
+				Ranges: warn.Range,
+			}
+			src.Print(w)
+		}
+		fmt.Fprintf(w, "\n")
+	}
+}
@@ -10,6 +10,7 @@ import (
 	"github.com/docker/buildx/controller/control"
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/monitor"
+	"github.com/docker/buildx/util/progress"
 	"github.com/docker/cli/cli/command"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -18,14 +19,19 @@ import (

 func debugShellCmd(dockerCli command.Cli) *cobra.Command {
 	var options control.ControlOptions
-	var progress string
+	var progressMode string

 	cmd := &cobra.Command{
 		Use:   "debug-shell",
 		Short: "Start a monitor",
 		RunE: func(cmd *cobra.Command, args []string) error {
+			printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progressMode)
+			if err != nil {
+				return err
+			}
+
 			ctx := context.TODO()
-			c, err := controller.NewController(ctx, options, dockerCli)
+			c, err := controller.NewController(ctx, options, dockerCli, printer)
 			if err != nil {
 				return err
 			}
@@ -38,9 +44,10 @@ func debugShellCmd(dockerCli command.Cli) *cobra.Command {
 			if err := con.SetRaw(); err != nil {
 				return errors.Errorf("failed to configure terminal: %v", err)
 			}
+
 			err = monitor.RunMonitor(ctx, "", nil, controllerapi.InvokeConfig{
 				Tty: true,
-			}, c, progress, os.Stdin, os.Stdout, os.Stderr)
+			}, c, os.Stdin, os.Stdout, os.Stderr, printer)
 			con.Reset()
 			return err
 		},
@@ -51,7 +58,7 @@ func debugShellCmd(dockerCli command.Cli) *cobra.Command {
 	flags.StringVar(&options.Root, "root", "", "Specify root directory of server to connect [experimental]")
 	flags.BoolVar(&options.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server (supported only on linux) [experimental]")
 	flags.StringVar(&options.ServerConfig, "server-config", "", "Specify buildx server config file (used only when launching new server) [experimental]")
-	flags.StringVar(&progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
+	flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)

 	return cmd
 }
@@ -1,12 +1,10 @@
 package build

 import (
-	"bytes"
 	"context"
 	"encoding/base64"
 	"encoding/csv"
 	"encoding/json"
 	"fmt"
 	"io"
-	"os"
 	"path/filepath"
@@ -30,12 +28,8 @@ import (
 	"github.com/docker/go-units"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/session/auth/authprovider"
-	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/util/grpcerrors"
-	"github.com/moby/buildkit/util/progress/progressui"
-	"github.com/morikuni/aec"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc/codes"
 )

@@ -46,7 +40,7 @@ const defaultTargetName = "default"
 // NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progressMode string, statusChan chan *client.SolveStatus, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
+func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
 	if in.NoCache && len(in.NoCacheFilter) > 0 {
 		return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
 	}
@@ -164,6 +158,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 		contextPathHash = in.ContextPath
 	}

+	// TODO: this should not be loaded this side of the controller api
 	b, err := builder.New(dockerCli,
 		builder.WithName(in.Builder),
 		builder.WithContextPathHash(contextPathHash),
@@ -179,7 +174,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 		return nil, nil, err
 	}

-	resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progressMode, in.MetadataFile, statusChan, generateResult)
+	resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, in.MetadataFile, generateResult)
 	err = wrapBuildError(err, false)
 	if err != nil {
 		// NOTE: buildTargets can return *build.ResultContext even on error.
@@ -193,24 +188,14 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 // NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progressMode string, metadataFile string, statusChan chan *client.SolveStatus, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
-	ctx2, cancel := context.WithCancel(context.TODO())
-	defer cancel()
-
-	printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode, progressui.WithDesc(
-		fmt.Sprintf("building with %q instance using %s driver", ng.Name, ng.Driver),
-		fmt.Sprintf("%s:%s", ng.Driver, ng.Name),
-	))
-	if err != nil {
-		return nil, nil, err
-	}
-
+func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, metadataFile string, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
 	var res *build.ResultContext
 	var resp map[string]*client.SolveResponse
+	var err error
 	if generateResult {
 		var mu sync.Mutex
 		var idx int
-		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress.Tee(printer, statusChan), func(driverIndex int, gotRes *build.ResultContext) {
+		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultContext) {
 			mu.Lock()
 			defer mu.Unlock()
 			if res == nil || driverIndex < idx {
@@ -218,11 +203,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
 			}
 		})
 	} else {
-		resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress.Tee(printer, statusChan))
-	}
-	err1 := printer.Wait()
-	if err == nil {
-		err = err1
+		resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
 	}
 	if err != nil {
 		return nil, res, err
@@ -234,8 +215,6 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
 		}
 	}

-	printWarnings(os.Stderr, printer.Warnings(), progressMode)
-
 	for k := range resp {
 		if opts[k].PrintFunc != nil {
 			if err := printResult(opts[k].PrintFunc, resp[k].ExporterResponse); err != nil {
@@ -247,46 +226,6 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
 	return resp[defaultTargetName], res, err
 }

-func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
-	if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
-		return
-	}
-	fmt.Fprintf(w, "\n ")
-	sb := &bytes.Buffer{}
-	if len(warnings) == 1 {
-		fmt.Fprintf(sb, "1 warning found")
-	} else {
-		fmt.Fprintf(sb, "%d warnings found", len(warnings))
-	}
-	if logrus.GetLevel() < logrus.DebugLevel {
-		fmt.Fprintf(sb, " (use --debug to expand)")
-	}
-	fmt.Fprintf(sb, ":\n")
-	fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
-
-	for _, warn := range warnings {
-		fmt.Fprintf(w, " - %s\n", warn.Short)
-		if logrus.GetLevel() < logrus.DebugLevel {
-			continue
-		}
-		for _, d := range warn.Detail {
-			fmt.Fprintf(w, "%s\n", d)
-		}
-		if warn.URL != "" {
-			fmt.Fprintf(w, "More info: %s\n", warn.URL)
-		}
-		if warn.SourceInfo != nil && warn.Range != nil {
-			src := errdefs.Source{
-				Info:   warn.SourceInfo,
-				Ranges: warn.Range,
-			}
-			src.Print(w)
-		}
-		fmt.Fprintf(w, "\n")
-	}
-}
-
 func parsePrintFunc(str string) (*build.PrintFunc, error) {
 	if str == "" {
 		return nil, nil
@@ -4,13 +4,13 @@ import (
 	"context"
 	"io"

-	"github.com/containerd/console"
 	controllerapi "github.com/docker/buildx/controller/pb"
+	"github.com/docker/buildx/util/progress"
 	"github.com/moby/buildkit/client"
 )

 type BuildxController interface {
-	Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (ref string, resp *client.SolveResponse, err error)
+	Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, err error)
 	// Invoke starts an IO session into the specified process.
 	// If pid doesn't matche to any running processes, it starts a new process with the specified config.
 	// If there is no container running or InvokeConfig.Rollback is speicfied, the process will start in a newly created container.
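The interface change is the pivot of the whole PR: Build drops the (w io.Writer, out console.File, progressMode string) trio for a single progress.Writer, so rendering stays with the caller and controllers only emit status. A self-contained sketch of the shape (SolveStatus and Writer here are stand-ins, not the buildkit/buildx definitions):

```go
package main

import "fmt"

type SolveStatus struct{ Msg string }

// Writer is the one progress dependency a controller now needs.
type Writer interface {
	Write(*SolveStatus)
}

// printWriter is one possible sink; a test could swap in a recording sink.
type printWriter struct{}

func (printWriter) Write(s *SolveStatus) { fmt.Println("status:", s.Msg) }

func build(pw Writer) error {
	pw.Write(&SolveStatus{Msg: "step 1/2"})
	pw.Write(&SolveStatus{Msg: "step 2/2"})
	return nil
}

func main() { _ = build(printWriter{}) }
```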
@@ -2,26 +2,35 @@ package controller

 import (
 	"context"
+	"fmt"

 	"github.com/docker/buildx/controller/control"
 	"github.com/docker/buildx/controller/local"
 	"github.com/docker/buildx/controller/remote"
+	"github.com/docker/buildx/util/progress"
 	"github.com/docker/cli/cli/command"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

-func NewController(ctx context.Context, opts control.ControlOptions, dockerCli command.Cli) (c control.BuildxController, err error) {
-	if !opts.Detach {
-		logrus.Infof("launching local buildx controller")
-		c = local.NewLocalBuildxController(ctx, dockerCli)
-		return c, nil
+func NewController(ctx context.Context, opts control.ControlOptions, dockerCli command.Cli, pw progress.Writer) (control.BuildxController, error) {
+	var name string
+	if opts.Detach {
+		name = "remote"
+	} else {
+		name = "local"
 	}

-	logrus.Infof("connecting to buildx server")
-	c, err = remote.NewRemoteBuildxController(ctx, dockerCli, opts)
+	var c control.BuildxController
+	err := progress.Wrap(fmt.Sprintf("[internal] connecting to %s controller", name), pw.Write, func(l progress.SubLogger) (err error) {
+		if opts.Detach {
+			c, err = remote.NewRemoteBuildxController(ctx, dockerCli, opts, l)
+		} else {
+			c = local.NewLocalBuildxController(ctx, dockerCli, l)
+		}
+		return err
+	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to use buildx server; use --detach=false")
+		return nil, errors.Wrap(err, "failed to start buildx controller")
 	}
 	return c, nil
 }
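NewController now reports its own startup as a step in the same progress UI as the build, via progress.Wrap, instead of logging through logrus. A minimal sketch of a Wrap-style helper (the real progress.Wrap/SubLogger signatures in buildx differ; this only illustrates the run-a-named-step-and-report pattern):

```go
package main

import "fmt"

// wrap runs fn as a named step and reports its start and outcome.
func wrap(name string, report func(string), fn func() error) error {
	report("start: " + name)
	if err := fn(); err != nil {
		report("error: " + name)
		return err
	}
	report("done: " + name)
	return nil
}

func main() {
	_ = wrap("[internal] connecting to local controller", func(s string) {
		fmt.Println(s) // the real code emits a vertex to the progress display
	}, func() error {
		return nil // stand-in for constructing the controller
	})
}
```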
@@ -5,7 +5,6 @@ import (
 	"io"
 	"sync/atomic"

-	"github.com/containerd/console"
 	"github.com/docker/buildx/build"
 	cbuild "github.com/docker/buildx/controller/build"
 	"github.com/docker/buildx/controller/control"
@@ -13,12 +12,13 @@ import (
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/controller/processes"
 	"github.com/docker/buildx/util/ioset"
+	"github.com/docker/buildx/util/progress"
 	"github.com/docker/cli/cli/command"
 	"github.com/moby/buildkit/client"
 	"github.com/pkg/errors"
 )

-func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli) control.BuildxController {
+func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli, logger progress.SubLogger) control.BuildxController {
 	return &localController{
 		dockerCli: dockerCli,
 		ref:       "local",
@@ -42,13 +42,13 @@ type localController struct {
 	buildOnGoing atomic.Bool
 }

-func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (string, *client.SolveResponse, error) {
+func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
 	if !b.buildOnGoing.CompareAndSwap(false, true) {
 		return "", nil, errors.New("build ongoing")
 	}
 	defer b.buildOnGoing.Store(false)

-	resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progressMode, nil, true)
+	resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
 	// NOTE: RunBuild can return *build.ResultContext even on error.
 	if res != nil {
 		b.buildConfig = buildConfig{
controller/pb/progress.go (new file, 122 lines)
@@ -0,0 +1,122 @@
+package pb
+
+import (
+	"github.com/docker/buildx/util/progress"
+	control "github.com/moby/buildkit/api/services/control"
+	"github.com/moby/buildkit/client"
+	"github.com/opencontainers/go-digest"
+)
+
+type writer struct {
+	ch chan<- *StatusResponse
+}
+
+func NewProgressWriter(ch chan<- *StatusResponse) progress.Writer {
+	return &writer{ch: ch}
+}
+
+func (w *writer) Write(status *client.SolveStatus) {
+	w.ch <- ToControlStatus(status)
+}
+
+func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
+	return true
+}
+
+func (w *writer) ClearLogSource(interface{}) {}
+
+func ToControlStatus(s *client.SolveStatus) *StatusResponse {
+	resp := StatusResponse{}
+	for _, v := range s.Vertexes {
+		resp.Vertexes = append(resp.Vertexes, &control.Vertex{
+			Digest:        v.Digest,
+			Inputs:        v.Inputs,
+			Name:          v.Name,
+			Started:       v.Started,
+			Completed:     v.Completed,
+			Error:         v.Error,
+			Cached:        v.Cached,
+			ProgressGroup: v.ProgressGroup,
+		})
+	}
+	for _, v := range s.Statuses {
+		resp.Statuses = append(resp.Statuses, &control.VertexStatus{
+			ID:        v.ID,
+			Vertex:    v.Vertex,
+			Name:      v.Name,
+			Total:     v.Total,
+			Current:   v.Current,
+			Timestamp: v.Timestamp,
+			Started:   v.Started,
+			Completed: v.Completed,
+		})
+	}
+	for _, v := range s.Logs {
+		resp.Logs = append(resp.Logs, &control.VertexLog{
+			Vertex:    v.Vertex,
+			Stream:    int64(v.Stream),
+			Msg:       v.Data,
+			Timestamp: v.Timestamp,
+		})
+	}
+	for _, v := range s.Warnings {
+		resp.Warnings = append(resp.Warnings, &control.VertexWarning{
+			Vertex: v.Vertex,
+			Level:  int64(v.Level),
+			Short:  v.Short,
+			Detail: v.Detail,
+			Url:    v.URL,
+			Info:   v.SourceInfo,
+			Ranges: v.Range,
+		})
+	}
+	return &resp
+}
+
+func FromControlStatus(resp *StatusResponse) *client.SolveStatus {
+	s := client.SolveStatus{}
+	for _, v := range resp.Vertexes {
+		s.Vertexes = append(s.Vertexes, &client.Vertex{
+			Digest:        v.Digest,
+			Inputs:        v.Inputs,
+			Name:          v.Name,
+			Started:       v.Started,
+			Completed:     v.Completed,
+			Error:         v.Error,
+			Cached:        v.Cached,
+			ProgressGroup: v.ProgressGroup,
+		})
+	}
+	for _, v := range resp.Statuses {
+		s.Statuses = append(s.Statuses, &client.VertexStatus{
+			ID:        v.ID,
+			Vertex:    v.Vertex,
+			Name:      v.Name,
+			Total:     v.Total,
+			Current:   v.Current,
+			Timestamp: v.Timestamp,
+			Started:   v.Started,
+			Completed: v.Completed,
+		})
+	}
+	for _, v := range resp.Logs {
+		s.Logs = append(s.Logs, &client.VertexLog{
+			Vertex:    v.Vertex,
+			Stream:    int(v.Stream),
+			Data:      v.Msg,
+			Timestamp: v.Timestamp,
+		})
+	}
+	for _, v := range resp.Warnings {
+		s.Warnings = append(s.Warnings, &client.VertexWarning{
+			Vertex:     v.Vertex,
+			Level:      int(v.Level),
+			Short:      v.Short,
+			Detail:     v.Detail,
+			URL:        v.Url,
+			SourceInfo: v.Info,
+			Range:      v.Ranges,
+		})
+	}
+	return &s
+}
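This new file centralizes the two symmetric conversions between client.SolveStatus and the gRPC StatusResponse, plus a channel-backed progress.Writer, so the remote client and server below can stop hand-rolling the same field-by-field copies. A self-contained sketch of the writer pattern (both types and the converters are stand-ins for the generated pb ones):

```go
package main

import "fmt"

type SolveStatus struct{ Msg string }
type StatusResponse struct{ Msg string }

func toControl(s *SolveStatus) *StatusResponse   { return &StatusResponse{Msg: s.Msg} }
func fromControl(r *StatusResponse) *SolveStatus { return &SolveStatus{Msg: r.Msg} }

// writer converts each status to wire form and forwards it on a channel,
// mirroring what pb.NewProgressWriter does.
type writer struct{ ch chan<- *StatusResponse }

func (w writer) Write(s *SolveStatus) { w.ch <- toControl(s) }

func main() {
	ch := make(chan *StatusResponse, 1)
	writer{ch: ch}.Write(&SolveStatus{Msg: "hello"})
	close(ch)
	for r := range ch {
		fmt.Println(fromControl(r).Msg) // round-trips through wire form
	}
}
```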
@@ -6,7 +6,6 @@ import (
 	"sync"
 	"time"

-	"github.com/containerd/console"
 	"github.com/containerd/containerd/defaults"
 	"github.com/containerd/containerd/pkg/dialer"
 	"github.com/docker/buildx/controller/pb"
@@ -114,14 +113,9 @@ func (c *Client) Inspect(ctx context.Context, ref string) (*pb.InspectResponse,
 	return c.client().Inspect(ctx, &pb.InspectRequest{Ref: ref})
 }

-func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (string, *client.SolveResponse, error) {
+func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
 	ref := identity.NewID()
-	pw, err := progress.NewPrinter(context.TODO(), w, out, progressMode)
-	if err != nil {
-		return "", nil, err
-	}
 	statusChan := make(chan *client.SolveStatus)
 	statusDone := make(chan struct{})
 	eg, egCtx := errgroup.WithContext(ctx)
 	var resp *client.SolveResponse
 	eg.Go(func() error {
@@ -131,17 +125,12 @@ func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadC
 		return err
 	})
 	eg.Go(func() error {
 		defer close(statusDone)
 		for s := range statusChan {
 			st := s
-			pw.Write(st)
+			progress.Write(st)
 		}
 		return nil
 	})
-	eg.Go(func() error {
-		<-statusDone
-		return pw.Wait()
-	})
 	return ref, resp, eg.Wait()
 }

@@ -180,51 +169,7 @@ func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions,
 			}
 			return errors.Wrap(err, "failed to receive status")
 		}
-		s := client.SolveStatus{}
-		for _, v := range resp.Vertexes {
-			s.Vertexes = append(s.Vertexes, &client.Vertex{
-				Digest:        v.Digest,
-				Inputs:        v.Inputs,
-				Name:          v.Name,
-				Started:       v.Started,
-				Completed:     v.Completed,
-				Error:         v.Error,
-				Cached:        v.Cached,
-				ProgressGroup: v.ProgressGroup,
-			})
-		}
-		for _, v := range resp.Statuses {
-			s.Statuses = append(s.Statuses, &client.VertexStatus{
-				ID:        v.ID,
-				Vertex:    v.Vertex,
-				Name:      v.Name,
-				Total:     v.Total,
-				Current:   v.Current,
-				Timestamp: v.Timestamp,
-				Started:   v.Started,
-				Completed: v.Completed,
-			})
-		}
-		for _, v := range resp.Logs {
-			s.Logs = append(s.Logs, &client.VertexLog{
-				Vertex:    v.Vertex,
-				Stream:    int(v.Stream),
-				Data:      v.Msg,
-				Timestamp: v.Timestamp,
-			})
-		}
-		for _, v := range resp.Warnings {
-			s.Warnings = append(s.Warnings, &client.VertexWarning{
-				Vertex:     v.Vertex,
-				Level:      int(v.Level),
-				Short:      v.Short,
-				Detail:     v.Detail,
-				URL:        v.Url,
-				SourceInfo: v.Info,
-				Range:      v.Ranges,
-			})
-		}
-		statusChan <- &s
+		statusChan <- pb.FromControlStatus(resp)
 	}
 })
 if in != nil {
@@ -21,6 +21,7 @@ import (
 	"github.com/docker/buildx/controller/control"
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/util/confutil"
+	"github.com/docker/buildx/util/progress"
 	"github.com/docker/buildx/version"
 	"github.com/docker/cli/cli/command"
 	"github.com/moby/buildkit/client"
@@ -53,7 +54,7 @@ type serverConfig struct {
 	LogFile string `toml:"log_file"`
 }

-func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts control.ControlOptions) (control.BuildxController, error) {
+func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts control.ControlOptions, logger progress.SubLogger) (control.BuildxController, error) {
 	rootDir := opts.Root
 	if rootDir == "" {
 		rootDir = rootDataDir(dockerCli)
@@ -73,27 +74,32 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
 	}

 	// start buildx server via subcommand
-	logrus.Info("no buildx server found; launching...")
-	launchFlags := []string{}
-	if opts.ServerConfig != "" {
-		launchFlags = append(launchFlags, "--config", opts.ServerConfig)
-	}
-	logFile, err := getLogFilePath(dockerCli, opts.ServerConfig)
-	if err != nil {
-		return nil, err
-	}
-	wait, err := launch(ctx, logFile, append([]string{serveCommandName}, launchFlags...)...)
-	if err != nil {
-		return nil, err
-	}
-	go wait()
+	err = logger.Wrap("no buildx server found; launching...", func() error {
+		launchFlags := []string{}
+		if opts.ServerConfig != "" {
+			launchFlags = append(launchFlags, "--config", opts.ServerConfig)
+		}
+		logFile, err := getLogFilePath(dockerCli, opts.ServerConfig)
+		if err != nil {
+			return err
+		}
+		wait, err := launch(ctx, logFile, append([]string{serveCommandName}, launchFlags...)...)
+		if err != nil {
+			return err
+		}
+		go wait()

-	// wait for buildx server to be ready
-	ctx2, cancel = context.WithTimeout(ctx, 10*time.Second)
-	c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
-	cancel()
+		// wait for buildx server to be ready
+		ctx2, cancel = context.WithTimeout(ctx, 10*time.Second)
+		c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
+		cancel()
+		if err != nil {
+			return errors.Wrap(err, "cannot connect to the buildx server")
+		}
+		return nil
+	})
 	if err != nil {
-		return nil, errors.Wrap(err, "cannot connect to the buildx server")
+		return nil, err
 	}
 	return &buildxController{c, serverRoot}, nil
 }
@@ -142,8 +148,8 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
 		}()

 		// prepare server
-		b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, statusChan chan *client.SolveStatus) (*client.SolveResponse, *build.ResultContext, error) {
-			return cbuild.RunBuild(ctx, dockerCli, *options, stdin, "quiet", statusChan, true)
+		b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultContext, error) {
+			return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
 		})
 		defer b.Close()
@@ -6,12 +6,13 @@ import (
 	"context"

 	"github.com/docker/buildx/controller/control"
+	"github.com/docker/buildx/util/progress"
 	"github.com/docker/cli/cli/command"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )

-func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts control.ControlOptions) (control.BuildxController, error) {
+func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts control.ControlOptions, logger progress.SubLogger) (control.BuildxController, error) {
 	return nil, errors.New("remote buildx unsupported")
 }
@@ -12,14 +12,14 @@ import (
 	"github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/controller/processes"
 	"github.com/docker/buildx/util/ioset"
+	"github.com/docker/buildx/util/progress"
 	"github.com/docker/buildx/version"
-	controlapi "github.com/moby/buildkit/api/services/control"
 	"github.com/moby/buildkit/client"
 	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
 )

-type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, statusChan chan *client.SolveStatus) (resp *client.SolveResponse, res *build.ResultContext, err error)
+type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultContext, err error)

 func NewServer(buildFunc BuildFunc) *Server {
 	return &Server{
@@ -35,7 +35,7 @@ type Server struct {

 type session struct {
 	buildOnGoing atomic.Bool
-	statusChan   chan *client.SolveStatus
+	statusChan   chan *pb.StatusResponse
 	cancelBuild  func()
 	buildOptions *pb.BuildOptions
 	inputPipe    *io.PipeWriter
@@ -177,8 +177,9 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
 		s = &session{}
 		s.buildOnGoing.Store(true)
 	}

 	s.processes = processes.NewManager()
-	statusChan := make(chan *client.SolveStatus)
+	statusChan := make(chan *pb.StatusResponse)
 	s.statusChan = statusChan
 	inR, inW := io.Pipe()
 	defer inR.Close()
@@ -196,10 +197,12 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
 		m.sessionMu.Unlock()
 	}()

+	pw := pb.NewProgressWriter(statusChan)
+
 	// Build the specified request
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, statusChan)
+	resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
 	m.sessionMu.Lock()
 	if s, ok := m.session[ref]; ok {
 		// NOTE: buildFunc can return *build.ResultContext even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
@@ -237,7 +240,7 @@ func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer
 	}

 	// Wait and get status channel prepared by Build()
-	var statusChan <-chan *client.SolveStatus
+	var statusChan <-chan *pb.StatusResponse
 	for {
 		// TODO: timeout?
 		m.sessionMu.Lock()
@@ -256,8 +259,7 @@ func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer
 		if ss == nil {
 			break
 		}
-		cs := toControlStatus(ss)
-		if err := stream.Send(cs); err != nil {
+		if err := stream.Send(ss); err != nil {
 			return err
 		}
 	}
@@ -437,51 +439,3 @@ func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {

 	return eg.Wait()
 }
-
-func toControlStatus(s *client.SolveStatus) *pb.StatusResponse {
-	resp := pb.StatusResponse{}
-	for _, v := range s.Vertexes {
-		resp.Vertexes = append(resp.Vertexes, &controlapi.Vertex{
-			Digest:        v.Digest,
-			Inputs:        v.Inputs,
-			Name:          v.Name,
-			Started:       v.Started,
-			Completed:     v.Completed,
-			Error:         v.Error,
-			Cached:        v.Cached,
-			ProgressGroup: v.ProgressGroup,
-		})
-	}
-	for _, v := range s.Statuses {
-		resp.Statuses = append(resp.Statuses, &controlapi.VertexStatus{
-			ID:        v.ID,
-			Vertex:    v.Vertex,
-			Name:      v.Name,
-			Total:     v.Total,
-			Current:   v.Current,
-			Timestamp: v.Timestamp,
-			Started:   v.Started,
-			Completed: v.Completed,
-		})
-	}
-	for _, v := range s.Logs {
-		resp.Logs = append(resp.Logs, &controlapi.VertexLog{
-			Vertex:    v.Vertex,
-			Stream:    int64(v.Stream),
-			Msg:       v.Data,
-			Timestamp: v.Timestamp,
-		})
-	}
-	for _, v := range s.Warnings {
-		resp.Warnings = append(resp.Warnings, &controlapi.VertexWarning{
-			Vertex: v.Vertex,
-			Level:  int64(v.Level),
-			Short:  v.Short,
-			Detail: v.Detail,
-			Url:    v.URL,
-			Info:   v.SourceInfo,
-			Ranges: v.Range,
-		})
-	}
-	return &resp
-}
@@ -15,6 +15,7 @@ import (
 	controllererrors "github.com/docker/buildx/controller/errdefs"
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/docker/buildx/util/ioset"
+	"github.com/docker/buildx/util/progress"
 	"github.com/moby/buildkit/identity"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -36,12 +37,18 @@ Available commands are:
 `

 // RunMonitor provides an interactive session for running and managing containers via specified IO.
-func RunMonitor(ctx context.Context, curRef string, options *controllerapi.BuildOptions, invokeConfig controllerapi.InvokeConfig, c control.BuildxController, progressMode string, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File) error {
+func RunMonitor(ctx context.Context, curRef string, options *controllerapi.BuildOptions, invokeConfig controllerapi.InvokeConfig, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) error {
 	defer func() {
 		if err := c.Disconnect(ctx, curRef); err != nil {
 			logrus.Warnf("disconnect error: %v", err)
 		}
 	}()

+	if err := progress.Pause(); err != nil {
+		return err
+	}
+	defer progress.Unpause()
+
 	monitorIn, monitorOut := ioset.Pipe()
 	defer func() {
 		monitorIn.Close()
@@ -145,7 +152,9 @@ func RunMonitor(ctx context.Context, curRef string, options *controllerapi.Build
 				}
 			}
 			var resultUpdated bool
-			ref, _, err := c.Build(ctx, *bo, nil, stdout, stderr, progressMode) // TODO: support stdin, hold build ref
+			progress.Unpause()
+			ref, _, err := c.Build(ctx, *bo, nil, progress) // TODO: support stdin, hold build ref
+			progress.Pause()
 			if err != nil {
 				var be *controllererrors.BuildError
 				if errors.As(err, &be) {
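RunMonitor now receives the shared *progress.Printer and brackets interactive phases with Pause/Unpause, so the status display and the monitor's raw-mode terminal never fight over stderr; for a rebuild inside the monitor it flips the printer back on just around the Build call. The usage pattern, sketched with a stand-in printer:

```go
package main

import "fmt"

type printer struct{ paused bool }

func (p *printer) Pause() error { p.paused = true; fmt.Println("display paused"); return nil }
func (p *printer) Unpause()     { p.paused = false; fmt.Println("display resumed") }

func runMonitor(p *printer) error {
	if err := p.Pause(); err != nil { // give the terminal to the monitor
		return err
	}
	defer p.Unpause() // hand it back when the session ends

	// ... interactive shell runs here; for a rebuild the monitor would do
	// p.Unpause(); build(); p.Pause() so build progress is visible.
	return nil
}

func main() { _ = runMonitor(&printer{}) }
```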
@@ -23,8 +23,12 @@ const (
 )

 type Printer struct {
-	status chan *client.SolveStatus
-	done   <-chan struct{}
+	status chan *client.SolveStatus
+
+	ready  chan struct{}
+	done   chan struct{}
+	paused chan struct{}

 	err          error
 	warnings     []client.VertexWarning
 	logMu        sync.Mutex
@@ -37,6 +41,16 @@ func (p *Printer) Wait() error {
 	return p.err
 }

+func (p *Printer) Pause() error {
+	p.paused = make(chan struct{})
+	return p.Wait()
+}
+
+func (p *Printer) Unpause() {
+	close(p.paused)
+	<-p.ready
+}
+
 func (p *Printer) Write(s *client.SolveStatus) {
 	p.status <- s
 }
@@ -70,14 +84,10 @@ func (p *Printer) ClearLogSource(v interface{}) {
 	}
 }

-func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string, solveStatusOpt ...progressui.DisplaySolveStatusOpt) (*Printer, error) {
-	statusCh := make(chan *client.SolveStatus)
-	doneCh := make(chan struct{})
-
-	pw := &Printer{
-		status:       statusCh,
-		done:         doneCh,
-		logSourceMap: map[digest.Digest]interface{}{},
+func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string, opts ...PrinterOpt) (*Printer, error) {
+	opt := &printerOpts{}
+	for _, o := range opts {
+		o(opt)
 	}

 	if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && mode == PrinterModeAuto {
@@ -98,12 +108,64 @@ func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string,
 		}
 	}

+	pw := &Printer{
+		ready: make(chan struct{}),
+	}
 	go func() {
-		resumeLogs := logutil.Pause(logrus.StandardLogger())
-		// not using shared context to not disrupt display but let is finish reporting errors
-		pw.warnings, pw.err = progressui.DisplaySolveStatus(ctx, c, w, statusCh, solveStatusOpt...)
-		resumeLogs()
-		close(doneCh)
+		for {
+			pw.status = make(chan *client.SolveStatus)
+			pw.done = make(chan struct{})
+
+			pw.logMu.Lock()
+			pw.logSourceMap = map[digest.Digest]interface{}{}
+			pw.logMu.Unlock()
+
+			close(pw.ready)
+
+			resumeLogs := logutil.Pause(logrus.StandardLogger())
+			// not using shared context to not disrupt display but let is finish reporting errors
+			pw.warnings, pw.err = progressui.DisplaySolveStatus(ctx, c, w, pw.status, opt.displayOpts...)
+			resumeLogs()
+			close(pw.done)
+
+			if opt.onclose != nil {
+				opt.onclose()
+			}
+			if pw.paused == nil {
+				break
+			}
+
+			pw.ready = make(chan struct{})
+			<-pw.paused
+			pw.paused = nil
+		}
 	}()
+	<-pw.ready
 	return pw, nil
 }

+type printerOpts struct {
+	displayOpts []progressui.DisplaySolveStatusOpt
+
+	onclose func()
+}
+
+type PrinterOpt func(b *printerOpts)
+
+func WithPhase(phase string) PrinterOpt {
+	return func(opt *printerOpts) {
+		opt.displayOpts = append(opt.displayOpts, progressui.WithPhase(phase))
+	}
+}
+
+func WithDesc(text string, console string) PrinterOpt {
+	return func(opt *printerOpts) {
+		opt.displayOpts = append(opt.displayOpts, progressui.WithDesc(text, console))
+	}
+}
+
+func WithOnClose(onclose func()) PrinterOpt {
+	return func(opt *printerOpts) {
+		opt.onclose = onclose
+	}
+}
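The printer itself becomes restartable: the display goroutine loops, allocating fresh status/done channels each round, signalling readiness before accepting writes, and parking between rounds until Unpause. A compact, self-contained model of that channel protocol (the display is replaced by a channel drain; this is an illustration of the handshake, not the buildx implementation):

```go
package main

import "fmt"

// printer models the restartable display loop: each round gets fresh
// status/done channels; ready signals a round is accepting writes;
// paused parks the goroutine between rounds.
type printer struct {
	status chan string
	ready  chan struct{}
	done   chan struct{}
	paused chan struct{}
}

func newPrinter() *printer {
	p := &printer{ready: make(chan struct{})}
	go func() {
		for {
			p.status = make(chan string) // fresh channels per round
			p.done = make(chan struct{})
			p.ready <- struct{}{} // callers may Write now

			for s := range p.status { // stand-in for DisplaySolveStatus
				fmt.Println("display:", s)
			}
			close(p.done)

			if p.paused == nil { // plain Wait: shut down for good
				return
			}
			<-p.paused // parked until Unpause
			p.paused = nil
		}
	}()
	<-p.ready
	return p
}

func (p *printer) Write(s string) { p.status <- s }
func (p *printer) Wait()          { close(p.status); <-p.done }
func (p *printer) Pause()         { p.paused = make(chan struct{}); p.Wait() }
func (p *printer) Unpause()       { close(p.paused); <-p.ready }

func main() {
	p := newPrinter()
	p.Write("building step 1")
	p.Pause() // e.g. monitor takes over the terminal
	p.Unpause()
	p.Write("building step 2")
	p.Wait()
}
```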