controller: refactor progress api

Refactor the progress printer creation to the caller side of the
controller API. Then, instead of passing around status channels (and
progressMode strings), we can simply pass around the higher-level
interface progress.Writer (sketched below).

This has a couple of benefits:
- A simplified interface to the controller
- Allows us to extract warnings out of the controller, so that they
  can be displayed correctly from the client side.
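
For context, a minimal sketch of that higher-level interface, with the
method set inferred from the writer added in controller/pb below; the
real definition lives in util/progress and may differ slightly:

    // Sketch only: the method set is inferred from the controller/pb
    // writer in this commit, not copied from the util/progress source.
    package progress

    import (
        "github.com/moby/buildkit/client"
        "github.com/opencontainers/go-digest"
    )

    type Writer interface {
        // Write forwards a single solve status update for display.
        Write(status *client.SolveStatus)
        // ValidateLogSource reports whether logs from the given source
        // should be shown for the given vertex digest.
        ValidateLogSource(digest.Digest, interface{}) bool
        // ClearLogSource releases a previously validated log source.
        ClearLogSource(interface{})
    }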

Some extra work is required to make sure that we can pass a
progress.Printer into the debug monitor. If we want to keep it
persistent, we need a way to temporarily suspend its output;
otherwise, it will continue printing while the monitor is prompting
the user for input and forwarding output from debug containers.

To handle this, we add two methods to the printer: `Pause` and
`Unpause`. `Pause` acts similarly to `Wait`, closing the printer and
cleanly shutting down the display; however, the printer does not
terminate, and can later be resumed by a call to `Unpause`. This gives
the caller a neater interface than having to reconstruct a printer
every time we want to produce progress output (see the sketch below).
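
As a rough caller-side sketch (the withMonitor helper is hypothetical;
only `Pause` and `Unpause` come from this commit, and `Pause` is
assumed to return an error, mirroring `Wait`):

    // Hypothetical helper: suspend a persistent printer while the debug
    // monitor owns the terminal, then resume it afterwards.
    package monitor

    import "github.com/docker/buildx/util/progress"

    func withMonitor(printer *progress.Printer, monitor func()) error {
        // Cleanly shut down the display without terminating the printer,
        // so progress rendering doesn't interleave with the monitor's
        // prompt or with forwarded debug-container output.
        if err := printer.Pause(); err != nil {
            return err
        }
        monitor() // prompt for input, forward container output, etc.

        // Resume rendering on the same printer instead of constructing
        // a fresh one for every run of progress output.
        printer.Unpause()
        return nil
    }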

Signed-off-by: Justin Chadwell <me@jedevc.com>
Justin Chadwell
2023-04-21 11:17:43 +01:00
parent 0c1fd31226
commit e826141af4
11 changed files with 190 additions and 122 deletions

@@ -1,12 +1,10 @@
package build
import (
"bytes"
"context"
"encoding/base64"
"encoding/csv"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
@@ -30,12 +28,8 @@ import (
"github.com/docker/go-units"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
)
@@ -46,7 +40,7 @@ const defaultTargetName = "default"
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
// inspect the result and debug the cause of that error.
func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progressMode string, statusChan chan *client.SolveStatus, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
if in.NoCache && len(in.NoCacheFilter) > 0 {
return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
}
@@ -164,6 +158,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
contextPathHash = in.ContextPath
}
// TODO: this should not be loaded this side of the controller api
b, err := builder.New(dockerCli,
builder.WithName(in.Builder),
builder.WithContextPathHash(contextPathHash),
@@ -179,7 +174,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
return nil, nil, err
}
resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progressMode, in.MetadataFile, statusChan, generateResult)
resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, in.MetadataFile, generateResult)
err = wrapBuildError(err, false)
if err != nil {
// NOTE: buildTargets can return *build.ResultContext even on error.
@@ -193,24 +188,14 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
// inspect the result and debug the cause of that error.
func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progressMode string, metadataFile string, statusChan chan *client.SolveStatus, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode, progressui.WithDesc(
fmt.Sprintf("building with %q instance using %s driver", ng.Name, ng.Driver),
fmt.Sprintf("%s:%s", ng.Driver, ng.Name),
))
if err != nil {
return nil, nil, err
}
func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, metadataFile string, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
var res *build.ResultContext
var resp map[string]*client.SolveResponse
var err error
if generateResult {
var mu sync.Mutex
var idx int
resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress.Tee(printer, statusChan), func(driverIndex int, gotRes *build.ResultContext) {
resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultContext) {
mu.Lock()
defer mu.Unlock()
if res == nil || driverIndex < idx {
@@ -218,11 +203,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
}
})
} else {
resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress.Tee(printer, statusChan))
}
err1 := printer.Wait()
if err == nil {
err = err1
resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
}
if err != nil {
return nil, res, err
@@ -234,8 +215,6 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
}
}
printWarnings(os.Stderr, printer.Warnings(), progressMode)
for k := range resp {
if opts[k].PrintFunc != nil {
if err := printResult(opts[k].PrintFunc, resp[k].ExporterResponse); err != nil {
@@ -247,46 +226,6 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
return resp[defaultTargetName], res, err
}
func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
return
}
fmt.Fprintf(w, "\n ")
sb := &bytes.Buffer{}
if len(warnings) == 1 {
fmt.Fprintf(sb, "1 warning found")
} else {
fmt.Fprintf(sb, "%d warnings found", len(warnings))
}
if logrus.GetLevel() < logrus.DebugLevel {
fmt.Fprintf(sb, " (use --debug to expand)")
}
fmt.Fprintf(sb, ":\n")
fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
for _, warn := range warnings {
fmt.Fprintf(w, " - %s\n", warn.Short)
if logrus.GetLevel() < logrus.DebugLevel {
continue
}
for _, d := range warn.Detail {
fmt.Fprintf(w, "%s\n", d)
}
if warn.URL != "" {
fmt.Fprintf(w, "More info: %s\n", warn.URL)
}
if warn.SourceInfo != nil && warn.Range != nil {
src := errdefs.Source{
Info: warn.SourceInfo,
Ranges: warn.Range,
}
src.Print(w)
}
fmt.Fprintf(w, "\n")
}
}
func parsePrintFunc(str string) (*build.PrintFunc, error) {
if str == "" {
return nil, nil

@@ -4,13 +4,13 @@ import (
"context"
"io"
"github.com/containerd/console"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
)
type BuildxController interface {
Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (ref string, resp *client.SolveResponse, err error)
Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, err error)
// Invoke starts an IO session into the specified process.
// If pid doesn't match any running processes, it starts a new process with the specified config.
// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.

@@ -5,7 +5,6 @@ import (
"io"
"sync/atomic"
"github.com/containerd/console"
"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
"github.com/docker/buildx/controller/control"
@@ -13,6 +12,7 @@ import (
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
@@ -42,13 +42,13 @@ type localController struct {
buildOnGoing atomic.Bool
}
func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (string, *client.SolveResponse, error) {
func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
if !b.buildOnGoing.CompareAndSwap(false, true) {
return "", nil, errors.New("build ongoing")
}
defer b.buildOnGoing.Store(false)
resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progressMode, nil, true)
resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
// NOTE: RunBuild can return *build.ResultContext even on error.
if res != nil {
b.buildConfig = buildConfig{

@@ -1,10 +1,30 @@
package pb
import (
"github.com/docker/buildx/util/progress"
control "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/opencontainers/go-digest"
)
type writer struct {
ch chan<- *StatusResponse
}
func NewProgressWriter(ch chan<- *StatusResponse) progress.Writer {
return &writer{ch: ch}
}
func (w *writer) Write(status *client.SolveStatus) {
w.ch <- ToControlStatus(status)
}
func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
return true
}
func (w *writer) ClearLogSource(interface{}) {}
func ToControlStatus(s *client.SolveStatus) *StatusResponse {
resp := StatusResponse{}
for _, v := range s.Vertexes {

@@ -6,7 +6,6 @@ import (
"sync"
"time"
"github.com/containerd/console"
"github.com/containerd/containerd/defaults"
"github.com/containerd/containerd/pkg/dialer"
"github.com/docker/buildx/controller/pb"
@@ -114,14 +113,9 @@ func (c *Client) Inspect(ctx context.Context, ref string) (*pb.InspectResponse,
return c.client().Inspect(ctx, &pb.InspectRequest{Ref: ref})
}
func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (string, *client.SolveResponse, error) {
func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
ref := identity.NewID()
pw, err := progress.NewPrinter(context.TODO(), w, out, progressMode)
if err != nil {
return "", nil, err
}
statusChan := make(chan *client.SolveStatus)
statusDone := make(chan struct{})
eg, egCtx := errgroup.WithContext(ctx)
var resp *client.SolveResponse
eg.Go(func() error {
@@ -131,17 +125,12 @@ func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadC
return err
})
eg.Go(func() error {
defer close(statusDone)
for s := range statusChan {
st := s
pw.Write(st)
progress.Write(st)
}
return nil
})
eg.Go(func() error {
<-statusDone
return pw.Wait()
})
return ref, resp, eg.Wait()
}

@@ -21,6 +21,7 @@ import (
"github.com/docker/buildx/controller/control"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
@@ -142,8 +143,8 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
}()
// prepare server
b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, statusChan chan *client.SolveStatus) (*client.SolveResponse, *build.ResultContext, error) {
return cbuild.RunBuild(ctx, dockerCli, *options, stdin, "quiet", statusChan, true)
b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultContext, error) {
return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
})
defer b.Close()

@@ -12,13 +12,14 @@ import (
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, statusChan chan *client.SolveStatus) (resp *client.SolveResponse, res *build.ResultContext, err error)
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultContext, err error)
func NewServer(buildFunc BuildFunc) *Server {
return &Server{
@@ -34,7 +35,7 @@ type Server struct {
type session struct {
buildOnGoing atomic.Bool
statusChan chan *client.SolveStatus
statusChan chan *pb.StatusResponse
cancelBuild func()
buildOptions *pb.BuildOptions
inputPipe *io.PipeWriter
@@ -176,8 +177,9 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
s = &session{}
s.buildOnGoing.Store(true)
}
s.processes = processes.NewManager()
statusChan := make(chan *client.SolveStatus)
statusChan := make(chan *pb.StatusResponse)
s.statusChan = statusChan
inR, inW := io.Pipe()
defer inR.Close()
@@ -195,10 +197,12 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
m.sessionMu.Unlock()
}()
pw := pb.NewProgressWriter(statusChan)
// Build the specified request
ctx, cancel := context.WithCancel(ctx)
defer cancel()
resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, statusChan)
resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
m.sessionMu.Lock()
if s, ok := m.session[ref]; ok {
// NOTE: buildFunc can return *build.ResultContext even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
@@ -236,7 +240,7 @@ func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer
}
// Wait and get status channel prepared by Build()
var statusChan <-chan *client.SolveStatus
var statusChan <-chan *pb.StatusResponse
for {
// TODO: timeout?
m.sessionMu.Lock()
@@ -255,7 +259,7 @@ func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer
if ss == nil {
break
}
if err := stream.Send(pb.ToControlStatus(ss)); err != nil {
if err := stream.Send(ss); err != nil {
return err
}
}