Enable running build and invoke in the background

Signed-off-by: Kohei Tokunaga <ktokunaga.mail@gmail.com>
Author: Kohei Tokunaga
Date:   2022-08-29 14:14:14 +09:00
Parent: b1b4e64c97
Commit: a27b8395b1

80 changed files with 24862 additions and 678 deletions
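The commit routes "docker buildx build" through a buildx controller that can run either in-process or as a detached gRPC server (the new experimental --detach, --root, and --server-config flags), so a build can keep running in the background and later be re-attached for --invoke. A sketch of the controller abstraction follows, with the method set inferred from the call sites in launchControllerAndRunBuild below; the real interface lives in the monitor package and may differ in detail:

// Sketch only: method set inferred from how `c` is used in this diff.
type BuildxController interface {
	// Build starts a build from the given options, streaming stdin from in
	// and progress to the writers, and returns a session reference.
	Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser,
		stdout io.Writer, stderr console.File, progressMode string) (ref string, err error)
	// Invoke attaches stdio to a container created from the build result.
	Invoke(ctx context.Context, ref string, cfg pb.ContainerConfig,
		in io.ReadCloser, stdout, stderr io.WriteCloser) error
	Disconnect(ctx context.Context, ref string) error
	Kill(ctx context.Context) error // shut down a detached server
	Close() error
}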

View File

@@ -10,6 +10,7 @@ import (
"github.com/docker/buildx/bake"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
controllerapi "github.com/docker/buildx/commands/controller/pb"
"github.com/docker/buildx/util/buildflags"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/dockerutil"
@@ -25,10 +26,10 @@ type bakeOptions struct {
files []string
overrides []string
printOnly bool
commonOptions
controllerapi.CommonOptions
}
func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error) {
func runBake(dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
ctx := appcontext.Context()
ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
@@ -60,31 +61,31 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
}
overrides := in.overrides
if in.exportPush {
if in.exportLoad {
if in.ExportPush {
if in.ExportLoad {
return errors.Errorf("push and load may not be set together at the moment")
}
overrides = append(overrides, "*.push=true")
} else if in.exportLoad {
} else if in.ExportLoad {
overrides = append(overrides, "*.output=type=docker")
}
if in.noCache != nil {
overrides = append(overrides, fmt.Sprintf("*.no-cache=%t", *in.noCache))
if cFlags.noCache != nil {
overrides = append(overrides, fmt.Sprintf("*.no-cache=%t", *cFlags.noCache))
}
if in.pull != nil {
overrides = append(overrides, fmt.Sprintf("*.pull=%t", *in.pull))
if cFlags.pull != nil {
overrides = append(overrides, fmt.Sprintf("*.pull=%t", *cFlags.pull))
}
if in.sbom != "" {
overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("sbom", in.sbom)))
if in.SBOM != "" {
overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("sbom", in.SBOM)))
}
if in.provenance != "" {
overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("provenance", in.provenance)))
if in.Provenance != "" {
overrides = append(overrides, fmt.Sprintf("*.attest=%s", buildflags.CanonicalizeAttest("provenance", in.Provenance)))
}
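// Note: the CLI shorthands above (--push, --load, --sbom, --provenance, and
// the shared no-cache/pull flags) are all folded into bake override
// expressions; a "*.key=value" override applies the value to every target.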
contextPathHash, _ := os.Getwd()
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, cFlags.progress)
if err != nil {
return err
}
@@ -105,7 +106,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
// instance only needed for reading remote bake files or building
if url != "" || !in.printOnly {
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithName(in.Builder),
builder.WithContextPathHash(contextPathHash),
)
if err != nil {
@@ -170,12 +171,12 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
return wrapBuildError(err, true)
}
if len(in.metadataFile) > 0 {
if len(in.MetadataFile) > 0 {
dt := make(map[string]interface{})
for t, r := range resp {
dt[t] = decodeExporterResponse(r.ExporterResponse)
}
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
if err := writeMetadataFile(in.MetadataFile, dt); err != nil {
return err
}
}
@@ -185,6 +186,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
var options bakeOptions
var cFlags commonFlags
cmd := &cobra.Command{
Use: "bake [OPTIONS] [TARGET...]",
@@ -193,27 +195,29 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
// reset to nil to avoid overriding when the flag is unset
if !cmd.Flags().Lookup("no-cache").Changed {
options.noCache = nil
cFlags.noCache = nil
}
if !cmd.Flags().Lookup("pull").Changed {
options.pull = nil
cFlags.pull = nil
}
options.commonOptions.builder = rootOpts.builder
return runBake(dockerCli, args, options)
options.Builder = rootOpts.builder
options.MetadataFile = cFlags.metadataFile
// Other common flags (noCache, pull, and progress) are processed in runBake.
return runBake(dockerCli, args, options, cFlags)
},
}
flags := cmd.Flags()
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
flags.BoolVar(&options.ExportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
flags.BoolVar(&options.ExportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
flags.StringVar(&options.SBOM, "sbom", "", `Shorthand for "--set=*.attest=type=sbom"`)
flags.StringVar(&options.Provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`)
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
commonBuildFlags(&options.commonOptions, flags)
commonBuildFlags(&cFlags, flags)
return cmd
}

View File

@@ -10,6 +10,7 @@ import (
"io"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
@@ -17,12 +18,14 @@ import (
"github.com/containerd/console"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
controllerapi "github.com/docker/buildx/commands/controller/pb"
"github.com/docker/buildx/monitor"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/buildflags"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/dockerutil"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/tracing"
@@ -50,50 +53,15 @@ import (
const defaultTargetName = "default"
type buildOptions struct {
contextPath string
dockerfileName string
printFunc string
allow []string
attests []string
buildArgs []string
cacheFrom []string
cacheTo []string
cgroupParent string
contexts []string
extraHosts []string
imageIDFile string
invoke string
labels []string
networkMode string
noCacheFilter []string
outputs []string
platforms []string
quiet bool
secrets []string
shmSize dockeropts.MemBytes
ssh []string
tags []string
target string
ulimits *dockeropts.UlimitOpt
commonOptions
}
type commonOptions struct {
builder string
metadataFile string
noCache *bool
progress string
pull *bool
exportPush bool
exportLoad bool
sbom string
provenance string
invoke string
serverConfig string
root string
detach bool
controllerapi.BuildOptions
}
func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
func runBuild(dockerCli command.Cli, in buildOptions) error {
ctx := appcontext.Context()
ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
@@ -104,89 +72,85 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
end(err)
}()
noCache := false
if in.noCache != nil {
noCache = *in.noCache
}
pull := false
if in.pull != nil {
pull = *in.pull
_, err = runBuildWithContext(ctx, dockerCli, in.BuildOptions, os.Stdin, in.progress, nil)
return err
}
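// runBuildWithContext is the flag-independent core of the build: it takes the
// protobuf-typed BuildOptions shared with the controller server, an explicit
// stdin stream, and an optional status channel, so the same code path serves
// both the plain CLI (statusChan == nil) and the background build server.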
func runBuildWithContext(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progressMode string, statusChan chan *client.SolveStatus) (res *build.ResultContext, err error) {
if in.Opts.NoCache && len(in.NoCacheFilter) > 0 {
return nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
}
if noCache && len(in.noCacheFilter) > 0 {
return errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
if in.Quiet && progressMode != progress.PrinterModeAuto && progressMode != progress.PrinterModeQuiet {
return nil, errors.Errorf("progress=%s and quiet cannot be used together", progressMode)
} else if in.Quiet {
progressMode = "quiet"
}
if in.quiet && in.progress != progress.PrinterModeAuto && in.progress != progress.PrinterModeQuiet {
return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
} else if in.quiet {
in.progress = "quiet"
}
contexts, err := parseContextNames(in.contexts)
contexts, err := parseContextNames(in.Contexts)
if err != nil {
return err
return nil, err
}
printFunc, err := parsePrintFunc(in.printFunc)
printFunc, err := parsePrintFunc(in.PrintFunc)
if err != nil {
return err
return nil, err
}
opts := build.Options{
Inputs: build.Inputs{
ContextPath: in.contextPath,
DockerfilePath: in.dockerfileName,
InStream: os.Stdin,
ContextPath: in.ContextPath,
DockerfilePath: in.DockerfileName,
InStream: inStream,
NamedContexts: contexts,
},
BuildArgs: listToMap(in.buildArgs, true),
ExtraHosts: in.extraHosts,
ImageIDFile: in.imageIDFile,
Labels: listToMap(in.labels, false),
NetworkMode: in.networkMode,
NoCache: noCache,
NoCacheFilter: in.noCacheFilter,
Pull: pull,
ShmSize: in.shmSize,
Tags: in.tags,
Target: in.target,
Ulimits: in.ulimits,
BuildArgs: listToMap(in.BuildArgs, true),
ExtraHosts: in.ExtraHosts,
ImageIDFile: in.ImageIDFile,
Labels: listToMap(in.Labels, false),
NetworkMode: in.NetworkMode,
NoCache: in.Opts.NoCache,
NoCacheFilter: in.NoCacheFilter,
Pull: in.Opts.Pull,
ShmSize: dockeropts.MemBytes(in.ShmSize),
Tags: in.Tags,
Target: in.Target,
Ulimits: controllerUlimitOpt2DockerUlimit(in.Ulimits),
PrintFunc: printFunc,
}
platforms, err := platformutil.Parse(in.platforms)
platforms, err := platformutil.Parse(in.Platforms)
if err != nil {
return err
return nil, err
}
opts.Platforms = platforms
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig))
secrets, err := buildflags.ParseSecretSpecs(in.secrets)
secrets, err := buildflags.ParseSecretSpecs(in.Secrets)
if err != nil {
return err
return nil, err
}
opts.Session = append(opts.Session, secrets)
sshSpecs := in.ssh
if len(sshSpecs) == 0 && buildflags.IsGitSSH(in.contextPath) {
sshSpecs := in.SSH
if len(sshSpecs) == 0 && buildflags.IsGitSSH(in.ContextPath) {
sshSpecs = []string{"default"}
}
ssh, err := buildflags.ParseSSHSpecs(sshSpecs)
if err != nil {
return err
return nil, err
}
opts.Session = append(opts.Session, ssh)
outputs, err := buildflags.ParseOutputs(in.outputs)
outputs, err := buildflags.ParseOutputs(in.Outputs)
if err != nil {
return err
return nil, err
}
if in.exportPush {
if in.exportLoad {
return errors.Errorf("push and load may not be set together at the moment")
if in.Opts.ExportPush {
if in.Opts.ExportLoad {
return nil, errors.Errorf("push and load may not be set together at the moment")
}
if len(outputs) == 0 {
outputs = []client.ExportEntry{{
@@ -200,11 +164,11 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
case "image":
outputs[0].Attrs["push"] = "true"
default:
return errors.Errorf("push and %q output can't be used together", outputs[0].Type)
return nil, errors.Errorf("push and %q output can't be used together", outputs[0].Type)
}
}
}
if in.exportLoad {
if in.Opts.ExportLoad {
if len(outputs) == 0 {
outputs = []client.ExportEntry{{
Type: "docker",
@@ -214,102 +178,76 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
switch outputs[0].Type {
case "docker":
default:
return errors.Errorf("load and %q output can't be used together", outputs[0].Type)
return nil, errors.Errorf("load and %q output can't be used together", outputs[0].Type)
}
}
}
opts.Exports = outputs
inAttests := append([]string{}, in.attests...)
if in.provenance != "" {
inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", in.provenance))
inAttests := append([]string{}, in.Attests...)
if in.Opts.Provenance != "" {
inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", in.Opts.Provenance))
}
if in.sbom != "" {
inAttests = append(inAttests, buildflags.CanonicalizeAttest("sbom", in.sbom))
if in.Opts.SBOM != "" {
inAttests = append(inAttests, buildflags.CanonicalizeAttest("sbom", in.Opts.SBOM))
}
opts.Attests, err = buildflags.ParseAttests(inAttests)
if err != nil {
return err
return nil, err
}
cacheImports, err := buildflags.ParseCacheEntry(in.cacheFrom)
cacheImports, err := buildflags.ParseCacheEntry(in.CacheFrom)
if err != nil {
return err
return nil, err
}
opts.CacheFrom = cacheImports
cacheExports, err := buildflags.ParseCacheEntry(in.cacheTo)
cacheExports, err := buildflags.ParseCacheEntry(in.CacheTo)
if err != nil {
return err
return nil, err
}
opts.CacheTo = cacheExports
allow, err := buildflags.ParseEntitlements(in.allow)
allow, err := buildflags.ParseEntitlements(in.Allow)
if err != nil {
return err
return nil, err
}
opts.Allow = allow
// key string used for kubernetes "sticky" mode
contextPathHash, err := filepath.Abs(in.contextPath)
contextPathHash, err := filepath.Abs(in.ContextPath)
if err != nil {
contextPathHash = in.contextPath
contextPathHash = in.ContextPath
}
b, err := builder.New(dockerCli,
builder.WithName(in.builder),
builder.WithName(in.Opts.Builder),
builder.WithContextPathHash(contextPathHash),
)
if err != nil {
return err
return nil, err
}
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
return errors.Wrapf(err, "failed to update builder last activity time")
return nil, errors.Wrapf(err, "failed to update builder last activity time")
}
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
return err
return nil, err
}
imageID, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, in.invoke != "")
imageID, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, progressMode, in.Opts.MetadataFile, statusChan)
err = wrapBuildError(err, false)
if err != nil {
return err
return nil, err
}
if in.invoke != "" {
cfg, err := parseInvokeConfig(in.invoke)
if err != nil {
return err
}
cfg.ResultCtx = res
con := console.Current()
if err := con.SetRaw(); err != nil {
return errors.Errorf("failed to configure terminal: %v", err)
}
err = monitor.RunMonitor(ctx, cfg, func(ctx context.Context) (*build.ResultContext, error) {
_, rr, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, true)
return rr, err
}, io.NopCloser(os.Stdin), nopCloser{os.Stdout}, nopCloser{os.Stderr})
if err != nil {
logrus.Warnf("failed to run monitor: %v", err)
}
con.Reset()
}
if in.quiet {
if in.Quiet {
fmt.Println(imageID)
}
return nil
return res, nil
}
type nopCloser struct {
io.WriteCloser
}
func (c nopCloser) Close() error { return nil }
func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progressMode string, metadataFile string, allowNoOutput bool) (imageID string, res *build.ResultContext, err error) {
func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.Node, opts map[string]build.Options, progressMode string, metadataFile string, statusChan chan *client.SolveStatus) (imageID string, res *build.ResultContext, err error) {
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
@@ -320,13 +258,13 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.No
var mu sync.Mutex
var idx int
resp, err := build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer, func(driverIndex int, gotRes *build.ResultContext) {
resp, err := build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress.Tee(printer, statusChan), func(driverIndex int, gotRes *build.ResultContext) {
mu.Lock()
defer mu.Unlock()
if res == nil || driverIndex < idx {
idx, res = driverIndex, gotRes
}
}, allowNoOutput)
})
err1 := printer.Wait()
if err == nil {
err = err1
@@ -354,51 +292,6 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, nodes []builder.No
return resp[defaultTargetName].ExporterResponse["containerimage.digest"], res, err
}
func parseInvokeConfig(invoke string) (cfg build.ContainerConfig, err error) {
cfg.Tty = true
if invoke == "default" {
return cfg, nil
}
csvReader := csv.NewReader(strings.NewReader(invoke))
fields, err := csvReader.Read()
if err != nil {
return cfg, err
}
if len(fields) == 1 && !strings.Contains(fields[0], "=") {
cfg.Cmd = []string{fields[0]}
return cfg, nil
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 {
return cfg, errors.Errorf("invalid value %s", field)
}
key := strings.ToLower(parts[0])
value := parts[1]
switch key {
case "args":
cfg.Cmd = append(cfg.Cmd, value) // TODO: support JSON
case "entrypoint":
cfg.Entrypoint = append(cfg.Entrypoint, value) // TODO: support JSON
case "env":
cfg.Env = append(cfg.Env, value)
case "user":
cfg.User = &value
case "cwd":
cfg.Cwd = &value
case "tty":
cfg.Tty, err = strconv.ParseBool(value)
if err != nil {
return cfg, errors.Errorf("failed to parse tty: %v", err)
}
default:
return cfg, errors.Errorf("unknown key %q", key)
}
}
return cfg, nil
}
func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
return
@@ -439,15 +332,9 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
}
}
func newBuildOptions() buildOptions {
ulimits := make(map[string]*units.Ulimit)
return buildOptions{
ulimits: dockeropts.NewUlimitOpt(&ulimits),
}
}
func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
options := newBuildOptions()
cFlags := &commonFlags{}
cmd := &cobra.Command{
Use: "build [OPTIONS] PATH | URL | -",
@@ -455,9 +342,22 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
Short: "Start a build",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
options.contextPath = args[0]
options.builder = rootOpts.builder
options.ContextPath = args[0]
options.Opts.Builder = rootOpts.builder
options.Opts.MetadataFile = cFlags.metadataFile
options.Opts.NoCache = false
if cFlags.noCache != nil {
options.Opts.NoCache = *cFlags.noCache
}
options.Opts.Pull = false
if cFlags.pull != nil {
options.Opts.Pull = *cFlags.pull
}
options.progress = cFlags.progress
cmd.Flags().VisitAll(checkWarnedFlags)
if isExperimental() {
return launchControllerAndRunBuild(dockerCli, options)
}
return runBuild(dockerCli, options)
},
}
@@ -469,67 +369,70 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags := cmd.Flags()
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
flags.StringSliceVar(&options.ExtraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
flags.SetAnnotation("add-host", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#add-host"})
flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
flags.StringSliceVar(&options.Allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
flags.StringArrayVar(&options.BuildArgs, "build-arg", []string{}, "Set build-time variables")
flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, `External cache sources (e.g., "user/app:cache", "type=local,src=path/to/dir")`)
flags.StringArrayVar(&options.CacheFrom, "cache-from", []string{}, `External cache sources (e.g., "user/app:cache", "type=local,src=path/to/dir")`)
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, `Cache export destinations (e.g., "user/app:cache", "type=local,dest=path/to/dir")`)
flags.StringArrayVar(&options.CacheTo, "cache-to", []string{}, `Cache export destinations (e.g., "user/app:cache", "type=local,dest=path/to/dir")`)
flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
flags.StringVar(&options.CgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
flags.SetAnnotation("cgroup-parent", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#cgroup-parent"})
flags.StringArrayVar(&options.contexts, "build-context", []string{}, "Additional build contexts (e.g., name=path)")
flags.StringArrayVar(&options.Contexts, "build-context", []string{}, "Additional build contexts (e.g., name=path)")
flags.StringVarP(&options.dockerfileName, "file", "f", "", `Name of the Dockerfile (default: "PATH/Dockerfile")`)
flags.StringVarP(&options.DockerfileName, "file", "f", "", `Name of the Dockerfile (default: "PATH/Dockerfile")`)
flags.SetAnnotation("file", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#file"})
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
flags.StringVar(&options.ImageIDFile, "iidfile", "", "Write the image ID to the file")
flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")
flags.StringArrayVar(&options.Labels, "label", []string{}, "Set metadata for an image")
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--output=type=docker"`)
flags.BoolVar(&options.Opts.ExportLoad, "load", false, `Shorthand for "--output=type=docker"`)
flags.StringVar(&options.networkMode, "network", "default", `Set the networking mode for the "RUN" instructions during build`)
flags.StringVar(&options.NetworkMode, "network", "default", `Set the networking mode for the "RUN" instructions during build`)
flags.StringArrayVar(&options.noCacheFilter, "no-cache-filter", []string{}, "Do not cache specified stages")
flags.StringArrayVar(&options.NoCacheFilter, "no-cache-filter", []string{}, "Do not cache specified stages")
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, `Output destination (format: "type=local,dest=path")`)
flags.StringArrayVarP(&options.Outputs, "output", "o", []string{}, `Output destination (format: "type=local,dest=path")`)
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
flags.StringArrayVar(&options.Platforms, "platform", platformsDefault, "Set target platform for build")
if isExperimental() {
flags.StringVar(&options.printFunc, "print", "", "Print result of information request (e.g., outline, targets) [experimental]")
flags.StringVar(&options.PrintFunc, "print", "", "Print result of information request (e.g., outline, targets) [experimental]")
}
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--output=type=registry"`)
flags.BoolVar(&options.Opts.ExportPush, "push", false, `Shorthand for "--output=type=registry"`)
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
flags.BoolVarP(&options.Quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
flags.StringArrayVar(&options.secrets, "secret", []string{}, `Secret to expose to the build (format: "id=mysecret[,src=/local/secret]")`)
flags.StringArrayVar(&options.Secrets, "secret", []string{}, `Secret to expose to the build (format: "id=mysecret[,src=/local/secret]")`)
flags.Var(&options.shmSize, "shm-size", `Size of "/dev/shm"`)
flags.Var(newShmSize(&options), "shm-size", `Size of "/dev/shm"`)
flags.StringArrayVar(&options.ssh, "ssh", []string{}, `SSH agent socket or keys to expose to the build (format: "default|<id>[=<socket>|<key>[,<key>]]")`)
flags.StringArrayVar(&options.SSH, "ssh", []string{}, `SSH agent socket or keys to expose to the build (format: "default|<id>[=<socket>|<key>[,<key>]]")`)
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, `Name and optionally a tag (format: "name:tag")`)
flags.StringArrayVarP(&options.Tags, "tag", "t", []string{}, `Name and optionally a tag (format: "name:tag")`)
flags.SetAnnotation("tag", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#tag"})
flags.StringVar(&options.target, "target", "", "Set the target build stage to build")
flags.StringVar(&options.Target, "target", "", "Set the target build stage to build")
flags.SetAnnotation("target", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#target"})
flags.Var(options.ulimits, "ulimit", "Ulimit options")
flags.Var(newUlimits(&options), "ulimit", "Ulimit options")
flags.StringArrayVar(&options.attests, "attest", []string{}, `Attestation parameters (format: "type=sbom,generator=image")`)
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--attest=type=sbom"`)
flags.StringVar(&options.provenance, "provenance", "", `Shortand for "--attest=type=provenance"`)
flags.StringArrayVar(&options.Attests, "attest", []string{}, `Attestation parameters (format: "type=sbom,generator=image")`)
flags.StringVar(&options.Opts.SBOM, "sbom", "", `Shorthand for "--attest=type=sbom"`)
flags.StringVar(&options.Opts.Provenance, "provenance", "", `Shorthand for "--attest=type=provenance"`)
if isExperimental() {
flags.StringVar(&options.invoke, "invoke", "", "Invoke a command after the build [experimental]")
flags.StringVar(&options.root, "root", "", "Specify root directory of server to connect [experimental]")
flags.BoolVar(&options.detach, "detach", runtime.GOOS == "linux", "Detach buildx server (supported only on linux) [experimental]")
flags.StringVar(&options.serverConfig, "server-config", "", "Specify buildx server config file (used only when launching new server) [experimental]")
}
// hidden flags
@@ -580,11 +483,19 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags.BoolVar(&ignoreBool, "force-rm", false, "Always remove intermediate containers")
flags.MarkHidden("force-rm")
commonBuildFlags(&options.commonOptions, flags)
commonBuildFlags(cFlags, flags)
return cmd
}
func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
// commonFlags is a set of flags shared among subcommands.
type commonFlags struct {
metadataFile string
progress string
noCache *bool
pull *bool
}
func commonBuildFlags(options *commonFlags, flags *pflag.FlagSet) {
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
@@ -744,3 +655,235 @@ func updateLastActivity(dockerCli command.Cli, ng *store.NodeGroup) error {
defer release()
return txn.UpdateLastActivity(ng)
}
func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) error {
ctx := context.TODO()
if options.Quiet && options.progress != "auto" && options.progress != "quiet" {
return errors.Errorf("progress=%s and quiet cannot be used together", options.progress)
} else if options.Quiet {
options.progress = "quiet"
}
if options.invoke != "" && (options.DockerfileName == "-" || options.ContextPath == "-") {
// stdin must be usable for monitor
return errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
}
var invokeConfig controllerapi.ContainerConfig
if inv := options.invoke; inv != "" {
var err error
invokeConfig, err = parseInvokeConfig(inv) // TODO: produce *controller.ContainerConfig directly.
if err != nil {
return err
}
}
var c monitor.BuildxController
var err error
if options.detach {
logrus.Infof("connecting to buildx server")
c, err = newRemoteBuildxController(ctx, dockerCli, options)
if err != nil {
return fmt.Errorf("failed to use buildx server; use --detach=false: %w", err)
}
} else {
logrus.Infof("launching local buildx controller")
c = newLocalBuildxController(ctx, dockerCli)
}
defer func() {
if err := c.Close(); err != nil {
logrus.Warnf("failed to close server connection %v", err)
}
}()
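// Stdin is wrapped in a single forwarder whose destination can be swapped:
// the first pipe (pr/pw) feeds the build request, and after the build the
// writer is redirected (SetWriter below) so the same terminal stdin can
// drive the interactive monitor instead.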
f := ioset.NewSingleForwarder()
pr, pw := io.Pipe()
f.SetWriter(pw, func() io.WriteCloser {
pw.Close() // propagate EOF
logrus.Debug("propagating stdin close")
return nil
})
f.SetReader(os.Stdin)
// Start build
ref, err := c.Build(ctx, options.BuildOptions, pr, os.Stdout, os.Stderr, options.progress)
if err != nil {
return fmt.Errorf("failed to build: %w", err) // TODO: allow invoke even on error
}
if err := pw.Close(); err != nil {
logrus.Debug("failed to close stdin pipe writer")
}
if err := pr.Close(); err != nil {
logrus.Debug("failed to close stdin pipe reader")
}
// post-build operations
if options.invoke != "" {
pr2, pw2 := io.Pipe()
f.SetWriter(pw2, func() io.WriteCloser {
pw2.Close() // propagate EOF
return nil
})
con := console.Current()
if err := con.SetRaw(); err != nil {
if err := c.Disconnect(ctx, ref); err != nil {
logrus.Warnf("disconnect error: %v", err)
}
return errors.Errorf("failed to configure terminal: %v", err)
}
err = monitor.RunMonitor(ctx, ref, options.BuildOptions, invokeConfig, c, options.progress, pr2, os.Stdout, os.Stderr)
con.Reset()
if err := pw2.Close(); err != nil {
logrus.Debug("failed to close monitor stdin pipe reader")
}
if err != nil {
logrus.Warnf("failed to run monitor: %v", err)
}
} else {
if err := c.Disconnect(ctx, ref); err != nil {
logrus.Warnf("disconnect error: %v", err)
}
// If "invoke" isn't specified, further inspection ins't provided. Finish the buildx server.
if err := c.Kill(ctx); err != nil {
return err
}
}
return nil
}
func parseInvokeConfig(invoke string) (cfg controllerapi.ContainerConfig, err error) {
cfg.Tty = true
if invoke == "default" {
return cfg, nil
}
csvReader := csv.NewReader(strings.NewReader(invoke))
fields, err := csvReader.Read()
if err != nil {
return cfg, err
}
if len(fields) == 1 && !strings.Contains(fields[0], "=") {
cfg.Cmd = []string{fields[0]}
return cfg, nil
}
cfg.NoUser = true
cfg.NoCwd = true
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 {
return cfg, errors.Errorf("invalid value %s", field)
}
key := strings.ToLower(parts[0])
value := parts[1]
switch key {
case "args":
cfg.Cmd = append(cfg.Cmd, value) // TODO: support JSON
case "entrypoint":
cfg.Entrypoint = append(cfg.Entrypoint, value) // TODO: support JSON
case "env":
cfg.Env = append(cfg.Env, value)
case "user":
cfg.User = value
cfg.NoUser = false
case "cwd":
cfg.Cwd = value
cfg.NoCwd = false
case "tty":
cfg.Tty, err = strconv.ParseBool(value)
if err != nil {
return cfg, errors.Errorf("failed to parse tty: %v", err)
}
default:
return cfg, errors.Errorf("unknown key %q", key)
}
}
return cfg, nil
}
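The CSV syntax accepted by parseInvokeConfig is easiest to see by example. A hypothetical in-package test (values invented for illustration; assumes the standard testing import):

func TestParseInvokeConfig(t *testing.T) {
	// Key=value fields accumulate into the container config.
	cfg, err := parseInvokeConfig("entrypoint=sh,env=FOO=1,user=root,tty=false")
	if err != nil {
		t.Fatal(err)
	}
	if cfg.User != "root" || cfg.NoUser || cfg.Tty {
		t.Fatalf("unexpected config: %+v", cfg)
	}
	// A single bare token is treated as the command to run in the container.
	cfg, _ = parseInvokeConfig("sh")
	if len(cfg.Cmd) != 1 || cfg.Cmd[0] != "sh" {
		t.Fatalf("unexpected cmd: %v", cfg.Cmd)
	}
}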
func controllerUlimitOpt2DockerUlimit(u *controllerapi.UlimitOpt) *dockeropts.UlimitOpt {
if u == nil {
return nil
}
values := make(map[string]*units.Ulimit)
for k, v := range u.Values {
values[k] = &units.Ulimit{
Name: v.Name,
Hard: v.Hard,
Soft: v.Soft,
}
}
return dockeropts.NewUlimitOpt(&values)
}
func newBuildOptions() buildOptions {
return buildOptions{
BuildOptions: controllerapi.BuildOptions{
Opts: &controllerapi.CommonOptions{},
},
}
}
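// The ulimits and shmSize wrappers below adapt the docker CLI option types to
// the protobuf fields on buildOptions: each Set call parses the flag value via
// the original docker option type and then syncs the result into the gRPC
// message, so cobra keeps its usual flag semantics.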
func newUlimits(opt *buildOptions) *ulimits {
ul := make(map[string]*units.Ulimit)
return &ulimits{opt: opt, org: dockeropts.NewUlimitOpt(&ul)}
}
type ulimits struct {
opt *buildOptions
org *dockeropts.UlimitOpt
}
func (u *ulimits) sync() {
du := &controllerapi.UlimitOpt{
Values: make(map[string]*controllerapi.Ulimit),
}
for _, l := range u.org.GetList() {
du.Values[l.Name] = &controllerapi.Ulimit{
Name: l.Name,
Hard: l.Hard,
Soft: l.Soft,
}
}
u.opt.Ulimits = du
}
func (u *ulimits) String() string {
return u.org.String()
}
func (u *ulimits) Set(v string) error {
err := u.org.Set(v)
u.sync()
return err
}
func (u *ulimits) Type() string {
return u.org.Type()
}
func newShmSize(opt *buildOptions) *shmSize {
return &shmSize{opt: opt}
}
type shmSize struct {
opt *buildOptions
org dockeropts.MemBytes
}
func (s *shmSize) sync() {
s.opt.ShmSize = s.org.Value()
}
func (s *shmSize) String() string {
return s.org.String()
}
func (s *shmSize) Set(v string) error {
err := s.org.Set(v)
s.sync()
return err
}
func (s *shmSize) Type() string {
return s.org.Type()
}

View File

@@ -0,0 +1,262 @@
package controller
import (
"context"
"fmt"
"io"
"sync"
"time"
"github.com/containerd/console"
"github.com/containerd/containerd/defaults"
"github.com/containerd/containerd/pkg/dialer"
"github.com/docker/buildx/commands/controller/pb"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials/insecure"
)
func NewClient(addr string) (*Client, error) {
backoffConfig := backoff.DefaultConfig
backoffConfig.MaxDelay = 3 * time.Second
connParams := grpc.ConnectParams{
Backoff: backoffConfig,
}
gopts := []grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithConnectParams(connParams),
grpc.WithContextDialer(dialer.ContextDialer),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
}
conn, err := grpc.Dial(dialer.DialAddress(addr), gopts...)
if err != nil {
return nil, err
}
return &Client{conn: conn}, nil
}
type Client struct {
conn *grpc.ClientConn
closeOnce sync.Once
}
func (c *Client) Close() (err error) {
c.closeOnce.Do(func() {
err = c.conn.Close()
})
return
}
func (c *Client) Version(ctx context.Context) (string, string, string, error) {
res, err := c.client().Info(ctx, &pb.InfoRequest{})
if err != nil {
return "", "", "", err
}
v := res.BuildxVersion
return v.Package, v.Version, v.Revision, nil
}
func (c *Client) List(ctx context.Context) (keys []string, retErr error) {
res, err := c.client().List(ctx, &pb.ListRequest{})
if err != nil {
return nil, err
}
return res.Keys, nil
}
func (c *Client) Disconnect(ctx context.Context, key string) error {
_, err := c.client().Disconnect(ctx, &pb.DisconnectRequest{Ref: key})
return err
}
func (c *Client) Invoke(ctx context.Context, ref string, containerConfig pb.ContainerConfig, in io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
if ref == "" {
return fmt.Errorf("build reference must be specified")
}
stream, err := c.client().Invoke(ctx)
if err != nil {
return err
}
return attachIO(ctx, stream, &pb.InitMessage{Ref: ref, ContainerConfig: &containerConfig}, ioAttachConfig{
stdin: in,
stdout: stdout,
stderr: stderr,
// TODO: Signal, Resize
})
}
func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (string, error) {
ref := identity.NewID()
pw, err := progress.NewPrinter(context.TODO(), w, out, progressMode)
if err != nil {
return "", err
}
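// Three goroutines cooperate here: build() below runs the gRPC Build call
// and feeds statusChan from the server's Status stream, the second drains
// the channel into the progress printer, and the third waits for the stream
// to finish before flushing the printer.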
statusChan := make(chan *client.SolveStatus)
statusDone := make(chan struct{})
eg, egCtx := errgroup.WithContext(ctx)
eg.Go(func() error {
defer close(statusChan)
return c.build(egCtx, ref, options, in, statusChan)
})
eg.Go(func() error {
defer close(statusDone)
for s := range statusChan {
st := s
pw.Write(st)
}
return nil
})
eg.Go(func() error {
<-statusDone
return pw.Wait()
})
return ref, eg.Wait()
}
func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions, in io.ReadCloser, statusChan chan *client.SolveStatus) error {
eg, egCtx := errgroup.WithContext(ctx)
done := make(chan struct{})
eg.Go(func() error {
defer close(done)
if _, err := c.client().Build(egCtx, &pb.BuildRequest{
Ref: ref,
Options: &options,
}); err != nil {
return err
}
return nil
})
eg.Go(func() error {
stream, err := c.client().Status(egCtx, &pb.StatusRequest{
Ref: ref,
})
if err != nil {
return err
}
for {
resp, err := stream.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return errors.Wrap(err, "failed to receive status")
}
s := client.SolveStatus{}
for _, v := range resp.Vertexes {
s.Vertexes = append(s.Vertexes, &client.Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
ProgressGroup: v.ProgressGroup,
})
}
for _, v := range resp.Statuses {
s.Statuses = append(s.Statuses, &client.VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range resp.Logs {
s.Logs = append(s.Logs, &client.VertexLog{
Vertex: v.Vertex,
Stream: int(v.Stream),
Data: v.Msg,
Timestamp: v.Timestamp,
})
}
for _, v := range resp.Warnings {
s.Warnings = append(s.Warnings, &client.VertexWarning{
Vertex: v.Vertex,
Level: int(v.Level),
Short: v.Short,
Detail: v.Detail,
URL: v.Url,
SourceInfo: v.Info,
Range: v.Ranges,
})
}
statusChan <- &s
}
})
if in != nil {
eg.Go(func() error {
stream, err := c.client().Input(egCtx)
if err != nil {
return err
}
if err := stream.Send(&pb.InputMessage{
Input: &pb.InputMessage_Init{
Init: &pb.InputInitMessage{
Ref: ref,
},
},
}); err != nil {
return fmt.Errorf("failed to init input: %w", err)
}
inReader, inWriter := io.Pipe()
eg2, _ := errgroup.WithContext(ctx)
eg2.Go(func() error {
<-done
return inWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(inWriter, in)
inWriter.Close()
}()
eg2.Go(func() error {
for {
buf := make([]byte, 32*1024)
n, err := inReader.Read(buf)
if err != nil {
if err == io.EOF {
break // break loop and send EOF
}
return err
} else if n > 0 {
if err := stream.Send(&pb.InputMessage{
Input: &pb.InputMessage_Data{
Data: &pb.DataMessage{
Data: buf[:n],
},
},
}); err != nil {
return err
}
}
}
return stream.Send(&pb.InputMessage{
Input: &pb.InputMessage_Data{
Data: &pb.DataMessage{
EOF: true,
},
},
})
})
return eg2.Wait()
})
}
return eg.Wait()
}
func (c *Client) client() pb.ControllerClient {
return pb.NewControllerClient(c.conn)
}

View File

@@ -0,0 +1,440 @@
package controller
import (
"context"
"errors"
"fmt"
"io"
"sync"
"time"
"github.com/docker/buildx/build"
"github.com/docker/buildx/commands/controller/pb"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/version"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, statusChan chan *client.SolveStatus) (res *build.ResultContext, err error)
func New(buildFunc BuildFunc) *Controller {
return &Controller{
buildFunc: buildFunc,
}
}
type Controller struct {
buildFunc BuildFunc
session map[string]session
sessionMu sync.Mutex
}
type session struct {
statusChan chan *client.SolveStatus
result *build.ResultContext
inputPipe *io.PipeWriter
curInvokeCancel func()
curBuildCancel func()
}
func (m *Controller) Info(ctx context.Context, req *pb.InfoRequest) (res *pb.InfoResponse, err error) {
return &pb.InfoResponse{
BuildxVersion: &pb.BuildxVersion{
Package: version.Package,
Version: version.Version,
Revision: version.Revision,
},
}, nil
}
func (m *Controller) List(ctx context.Context, req *pb.ListRequest) (res *pb.ListResponse, err error) {
keys := make(map[string]struct{})
m.sessionMu.Lock()
for k := range m.session {
keys[k] = struct{}{}
}
m.sessionMu.Unlock()
var keysL []string
for k := range keys {
keysL = append(keysL, k)
}
return &pb.ListResponse{
Keys: keysL,
}, nil
}
func (m *Controller) Disconnect(ctx context.Context, req *pb.DisconnectRequest) (res *pb.DisconnectResponse, err error) {
key := req.Ref
if key == "" {
return nil, fmt.Errorf("disconnect: empty key")
}
m.sessionMu.Lock()
if s, ok := m.session[key]; ok {
if s.curBuildCancel != nil {
s.curBuildCancel()
}
if s.curInvokeCancel != nil {
s.curInvokeCancel()
}
}
delete(m.session, key)
m.sessionMu.Unlock()
return &pb.DisconnectResponse{}, nil
}
func (m *Controller) Close() error {
m.sessionMu.Lock()
for k := range m.session {
if s, ok := m.session[k]; ok {
if s.curBuildCancel != nil {
s.curBuildCancel()
}
if s.curInvokeCancel != nil {
s.curInvokeCancel()
}
}
}
m.sessionMu.Unlock()
return nil
}
func (m *Controller) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResponse, error) {
ref := req.Ref
if ref == "" {
return nil, fmt.Errorf("build: empty key")
}
// Prepare status channel and session if not exists
m.sessionMu.Lock()
if m.session == nil {
m.session = make(map[string]session)
}
s, ok := m.session[ref]
if ok && m.session[ref].statusChan != nil {
m.sessionMu.Unlock()
return &pb.BuildResponse{}, fmt.Errorf("build or status already ongoing, or previous status was not consumed")
}
statusChan := make(chan *client.SolveStatus)
s.statusChan = statusChan
m.session[ref] = session{statusChan: statusChan}
m.sessionMu.Unlock()
defer func() {
close(statusChan)
m.sessionMu.Lock()
s, ok := m.session[ref]
if ok {
s.statusChan = nil
}
m.sessionMu.Unlock()
}()
// Prepare input stream pipe
inR, inW := io.Pipe()
m.sessionMu.Lock()
if s, ok := m.session[ref]; ok {
s.inputPipe = inW
m.session[ref] = s
} else {
m.sessionMu.Unlock()
return nil, fmt.Errorf("build: unknown key %v", ref)
}
m.sessionMu.Unlock()
defer inR.Close()
// Build the specified request
ctx, cancel := context.WithCancel(ctx)
defer cancel()
res, err := m.buildFunc(ctx, req.Options, inR, statusChan)
m.sessionMu.Lock()
if s, ok := m.session[ref]; ok {
s.result = res
s.curBuildCancel = cancel
m.session[ref] = s
} else {
m.sessionMu.Unlock()
return nil, fmt.Errorf("build: unknown key %v", ref)
}
m.sessionMu.Unlock()
return &pb.BuildResponse{}, err
}
func (m *Controller) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer) error {
ref := req.Ref
if ref == "" {
return fmt.Errorf("status: empty key")
}
// Wait and get status channel prepared by Build()
var statusChan <-chan *client.SolveStatus
for {
// TODO: timeout?
m.sessionMu.Lock()
if _, ok := m.session[ref]; !ok || m.session[ref].statusChan == nil {
m.sessionMu.Unlock()
time.Sleep(time.Millisecond) // TODO: wait for Build without a busy loop, and make this cancellable
continue
}
statusChan = m.session[ref].statusChan
m.sessionMu.Unlock()
break
}
// forward status
for ss := range statusChan {
if ss == nil {
break
}
cs := toControlStatus(ss)
if err := stream.Send(cs); err != nil {
return err
}
}
return nil
}
func (m *Controller) Input(stream pb.Controller_InputServer) (err error) {
// Get the target ref from init message
msg, err := stream.Recv()
if err != nil {
if !errors.Is(err, io.EOF) {
return err
}
return nil
}
init := msg.GetInit()
if init == nil {
return fmt.Errorf("unexpected message: %T; wanted init", msg.GetInit())
}
ref := init.Ref
if ref == "" {
return fmt.Errorf("input: no ref is provided")
}
// Wait and get input stream pipe prepared by Build()
var inputPipeW *io.PipeWriter
for {
// TODO: timeout?
m.sessionMu.Lock()
if _, ok := m.session[ref]; !ok || m.session[ref].inputPipe == nil {
m.sessionMu.Unlock()
time.Sleep(time.Millisecond) // TODO: wait for Build without a busy loop, and make this cancellable
continue
}
inputPipeW = m.session[ref].inputPipe
m.sessionMu.Unlock()
break
}
// Forward input stream
eg, ctx := errgroup.WithContext(context.TODO())
done := make(chan struct{})
msgCh := make(chan *pb.InputMessage)
eg.Go(func() error {
defer close(msgCh)
for {
msg, err := stream.Recv()
if err != nil {
if !errors.Is(err, io.EOF) {
return err
}
return nil
}
select {
case msgCh <- msg:
case <-done:
return nil
case <-ctx.Done():
return nil
}
}
})
eg.Go(func() (retErr error) {
defer close(done)
defer func() {
if retErr != nil {
inputPipeW.CloseWithError(retErr)
return
}
inputPipeW.Close()
}()
for {
var msg *pb.InputMessage
select {
case msg = <-msgCh:
case <-ctx.Done():
return fmt.Errorf("canceled: %w", ctx.Err())
}
if msg == nil {
return nil
}
if data := msg.GetData(); data != nil {
if len(data.Data) > 0 {
_, err := inputPipeW.Write(data.Data)
if err != nil {
return err
}
}
if data.EOF {
return nil
}
}
}
})
return eg.Wait()
}
func (m *Controller) Invoke(srv pb.Controller_InvokeServer) error {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
containerIn, containerOut := ioset.Pipe()
waitInvokeDoneCh := make(chan struct{})
var cancelOnce sync.Once
curInvokeCancel := func() {
cancelOnce.Do(func() { containerOut.Close(); containerIn.Close(); cancel() })
<-waitInvokeDoneCh
}
defer curInvokeCancel()
var cfg *pb.ContainerConfig
var resultCtx *build.ResultContext
initDoneCh := make(chan struct{})
initErrCh := make(chan error)
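// Two goroutines cooperate below: the first serves the bidirectional IO
// stream and captures the InitMessage (resolving cfg and resultCtx and
// registering the cancel callback), while the second waits on initDoneCh and
// then runs the container via build.Invoke, wired to the server side of the
// ioset pipe.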
eg, egCtx := errgroup.WithContext(ctx)
eg.Go(func() error {
return serveIO(egCtx, srv, func(initMessage *pb.InitMessage) (retErr error) {
defer func() {
if retErr != nil {
initErrCh <- retErr
}
close(initDoneCh)
}()
ref := initMessage.Ref
cfg = initMessage.ContainerConfig
// Register cancel callback
m.sessionMu.Lock()
if s, ok := m.session[ref]; ok {
if cancel := s.curInvokeCancel; cancel != nil {
logrus.Warnf("invoke: cancelling ongoing invoke of %q", ref)
cancel()
}
s.curInvokeCancel = curInvokeCancel
m.session[ref] = s
} else {
m.sessionMu.Unlock()
return fmt.Errorf("invoke: unknown key %v", ref)
}
m.sessionMu.Unlock()
// Get the target result to invoke a container from
m.sessionMu.Lock()
if _, ok := m.session[ref]; !ok || m.session[ref].result == nil {
m.sessionMu.Unlock()
return fmt.Errorf("unknown reference: %q", ref)
}
resultCtx = m.session[ref].result
m.sessionMu.Unlock()
return nil
}, &ioServerConfig{
stdin: containerOut.Stdin,
stdout: containerOut.Stdout,
stderr: containerOut.Stderr,
// TODO: signal, resize
})
})
eg.Go(func() error {
defer containerIn.Close()
defer cancel()
select {
case <-initDoneCh:
case err := <-initErrCh:
return err
}
if cfg == nil {
return fmt.Errorf("no container config is provided")
}
if resultCtx == nil {
return fmt.Errorf("no result is provided")
}
ccfg := build.ContainerConfig{
ResultCtx: resultCtx,
Entrypoint: cfg.Entrypoint,
Cmd: cfg.Cmd,
Env: cfg.Env,
Tty: cfg.Tty,
Stdin: containerIn.Stdin,
Stdout: containerIn.Stdout,
Stderr: containerIn.Stderr,
}
if !cfg.NoUser {
ccfg.User = &cfg.User
}
if !cfg.NoCwd {
ccfg.Cwd = &cfg.Cwd
}
return build.Invoke(egCtx, ccfg)
})
err := eg.Wait()
close(waitInvokeDoneCh)
curInvokeCancel()
return err
}
func toControlStatus(s *client.SolveStatus) *pb.StatusResponse {
resp := pb.StatusResponse{}
for _, v := range s.Vertexes {
resp.Vertexes = append(resp.Vertexes, &controlapi.Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
ProgressGroup: v.ProgressGroup,
})
}
for _, v := range s.Statuses {
resp.Statuses = append(resp.Statuses, &controlapi.VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range s.Logs {
resp.Logs = append(resp.Logs, &controlapi.VertexLog{
Vertex: v.Vertex,
Stream: int64(v.Stream),
Msg: v.Data,
Timestamp: v.Timestamp,
})
}
for _, v := range s.Warnings {
resp.Warnings = append(resp.Warnings, &controlapi.VertexWarning{
Vertex: v.Vertex,
Level: int64(v.Level),
Short: v.Short,
Detail: v.Detail,
Url: v.URL,
Info: v.SourceInfo,
Ranges: v.Range,
})
}
return &resp
}

commands/controller/io.go (new file, 431 lines)
View File

@@ -0,0 +1,431 @@
package controller
import (
"context"
"errors"
"fmt"
"io"
"syscall"
"time"
"github.com/docker/buildx/commands/controller/pb"
"github.com/moby/sys/signal"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type msgStream interface {
Send(*pb.Message) error
Recv() (*pb.Message, error)
}
type ioServerConfig struct {
stdin io.WriteCloser
stdout, stderr io.ReadCloser
// signalFn is a callback invoked when a signal arrives from the client.
signalFn func(context.Context, syscall.Signal) error
// resizeFn is a callback invoked when a resize event arrives from the client.
resizeFn func(context.Context, winSize) error
}
func serveIO(attachCtx context.Context, srv msgStream, initFn func(*pb.InitMessage) error, ioConfig *ioServerConfig) (err error) {
stdin, stdout, stderr := ioConfig.stdin, ioConfig.stdout, ioConfig.stderr
stream := &debugStream{srv, "server=" + time.Now().String()}
eg, ctx := errgroup.WithContext(attachCtx)
done := make(chan struct{})
msg, err := receive(ctx, stream)
if err != nil {
return err
}
init := msg.GetInit()
if init == nil {
return fmt.Errorf("unexpected message: %T; wanted init", msg.GetInput())
}
ref := init.Ref
if ref == "" {
return fmt.Errorf("no ref is provided")
}
if err := initFn(init); err != nil {
return fmt.Errorf("failed to initialize IO server: %w", err)
}
if stdout != nil {
stdoutReader, stdoutWriter := io.Pipe()
eg.Go(func() error {
<-done
return stdoutWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(stdoutWriter, stdout)
stdoutWriter.Close()
}()
eg.Go(func() error {
defer stdoutReader.Close()
return copyToStream(1, stream, stdoutReader)
})
}
if stderr != nil {
stderrReader, stderrWriter := io.Pipe()
eg.Go(func() error {
<-done
return stderrWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(stderrWriter, stderr)
stderrWriter.Close()
}()
eg.Go(func() error {
defer stderrReader.Close()
return copyToStream(2, stream, stderrReader)
})
}
msgCh := make(chan *pb.Message)
eg.Go(func() error {
defer close(msgCh)
for {
msg, err := receive(ctx, stream)
if err != nil {
return err
}
select {
case msgCh <- msg:
case <-done:
return nil
case <-ctx.Done():
return nil
}
}
})
eg.Go(func() error {
defer close(done)
for {
var msg *pb.Message
select {
case msg = <-msgCh:
case <-ctx.Done():
return nil
}
if msg == nil {
return nil
}
if file := msg.GetFile(); file != nil {
if file.Fd != 0 {
return fmt.Errorf("unexpected fd: %v", file.Fd)
}
if stdin == nil {
continue // no stdin destination is specified so ignore the data
}
if len(file.Data) > 0 {
_, err := stdin.Write(file.Data)
if err != nil {
return err
}
}
if file.EOF {
stdin.Close()
}
} else if resize := msg.GetResize(); resize != nil {
if ioConfig.resizeFn != nil {
ioConfig.resizeFn(ctx, winSize{
cols: resize.Cols,
rows: resize.Rows,
})
}
} else if sig := msg.GetSignal(); sig != nil {
if ioConfig.signalFn != nil {
syscallSignal, ok := signal.SignalMap[sig.Name]
if !ok {
continue
}
ioConfig.signalFn(ctx, syscallSignal)
}
} else {
return fmt.Errorf("unexpected message: %T", msg.GetInput())
}
}
})
return eg.Wait()
}
type ioAttachConfig struct {
stdin io.ReadCloser
stdout, stderr io.WriteCloser
signal <-chan syscall.Signal
resize <-chan winSize
}
type winSize struct {
rows uint32
cols uint32
}
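// attachIO is the client-side counterpart of serveIO: after sending an
// InitMessage it streams stdin to the server as fd-0 FdMessages,
// demultiplexes returning fd-1/fd-2 messages onto stdout/stderr, and forwards
// signal and resize events over the same stream as separate message types.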
func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage, cfg ioAttachConfig) (retErr error) {
eg, ctx := errgroup.WithContext(ctx)
done := make(chan struct{})
if err := stream.Send(&pb.Message{
Input: &pb.Message_Init{
Init: initMessage,
},
}); err != nil {
return fmt.Errorf("failed to init: %w", err)
}
if cfg.stdin != nil {
stdinReader, stdinWriter := io.Pipe()
eg.Go(func() error {
<-done
return stdinWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(stdinWriter, cfg.stdin)
stdinWriter.Close()
}()
eg.Go(func() error {
defer stdinReader.Close()
return copyToStream(0, stream, stdinReader)
})
}
if cfg.signal != nil {
eg.Go(func() error {
for {
var sig syscall.Signal
select {
case sig = <-cfg.signal:
case <-done:
return nil
case <-ctx.Done():
return nil
}
name := sigToName[sig]
if name == "" {
continue
}
if err := stream.Send(&pb.Message{
Input: &pb.Message_Signal{
Signal: &pb.SignalMessage{
Name: name,
},
},
}); err != nil {
return fmt.Errorf("failed to send signal: %w", err)
}
}
})
}
if cfg.resize != nil {
eg.Go(func() error {
for {
var win winSize
select {
case win = <-cfg.resize:
case <-done:
return nil
case <-ctx.Done():
return nil
}
if err := stream.Send(&pb.Message{
Input: &pb.Message_Resize{
Resize: &pb.ResizeMessage{
Rows: win.rows,
Cols: win.cols,
},
},
}); err != nil {
return fmt.Errorf("failed to send resize: %w", err)
}
}
})
}
msgCh := make(chan *pb.Message)
eg.Go(func() error {
defer close(msgCh)
for {
msg, err := receive(ctx, stream)
if err != nil {
return err
}
select {
case msgCh <- msg:
case <-done:
return nil
case <-ctx.Done():
return nil
}
}
})
eg.Go(func() error {
eofs := make(map[uint32]struct{})
defer close(done)
for {
var msg *pb.Message
select {
case msg = <-msgCh:
case <-ctx.Done():
return nil
}
if msg == nil {
return nil
}
if file := msg.GetFile(); file != nil {
if _, ok := eofs[file.Fd]; ok {
continue
}
var out io.WriteCloser
switch file.Fd {
case 1:
out = cfg.stdout
case 2:
out = cfg.stderr
default:
return fmt.Errorf("unsupported fd %d", file.Fd)
}
if out == nil {
logrus.Warnf("attachIO: no writer for fd %d", file.Fd)
continue
}
if len(file.Data) > 0 {
if _, err := out.Write(file.Data); err != nil {
return err
}
}
if file.EOF {
eofs[file.Fd] = struct{}{}
}
} else {
return fmt.Errorf("unexpected message: %T", msg.GetInput())
}
}
})
return eg.Wait()
}
func receive(ctx context.Context, stream msgStream) (*pb.Message, error) {
msgCh := make(chan *pb.Message)
errCh := make(chan error)
go func() {
msg, err := stream.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
return
}
errCh <- err
return
}
msgCh <- msg
}()
select {
case msg := <-msgCh:
return msg, nil
case err := <-errCh:
return nil, err
case <-ctx.Done():
return nil, ctx.Err()
}
}
func copyToStream(fd uint32, snd msgStream, r io.Reader) error {
for {
buf := make([]byte, 32*1024)
n, err := r.Read(buf)
if err != nil {
if err == io.EOF {
break // break loop and send EOF
}
return err
} else if n > 0 {
if err := snd.Send(&pb.Message{
Input: &pb.Message_File{
File: &pb.FdMessage{
Fd: fd,
Data: buf[:n],
},
},
}); err != nil {
return err
}
}
}
return snd.Send(&pb.Message{
Input: &pb.Message_File{
File: &pb.FdMessage{
Fd: fd,
EOF: true,
},
},
})
}
var sigToName = map[syscall.Signal]string{}
func init() {
for name, value := range signal.SignalMap {
sigToName[value] = name
}
}
type debugStream struct {
msgStream
prefix string
}
func (s *debugStream) Send(msg *pb.Message) error {
switch m := msg.GetInput().(type) {
case *pb.Message_File:
if m.File.EOF {
logrus.Debugf("|---> File Message (sender:%v) fd=%d, EOF", s.prefix, m.File.Fd)
} else {
logrus.Debugf("|---> File Message (sender:%v) fd=%d, %d bytes", s.prefix, m.File.Fd, len(m.File.Data))
}
case *pb.Message_Resize:
logrus.Debugf("|---> Resize Message (sender:%v): %+v", s.prefix, m.Resize)
case *pb.Message_Signal:
logrus.Debugf("|---> Signal Message (sender:%v): %s", s.prefix, m.Signal.Name)
}
return s.msgStream.Send(msg)
}
func (s *debugStream) Recv() (*pb.Message, error) {
msg, err := s.msgStream.Recv()
if err != nil {
return nil, err
}
switch m := msg.GetInput().(type) {
case *pb.Message_File:
if m.File.EOF {
logrus.Debugf("|<--- File Message (receiver:%v) fd=%d, EOF", s.prefix, m.File.Fd)
} else {
logrus.Debugf("|<--- File Message (receiver:%v) fd=%d, %d bytes", s.prefix, m.File.Fd, len(m.File.Data))
}
case *pb.Message_Resize:
logrus.Debugf("|<--- Resize Message (receiver:%v): %+v", s.prefix, m.Resize)
case *pb.Message_Signal:
logrus.Debugf("|<--- Signal Message (receiver:%v): %s", s.prefix, m.Signal.Name)
}
return msg, nil
}

File diff suppressed because it is too large.

View File

@@ -0,0 +1,179 @@
syntax = "proto3";
package buildx.controller.v1;
import "github.com/moby/buildkit/api/services/control/control.proto";
option go_package = "pb";
service Controller {
rpc Build(BuildRequest) returns (BuildResponse);
rpc Status(StatusRequest) returns (stream StatusResponse);
rpc Input(stream InputMessage) returns (InputResponse);
rpc Invoke(stream Message) returns (stream Message);
rpc List(ListRequest) returns (ListResponse);
rpc Disconnect(DisconnectRequest) returns (DisconnectResponse);
rpc Info(InfoRequest) returns (InfoResponse);
}
message BuildRequest {
string Ref = 1;
BuildOptions Options = 2;
}
message BuildOptions {
string ContextPath = 1;
string DockerfileName = 2;
string PrintFunc = 3;
repeated string Allow = 4;
repeated string Attests = 5; // TODO
repeated string BuildArgs = 6;
repeated string CacheFrom = 7;
repeated string CacheTo = 8;
string CgroupParent = 9;
repeated string Contexts = 10;
repeated string ExtraHosts = 11;
string ImageIDFile = 12;
repeated string Labels = 13;
string NetworkMode = 14;
repeated string NoCacheFilter = 15;
repeated string Outputs = 16;
repeated string Platforms = 17;
bool Quiet = 18;
repeated string Secrets = 19;
int64 ShmSize = 20;
repeated string SSH = 21;
repeated string Tags = 22;
string Target = 23;
UlimitOpt Ulimits = 24;
// string Invoke: provided via Invoke API
CommonOptions Opts = 25;
}
message UlimitOpt {
map<string, Ulimit> values = 1;
}
message Ulimit {
string Name = 1;
int64 Hard = 2;
int64 Soft = 3;
}
message CommonOptions {
string Builder = 1;
string MetadataFile = 2;
bool NoCache = 3;
// string Progress: no progress view on server side
bool Pull = 4;
bool ExportPush = 5;
bool ExportLoad = 6;
string SBOM = 7; // TODO
string Provenance = 8; // TODO
}
message BuildResponse {}
message DisconnectRequest {
string Ref = 1;
}
message DisconnectResponse {}
message ListRequest {
string Ref = 1;
}
message ListResponse {
repeated string keys = 1;
}
message InputMessage {
oneof Input {
InputInitMessage Init = 1;
DataMessage Data = 2;
}
}
message InputInitMessage {
string Ref = 1;
}
message DataMessage {
bool EOF = 1; // true if eof was reached
bytes Data = 2; // should be chunked smaller than 4MB:
// https://pkg.go.dev/google.golang.org/grpc#MaxRecvMsgSize
}
message InputResponse {}
message Message {
oneof Input {
InitMessage Init = 1;
// FdMessage used from client to server for input (stdin) and
// from server to client for output (stdout, stderr)
FdMessage File = 2;
// ResizeMessage used from client to server for terminal resize events
ResizeMessage Resize = 3;
// SignalMessage is used from client to server to send signal events
SignalMessage Signal = 4;
}
}
message InitMessage {
string Ref = 1;
ContainerConfig ContainerConfig = 2;
}
message ContainerConfig {
repeated string Entrypoint = 1;
repeated string Cmd = 2;
repeated string Env = 3;
string User = 4;
bool NoUser = 5; // Do not set user but use the image's default
string Cwd = 6;
bool NoCwd = 7; // Do not set cwd but use the image's default
bool Tty = 8;
}
message FdMessage {
uint32 Fd = 1; // what fd the data was from
bool EOF = 2; // true if eof was reached
bytes Data = 3; // should be chunked smaller than 4MB:
// https://pkg.go.dev/google.golang.org/grpc#MaxRecvMsgSize
}
message ResizeMessage {
uint32 Rows = 1;
uint32 Cols = 2;
}
message SignalMessage {
// we only send name (ie HUP, INT) because the int values
// are platform dependent.
string Name = 1;
}
message StatusRequest {
string Ref = 1;
}
message StatusResponse {
repeated moby.buildkit.v1.Vertex vertexes = 1;
repeated moby.buildkit.v1.VertexStatus statuses = 2;
repeated moby.buildkit.v1.VertexLog logs = 3;
repeated moby.buildkit.v1.VertexWarning warnings = 4;
}
message InfoRequest {}
message InfoResponse {
BuildxVersion buildxVersion = 1;
}
message BuildxVersion {
string package = 1;
string version = 2;
string revision = 3;
}
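The Invoke RPC defined above is a single bidirectional stream that multiplexes the container's stdio as FdMessages after an initial handshake. A rough client-side sketch, assuming the generated pb stubs and an established *grpc.ClientConn named conn (that Init must be the first message is an inference from the message layout, not stated in the proto):

stream, err := pb.NewControllerClient(conn).Invoke(ctx)
if err != nil {
	return err
}
// Identify the build to attach to before any stdio flows.
if err := stream.Send(&pb.Message{
	Input: &pb.Message_Init{
		Init: &pb.InitMessage{
			Ref:             ref,
			ContainerConfig: &pb.ContainerConfig{Tty: true},
		},
	},
}); err != nil {
	return err
}
// From here, stdin travels client to server as fd=0 FdMessages and the
// server answers on fd=1/fd=2; copyToStream above shows the chunked framing
// that keeps each message under the 4MB gRPC default.

The same sub-4MB chunking note on DataMessage applies to the Input RPC.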

View File

@ -0,0 +1,3 @@
package pb
//go:generate protoc -I=. -I=../../../vendor/ --gogo_out=plugins=grpc:. controller.proto

View File

@ -0,0 +1,78 @@
package commands
import (
"context"
"fmt"
"io"
"github.com/containerd/console"
"github.com/docker/buildx/build"
controllerapi "github.com/docker/buildx/commands/controller/pb"
"github.com/docker/buildx/monitor"
"github.com/docker/cli/cli/command"
)
func newLocalBuildxController(ctx context.Context, dockerCli command.Cli) monitor.BuildxController {
return &localController{
dockerCli: dockerCli,
ref: "local",
}
}
type localController struct {
dockerCli command.Cli
ref string
resultCtx *build.ResultContext
}
func (b *localController) Invoke(ctx context.Context, ref string, cfg controllerapi.ContainerConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
if ref != b.ref {
return fmt.Errorf("unknown ref %q", ref)
}
if b.resultCtx == nil {
return fmt.Errorf("no build result is registered")
}
ccfg := build.ContainerConfig{
ResultCtx: b.resultCtx,
Entrypoint: cfg.Entrypoint,
Cmd: cfg.Cmd,
Env: cfg.Env,
Tty: cfg.Tty,
Stdin: ioIn,
Stdout: ioOut,
Stderr: ioErr,
}
if !cfg.NoUser {
ccfg.User = &cfg.User
}
if !cfg.NoCwd {
ccfg.Cwd = &cfg.Cwd
}
return build.Invoke(ctx, ccfg)
}
func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (string, error) {
res, err := runBuildWithContext(ctx, b.dockerCli, options, in, progressMode, nil)
if err != nil {
return "", err
}
b.resultCtx = res
return b.ref, nil
}
func (b *localController) Kill(context.Context) error {
return nil // nop
}
func (b *localController) Close() error {
// TODO: cancel current build and invoke
return nil
}
func (b *localController) List(ctx context.Context) (res []string, _ error) {
return []string{b.ref}, nil
}
func (b *localController) Disconnect(ctx context.Context, key string) error {
return nil // nop
}

View File

@ -0,0 +1,311 @@
//go:build linux
package commands
import (
"context"
"fmt"
"io"
"net"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strconv"
"syscall"
"time"
"github.com/containerd/containerd/log"
"github.com/docker/buildx/build"
"github.com/docker/buildx/commands/controller"
controllerapi "github.com/docker/buildx/commands/controller/pb"
"github.com/docker/buildx/monitor"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/version"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
"github.com/pelletier/go-toml"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc"
)
const (
serveCommandName = "_INTERNAL_SERVE"
)
type serverConfig struct {
// Specify buildx server root
Root string `toml:"root"`
// LogLevel sets the logging level [trace, debug, info, warn, error, fatal, panic]
LogLevel string `toml:"log_level"`
// Specify file to output buildx server log
LogFile string `toml:"log_file"`
}
func newRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts buildOptions) (monitor.BuildxController, error) {
rootDir := opts.root
if rootDir == "" {
rootDir = rootDataDir(dockerCli)
}
serverRoot := filepath.Join(rootDir, "shared")
c, err := newBuildxClientAndCheck(filepath.Join(serverRoot, "buildx.sock"), 1, 0)
if err != nil {
logrus.Info("no buildx server found; launching...")
// start buildx server via subcommand
launchFlags := []string{}
if opts.serverConfig != "" {
launchFlags = append(launchFlags, "--config", opts.serverConfig)
}
logFile, err := getLogFilePath(dockerCli, opts.serverConfig)
if err != nil {
return nil, err
}
wait, err := launch(ctx, logFile, append([]string{serveCommandName}, launchFlags...)...)
if err != nil {
return nil, err
}
go wait()
c, err = newBuildxClientAndCheck(filepath.Join(serverRoot, "buildx.sock"), 10, time.Second)
if err != nil {
return nil, fmt.Errorf("cannot connect to the buildx server: %w", err)
}
}
return &buildxController{c, serverRoot}, nil
}
func addControllerCommands(cmd *cobra.Command, dockerCli command.Cli, rootOpts *rootOptions) {
cmd.AddCommand(
serveCmd(dockerCli, rootOpts),
)
}
func serveCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
var serverConfigPath string
cmd := &cobra.Command{
Use: fmt.Sprintf("%s [OPTIONS]", serveCommandName),
Hidden: true,
RunE: func(cmd *cobra.Command, args []string) error {
// Parse config
config, err := getConfig(dockerCli, serverConfigPath)
if err != nil {
return fmt.Errorf("failed to get config")
}
if config.LogLevel == "" {
logrus.SetLevel(logrus.InfoLevel)
} else {
lvl, err := logrus.ParseLevel(config.LogLevel)
if err != nil {
return fmt.Errorf("failed to prepare logger: %w", err)
}
logrus.SetLevel(lvl)
}
logrus.SetFormatter(&logrus.JSONFormatter{
TimestampFormat: log.RFC3339NanoFixed,
})
root, err := prepareRootDir(dockerCli, config)
if err != nil {
return err
}
pidF := filepath.Join(root, "pid")
if err := os.WriteFile(pidF, []byte(fmt.Sprintf("%d", os.Getpid())), 0600); err != nil {
return err
}
defer func() {
if err := os.Remove(pidF); err != nil {
logrus.Errorf("failed to clean up info file %q: %v", pidF, err)
}
}()
// prepare server
b := controller.New(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, statusChan chan *client.SolveStatus) (res *build.ResultContext, err error) {
return runBuildWithContext(ctx, dockerCli, *options, stdin, "quiet", statusChan)
})
defer b.Close()
// serve server
addr := filepath.Join(root, "buildx.sock")
if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { // avoid EADDRINUSE
return err
}
defer func() {
if err := os.Remove(addr); err != nil {
logrus.Errorf("failed to clean up socket %q: %v", addr, err)
}
}()
logrus.Infof("starting server at %q", addr)
l, err := net.Listen("unix", addr)
if err != nil {
return err
}
rpc := grpc.NewServer()
controllerapi.RegisterControllerServer(rpc, b)
doneCh := make(chan struct{})
errCh := make(chan error, 1)
go func() {
defer close(doneCh)
if err := rpc.Serve(l); err != nil {
errCh <- fmt.Errorf("error on serving via socket %q: %w", addr, err)
}
}()
var s os.Signal
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt)
select {
case s = <-sigCh:
logrus.Debugf("got signal %v", s)
case err := <-errCh:
return err
case <-doneCh:
}
return nil
},
}
flags := cmd.Flags()
flags.StringVar(&serverConfigPath, "config", "", "Specify buildx server config file")
return cmd
}
func getLogFilePath(dockerCli command.Cli, configPath string) (string, error) {
config, err := getConfig(dockerCli, configPath)
if err != nil {
return "", fmt.Errorf("failed to get config")
}
logFile := config.LogFile
if logFile == "" {
root, err := prepareRootDir(dockerCli, config)
if err != nil {
return "", err
}
logFile = filepath.Join(root, "log")
}
return logFile, nil
}
func getConfig(dockerCli command.Cli, configPath string) (*serverConfig, error) {
var defaultConfigPath bool
if configPath == "" {
defaultRoot := rootDataDir(dockerCli)
configPath = filepath.Join(defaultRoot, "config.toml")
defaultConfigPath = true
}
var config serverConfig
tree, err := toml.LoadFile(configPath)
if err != nil && !(os.IsNotExist(err) && defaultConfigPath) {
return nil, fmt.Errorf("failed to load config file %q", configPath)
} else if err == nil {
if err := tree.Unmarshal(&config); err != nil {
return nil, fmt.Errorf("failed to unmarshal config file %q", configPath)
}
}
return &config, nil
}
func prepareRootDir(dockerCli command.Cli, config *serverConfig) (string, error) {
rootDir := config.Root
if rootDir == "" {
rootDir = rootDataDir(dockerCli)
}
if rootDir == "" {
return "", fmt.Errorf("buildx root dir must be determined")
}
if err := os.MkdirAll(rootDir, 0700); err != nil {
return "", err
}
serverRoot := filepath.Join(rootDir, "shared")
if err := os.MkdirAll(serverRoot, 0700); err != nil {
return "", err
}
return serverRoot, nil
}
func rootDataDir(dockerCli command.Cli) string {
return filepath.Join(confutil.ConfigDir(dockerCli), "controller")
}
func newBuildxClientAndCheck(addr string, checkNum int, duration time.Duration) (*controller.Client, error) {
c, err := controller.NewClient(addr)
if err != nil {
return nil, err
}
var lastErr error
for i := 0; i < checkNum; i++ {
_, err := c.List(context.TODO())
if err == nil {
lastErr = nil
break
}
err = fmt.Errorf("failed to access server (tried %d times): %w", i, err)
logrus.Debugf("connection failure: %v", err)
lastErr = err
time.Sleep(duration)
}
if lastErr != nil {
return nil, lastErr
}
p, v, r, err := c.Version(context.TODO())
if err != nil {
return nil, err
}
logrus.Debugf("connected to server (\"%v %v %v\")", p, v, r)
if !(p == version.Package && v == version.Version && r == version.Revision) {
logrus.Warnf("version mismatch (server: \"%v %v %v\", client: \"%v %v %v\"); please kill and restart buildx server",
p, v, r, version.Package, version.Version, version.Revision)
}
return c, nil
}
type buildxController struct {
*controller.Client
serverRoot string
}
func (c *buildxController) Kill(ctx context.Context) error {
pidB, err := os.ReadFile(filepath.Join(c.serverRoot, "pid"))
if err != nil {
return err
}
pid, err := strconv.ParseInt(string(pidB), 10, 64)
if err != nil {
return err
}
if pid <= 0 {
return fmt.Errorf("no PID is recorded for buildx server")
}
p, err := os.FindProcess(int(pid))
if err != nil {
return err
}
if err := p.Signal(syscall.SIGINT); err != nil {
return err
}
// TODO: Should we send SIGKILL if process doesn't finish?
return nil
}
func launch(ctx context.Context, logFile string, args ...string) (func() error, error) {
bCmd := exec.CommandContext(ctx, os.Args[0], args...)
if logFile != "" {
f, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
defer f.Close()
bCmd.Stdout = f
bCmd.Stderr = f
}
bCmd.Stdin = nil
bCmd.Dir = "/"
bCmd.SysProcAttr = &syscall.SysProcAttr{
Setsid: true,
}
if err := bCmd.Start(); err != nil {
return nil, err
}
return bCmd.Wait, nil
}
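serveCmd exposes the controller on a unix socket under the buildx config dir, so a client only needs a gRPC connection wired through a unix dialer. A minimal sketch of that pattern (an assumption about what controller.NewClient does internally, not a quote of it):

func dialUnix(ctx context.Context, sockPath string) (*grpc.ClientConn, error) {
	return grpc.DialContext(ctx, sockPath,
		grpc.WithInsecure(), // local unix socket; no TLS in play
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", addr)
		}),
	)
}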

View File

@ -0,0 +1,18 @@
//go:build !linux
package commands
import (
"context"
"fmt"
"github.com/docker/buildx/monitor"
"github.com/docker/cli/cli/command"
"github.com/spf13/cobra"
)
func newRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts buildOptions) (monitor.BuildxController, error) {
return nil, fmt.Errorf("remote buildx unsupported")
}
func addControllerCommands(cmd *cobra.Command, dockerCli command.Cli, rootOpts *rootOptions) {}

View File

@ -86,6 +86,9 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
duCmd(dockerCli, opts),
imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
)
if isExperimental() {
addControllerCommands(cmd, dockerCli, opts)
}
}
func rootFlags(options *rootOptions, flags *pflag.FlagSet) {