Compare commits

..

15 Commits

Author SHA1 Message Date
CrazyMax
7c2359c6bf Merge pull request #3018 from crazy-max/0.21_picks_0.21.1
[v0.21] cherry-picks for v0.21.1
2025-02-21 13:22:55 +01:00
Tonis Tiigi
65a52b5272 remove accidental debug
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
2025-02-21 09:43:43 +01:00
Jonathan A. Sternberg
34ed52e4ae Merge pull request #3011 from jsternberg/v0.21.0-picks
[v0.21] cherry-picks for v0.21.0
2025-02-19 13:54:05 -06:00
Jonathan A. Sternberg
a05bbb9e3d vendor: github.com/moby/buildkit v0.20.0
Signed-off-by: Jonathan A. Sternberg <jonathan.sternberg@docker.com>
(cherry picked from commit 80c3832c94)
2025-02-19 13:53:30 -06:00
CrazyMax
d9f8c73375 Merge pull request #3004 from crazy-max/0.21_picks_0.21.0-rc3
[v0.21] cherry-picks 0.21.0-rc3
2025-02-18 22:55:57 +01:00
CrazyMax
af5d0d4ab5 vendor: update buildkit to v0.20.0-rc3
Signed-off-by: CrazyMax <1951866+crazy-max@users.noreply.github.com>
2025-02-18 22:29:44 +01:00
Tõnis Tiigi
20256b6999 support for device entitlement in build and bake
Allow access to CDI devices in BuildKit v0.20.0+ for
devices that are not automatically allowed to be used by
everyone in the BuildKit configuration.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Signed-off-by: CrazyMax <1951866+crazy-max@users.noreply.github.com>
2025-02-18 22:29:43 +01:00
CrazyMax
c09d38af8a cache: fix gha cache url handling
Signed-off-by: CrazyMax <1951866+crazy-max@users.noreply.github.com>
2025-02-18 21:25:21 +01:00
CrazyMax
9430ed6752 cache: enable gha cache backend if cache service v2 detected
Signed-off-by: CrazyMax <1951866+crazy-max@users.noreply.github.com>
2025-02-18 21:25:20 +01:00
Tonis Tiigi
3b31a33d59 progress: fix race on pausing progress on debug shell
The current progress writer has logic for pausing/unpausing
the printer, which internally recreates its channels.

This conflicts with a change that added sync.Once to Wait
so that it can be called multiple times without erroring.

In the debug shell this could mean that a new progress printer
showed up because the previous one was not closed.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
2025-02-18 11:01:09 +01:00
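For context on the pattern this commit message describes, here is a minimal, illustrative Go sketch (assumed shape only, not the actual buildx progress package) of a Wait guarded by sync.Once; the race above comes from combining such a guard with pause/unpause logic that recreates the printer's channels.

// Minimal, illustrative sketch (assumed shape, not the actual buildx progress
// code): a printer whose Wait is wrapped in sync.Once so it can be called
// multiple times without erroring.
package main

import (
	"fmt"
	"sync"
	"time"
)

type printer struct {
	once sync.Once
	done chan struct{}
	err  error
}

func newPrinter() *printer { return &printer{done: make(chan struct{})} }

// finish ends the current printing session.
func (p *printer) finish() { close(p.done) }

// Wait blocks until finish is called; the blocking receive runs only once,
// so repeated Wait calls return immediately instead of erroring.
func (p *printer) Wait() error {
	p.once.Do(func() { <-p.done })
	return p.err
}

func main() {
	p := newPrinter()
	go func() {
		time.Sleep(10 * time.Millisecond)
		p.finish()
	}()
	fmt.Println(p.Wait()) // blocks until finish
	fmt.Println(p.Wait()) // returns immediately, sync.Once already fired
}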
CrazyMax
5e5568f3cd Merge pull request #2996 from crazy-max/0.21_backport_docker-28
[v0.21 backport] vendor: docker, docker/cli v28.0.0-rc.2
2025-02-17 17:53:00 +01:00
Sebastiaan van Stijn
9ac9a4170b vendor: github.com/docker/cli v28.0.0-rc.2
full diff: https://github.com/docker/cli/compare/v28.0.0-rc.1...v28.0.0-rc.2

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2025-02-17 17:09:23 +01:00
Sebastiaan van Stijn
8d1ba91dcb vendor: github.com/docker/docker v28.0.0-rc.2
full diff: https://github.com/docker/docker/compare/v28.0.0-rc.1...v28.0.0-rc.2

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2025-02-17 17:09:23 +01:00
Sebastiaan van Stijn
0df7a75961 vendor: github.com/docker/cli/v28.0.0-rc.1
full diff: https://github.com/docker/cli/compare/v27.5.1..v28.0.0-rc.1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2025-02-14 14:07:46 +01:00
Sebastiaan van Stijn
de5fbc38b8 vendor: github.com/docker/docker/v28.0.0-rc.1
full diff: https://github.com/docker/docker/compare/v27.5.1..v28.0.0-rc.1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2025-02-14 14:07:46 +01:00
984 changed files with 102734 additions and 14346 deletions

View File

@@ -54,9 +54,9 @@ jobs:
- master
- latest
- buildx-stable-1
- v0.20.2
- v0.19.0
- v0.18.2
- v0.17.2
worker:
- docker-container
- remote
@@ -76,16 +76,6 @@ jobs:
- worker: docker+containerd # same as docker, but with containerd snapshotter
pkg: ./tests
mode: experimental
- worker: "docker@27.5"
pkg: ./tests
- worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
pkg: ./tests
- worker: "docker@27.5"
pkg: ./tests
mode: experimental
- worker: "docker+containerd@27.5" # same as docker, but with containerd snapshotter
pkg: ./tests
mode: experimental
- worker: "docker@26.1"
pkg: ./tests
- worker: "docker+containerd@26.1" # same as docker, but with containerd snapshotter
@@ -258,20 +248,12 @@ jobs:
matrix:
os:
- freebsd
- netbsd
- openbsd
env:
# https://github.com/hashicorp/vagrant/issues/13652
VAGRANT_DISABLE_STRICT_DEPENDENCY_ENFORCEMENT: 1
steps:
-
name: Prepare
run: |
echo "VAGRANT_FILE=hack/Vagrantfile.${{ matrix.os }}" >> $GITHUB_ENV
# Sets semver Go version to be able to download tarball during vagrant setup
goVersion=$(curl --silent "https://go.dev/dl/?mode=json&include=all" | jq -r '.[].files[].version' | uniq | sed -e 's/go//' | sort -V | grep $GO_VERSION | tail -1)
echo "GO_VERSION=$goVersion" >> $GITHUB_ENV
-
name: Checkout
uses: actions/checkout@v4
@@ -414,18 +396,6 @@ jobs:
- test-unit
if: ${{ github.event_name != 'pull_request' && github.repository == 'docker/buildx' }}
steps:
-
name: Free disk space
uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
with:
android: true
dotnet: true
haskell: true
large-packages: true
swap-storage: true
-
name: Checkout
uses: actions/checkout@v4
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -459,10 +429,9 @@ jobs:
name: Build and push image
uses: docker/bake-action@v6
with:
source: .
files: |
./docker-bake.hcl
${{ steps.meta.outputs.bake-file }}
cwd://${{ steps.meta.outputs.bake-file }}
targets: image-cross
push: ${{ github.event_name != 'pull_request' }}
sbom: true
@@ -535,7 +504,7 @@ jobs:
-
name: GitHub Release
if: startsWith(github.ref, 'refs/tags/v')
uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 # v2.2.2
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:

View File

@@ -77,7 +77,7 @@ jobs:
VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }}
-
name: Create PR on docs repo
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f # v7.0.6
with:
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
push-to-fork: docker-tools-robot/docker.github.io

View File

@@ -29,7 +29,7 @@ env:
SETUP_BUILDX_VERSION: "edge"
SETUP_BUILDKIT_IMAGE: "moby/buildkit:latest"
DESTDIR: "./bin"
K3S_VERSION: "v1.32.2+k3s1"
K3S_VERSION: "v1.21.2-k3s1"
jobs:
build:
@@ -65,7 +65,7 @@ jobs:
retention-days: 7
driver:
runs-on: ubuntu-24.04
runs-on: ubuntu-20.04
needs:
- build
strategy:
@@ -153,7 +153,7 @@ jobs:
-
name: Install k3s
if: matrix.driver == 'kubernetes'
uses: crazy-max/.github/.github/actions/install-k3s@7730d1434364d4b9aded32735b078a7ace5ea79a
uses: crazy-max/.github/.github/actions/install-k3s@fa6141aedf23596fb8bdcceab9cce8dadaa31bd9
with:
version: ${{ env.K3S_VERSION }}
-

View File

@@ -1,17 +0,0 @@
name: pr-assign-author
permissions:
contents: read
on:
pull_request_target:
types:
- opened
- reopened
jobs:
run:
uses: crazy-max/.github/.github/workflows/pr-assign-author.yml@c27924b5b93ccfe6dcc0d7b22e779ef3c05f9a92
permissions:
contents: read
pull-requests: write

View File

@@ -1,6 +1,9 @@
run:
timeout: 30m
modules-download-mode: vendor
# default uses Go version from the go.mod file, fallback on the env var
# `GOVERSION`, fallback on 1.17: https://golangci-lint.run/usage/configuration/#run-configuration
go: "1.23"
linters:
enable:

View File

@@ -5,23 +5,20 @@ ARG ALPINE_VERSION=3.21
ARG XX_VERSION=1.6.1
# for testing
ARG DOCKER_VERSION=28.1.0
ARG DOCKER_VERSION_ALT_27=27.5.1
ARG DOCKER_VERSION=28.0.0-rc.1
ARG DOCKER_VERSION_ALT_26=26.1.3
ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
ARG GOTESTSUM_VERSION=v1.12.0
ARG REGISTRY_VERSION=3.0.0
ARG BUILDKIT_VERSION=v0.21.0
ARG REGISTRY_VERSION=2.8.3
ARG BUILDKIT_VERSION=v0.19.0
ARG UNDOCK_VERSION=0.9.0
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golatest
FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine
FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli
FROM moby/moby-bin:$DOCKER_VERSION_ALT_27 AS docker-engine-alt27
FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt26
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_27 AS docker-cli-alt27
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt26
FROM moby/moby-bin:$DOCKER_VERSION_ALT_26 AS docker-engine-alt
FROM dockereng/cli-bin:$DOCKER_VERSION_ALT_26 AS docker-cli-alt
FROM registry:$REGISTRY_VERSION AS registry
FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
FROM crazymax/undock:$UNDOCK_VERSION AS undock
@@ -105,7 +102,6 @@ COPY --link --from=buildx-build /usr/bin/docker-buildx /buildx
FROM binaries-unix AS binaries-darwin
FROM binaries-unix AS binaries-freebsd
FROM binaries-unix AS binaries-linux
FROM binaries-unix AS binaries-netbsd
FROM binaries-unix AS binaries-openbsd
FROM scratch AS binaries-windows
@@ -131,15 +127,13 @@ COPY --link --from=gotestsum /out /usr/bin/
COPY --link --from=registry /bin/registry /usr/bin/
COPY --link --from=docker-engine / /usr/bin/
COPY --link --from=docker-cli / /usr/bin/
COPY --link --from=docker-engine-alt27 / /opt/docker-alt-27/
COPY --link --from=docker-engine-alt26 / /opt/docker-alt-26/
COPY --link --from=docker-cli-alt27 / /opt/docker-alt-27/
COPY --link --from=docker-cli-alt26 / /opt/docker-alt-26/
COPY --link --from=docker-engine-alt / /opt/docker-alt-26/
COPY --link --from=docker-cli-alt / /opt/docker-alt-26/
COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
COPY --link --from=undock /usr/local/bin/undock /usr/bin/
COPY --link --from=binaries /buildx /usr/bin/
ENV TEST_DOCKER_EXTRA="docker@27.5=/opt/docker-alt-27,docker@26.1=/opt/docker-alt-26"
ENV TEST_DOCKER_EXTRA="docker@26.1=/opt/docker-alt-26"
FROM integration-test-base AS integration-test
COPY . .

View File

@@ -21,7 +21,7 @@
- [Verify essential information](#verify-essential-information)
- [Classify the issue](#classify-the-issue)
- [Prioritization guidelines for `kind/bug`](#prioritization-guidelines-for-kindbug)
- [Issue lifecycle](#issue-lifecycle)
- [Issue lifecyle](#issue-lifecyle)
- [Examples](#examples)
- [Submitting a bug](#submitting-a-bug)
- [Pull request review process](#pull-request-review-process)
@@ -308,7 +308,7 @@ Examples:
- Bugs in non-default configurations
- Most enhancements
## Issue lifecycle
## Issue lifecyle
```mermaid
flowchart LR

View File

@@ -3,9 +3,7 @@ package bake
import (
"context"
"encoding"
"encoding/json"
"io"
"maps"
"os"
"path"
"path/filepath"
@@ -47,7 +45,6 @@ type File struct {
type Override struct {
Value string
ArrValue []string
Append bool
}
func defaultFilenames() []string {
@@ -488,8 +485,10 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st
if target == name {
return errors.Errorf("target %s cannot link to itself", target)
}
if slices.Contains(visited, target) {
return errors.Errorf("infinite loop from %s to %s", name, target)
for _, v := range visited {
if v == target {
return errors.Errorf("infinite loop from %s to %s", name, target)
}
}
t2, ok := m[target]
if !ok {
@@ -529,12 +528,9 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
m := map[string]map[string]Override{}
for _, v := range v {
parts := strings.SplitN(v, "=", 2)
skey := strings.TrimSuffix(parts[0], "+")
appendTo := strings.HasSuffix(parts[0], "+")
keys := strings.SplitN(skey, ".", 3)
keys := strings.SplitN(parts[0], ".", 3)
if len(keys) < 2 {
return nil, errors.Errorf("invalid override key %s, expected target.name", skey)
return nil, errors.Errorf("invalid override key %s, expected target.name", parts[0])
}
pattern := keys[0]
@@ -547,7 +543,8 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
return nil, err
}
okey := strings.Join(keys[1:], ".")
kk := strings.SplitN(parts[0], ".", 2)
for _, name := range names {
t, ok := m[name]
if !ok {
@@ -555,15 +552,14 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
m[name] = t
}
override := t[okey]
o := t[kk[1]]
// IMPORTANT: if you add more fields here, do not forget to update
// docs/reference/buildx_bake.md (--set) and https://docs.docker.com/build/bake/overrides/
// docs/bake-reference.md and https://docs.docker.com/build/bake/overrides/
switch keys[1] {
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network", "annotations":
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network":
if len(parts) == 2 {
override.Append = appendTo
override.ArrValue = append(override.ArrValue, parts[1])
o.ArrValue = append(o.ArrValue, parts[1])
}
case "args":
if len(keys) != 3 {
@@ -574,7 +570,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
if !ok {
continue
}
override.Value = v
o.Value = v
}
fallthrough
case "contexts":
@@ -584,11 +580,11 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
fallthrough
default:
if len(parts) == 2 {
override.Value = parts[1]
o.Value = parts[1]
}
}
t[okey] = override
t[kk[1]] = o
}
}
return m, nil
@@ -734,41 +730,6 @@ type Target struct {
linked bool
}
func (t *Target) MarshalJSON() ([]byte, error) {
tgt := *t
esc := func(s string) string {
return strings.ReplaceAll(strings.ReplaceAll(s, "${", "$${"), "%{", "%%{")
}
tgt.Annotations = slices.Clone(t.Annotations)
for i, v := range tgt.Annotations {
tgt.Annotations[i] = esc(v)
}
if tgt.DockerfileInline != nil {
escaped := esc(*tgt.DockerfileInline)
tgt.DockerfileInline = &escaped
}
tgt.Labels = maps.Clone(t.Labels)
for k, v := range t.Labels {
if v != nil {
escaped := esc(*v)
tgt.Labels[k] = &escaped
}
}
tgt.Args = maps.Clone(t.Args)
for k, v := range t.Args {
if v != nil {
escaped := esc(*v)
tgt.Args[k] = &escaped
}
}
return json.Marshal(tgt)
}
var (
_ hclparser.WithEvalContexts = &Target{}
_ hclparser.WithGetName = &Target{}
@@ -935,21 +896,13 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
}
t.Labels[keys[1]] = &value
case "tags":
if o.Append {
t.Tags = append(t.Tags, o.ArrValue...)
} else {
t.Tags = o.ArrValue
}
t.Tags = o.ArrValue
case "cache-from":
cacheFrom, err := buildflags.ParseCacheEntry(o.ArrValue)
if err != nil {
return err
}
if o.Append {
t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
} else {
t.CacheFrom = cacheFrom
}
t.CacheFrom = cacheFrom
for _, c := range t.CacheFrom {
if c.Type == "local" {
if v, ok := c.Attrs["src"]; ok {
@@ -962,11 +915,7 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
if err != nil {
return err
}
if o.Append {
t.CacheTo = t.CacheTo.Merge(cacheTo)
} else {
t.CacheTo = cacheTo
}
t.CacheTo = cacheTo
for _, c := range t.CacheTo {
if c.Type == "local" {
if v, ok := c.Attrs["dest"]; ok {
@@ -983,11 +932,7 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
if err != nil {
return errors.Wrap(err, "invalid value for outputs")
}
if o.Append {
t.Secrets = t.Secrets.Merge(secrets)
} else {
t.Secrets = secrets
}
t.Secrets = secrets
for _, s := range t.Secrets {
if s.FilePath != "" {
ent.FSRead = append(ent.FSRead, s.FilePath)
@@ -998,30 +943,18 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
if err != nil {
return errors.Wrap(err, "invalid value for outputs")
}
if o.Append {
t.SSH = t.SSH.Merge(ssh)
} else {
t.SSH = ssh
}
t.SSH = ssh
for _, s := range t.SSH {
ent.FSRead = append(ent.FSRead, s.Paths...)
}
case "platform":
if o.Append {
t.Platforms = append(t.Platforms, o.ArrValue...)
} else {
t.Platforms = o.ArrValue
}
t.Platforms = o.ArrValue
case "output":
outputs, err := parseArrValue[buildflags.ExportEntry](o.ArrValue)
if err != nil {
return errors.Wrap(err, "invalid value for outputs")
}
if o.Append {
t.Outputs = t.Outputs.Merge(outputs)
} else {
t.Outputs = outputs
}
t.Outputs = outputs
for _, o := range t.Outputs {
if o.Destination != "" {
ent.FSWrite = append(ent.FSWrite, o.Destination)
@@ -1051,19 +984,11 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
}
t.NoCache = &noCache
case "no-cache-filter":
if o.Append {
t.NoCacheFilter = append(t.NoCacheFilter, o.ArrValue...)
} else {
t.NoCacheFilter = o.ArrValue
}
t.NoCacheFilter = o.ArrValue
case "shm-size":
t.ShmSize = &value
case "ulimits":
if o.Append {
t.Ulimits = append(t.Ulimits, o.ArrValue...)
} else {
t.Ulimits = o.ArrValue
}
t.Ulimits = o.ArrValue
case "network":
t.NetworkMode = &value
case "pull":
@@ -1141,7 +1066,9 @@ func (t *Target) GetEvalContexts(ectx *hcl.EvalContext, block *hcl.Block, loadDe
e2 := ectx.NewChild()
e2.Variables = make(map[string]cty.Value)
if e != ectx {
maps.Copy(e2.Variables, e.Variables)
for k, v := range e.Variables {
e2.Variables[k] = v
}
}
e2.Variables[k] = v
ectxs2 = append(ectxs2, e2)
@@ -1360,8 +1287,8 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
if strings.HasPrefix(bi.ContextPath, "cwd://") {
bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
}
if !build.IsRemoteURL(bi.ContextPath) && bi.ContextState == nil && !filepath.IsAbs(bi.DockerfilePath) {
bi.DockerfilePath = filepath.Join(bi.ContextPath, bi.DockerfilePath)
if !build.IsRemoteURL(bi.ContextPath) && bi.ContextState == nil && !path.IsAbs(bi.DockerfilePath) {
bi.DockerfilePath = path.Join(bi.ContextPath, bi.DockerfilePath)
}
for k, v := range bi.NamedContexts {
if strings.HasPrefix(v.Path, "cwd://") {
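The newOverrides and AddOverrides hunks above add, on one side of this compare, a trailing "+" key syntax so that --set values append to list fields rather than replace them; the bake tests later in this diff exercise it as webapp.platform+=linux/arm64. A condensed, hedged sketch of that key parsing, with simplified names (not the full buildx implementation):

// Condensed sketch of the "+=" override parsing shown above; names are
// simplified and this is not the full buildx implementation.
package main

import (
	"fmt"
	"strings"
)

type override struct {
	Value    string
	ArrValue []string
	Append   bool
}

// parseOverride splits a --set argument and detects the trailing "+" that
// marks an append override ("webapp.platform+=linux/arm64").
func parseOverride(kv string) (string, override) {
	var o override
	parts := strings.SplitN(kv, "=", 2)
	key := strings.TrimSuffix(parts[0], "+")
	o.Append = strings.HasSuffix(parts[0], "+")
	if len(parts) == 2 {
		o.ArrValue = append(o.ArrValue, parts[1])
	}
	return key, o
}

func main() {
	k, o := parseOverride("webapp.platform+=linux/arm64")
	fmt.Println(k, o.Append, o.ArrValue) // webapp.platform true [linux/arm64]
}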

View File

@@ -34,18 +34,6 @@ target "webapp" {
args = {
VAR_BOTH = "webapp"
}
annotations = [
"index,manifest:org.opencontainers.image.authors=dvdksn"
]
attest = [
"type=provenance,mode=max"
]
platforms = [
"linux/amd64"
]
secret = [
"id=FOO,env=FOO"
]
inherits = ["webDEP"]
}`),
}
@@ -127,31 +115,6 @@ target "webapp" {
})
})
t.Run("AnnotationsOverrides", func(t *testing.T) {
t.Parallel()
m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.annotations=index,manifest:org.opencontainers.image.vendor=docker"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, []string{"index,manifest:org.opencontainers.image.authors=dvdksn", "index,manifest:org.opencontainers.image.vendor=docker"}, m["webapp"].Annotations)
require.Equal(t, 1, len(g))
require.Equal(t, []string{"webapp"}, g["default"].Targets)
})
t.Run("AttestOverride", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.attest=type=sbom"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Len(t, m["webapp"].Attest, 2)
require.Equal(t, "provenance", m["webapp"].Attest[0].Type)
require.Equal(t, "sbom", m["webapp"].Attest[1].Type)
})
t.Run("AttestAppend", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.attest+=type=sbom"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Len(t, m["webapp"].Attest, 2)
require.Equal(t, "provenance", m["webapp"].Attest[0].Type)
require.Equal(t, "sbom", m["webapp"].Attest[1].Type)
})
t.Run("ContextOverride", func(t *testing.T) {
t.Parallel()
_, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context"}, nil, &EntitlementConf{})
@@ -173,49 +136,6 @@ target "webapp" {
require.Equal(t, []string{"webapp"}, g["default"].Targets)
})
t.Run("PlatformOverride", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform=linux/arm64"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, []string{"linux/arm64"}, m["webapp"].Platforms)
})
t.Run("PlatformAppend", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform+=linux/arm64"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, m["webapp"].Platforms)
})
t.Run("PlatformAppendMulti", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform+=linux/arm64", "webapp.platform+=linux/riscv64"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, []string{"linux/amd64", "linux/arm64", "linux/riscv64"}, m["webapp"].Platforms)
})
t.Run("PlatformAppendMultiLastOverride", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.platform+=linux/arm64", "webapp.platform=linux/riscv64"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Equal(t, []string{"linux/arm64", "linux/riscv64"}, m["webapp"].Platforms)
})
t.Run("SecretsOverride", func(t *testing.T) {
t.Setenv("FOO", "foo")
t.Setenv("BAR", "bar")
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.secrets=id=BAR,env=BAR"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Len(t, m["webapp"].Secrets, 1)
require.Equal(t, "BAR", m["webapp"].Secrets[0].ID)
})
t.Run("SecretsAppend", func(t *testing.T) {
t.Setenv("FOO", "foo")
t.Setenv("BAR", "bar")
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.secrets+=id=BAR,env=BAR"}, nil, &EntitlementConf{})
require.NoError(t, err)
require.Len(t, m["webapp"].Secrets, 2)
require.Equal(t, "FOO", m["webapp"].Secrets[0].ID)
require.Equal(t, "BAR", m["webapp"].Secrets[1].ID)
})
t.Run("ShmSizeOverride", func(t *testing.T) {
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.shm-size=256m"}, nil, &EntitlementConf{})
require.NoError(t, err)
@@ -692,7 +612,7 @@ func TestHCLContextCwdPrefix(t *testing.T) {
require.Contains(t, m, "app")
assert.Equal(t, "test", *m["app"].Dockerfile)
assert.Equal(t, "foo", *m["app"].Context)
assert.Equal(t, filepath.Clean("foo/test"), bo["app"].Inputs.DockerfilePath)
assert.Equal(t, "foo/test", bo["app"].Inputs.DockerfilePath)
assert.Equal(t, "foo", bo["app"].Inputs.ContextPath)
}
@@ -2142,73 +2062,6 @@ target "app" {
})
}
func TestVariableValidationConditionNull(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(`
variable "PORT" {
default = 3000
validation {}
}
target "app" {
args = {
PORT = PORT
}
}
`),
}
_, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
require.Error(t, err)
require.Contains(t, err.Error(), "Condition expression must return either true or false, not null")
}
func TestVariableValidationConditionUnknownValue(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(`
variable "PORT" {
default = 3000
validation {
condition = "foo"
}
}
target "app" {
args = {
PORT = PORT
}
}
`),
}
_, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
require.Error(t, err)
require.Contains(t, err.Error(), "Invalid condition result value: a bool is required")
}
func TestVariableValidationInvalidErrorMessage(t *testing.T) {
fp := File{
Name: "docker-bake.hcl",
Data: []byte(`
variable "FOO" {
default = 0
validation {
condition = FOO > 5
}
}
target "app" {
args = {
FOO = FOO
}
}
`),
}
_, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
require.Error(t, err)
require.Contains(t, err.Error(), "This check failed, but has an invalid error message")
}
// https://github.com/docker/buildx/issues/2822
func TestVariableEmpty(t *testing.T) {
fp := File{

View File

@@ -92,9 +92,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
if s.Build.AdditionalContexts != nil {
additionalContexts = map[string]string{}
for k, v := range s.Build.AdditionalContexts {
if strings.HasPrefix(v, "service:") {
v = strings.Replace(v, "service:", "target:", 1)
}
additionalContexts[k] = v
}
}
@@ -177,7 +174,6 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
CacheFrom: cacheFrom,
CacheTo: cacheTo,
NetworkMode: networkModeP,
Platforms: s.Build.Platforms,
SSH: ssh,
Secrets: secrets,
ShmSize: shmSize,
@@ -218,7 +214,7 @@ func validateComposeFile(dt []byte, fn string) (bool, error) {
}
func validateCompose(dt []byte, envs map[string]string) error {
_, err := loader.LoadWithContext(context.Background(), composetypes.ConfigDetails{
_, err := loader.Load(composetypes.ConfigDetails{
ConfigFiles: []composetypes.ConfigFile{
{
Content: dt,
@@ -319,7 +315,7 @@ type (
stringArray []string
)
func (sa *stringArray) UnmarshalYAML(unmarshal func(any) error) error {
func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
var multi []string
err := unmarshal(&multi)
if err != nil {
@@ -336,7 +332,7 @@ func (sa *stringArray) UnmarshalYAML(unmarshal func(any) error) error {
// composeExtTarget converts Compose build extension x-bake to bake Target
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
func (t *Target) composeExtTarget(exts map[string]any) error {
func (t *Target) composeExtTarget(exts map[string]interface{}) error {
var xb xbake
ext, ok := exts["x-bake"]

View File

@@ -463,21 +463,6 @@ services:
require.NoError(t, err)
}
func TestPlatforms(t *testing.T) {
dt := []byte(`
services:
foo:
build:
context: .
platforms:
- linux/amd64
- linux/arm64
`)
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
}
func newBool(val bool) *bool {
b := val
return &b
@@ -813,37 +798,6 @@ services:
})
}
func TestServiceContext(t *testing.T) {
dt := []byte(`
services:
base:
build:
dockerfile: baseapp.Dockerfile
command: ./entrypoint.sh
webapp:
build:
context: ./dir
additional_contexts:
base: service:base
`)
c, err := ParseCompose([]composetypes.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
require.Equal(t, 1, len(c.Groups))
require.Equal(t, "default", c.Groups[0].Name)
sort.Strings(c.Groups[0].Targets)
require.Equal(t, []string{"base", "webapp"}, c.Groups[0].Targets)
require.Equal(t, 2, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool {
return c.Targets[i].Name < c.Targets[j].Name
})
require.Equal(t, "webapp", c.Targets[1].Name)
require.Equal(t, map[string]string{"base": "target:base"}, c.Targets[1].Contexts)
}
// chdir changes the current working directory to the named directory,
// and then restore the original working directory at the end of the test.
func chdir(t *testing.T, dir string) {

View File

@@ -306,7 +306,7 @@ func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Write
fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(slices.Concat(flags, flagsFS), " "))
}
args := slices.Clone(os.Args)
args := append([]string(nil), os.Args...)
if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" {
args[0] = v
}

View File

@@ -608,7 +608,7 @@ func TestHCLAttrsCapsuleType(t *testing.T) {
target "app" {
attest = [
{ type = "provenance", mode = "max" },
"type=sbom,disabled=true,generator=foo,\"ENV1=bar,baz\",ENV2=hello",
"type=sbom,disabled=true",
]
cache-from = [
@@ -641,7 +641,7 @@ func TestHCLAttrsCapsuleType(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, len(c.Targets))
require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,disabled=true,\"ENV1=bar,baz\",ENV2=hello,generator=foo"}, stringify(c.Targets[0].Attest))
require.Equal(t, []string{"type=provenance,mode=max", "type=sbom,disabled=true"}, stringify(c.Targets[0].Attest))
require.Equal(t, []string{"type=local,dest=../out", "type=oci,dest=../out.tar"}, stringify(c.Targets[0].Outputs))
require.Equal(t, []string{"type=local,src=path/to/cache", "user/app:cache"}, stringify(c.Targets[0].CacheFrom))
require.Equal(t, []string{"type=local,dest=path/to/cache"}, stringify(c.Targets[0].CacheTo))
@@ -1645,7 +1645,7 @@ func TestHCLIndexOfFunc(t *testing.T) {
require.Empty(t, c.Targets[1].Tags[1])
}
func ptrstr(s any) *string {
func ptrstr(s interface{}) *string {
var n *string
if reflect.ValueOf(s).Kind() == reflect.String {
ss := s.(string)

View File

@@ -15,11 +15,11 @@ import (
// DecodeOptions allows customizing sections of the decoding process.
type DecodeOptions struct {
ImpliedType func(gv any) (cty.Type, error)
ImpliedType func(gv interface{}) (cty.Type, error)
Convert func(in cty.Value, want cty.Type) (cty.Value, error)
}
func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
o = o.withDefaults()
rv := reflect.ValueOf(val)
@@ -46,7 +46,7 @@ func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any)
// are returned then the given value may have been partially-populated but
// may still be accessed by a careful caller for static analysis and editor
// integration use-cases.
func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
return DecodeOptions{}.DecodeBody(body, ctx, val)
}
@@ -282,7 +282,7 @@ func (o DecodeOptions) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext
return diags
}
func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
o = o.withDefaults()
srcVal, diags := expr.Value(ctx)
@@ -332,7 +332,7 @@ func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContex
// are returned then the given value may have been partially-populated but
// may still be accessed by a careful caller for static analysis and editor
// integration use-cases.
func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
return DecodeOptions{}.DecodeExpression(expr, ctx, val)
}

View File

@@ -16,8 +16,8 @@ import (
)
func TestDecodeBody(t *testing.T) {
deepEquals := func(other any) func(v any) bool {
return func(v any) bool {
deepEquals := func(other interface{}) func(v interface{}) bool {
return func(v interface{}) bool {
return reflect.DeepEqual(v, other)
}
}
@@ -45,19 +45,19 @@ func TestDecodeBody(t *testing.T) {
}
tests := []struct {
Body map[string]any
Target func() any
Check func(v any) bool
Body map[string]interface{}
Target func() interface{}
Check func(v interface{}) bool
DiagCount int
}{
{
map[string]any{},
map[string]interface{}{},
makeInstantiateType(struct{}{}),
deepEquals(struct{}{}),
0,
},
{
map[string]any{},
map[string]interface{}{},
makeInstantiateType(struct {
Name string `hcl:"name"`
}{}),
@@ -67,7 +67,7 @@ func TestDecodeBody(t *testing.T) {
1, // name is required
},
{
map[string]any{},
map[string]interface{}{},
makeInstantiateType(struct {
Name *string `hcl:"name"`
}{}),
@@ -77,7 +77,7 @@ func TestDecodeBody(t *testing.T) {
0,
}, // name nil
{
map[string]any{},
map[string]interface{}{},
makeInstantiateType(struct {
Name string `hcl:"name,optional"`
}{}),
@@ -87,9 +87,9 @@ func TestDecodeBody(t *testing.T) {
0,
}, // name optional
{
map[string]any{},
map[string]interface{}{},
makeInstantiateType(withNameExpression{}),
func(v any) bool {
func(v interface{}) bool {
if v == nil {
return false
}
@@ -109,11 +109,11 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
},
makeInstantiateType(withNameExpression{}),
func(v any) bool {
func(v interface{}) bool {
if v == nil {
return false
}
@@ -133,7 +133,7 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
},
makeInstantiateType(struct {
@@ -145,7 +145,7 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
"age": 23,
},
@@ -158,7 +158,7 @@ func TestDecodeBody(t *testing.T) {
1, // Extraneous "age" property
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
"age": 50,
},
@@ -166,7 +166,7 @@ func TestDecodeBody(t *testing.T) {
Name string `hcl:"name"`
Attrs hcl.Attributes `hcl:",remain"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
got := gotI.(struct {
Name string `hcl:"name"`
Attrs hcl.Attributes `hcl:",remain"`
@@ -176,7 +176,7 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
"age": 50,
},
@@ -184,7 +184,7 @@ func TestDecodeBody(t *testing.T) {
Name string `hcl:"name"`
Remain hcl.Body `hcl:",remain"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
got := gotI.(struct {
Name string `hcl:"name"`
Remain hcl.Body `hcl:",remain"`
@@ -197,7 +197,7 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
"living": true,
},
@@ -217,7 +217,7 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
"age": 50,
},
@@ -226,7 +226,7 @@ func TestDecodeBody(t *testing.T) {
Body hcl.Body `hcl:",body"`
Remain hcl.Body `hcl:",remain"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
got := gotI.(struct {
Name string `hcl:"name"`
Body hcl.Body `hcl:",body"`
@@ -241,76 +241,76 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
"noodle": map[string]any{},
map[string]interface{}{
"noodle": map[string]interface{}{},
},
makeInstantiateType(struct {
Noodle struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
// Generating no diagnostics is good enough for this one.
return true
},
0,
},
{
map[string]any{
"noodle": []map[string]any{{}},
map[string]interface{}{
"noodle": []map[string]interface{}{{}},
},
makeInstantiateType(struct {
Noodle struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
// Generating no diagnostics is good enough for this one.
return true
},
0,
},
{
map[string]any{
"noodle": []map[string]any{{}, {}},
map[string]interface{}{
"noodle": []map[string]interface{}{{}, {}},
},
makeInstantiateType(struct {
Noodle struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
// Generating one diagnostic is good enough for this one.
return true
},
1,
},
{
map[string]any{},
map[string]interface{}{},
makeInstantiateType(struct {
Noodle struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
// Generating one diagnostic is good enough for this one.
return true
},
1,
},
{
map[string]any{
"noodle": []map[string]any{},
map[string]interface{}{
"noodle": []map[string]interface{}{},
},
makeInstantiateType(struct {
Noodle struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
// Generating one diagnostic is good enough for this one.
return true
},
1,
},
{
map[string]any{
"noodle": map[string]any{},
map[string]interface{}{
"noodle": map[string]interface{}{},
},
makeInstantiateType(struct {
Noodle *struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
return gotI.(struct {
Noodle *struct{} `hcl:"noodle,block"`
}).Noodle != nil
@@ -318,13 +318,13 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
"noodle": []map[string]any{{}},
map[string]interface{}{
"noodle": []map[string]interface{}{{}},
},
makeInstantiateType(struct {
Noodle *struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
return gotI.(struct {
Noodle *struct{} `hcl:"noodle,block"`
}).Noodle != nil
@@ -332,13 +332,13 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
"noodle": []map[string]any{},
map[string]interface{}{
"noodle": []map[string]interface{}{},
},
makeInstantiateType(struct {
Noodle *struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
return gotI.(struct {
Noodle *struct{} `hcl:"noodle,block"`
}).Noodle == nil
@@ -346,26 +346,26 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
"noodle": []map[string]any{{}, {}},
map[string]interface{}{
"noodle": []map[string]interface{}{{}, {}},
},
makeInstantiateType(struct {
Noodle *struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
// Generating one diagnostic is good enough for this one.
return true
},
1,
},
{
map[string]any{
"noodle": []map[string]any{},
map[string]interface{}{
"noodle": []map[string]interface{}{},
},
makeInstantiateType(struct {
Noodle []struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
noodle := gotI.(struct {
Noodle []struct{} `hcl:"noodle,block"`
}).Noodle
@@ -374,13 +374,13 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
"noodle": []map[string]any{{}},
map[string]interface{}{
"noodle": []map[string]interface{}{{}},
},
makeInstantiateType(struct {
Noodle []struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
noodle := gotI.(struct {
Noodle []struct{} `hcl:"noodle,block"`
}).Noodle
@@ -389,13 +389,13 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
"noodle": []map[string]any{{}, {}},
map[string]interface{}{
"noodle": []map[string]interface{}{{}, {}},
},
makeInstantiateType(struct {
Noodle []struct{} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
noodle := gotI.(struct {
Noodle []struct{} `hcl:"noodle,block"`
}).Noodle
@@ -404,15 +404,15 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
"noodle": map[string]any{},
map[string]interface{}{
"noodle": map[string]interface{}{},
},
makeInstantiateType(struct {
Noodle struct {
Name string `hcl:"name,label"`
} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
//nolint:misspell
// Generating two diagnostics is good enough for this one.
// (one for the missing noodle block and the other for
@@ -423,9 +423,9 @@ func TestDecodeBody(t *testing.T) {
2,
},
{
map[string]any{
"noodle": map[string]any{
"foo_foo": map[string]any{},
map[string]interface{}{
"noodle": map[string]interface{}{
"foo_foo": map[string]interface{}{},
},
},
makeInstantiateType(struct {
@@ -433,7 +433,7 @@ func TestDecodeBody(t *testing.T) {
Name string `hcl:"name,label"`
} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
noodle := gotI.(struct {
Noodle struct {
Name string `hcl:"name,label"`
@@ -444,10 +444,10 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
"noodle": map[string]any{
"foo_foo": map[string]any{},
"bar_baz": map[string]any{},
map[string]interface{}{
"noodle": map[string]interface{}{
"foo_foo": map[string]interface{}{},
"bar_baz": map[string]interface{}{},
},
},
makeInstantiateType(struct {
@@ -455,17 +455,17 @@ func TestDecodeBody(t *testing.T) {
Name string `hcl:"name,label"`
} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
// One diagnostic is enough for this one.
return true
},
1,
},
{
map[string]any{
"noodle": map[string]any{
"foo_foo": map[string]any{},
"bar_baz": map[string]any{},
map[string]interface{}{
"noodle": map[string]interface{}{
"foo_foo": map[string]interface{}{},
"bar_baz": map[string]interface{}{},
},
},
makeInstantiateType(struct {
@@ -473,7 +473,7 @@ func TestDecodeBody(t *testing.T) {
Name string `hcl:"name,label"`
} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
noodles := gotI.(struct {
Noodles []struct {
Name string `hcl:"name,label"`
@@ -484,9 +484,9 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
"noodle": map[string]any{
"foo_foo": map[string]any{
map[string]interface{}{
"noodle": map[string]interface{}{
"foo_foo": map[string]interface{}{
"type": "rice",
},
},
@@ -497,7 +497,7 @@ func TestDecodeBody(t *testing.T) {
Type string `hcl:"type"`
} `hcl:"noodle,block"`
}{}),
func(gotI any) bool {
func(gotI interface{}) bool {
noodle := gotI.(struct {
Noodle struct {
Name string `hcl:"name,label"`
@@ -510,7 +510,7 @@ func TestDecodeBody(t *testing.T) {
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
"age": 34,
},
@@ -522,31 +522,31 @@ func TestDecodeBody(t *testing.T) {
0,
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
"age": 89,
},
makeInstantiateType(map[string]*hcl.Attribute(nil)),
func(gotI any) bool {
func(gotI interface{}) bool {
got := gotI.(map[string]*hcl.Attribute)
return len(got) == 2 && got["name"] != nil && got["age"] != nil
},
0,
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
"age": 13,
},
makeInstantiateType(map[string]hcl.Expression(nil)),
func(gotI any) bool {
func(gotI interface{}) bool {
got := gotI.(map[string]hcl.Expression)
return len(got) == 2 && got["name"] != nil && got["age"] != nil
},
0,
},
{
map[string]any{
map[string]interface{}{
"name": "Ermintrude",
"living": true,
},
@@ -559,10 +559,10 @@ func TestDecodeBody(t *testing.T) {
},
{
// Retain "nested" block while decoding
map[string]any{
map[string]interface{}{
"plain": "foo",
},
func() any {
func() interface{} {
return &withNestedBlock{
Plain: "bar",
Nested: &withTwoAttributes{
@@ -570,7 +570,7 @@ func TestDecodeBody(t *testing.T) {
},
}
},
func(gotI any) bool {
func(gotI interface{}) bool {
foo := gotI.(withNestedBlock)
return foo.Plain == "foo" && foo.Nested != nil && foo.Nested.A == "bar"
},
@@ -578,19 +578,19 @@ func TestDecodeBody(t *testing.T) {
},
{
// Retain values in "nested" block while decoding
map[string]any{
"nested": map[string]any{
map[string]interface{}{
"nested": map[string]interface{}{
"a": "foo",
},
},
func() any {
func() interface{} {
return &withNestedBlock{
Nested: &withTwoAttributes{
B: "bar",
},
}
},
func(gotI any) bool {
func(gotI interface{}) bool {
foo := gotI.(withNestedBlock)
return foo.Nested.A == "foo" && foo.Nested.B == "bar"
},
@@ -598,14 +598,14 @@ func TestDecodeBody(t *testing.T) {
},
{
// Retain values in "nested" block list while decoding
map[string]any{
"nested": []map[string]any{
map[string]interface{}{
"nested": []map[string]interface{}{
{
"a": "foo",
},
},
},
func() any {
func() interface{} {
return &withListofNestedBlocks{
Nested: []*withTwoAttributes{
{
@@ -614,7 +614,7 @@ func TestDecodeBody(t *testing.T) {
},
}
},
func(gotI any) bool {
func(gotI interface{}) bool {
n := gotI.(withListofNestedBlocks)
return n.Nested[0].A == "foo" && n.Nested[0].B == "bar"
},
@@ -622,14 +622,14 @@ func TestDecodeBody(t *testing.T) {
},
{
// Remove additional elements from the list while decoding nested blocks
map[string]any{
"nested": []map[string]any{
map[string]interface{}{
"nested": []map[string]interface{}{
{
"a": "foo",
},
},
},
func() any {
func() interface{} {
return &withListofNestedBlocks{
Nested: []*withTwoAttributes{
{
@@ -641,7 +641,7 @@ func TestDecodeBody(t *testing.T) {
},
}
},
func(gotI any) bool {
func(gotI interface{}) bool {
n := gotI.(withListofNestedBlocks)
return len(n.Nested) == 1
},
@@ -649,8 +649,8 @@ func TestDecodeBody(t *testing.T) {
},
{
// Make sure decoding value slices works the same as pointer slices.
map[string]any{
"nested": []map[string]any{
map[string]interface{}{
"nested": []map[string]interface{}{
{
"b": "bar",
},
@@ -659,7 +659,7 @@ func TestDecodeBody(t *testing.T) {
},
},
},
func() any {
func() interface{} {
return &withListofNestedBlocksNoPointers{
Nested: []withTwoAttributes{
{
@@ -668,7 +668,7 @@ func TestDecodeBody(t *testing.T) {
},
}
},
func(gotI any) bool {
func(gotI interface{}) bool {
n := gotI.(withListofNestedBlocksNoPointers)
return n.Nested[0].B == "bar" && len(n.Nested) == 2
},
@@ -710,8 +710,8 @@ func TestDecodeBody(t *testing.T) {
func TestDecodeExpression(t *testing.T) {
tests := []struct {
Value cty.Value
Target any
Want any
Target interface{}
Want interface{}
DiagCount int
}{
{
@@ -799,8 +799,8 @@ func (e *fixedExpression) Variables() []hcl.Traversal {
return nil
}
func makeInstantiateType(target any) func() any {
return func() any {
func makeInstantiateType(target interface{}) func() interface{} {
return func() interface{} {
return reflect.New(reflect.TypeOf(target)).Interface()
}
}

View File

@@ -34,9 +34,9 @@ import (
// The layout of the resulting HCL source is derived from the ordering of
// the struct fields, with blank lines around nested blocks of different types.
// Fields representing attributes should usually precede those representing
// blocks so that the attributes can group together in the result. For more
// blocks so that the attributes can group togather in the result. For more
// control, use the hclwrite API directly.
func EncodeIntoBody(val any, dst *hclwrite.Body) {
func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
rv := reflect.ValueOf(val)
ty := rv.Type()
if ty.Kind() == reflect.Ptr {
@@ -60,7 +60,7 @@ func EncodeIntoBody(val any, dst *hclwrite.Body) {
//
// This function has the same constraints as EncodeIntoBody and will panic
// if they are violated.
func EncodeAsBlock(val any, blockType string) *hclwrite.Block {
func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block {
rv := reflect.ValueOf(val)
ty := rv.Type()
if ty.Kind() == reflect.Ptr {
@@ -158,7 +158,7 @@ func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwr
if isSeq {
l := fieldVal.Len()
for i := range l {
for i := 0; i < l; i++ {
elemVal := fieldVal.Index(i)
if !elemVal.IsValid() {
continue // ignore (elem value is nil pointer)

View File

@@ -22,7 +22,7 @@ import (
// This uses the tags on the fields of the struct to discover how each
// field's value should be expressed within configuration. If an invalid
// mapping is attempted, this function will panic.
func ImpliedBodySchema(val any) (schema *hcl.BodySchema, partial bool) {
func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) {
ty := reflect.TypeOf(val)
if ty.Kind() == reflect.Ptr {
@@ -134,7 +134,7 @@ func getFieldTags(ty reflect.Type) *fieldTags {
}
ct := ty.NumField()
for i := range ct {
for i := 0; i < ct; i++ {
field := ty.Field(i)
tag := field.Tag.Get("hcl")
if tag == "" {

View File

@@ -14,7 +14,7 @@ import (
func TestImpliedBodySchema(t *testing.T) {
tests := []struct {
val any
val interface{}
wantSchema *hcl.BodySchema
wantPartial bool
}{

View File

@@ -7,7 +7,6 @@ import (
"math"
"math/big"
"reflect"
"slices"
"strconv"
"strings"
@@ -16,7 +15,6 @@ import (
"github.com/hashicorp/hcl/v2"
"github.com/pkg/errors"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
)
type Opt struct {
@@ -42,7 +40,7 @@ type variableValidation struct {
type functionDef struct {
Name string `json:"-" hcl:"name,label"`
Params *hcl.Attribute `json:"params,omitempty" hcl:"params"`
Variadic *hcl.Attribute `json:"variadic_params,omitempty" hcl:"variadic_params"`
Variadic *hcl.Attribute `json:"variadic_param,omitempty" hcl:"variadic_params"`
Result *hcl.Attribute `json:"result,omitempty" hcl:"result"`
}
@@ -556,57 +554,27 @@ func (p *parser) resolveBlockNames(block *hcl.Block) ([]string, error) {
func (p *parser) validateVariables(vars map[string]*variable, ectx *hcl.EvalContext) hcl.Diagnostics {
var diags hcl.Diagnostics
for _, v := range vars {
for _, rule := range v.Validations {
resultVal, condDiags := rule.Condition.Value(ectx)
for _, validation := range v.Validations {
condition, condDiags := validation.Condition.Value(ectx)
if condDiags.HasErrors() {
diags = append(diags, condDiags...)
continue
}
if resultVal.IsNull() {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid condition result",
Detail: "Condition expression must return either true or false, not null.",
Subject: rule.Condition.Range().Ptr(),
Expression: rule.Condition,
})
continue
}
var err error
resultVal, err = convert.Convert(resultVal, cty.Bool)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid condition result",
Detail: fmt.Sprintf("Invalid condition result value: %s", err),
Subject: rule.Condition.Range().Ptr(),
Expression: rule.Condition,
})
continue
}
if !resultVal.True() {
message, msgDiags := rule.ErrorMessage.Value(ectx)
if !condition.True() {
message, msgDiags := validation.ErrorMessage.Value(ectx)
if msgDiags.HasErrors() {
diags = append(diags, msgDiags...)
continue
}
errorMessage := "This check failed, but has an invalid error message."
if !message.IsNull() {
errorMessage = message.AsString()
}
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Validation failed",
Detail: errorMessage,
Subject: rule.Condition.Range().Ptr(),
Detail: message.AsString(),
Subject: validation.Condition.Range().Ptr(),
})
}
}
}
return diags
}
@@ -621,7 +589,7 @@ type ParseMeta struct {
AllVariables []*Variable
}
func Parse(b hcl.Body, opt Opt, val any) (*ParseMeta, hcl.Diagnostics) {
func Parse(b hcl.Body, opt Opt, val interface{}) (*ParseMeta, hcl.Diagnostics) {
reserved := map[string]struct{}{}
schema, _ := gohcl.ImpliedBodySchema(val)
@@ -795,7 +763,7 @@ func Parse(b hcl.Body, opt Opt, val any) (*ParseMeta, hcl.Diagnostics) {
types := map[string]field{}
renamed := map[string]map[string][]string{}
vt := reflect.ValueOf(val).Elem().Type()
for i := range vt.NumField() {
for i := 0; i < vt.NumField(); i++ {
tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")
p.blockTypes[tags[0]] = vt.Field(i).Type.Elem().Elem()
@@ -863,7 +831,7 @@ func Parse(b hcl.Body, opt Opt, val any) (*ParseMeta, hcl.Diagnostics) {
oldValue, exists := t.values[lblName]
if !exists && lblExists {
if v.Elem().Field(t.idx).Type().Kind() == reflect.Slice {
for i := range v.Elem().Field(t.idx).Len() {
for i := 0; i < v.Elem().Field(t.idx).Len(); i++ {
if lblName == v.Elem().Field(t.idx).Index(i).Elem().Field(lblIndex).String() {
exists = true
oldValue = value{Value: v.Elem().Field(t.idx).Index(i), idx: i}
@@ -930,7 +898,7 @@ func wrapErrorDiagnostic(message string, err error, subject *hcl.Range, context
func setName(v reflect.Value, name string) {
numFields := v.Elem().Type().NumField()
for i := range numFields {
for i := 0; i < numFields; i++ {
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
for _, t := range parts[1:] {
if t == "label" {
@@ -942,10 +910,12 @@ func setName(v reflect.Value, name string) {
func getName(v reflect.Value) (string, bool) {
numFields := v.Elem().Type().NumField()
for i := range numFields {
for i := 0; i < numFields; i++ {
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
if slices.Contains(parts[1:], "label") {
return v.Elem().Field(i).String(), true
for _, t := range parts[1:] {
if t == "label" {
return v.Elem().Field(i).String(), true
}
}
}
return "", false
@@ -953,10 +923,12 @@ func getName(v reflect.Value) (string, bool) {
func getNameIndex(v reflect.Value) (int, bool) {
numFields := v.Elem().Type().NumField()
for i := range numFields {
for i := 0; i < numFields; i++ {
parts := strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",")
if slices.Contains(parts[1:], "label") {
return i, true
for _, t := range parts[1:] {
if t == "label" {
return i, true
}
}
}
return 0, false
@@ -1016,7 +988,7 @@ func key(ks ...any) uint64 {
return hash.Sum64()
}
func decodeBody(body hcl.Body, ctx *hcl.EvalContext, val any) hcl.Diagnostics {
func decodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
dec := gohcl.DecodeOptions{ImpliedType: ImpliedType}
return dec.DecodeBody(body, ctx, val)
}

View File

@@ -43,7 +43,7 @@ import (
// In particular, ImpliedType will never use capsule types in its returned
// type, because it cannot know the capsule types supported by the calling
// program.
func ImpliedType(gv any) (cty.Type, error) {
func ImpliedType(gv interface{}) (cty.Type, error) {
rt := reflect.TypeOf(gv)
var path cty.Path
return impliedType(rt, path)
@@ -148,7 +148,7 @@ func structTagIndices(st reflect.Type) map[string]int {
ct := st.NumField()
ret := make(map[string]int, ct)
for i := range ct {
for i := 0; i < ct; i++ {
field := st.Field(i)
attrName := field.Tag.Get("cty")
if attrName != "" {

View File

@@ -8,7 +8,6 @@ import (
"encoding/json"
"fmt"
"io"
"maps"
"os"
"slices"
"strconv"
@@ -205,6 +204,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
return nil, err
}
defers := make([]func(), 0, 2)
defer func() {
if err != nil {
for _, f := range defers {
f()
}
}
}()
reqForNodes := make(map[string][]*reqForNode)
eg, ctx := errgroup.WithContext(ctx)
@@ -234,11 +242,11 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
if err != nil {
return nil, err
}
defer release()
if err := saveLocalState(so, k, opt, np.Node(), cfg); err != nil {
return nil, err
}
addGitAttrs(so)
defers = append(defers, release)
reqn = append(reqn, &reqForNode{
resolvedNode: np,
so: so,
@@ -423,7 +431,9 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
FrontendInputs: frontendInputs,
FrontendOpt: make(map[string]string),
}
maps.Copy(req.FrontendOpt, so.FrontendAttrs)
for k, v := range so.FrontendAttrs {
req.FrontendOpt[k] = v
}
so.Frontend = ""
so.FrontendInputs = nil
@@ -525,10 +535,11 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
}
}
}
node := dp.Node().Driver
if node.IsMobyDriver() {
for _, e := range so.Exports {
if e.Type == "moby" && e.Attrs["push"] != "" && !node.Features(ctx)[driver.DirectPush] {
if e.Type == "moby" && e.Attrs["push"] != "" {
if ok, _ := strconv.ParseBool(e.Attrs["push"]); ok {
pushNames = e.Attrs["name"]
if pushNames == "" {
@@ -560,14 +571,6 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
}
}
}
// if prefer-image-digest is set in the solver options, remove the image
// config digest from the exporter's response
for _, e := range so.Exports {
if e.Attrs["prefer-image-digest"] == "true" {
delete(rr.ExporterResponse, exptypes.ExporterImageConfigDigestKey)
break
}
}
return nil
})
}
@@ -619,7 +622,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opts map[
// This is fallback for some very old buildkit versions.
// Note that the mediatype isn't really correct as most of the time it is image manifest and
// not manifest list but actually both are handled because for Docker mediatypes the
// mediatype value in the Accept header does not seem to matter.
// mediatype value in the Accpet header does not seem to matter.
s, ok = r.ExporterResponse[exptypes.ExporterImageDigestKey]
if ok {
descs = append(descs, specs.Descriptor{

View File

@@ -4,7 +4,6 @@ import (
"context"
stderrors "errors"
"net"
"slices"
"github.com/containerd/platforms"
"github.com/docker/buildx/builder"
@@ -38,7 +37,15 @@ func Dial(ctx context.Context, nodes []builder.Node, pw progress.Writer, platfor
for _, ls := range resolved {
for _, rn := range ls {
if platform != nil {
if !slices.ContainsFunc(rn.platforms, platforms.Only(*platform).Match) {
p := *platform
var found bool
for _, pp := range rn.platforms {
if platforms.Only(p).Match(pp) {
found = true
break
}
}
if !found {
continue
}
}

View File

@@ -3,7 +3,6 @@ package build
import (
"context"
"fmt"
"slices"
"sync"
"github.com/containerd/platforms"
@@ -222,7 +221,7 @@ func (r *nodeResolver) get(p specs.Platform, matcher matchMaker, additionalPlatf
for i, node := range r.nodes {
platforms := node.Platforms
if additionalPlatforms != nil {
platforms = slices.Clone(platforms)
platforms = append([]specs.Platform{}, platforms...)
platforms = append(platforms, additionalPlatforms(i, node)...)
}
for _, p2 := range platforms {

View File

@@ -2,7 +2,6 @@ package build
import (
"context"
"maps"
"os"
"path"
"path/filepath"
@@ -128,7 +127,9 @@ func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (
if so.FrontendAttrs == nil {
so.FrontendAttrs = make(map[string]string)
}
maps.Copy(so.FrontendAttrs, res)
for k, v := range res {
so.FrontendAttrs[k] = v
}
if !setGitInfo || root == "" {
return

View File

@@ -9,7 +9,6 @@ import (
"testing"
"github.com/docker/buildx/util/gitutil"
"github.com/docker/buildx/util/gitutil/gittestutil"
"github.com/moby/buildkit/client"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
@@ -17,18 +16,18 @@ import (
)
func setupTest(tb testing.TB) {
gittestutil.Mktmp(tb)
gitutil.Mktmp(tb)
c, err := gitutil.New()
require.NoError(tb, err)
gittestutil.GitInit(c, tb)
gitutil.GitInit(c, tb)
df := []byte("FROM alpine:latest\n")
require.NoError(tb, os.WriteFile("Dockerfile", df, 0644))
gittestutil.GitAdd(c, tb, "Dockerfile")
gittestutil.GitCommit(c, tb, "initial commit")
gittestutil.GitSetRemote(c, tb, "origin", "git@github.com:docker/buildx.git")
gitutil.GitAdd(c, tb, "Dockerfile")
gitutil.GitCommit(c, tb, "initial commit")
gitutil.GitSetRemote(c, tb, "origin", "git@github.com:docker/buildx.git")
}
func TestGetGitAttributesNotGitRepo(t *testing.T) {
@@ -189,19 +188,19 @@ func TestLocalDirs(t *testing.T) {
}
func TestLocalDirsSub(t *testing.T) {
gittestutil.Mktmp(t)
gitutil.Mktmp(t)
c, err := gitutil.New()
require.NoError(t, err)
gittestutil.GitInit(c, t)
gitutil.GitInit(c, t)
df := []byte("FROM alpine:latest\n")
require.NoError(t, os.MkdirAll("app", 0755))
require.NoError(t, os.WriteFile("app/Dockerfile", df, 0644))
gittestutil.GitAdd(c, t, "app/Dockerfile")
gittestutil.GitCommit(c, t, "initial commit")
gittestutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")
gitutil.GitAdd(c, t, "app/Dockerfile")
gitutil.GitCommit(c, t, "initial commit")
gitutil.GitSetRemote(c, t, "origin", "git@github.com:docker/buildx.git")
so := &client.SolveOpt{
FrontendAttrs: map[string]string{},


@@ -237,11 +237,6 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *O
opt.Exports[i].Output = func(_ map[string]string) (io.WriteCloser, error) {
return w, nil
}
// if docker is using the containerd snapshotter, prefer to export the image digest
// (rather than the image config digest). See https://github.com/moby/moby/issues/45458.
if features[dockerutil.OCIImporter] {
opt.Exports[i].Attrs["prefer-image-digest"] = "true"
}
}
} else if !nodeDriver.Features(ctx)[driver.DockerExporter] {
return nil, nil, notSupported(driver.DockerExporter, nodeDriver, "https://docs.docker.com/go/build-exporters/")


@@ -5,7 +5,6 @@ import (
"encoding/base64"
"encoding/json"
"io"
"maps"
"strings"
"sync"
@@ -41,7 +40,9 @@ func setRecordProvenance(ctx context.Context, c *client.Client, sr *client.Solve
if err != nil {
return err
}
maps.Copy(sr.ExporterResponse, res)
for k, v := range res {
sr.ExporterResponse[k] = v
}
return nil
})
}
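The provenance hunk above (and the git attributes hunk earlier) shows maps.Copy (Go 1.21+) next to an equivalent explicit range loop; both copy every key/value pair from the source map into the destination, overwriting keys that already exist. A minimal sketch:

package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]string{"image.name": "app:latest"}
	src := map[string]string{"containerimage.digest": "sha256:abc"}

	// maps.Copy copies src into dst, overwriting existing keys.
	maps.Copy(dst, src)

	// Equivalent explicit loop, matching the form shown in the hunk above.
	for k, v := range src {
		dst[k] = v
	}

	fmt.Println(len(dst)) // 2
}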


@@ -28,11 +28,11 @@ func TestSyncMultiReaderParallel(t *testing.T) {
readers := make([]io.ReadCloser, numReaders)
for i := range numReaders {
for i := 0; i < numReaders; i++ {
readers[i] = mr.NewReadCloser()
}
for i := range numReaders {
for i := 0; i < numReaders; i++ {
wg.Add(1)
go func(readerId int) {
defer wg.Done()
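The test hunk above shows both spellings of this loop: the Go 1.22 integer range form (for i := range n) and the classic counted loop; they iterate identically from 0 to n-1. A standalone sketch:

package main

import "fmt"

func main() {
	const numReaders = 3

	// Go 1.22+ form: range over an integer.
	for i := range numReaders {
		fmt.Println("reader", i)
	}

	// Equivalent classic form for older toolchains.
	for i := 0; i < numReaders; i++ {
		fmt.Println("reader", i)
	}
}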


@@ -5,7 +5,6 @@ import (
"encoding/json"
"net/url"
"os"
"slices"
"sort"
"strings"
"sync"
@@ -200,7 +199,7 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) {
err = err1
}
if err == nil && len(errCh) > 0 {
if err == nil && len(errCh) == len(toBoot) {
return false, <-errCh
}
return true, err
@@ -657,7 +656,13 @@ func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string
flags.StringArrayVar(&allowInsecureEntitlements, "allow-insecure-entitlement", nil, "")
_ = flags.Parse(res)
hasNetworkHostEntitlement := slices.Contains(allowInsecureEntitlements, "network.host")
var hasNetworkHostEntitlement bool
for _, e := range allowInsecureEntitlements {
if e == "network.host" {
hasNetworkHostEntitlement = true
break
}
}
var hasNetworkHostEntitlementInConf bool
if buildkitdConfigFile != "" {
@@ -666,8 +671,11 @@ func parseBuildkitdFlags(inp string, driver string, driverOpts map[string]string
return nil, err
} else if btoml != nil {
if ies := btoml.GetArray("insecure-entitlements"); ies != nil {
if slices.Contains(ies.([]string), "network.host") {
hasNetworkHostEntitlementInConf = true
for _, e := range ies.([]string) {
if e == "network.host" {
hasNetworkHostEntitlementInConf = true
break
}
}
}
}
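The parseBuildkitdFlags hunks above show slices.Contains (Go 1.21+) next to an equivalent explicit loop for the network.host entitlement check. A minimal sketch of the equivalence:

package main

import (
	"fmt"
	"slices"
)

func main() {
	entitlements := []string{"security.insecure", "network.host"}

	// slices.Contains reports whether the value is present in the slice.
	fmt.Println(slices.Contains(entitlements, "network.host")) // true

	// Equivalent explicit loop, matching the form shown in the hunk above.
	var found bool
	for _, e := range entitlements {
		if e == "network.host" {
			found = true
			break
		}
	}
	fmt.Println(found) // true
}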


@@ -169,7 +169,7 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
// dynamic nodes are used in Kubernetes driver.
// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
for i := range di.DriverInfo.DynamicNodes {
for i := 0; i < len(di.DriverInfo.DynamicNodes); i++ {
diClone := di
if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
diClone.Platforms = pl


@@ -7,13 +7,15 @@ import (
"path/filepath"
"github.com/docker/buildx/commands"
controllererrors "github.com/docker/buildx/controller/errdefs"
"github.com/docker/buildx/util/desktop"
"github.com/docker/buildx/version"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli-plugins/metadata"
"github.com/docker/cli/cli-plugins/manager"
"github.com/docker/cli/cli-plugins/plugin"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/debug"
cliflags "github.com/docker/cli/cli/flags"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/stack"
"github.com/pkg/errors"
@@ -35,7 +37,11 @@ func init() {
}
func runStandalone(cmd *command.DockerCli) error {
if err := cmd.Initialize(cliflags.NewClientOptions()); err != nil {
return err
}
defer flushMetrics(cmd)
executable := os.Args[0]
rootCmd := commands.NewRootCmd(filepath.Base(executable), false, cmd)
return rootCmd.Execute()
@@ -58,7 +64,7 @@ func flushMetrics(cmd *command.DockerCli) {
func runPlugin(cmd *command.DockerCli) error {
rootCmd := commands.NewRootCmd("buildx", true, cmd)
return plugin.RunPlugin(cmd, rootCmd, metadata.Metadata{
return plugin.RunPlugin(cmd, rootCmd, manager.Metadata{
SchemaVersion: "0.1.0",
Vendor: "Docker Inc.",
Version: version.Version,
@@ -111,6 +117,11 @@ func main() {
var ebr *desktop.ErrorWithBuildRef
if errors.As(err, &ebr) {
ebr.Print(cmd.Err())
} else {
var be *controllererrors.BuildError
if errors.As(err, &be) {
be.PrintBuildDetails(cmd.Err())
}
}
os.Exit(1)
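The error handling above uses errors.As, which walks the wrapped error chain and assigns the first matching error to its target; that is how desktop.ErrorWithBuildRef and the controller's BuildError are detected here. A self-contained sketch with a made-up error type (buildError below is only a stand-in, not one of the real buildx types):

package main

import (
	"errors"
	"fmt"
)

// buildError is a hypothetical error type used only for this illustration.
type buildError struct{ ref string }

func (e *buildError) Error() string { return "build failed: " + e.ref }

func main() {
	err := fmt.Errorf("command failed: %w", &buildError{ref: "abc123"})

	var be *buildError
	if errors.As(err, &be) {
		// On a match, be points at the wrapped *buildError.
		fmt.Println("build ref:", be.ref)
	}
}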


@@ -66,11 +66,7 @@ type bakeOptions struct {
func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) {
mp := dockerCli.MeterProvider()
ctx, end, err := tracing.TraceCurrentCommand(ctx, append([]string{"bake"}, targets...),
attribute.String("builder", in.builder),
attribute.StringSlice("targets", targets),
attribute.StringSlice("files", in.files),
)
ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
if err != nil {
return err
}
@@ -287,7 +283,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
}
}
if err := saveLocalStateGroup(dockerCli, in, targets, bo); err != nil {
if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil {
return err
}
@@ -309,7 +305,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
}
if len(in.metadataFile) > 0 {
dt := make(map[string]any)
dt := make(map[string]interface{})
for t, r := range resp {
dt[t] = decodeExporterResponse(r.ExporterResponse)
}
@@ -424,14 +420,6 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
fmt.Fprintln(dockerCli.Out(), string(dt))
}
for _, name := range names {
if sp, ok := resp[name]; ok {
if v, ok := sp.ExporterResponse["frontend.result.inlinemessage"]; ok {
fmt.Fprintf(dockerCli.Out(), "\n# %s\n%s\n", name, v)
}
}
}
if exitCode != 0 {
os.Exit(exitCode)
}
@@ -500,14 +488,7 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
return cmd
}
func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options) error {
l, err := localstate.New(confutil.NewConfig(dockerCli))
if err != nil {
return err
}
defer l.MigrateIfNeeded()
func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error {
prm := confutil.MetadataProvenance()
if len(in.metadataFile) == 0 {
prm = confutil.MetadataProvenanceModeDisabled
@@ -527,10 +508,19 @@ func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string
if len(refs) == 0 {
return nil
}
l, err := localstate.New(confutil.NewConfig(dockerCli))
if err != nil {
return err
}
dtdef, err := json.MarshalIndent(def, "", " ")
if err != nil {
return err
}
return l.SaveGroup(groupRef, localstate.StateGroup{
Refs: refs,
Targets: targets,
Definition: dtdef,
Targets: targets,
Inputs: overrides,
Refs: refs,
})
}


@@ -11,7 +11,6 @@ import (
"io"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
@@ -42,6 +41,7 @@ import (
"github.com/docker/cli/cli/command"
dockeropts "github.com/docker/cli/opts"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/atomicwriter"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/frontend/subrequests"
@@ -52,7 +52,6 @@ import (
solverpb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/moby/sys/atomicwriter"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -104,10 +103,12 @@ type buildOptions struct {
exportPush bool
exportLoad bool
control.ControlOptions
invokeConfig *invokeConfig
}
func (o *buildOptions) toControllerOptions() (*cbuild.Options, error) {
func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error) {
var err error
buildArgs, err := listToMap(o.buildArgs, true)
@@ -120,7 +121,7 @@ func (o *buildOptions) toControllerOptions() (*cbuild.Options, error) {
return nil, err
}
opts := cbuild.Options{
opts := controllerapi.BuildOptions{
Allow: o.allow,
Annotations: o.annotations,
BuildArgs: buildArgs,
@@ -155,7 +156,7 @@ func (o *buildOptions) toControllerOptions() (*cbuild.Options, error) {
return nil, err
}
inAttests := slices.Clone(o.attests)
inAttests := append([]string{}, o.attests...)
if o.provenance != "" {
inAttests = append(inAttests, buildflags.CanonicalizeAttest("provenance", o.provenance))
}
@@ -284,11 +285,7 @@ func (o *buildOptionsHash) String() string {
func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) (err error) {
mp := dockerCli.MeterProvider()
ctx, end, err := tracing.TraceCurrentCommand(ctx, []string{"build", options.contextPath},
attribute.String("builder", options.builder),
attribute.String("context", options.contextPath),
attribute.String("dockerfile", options.dockerfileName),
)
ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
if err != nil {
return err
}
@@ -402,10 +399,6 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
os.Exit(exitcode)
}
}
if v, ok := resp.ExporterResponse["frontend.result.inlinemessage"]; ok {
fmt.Fprintf(dockerCli.Out(), "\n%s\n", v)
return nil
}
return nil
}
@@ -418,7 +411,7 @@ func getImageID(resp map[string]string) string {
return dgst
}
func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild.Options, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
resp, res, dfmap, err := cbuild.RunBuild(ctx, dockerCli, opts, dockerCli.In(), printer, false)
if res != nil {
res.Done()
@@ -426,12 +419,15 @@ func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild.Opti
return resp, dfmap, err
}
func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild.Options, options buildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, *build.Inputs, error) {
if options.invokeConfig != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
// stdin must be usable for monitor
return nil, nil, errors.Errorf("Dockerfile or context from stdin is not supported with invoke")
}
c := controller.NewController(ctx, dockerCli)
c, err := controller.NewController(ctx, options.ControlOptions, dockerCli, printer)
if err != nil {
return nil, nil, err
}
defer func() {
if err := c.Close(); err != nil {
logrus.Warnf("failed to close server connection %v", err)
@@ -440,7 +436,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild
// NOTE: buildx server has the current working directory different from the client
// so we need to resolve paths to absolute ones in the client.
opts, err := cbuild.ResolveOptionPaths(opts)
opts, err = controllerapi.ResolveOptionPaths(opts)
if err != nil {
return nil, nil, err
}
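As the comment above notes, a detached buildx server does not share the client's working directory, so relative paths have to be made absolute on the client before the options are sent over. A minimal illustration of that idea using only the standard library (this is not the actual ResolveOptionPaths implementation):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// A relative context path as a user might type it.
	contextPath := "./app"

	// Resolve it against the client's working directory before handing it
	// to a server process that may be running with a different cwd.
	abs, err := filepath.Abs(contextPath)
	if err != nil {
		panic(err)
	}
	fmt.Println(abs)
}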
@@ -466,10 +462,11 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild
})
}
resp, inputs, err = c.Build(ctx, opts, pr, printer)
ref, resp, inputs, err = c.Build(ctx, opts, pr, printer)
if err != nil {
var be *controllererrors.BuildError
if errors.As(err, &be) {
ref = be.SessionID
retErr = err
// We can proceed to monitor
} else {
@@ -509,8 +506,8 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *cbuild
resp, retErr = monitorBuildResult.Resp, monitorBuildResult.Err
}
} else {
if err := c.Close(); err != nil {
logrus.Warnf("close error: %v", err)
if err := c.Disconnect(ctx, ref); err != nil {
logrus.Warnf("disconnect error: %v", err)
}
}
@@ -647,6 +644,14 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D
flags.StringVar(&options.sbom, "sbom", "", `Shorthand for "--attest=type=sbom"`)
flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--attest=type=provenance"`)
if confutil.IsExperimental() {
// TODO: move this to debug command if needed
flags.StringVar(&options.Root, "root", "", "Specify root directory of server to connect")
flags.BoolVar(&options.Detach, "detach", false, "Detach buildx server (supported only on linux)")
flags.StringVar(&options.ServerConfig, "server-config", "", "Specify buildx server config file (used only when launching new server)")
cobrautil.MarkFlagsExperimental(flags, "root", "detach", "server-config")
}
flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`)
flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`)
flags.Lookup("check").NoOptDefVal = "true"
@@ -735,7 +740,7 @@ func checkWarnedFlags(f *pflag.Flag) {
}
}
func writeMetadataFile(filename string, dt any) error {
func writeMetadataFile(filename string, dt interface{}) error {
b, err := json.MarshalIndent(dt, "", " ")
if err != nil {
return err
@@ -743,7 +748,7 @@ func writeMetadataFile(filename string, dt any) error {
return atomicwriter.WriteFile(filename, b, 0644)
}
func decodeExporterResponse(exporterResponse map[string]string) map[string]any {
func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
decFunc := func(k, v string) ([]byte, error) {
if k == "result.json" {
// result.json is part of metadata response for subrequests which
@@ -752,16 +757,16 @@ func decodeExporterResponse(exporterResponse map[string]string) map[string]any {
}
return base64.StdEncoding.DecodeString(v)
}
out := make(map[string]any)
out := make(map[string]interface{})
for k, v := range exporterResponse {
dt, err := decFunc(k, v)
if err != nil {
out[k] = v
continue
}
var raw map[string]any
var raw map[string]interface{}
if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
var rawList []map[string]any
var rawList []map[string]interface{}
if err = json.Unmarshal(dt, &rawList); err != nil || len(rawList) == 0 {
out[k] = v
continue
@@ -1003,12 +1008,12 @@ func (cfg *invokeConfig) needsDebug(retErr error) bool {
}
}
func (cfg *invokeConfig) runDebug(ctx context.Context, ref string, options *cbuild.Options, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) (*monitor.MonitorBuildResult, error) {
func (cfg *invokeConfig) runDebug(ctx context.Context, ref string, options *controllerapi.BuildOptions, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress *progress.Printer) (*monitor.MonitorBuildResult, error) {
con := console.Current()
if err := con.SetRaw(); err != nil {
// TODO: run disconnect in build command (on error case)
if err := c.Close(); err != nil {
logrus.Warnf("close error: %v", err)
if err := c.Disconnect(ctx, ref); err != nil {
logrus.Warnf("disconnect error: %v", err)
}
return nil, errors.Errorf("failed to configure terminal: %v", err)
}


@@ -3,9 +3,11 @@ package debug
import (
"context"
"os"
"runtime"
"github.com/containerd/console"
"github.com/docker/buildx/controller"
"github.com/docker/buildx/controller/control"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/monitor"
"github.com/docker/buildx/util/cobrautil"
@@ -33,6 +35,7 @@ type DebuggableCmd interface {
}
func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
var controlOptions control.ControlOptions
var progressMode string
var options DebugConfig
@@ -47,7 +50,10 @@ func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
}
ctx := context.TODO()
c := controller.NewController(ctx, dockerCli)
c, err := controller.NewController(ctx, controlOptions, dockerCli, printer)
if err != nil {
return err
}
defer func() {
if err := c.Close(); err != nil {
logrus.Warnf("failed to close server connection %v", err)
@@ -70,9 +76,13 @@ func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
flags := cmd.Flags()
flags.StringVar(&options.InvokeFlag, "invoke", "", "Launch a monitor with executing specified command")
flags.StringVar(&options.OnFlag, "on", "error", "When to launch the monitor ([always, error])")
flags.StringVar(&controlOptions.Root, "root", "", "Specify root directory of server to connect for the monitor")
flags.BoolVar(&controlOptions.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server for the monitor (supported only on linux)")
flags.StringVar(&controlOptions.ServerConfig, "server-config", "", "Specify buildx server config file for the monitor (used only when launching new server)")
flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson") for the monitor. Use plain to show container output`)
cobrautil.MarkFlagsExperimental(flags, "invoke", "on")
cobrautil.MarkFlagsExperimental(flags, "invoke", "on", "root", "detach", "server-config")
for _, c := range children {
cmd.AddCommand(c.NewDebugger(&options))


@@ -124,7 +124,7 @@ func duCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
return cmd
}
func printKV(w io.Writer, k string, v any) {
func printKV(w io.Writer, k string, v interface{}) {
fmt.Fprintf(w, "%s:\t%v\n", k, v)
}
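The signature change above is purely cosmetic: any has been a type alias for interface{} since Go 1.18, so the two forms of printKV are interchangeable. A short sketch:

package main

import (
	"fmt"
	"io"
	"os"
)

// Identical signatures; only the spelling of the empty interface differs.
func printKVAny(w io.Writer, k string, v any) {
	fmt.Fprintf(w, "%s:\t%v\n", k, v)
}

func printKVEmptyIface(w io.Writer, k string, v interface{}) {
	fmt.Fprintf(w, "%s:\t%v\n", k, v)
}

func main() {
	printKVAny(os.Stdout, "driver", "docker-container")
	printKVEmptyIface(os.Stdout, "nodes", 2)
}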


@@ -1,160 +0,0 @@
package history
import (
"context"
"io"
"os"
"slices"
"github.com/containerd/console"
"github.com/containerd/platforms"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/localstate"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/desktop/bundle"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type exportOptions struct {
builder string
refs []string
output string
all bool
}
func runExport(ctx context.Context, dockerCli command.Cli, opts exportOptions) error {
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
if err != nil {
return err
}
nodes, err := b.LoadNodes(ctx, builder.WithData())
if err != nil {
return err
}
for _, node := range nodes {
if node.Err != nil {
return node.Err
}
}
if len(opts.refs) == 0 {
opts.refs = []string{""}
}
var res []historyRecord
for _, ref := range opts.refs {
recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
CompletedOnly: true,
})
if err != nil {
return err
}
if len(recs) == 0 {
if ref == "" {
return errors.New("no records found")
}
return errors.Errorf("no record found for ref %q", ref)
}
if ref == "" {
slices.SortFunc(recs, func(a, b historyRecord) int {
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
})
}
if opts.all {
res = append(res, recs...)
break
} else {
res = append(res, recs[0])
}
}
ls, err := localstate.New(confutil.NewConfig(dockerCli))
if err != nil {
return err
}
visited := map[*builder.Node]struct{}{}
var clients []*client.Client
for _, rec := range res {
if _, ok := visited[rec.node]; ok {
continue
}
c, err := rec.node.Driver.Client(ctx)
if err != nil {
return err
}
clients = append(clients, c)
}
toExport := make([]*bundle.Record, 0, len(res))
for _, rec := range res {
var defaultPlatform string
if p := rec.node.Platforms; len(p) > 0 {
defaultPlatform = platforms.FormatAll(platforms.Normalize(p[0]))
}
var stg *localstate.StateGroup
st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
if st != nil && st.GroupRef != "" {
stg, err = ls.ReadGroup(st.GroupRef)
if err != nil {
return err
}
}
toExport = append(toExport, &bundle.Record{
BuildHistoryRecord: rec.BuildHistoryRecord,
DefaultPlatform: defaultPlatform,
LocalState: st,
StateGroup: stg,
})
}
var w io.Writer = os.Stdout
if opts.output != "" {
f, err := os.Create(opts.output)
if err != nil {
return errors.Wrapf(err, "failed to create output file %q", opts.output)
}
defer f.Close()
w = f
} else {
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
return errors.Errorf("refusing to write to console, use --output to specify a file")
}
}
return bundle.Export(ctx, clients, w, toExport)
}
func exportCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
var options exportOptions
cmd := &cobra.Command{
Use: "export [OPTIONS] [REF]",
Short: "Export a build into Docker Desktop bundle",
RunE: func(cmd *cobra.Command, args []string) error {
if options.all && len(args) > 0 {
return errors.New("cannot specify refs when using --all")
}
options.refs = args
options.builder = *rootOpts.Builder
return runExport(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.StringVarP(&options.output, "output", "o", "", "Output file path")
flags.BoolVar(&options.all, "all", false, "Export all records for the builder")
return cmd
}


@@ -1,135 +0,0 @@
package history
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"os"
"strings"
remoteutil "github.com/docker/buildx/driver/remote/util"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/desktop"
"github.com/docker/cli/cli/command"
"github.com/pkg/browser"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type importOptions struct {
file []string
}
func runImport(ctx context.Context, dockerCli command.Cli, opts importOptions) error {
sock, err := desktop.BuildServerAddr()
if err != nil {
return err
}
tr := http.DefaultTransport.(*http.Transport).Clone()
tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
network, addr, ok := strings.Cut(sock, "://")
if !ok {
return nil, errors.Errorf("invalid endpoint address: %s", sock)
}
return remoteutil.DialContext(ctx, network, addr)
}
client := &http.Client{
Transport: tr,
}
var urls []string
if len(opts.file) == 0 {
u, err := importFrom(ctx, client, os.Stdin)
if err != nil {
return err
}
urls = append(urls, u...)
} else {
for _, fn := range opts.file {
var f *os.File
var rdr io.Reader = os.Stdin
if fn != "-" {
f, err = os.Open(fn)
if err != nil {
return errors.Wrapf(err, "failed to open file %s", fn)
}
rdr = f
}
u, err := importFrom(ctx, client, rdr)
if err != nil {
return err
}
urls = append(urls, u...)
if f != nil {
f.Close()
}
}
}
if len(urls) == 0 {
return errors.New("no build records found in the bundle")
}
for i, url := range urls {
fmt.Fprintln(dockerCli.Err(), url)
if i == 0 {
err = browser.OpenURL(url)
}
}
return err
}
func importFrom(ctx context.Context, c *http.Client, rdr io.Reader) ([]string, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://docker-desktop/upload", rdr)
if err != nil {
return nil, errors.Wrap(err, "failed to create request")
}
resp, err := c.Do(req)
if err != nil {
return nil, errors.Wrap(err, "failed to send request, check if Docker Desktop is running")
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return nil, errors.Errorf("failed to import build: %s", string(body))
}
var refs []string
dec := json.NewDecoder(resp.Body)
if err := dec.Decode(&refs); err != nil {
return nil, errors.Wrap(err, "failed to decode response")
}
var urls []string
for _, ref := range refs {
urls = append(urls, desktop.BuildURL(fmt.Sprintf(".imported/_/%s", ref)))
}
return urls, err
}
func importCmd(dockerCli command.Cli, _ RootOptions) *cobra.Command {
var options importOptions
cmd := &cobra.Command{
Use: "import [OPTIONS] < bundle.dockerbuild",
Short: "Import a build into Docker Desktop",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runImport(cmd.Context(), dockerCli, options)
},
ValidArgsFunction: completion.Disable,
}
flags := cmd.Flags()
flags.StringArrayVarP(&options.file, "file", "f", nil, "Import from a file path")
return cmd
}


@@ -173,7 +173,7 @@ func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions)
}
}
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
recs, err := queryRecords(ctx, opts.ref, nodes)
if err != nil {
return err
}
@@ -185,7 +185,14 @@ func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions)
return errors.Errorf("no record found for ref %q", opts.ref)
}
if opts.ref == "" {
slices.SortFunc(recs, func(a, b historyRecord) int {
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
})
}
rec := &recs[0]
c, err := rec.node.Driver.Client(ctx)
if err != nil {
return err
@@ -346,7 +353,7 @@ workers0:
out.Error.Name = name
out.Error.Logs = logs
}
out.Error.Stack = fmt.Appendf(nil, "%+v", stack.Formatter(retErr))
out.Error.Stack = []byte(fmt.Sprintf("%+v", stack.Formatter(retErr)))
}
}


@@ -3,6 +3,7 @@ package history
import (
"context"
"io"
"slices"
"github.com/containerd/containerd/v2/core/content/proxy"
"github.com/containerd/platforms"
@@ -41,7 +42,7 @@ func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOp
}
}
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
recs, err := queryRecords(ctx, opts.ref, nodes)
if err != nil {
return err
}
@@ -53,6 +54,12 @@ func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOp
return errors.Errorf("no record found for ref %q", opts.ref)
}
if opts.ref == "" {
slices.SortFunc(recs, func(a, b historyRecord) int {
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
})
}
rec := &recs[0]
c, err := rec.node.Driver.Client(ctx)


@@ -4,6 +4,7 @@ import (
"context"
"io"
"os"
"slices"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/cobrautil/completion"
@@ -38,7 +39,7 @@ func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error
}
}
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
recs, err := queryRecords(ctx, opts.ref, nodes)
if err != nil {
return err
}
@@ -50,6 +51,12 @@ func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error
return errors.Errorf("no record found for ref %q", opts.ref)
}
if opts.ref == "" {
slices.SortFunc(recs, func(a, b historyRecord) int {
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
})
}
rec := &recs[0]
c, err := rec.node.Driver.Client(ctx)
if err != nil {


@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"os"
"path"
"slices"
"time"
@@ -15,12 +14,10 @@ import (
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/desktop"
"github.com/docker/buildx/util/gitutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -41,9 +38,6 @@ type lsOptions struct {
builder string
format string
noTrunc bool
filters []string
local bool
}
func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
@@ -62,29 +56,7 @@ func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
}
}
queryOptions := &queryOptions{}
if opts.local {
wd, err := os.Getwd()
if err != nil {
return err
}
gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd))
if err != nil {
if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() {
return errors.Wrap(err, "git was not found in the system")
}
return errors.Wrapf(err, "could not find git repository for local filter")
}
remote, err := gitc.RemoteURL()
if err != nil {
return errors.Wrapf(err, "could not get remote URL for local filter")
}
queryOptions.Filters = append(queryOptions.Filters, fmt.Sprintf("repository=%s", remote))
}
queryOptions.Filters = append(queryOptions.Filters, opts.filters...)
out, err := queryRecords(ctx, "", nodes, queryOptions)
out, err := queryRecords(ctx, "", nodes)
if err != nil {
return err
}
@@ -120,8 +92,6 @@ func lsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
flags := cmd.Flags()
flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
flags.StringArrayVar(&options.filters, "filter", nil, `Provide filter values (e.g., "status=error")`)
flags.BoolVar(&options.local, "local", false, "List records for current repository only")
return cmd
}
@@ -191,7 +161,7 @@ type lsContext struct {
}
func (c *lsContext) MarshalJSON() ([]byte, error) {
m := map[string]any{
m := map[string]interface{}{
"ref": c.FullRef(),
"name": c.Name(),
"status": c.Status(),


@@ -3,6 +3,7 @@ package history
import (
"context"
"fmt"
"slices"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/util/cobrautil/completion"
@@ -34,7 +35,7 @@ func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error
}
}
recs, err := queryRecords(ctx, opts.ref, nodes, nil)
recs, err := queryRecords(ctx, opts.ref, nodes)
if err != nil {
return err
}
@@ -46,6 +47,12 @@ func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error
return errors.Errorf("no record found for ref %q", opts.ref)
}
if opts.ref == "" {
slices.SortFunc(recs, func(a, b historyRecord) int {
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
})
}
rec := &recs[0]
url := desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref))
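The slices.SortFunc call that appears in several of these history subcommands orders records newest-first by creation time. A self-contained sketch of the same pattern with slices.SortFunc and time.Time.Compare (standard library; the record type below is made up for illustration, not the package's historyRecord):

package main

import (
	"fmt"
	"slices"
	"time"
)

type record struct {
	Ref       string
	CreatedAt time.Time
}

func main() {
	recs := []record{
		{Ref: "older", CreatedAt: time.Now().Add(-time.Hour)},
		{Ref: "newest", CreatedAt: time.Now()},
	}

	// Comparing b against a sorts the most recent record first.
	slices.SortFunc(recs, func(a, b record) int {
		return b.CreatedAt.Compare(a.CreatedAt)
	})

	fmt.Println(recs[0].Ref) // newest
}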


@@ -25,8 +25,6 @@ func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *c
inspectCmd(dockerCli, opts),
openCmd(dockerCli, opts),
traceCmd(dockerCli, opts),
importCmd(dockerCli, opts),
exportCmd(dockerCli, opts),
)
return cmd


@@ -8,6 +8,9 @@ import (
"io"
"net"
"os"
"slices"
"strconv"
"strings"
"time"
"github.com/containerd/console"
@@ -34,20 +37,51 @@ type traceOptions struct {
}
func loadTrace(ctx context.Context, ref string, nodes []builder.Node) (string, []byte, error) {
recs, err := queryRecords(ctx, ref, nodes, &queryOptions{
CompletedOnly: true,
})
var offset *int
if strings.HasPrefix(ref, "^") {
off, err := strconv.Atoi(ref[1:])
if err != nil {
return "", nil, errors.Wrapf(err, "invalid offset %q", ref)
}
offset = &off
ref = ""
}
recs, err := queryRecords(ctx, ref, nodes)
if err != nil {
return "", nil, err
}
if len(recs) == 0 {
var rec *historyRecord
if ref == "" {
slices.SortFunc(recs, func(a, b historyRecord) int {
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
})
for _, r := range recs {
if r.CompletedAt != nil {
if offset != nil {
if *offset > 0 {
*offset--
continue
}
}
rec = &r
break
}
}
if offset != nil && *offset > 0 {
return "", nil, errors.Errorf("no completed build found with offset %d", *offset)
}
} else {
rec = &recs[0]
}
if rec == nil {
if ref == "" {
return "", nil, errors.New("no records found")
}
return "", nil, errors.Errorf("no record found for ref %q", ref)
}
rec := &recs[0]
if rec.CompletedAt == nil {
return "", nil, errors.Errorf("build %q is not completed, only completed builds can be traced", rec.Ref)
@@ -69,9 +103,7 @@ func loadTrace(ctx context.Context, ref string, nodes []builder.Node) (string, [
return "", nil, err
}
recs, err := queryRecords(ctx, rec.Ref, []builder.Node{*rec.node}, &queryOptions{
CompletedOnly: true,
})
recs, err := queryRecords(ctx, rec.Ref, []builder.Node{*rec.node})
if err != nil {
return "", nil, err
}


@@ -1,14 +1,10 @@
package history
import (
"bytes"
"context"
"encoding/csv"
"fmt"
"io"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
"time"
@@ -17,13 +13,10 @@ import (
"github.com/docker/buildx/builder"
"github.com/docker/buildx/localstate"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/util/gitutil"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
const recordsLimit = 50
func buildName(fattrs map[string]string, ls *localstate.State) string {
var res string
@@ -113,30 +106,10 @@ type historyRecord struct {
name string
}
type queryOptions struct {
CompletedOnly bool
Filters []string
}
func queryRecords(ctx context.Context, ref string, nodes []builder.Node, opts *queryOptions) ([]historyRecord, error) {
func queryRecords(ctx context.Context, ref string, nodes []builder.Node) ([]historyRecord, error) {
var mu sync.Mutex
var out []historyRecord
var offset *int
if strings.HasPrefix(ref, "^") {
off, err := strconv.Atoi(ref[1:])
if err != nil {
return nil, errors.Wrapf(err, "invalid offset %q", ref)
}
offset = &off
ref = ""
}
var filters []string
if opts != nil {
filters = opts.Filters
}
eg, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
node := node
@@ -149,25 +122,9 @@ func queryRecords(ctx context.Context, ref string, nodes []builder.Node, opts *q
if err != nil {
return err
}
var matchers []matchFunc
if len(filters) > 0 {
filters, matchers, err = dockerFiltersToBuildkit(filters)
if err != nil {
return err
}
sb := bytes.NewBuffer(nil)
w := csv.NewWriter(sb)
w.Write(filters)
w.Flush()
filters = []string{strings.TrimSuffix(sb.String(), "\n")}
}
serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
EarlyExit: true,
Ref: ref,
Limit: recordsLimit,
Filter: filters,
})
if err != nil {
return err
@@ -185,7 +142,6 @@ func queryRecords(ctx context.Context, ref string, nodes []builder.Node, opts *q
ts = &t
}
defer serv.CloseSend()
loop0:
for {
he, err := serv.Recv()
if err != nil {
@@ -197,17 +153,6 @@ func queryRecords(ctx context.Context, ref string, nodes []builder.Node, opts *q
if he.Type == controlapi.BuildHistoryEventType_DELETED || he.Record == nil {
continue
}
if opts != nil && opts.CompletedOnly && he.Type != controlapi.BuildHistoryEventType_COMPLETE {
continue
}
// for older buildkit that don't support filters apply local filters
for _, matcher := range matchers {
if !matcher(he.Record) {
continue loop0
}
}
records = append(records, historyRecord{
BuildHistoryRecord: he.Record,
currentTimestamp: ts,
@@ -224,27 +169,6 @@ func queryRecords(ctx context.Context, ref string, nodes []builder.Node, opts *q
if err := eg.Wait(); err != nil {
return nil, err
}
slices.SortFunc(out, func(a, b historyRecord) int {
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
})
if offset != nil {
var filtered []historyRecord
for _, r := range out {
if *offset > 0 {
*offset--
continue
}
filtered = append(filtered, r)
break
}
if *offset > 0 {
return nil, errors.Errorf("no completed build found with offset %d", *offset)
}
out = filtered
}
return out, nil
}
@@ -254,150 +178,3 @@ func formatDuration(d time.Duration) string {
}
return fmt.Sprintf("%dm %2ds", int(d.Minutes()), int(d.Seconds())%60)
}
type matchFunc func(*controlapi.BuildHistoryRecord) bool
func dockerFiltersToBuildkit(in []string) ([]string, []matchFunc, error) {
out := []string{}
matchers := []matchFunc{}
for _, f := range in {
key, value, sep, found := cutAny(f, "!=", "=", "<=", "<", ">=", ">")
if !found {
return nil, nil, errors.Errorf("invalid filter %q", f)
}
switch key {
case "ref", "repository", "status":
if sep != "=" && sep != "!=" {
return nil, nil, errors.Errorf("invalid separator for %q, expected = or !=", f)
}
matchers = append(matchers, valueFiler(key, value, sep))
if sep == "=" {
if key == "status" {
sep = "=="
} else {
sep = "~="
}
}
case "startedAt", "completedAt", "duration":
if sep == "=" || sep == "!=" {
return nil, nil, errors.Errorf("invalid separator for %q, expected <=, <, >= or >", f)
}
matcher, err := timeBasedFilter(key, value, sep)
if err != nil {
return nil, nil, err
}
matchers = append(matchers, matcher)
default:
return nil, nil, errors.Errorf("unsupported filter %q", f)
}
out = append(out, key+sep+value)
}
return out, matchers, nil
}
func valueFiler(key, value, sep string) matchFunc {
return func(rec *controlapi.BuildHistoryRecord) bool {
var recValue string
switch key {
case "ref":
recValue = rec.Ref
case "repository":
v, ok := rec.FrontendAttrs["vcs:source"]
if ok {
recValue = v
} else {
if context, ok := rec.FrontendAttrs["context"]; ok {
if ref, err := gitutil.ParseGitRef(context); err == nil {
recValue = ref.Remote
}
}
}
case "status":
if rec.CompletedAt != nil {
if rec.Error != nil {
if strings.Contains(rec.Error.Message, "context canceled") {
recValue = "canceled"
} else {
recValue = "error"
}
} else {
recValue = "completed"
}
} else {
recValue = "running"
}
}
switch sep {
case "=":
if key == "status" {
return recValue == value
}
return strings.Contains(recValue, value)
case "!=":
return recValue != value
default:
return false
}
}
}
func timeBasedFilter(key, value, sep string) (matchFunc, error) {
var cmp int64
switch key {
case "startedAt", "completedAt":
v, err := time.ParseDuration(value)
if err == nil {
tm := time.Now().Add(-v)
cmp = tm.Unix()
} else {
tm, err := time.Parse(time.RFC3339, value)
if err != nil {
return nil, errors.Errorf("invalid time %s", value)
}
cmp = tm.Unix()
}
case "duration":
v, err := time.ParseDuration(value)
if err != nil {
return nil, errors.Errorf("invalid duration %s", value)
}
cmp = int64(v)
default:
return nil, nil
}
return func(rec *controlapi.BuildHistoryRecord) bool {
var val int64
switch key {
case "startedAt":
val = rec.CreatedAt.AsTime().Unix()
case "completedAt":
if rec.CompletedAt != nil {
val = rec.CompletedAt.AsTime().Unix()
}
case "duration":
if rec.CompletedAt != nil {
val = int64(rec.CompletedAt.AsTime().Sub(rec.CreatedAt.AsTime()))
}
}
switch sep {
case ">=":
return val >= cmp
case "<=":
return val <= cmp
case ">":
return val > cmp
default:
return val < cmp
}
}, nil
}
func cutAny(s string, seps ...string) (before, after, sep string, found bool) {
for _, sep := range seps {
if idx := strings.Index(s, sep); idx != -1 {
return s[:idx], s[idx+len(sep):], sep, true
}
}
return s, "", "", false
}
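For reference, the Docker-style filter handling shown above accepts filters on ref, repository and status with = or !=, and on startedAt, completedAt and duration with comparison operators; an exact status match is rewritten to BuildKit's == form and other equality matches become substring matches (~=). A small standalone sketch of the key/separator split (splitFilter below mirrors the unexported cutAny helper and is not the repository's code):

package main

import (
	"fmt"
	"strings"
)

// splitFilter splits a filter such as "status=error" on the first separator found.
func splitFilter(s string, seps ...string) (key, value, sep string, found bool) {
	for _, sp := range seps {
		if idx := strings.Index(s, sp); idx != -1 {
			return s[:idx], s[idx+len(sp):], sp, true
		}
	}
	return s, "", "", false
}

func main() {
	for _, f := range []string{"status=error", "ref!=abc123"} {
		k, v, sep, _ := splitFilter(f, "!=", "=", "<=", "<", ">=", ">")
		fmt.Printf("key=%q sep=%q value=%q\n", k, sep, v)
	}
}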


@@ -194,7 +194,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
}
s := s
eg2.Go(func() error {
sub.Log(1, fmt.Appendf(nil, "copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String()))
sub.Log(1, []byte(fmt.Sprintf("copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String())))
return r.Copy(ctx, s, t)
})
}
@@ -202,7 +202,7 @@ func runCreate(ctx context.Context, dockerCli command.Cli, in createOptions, arg
if err := eg2.Wait(); err != nil {
return err
}
sub.Log(1, fmt.Appendf(nil, "pushing %s to %s\n", desc.Digest.String(), t.String()))
sub.Log(1, []byte(fmt.Sprintf("pushing %s to %s\n", desc.Digest.String(), t.String())))
return r.Push(ctx, t, desc, dt)
})
})
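The imagetools hunk above shows fmt.Appendf (Go 1.19+) and []byte(fmt.Sprintf(...)) side by side; both produce the same formatted byte slice, Appendf simply avoids the intermediate string. A minimal sketch:

package main

import "fmt"

func main() {
	digest, src, dst := "sha256:abc", "img:a", "img:b"

	// fmt.Appendf formats directly into a byte slice.
	a := fmt.Appendf(nil, "copying %s from %s to %s\n", digest, src, dst)

	// Equivalent form going through an intermediate string.
	b := []byte(fmt.Sprintf("copying %s from %s to %s\n", digest, src, dst))

	fmt.Println(string(a) == string(b)) // true
}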


@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"maps"
"sort"
"strings"
"time"
@@ -410,7 +409,9 @@ func truncPlatforms(pfs []string, max int) truncatedPlatforms {
left[ppf] = append(left[ppf], pf)
}
}
maps.Copy(res, left)
for k, v := range left {
res[k] = v
}
return truncatedPlatforms{
res: res,
input: pfs,


@@ -7,6 +7,7 @@ import (
debugcmd "github.com/docker/buildx/commands/debug"
historycmd "github.com/docker/buildx/commands/history"
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
"github.com/docker/buildx/controller/remote"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/logutil"
@@ -15,14 +16,13 @@ import (
"github.com/docker/cli/cli-plugins/plugin"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/debug"
cliflags "github.com/docker/cli/cli/flags"
"github.com/moby/buildkit/util/appcontext"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func NewRootCmd(name string, isPlugin bool, dockerCli *command.DockerCli) *cobra.Command {
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
var opt rootOptions
cmd := &cobra.Command{
Short: "Docker Buildx",
@@ -40,17 +40,7 @@ func NewRootCmd(name string, isPlugin bool, dockerCli *command.DockerCli) *cobra
}
cmd.SetContext(appcontext.Context())
if !isPlugin {
// InstallFlags and SetDefaultOptions are necessary to match
// the plugin mode behavior to handle env vars such as
// DOCKER_TLS, DOCKER_TLS_VERIFY, ... and we also need to use a
// new flagset to avoid conflict with the global debug flag
// that we already handle in the root command otherwise it
// would panic.
nflags := pflag.NewFlagSet(cmd.DisplayName(), pflag.ContinueOnError)
options := cliflags.NewClientOptions()
options.InstallFlags(nflags)
options.SetDefaultOptions(nflags)
return dockerCli.Initialize(options)
return nil
}
return plugin.PersistentPreRunE(cmd, args)
},
@@ -123,6 +113,7 @@ func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
cmd.AddCommand(debugcmd.RootCmd(dockerCli,
newDebuggableBuild(dockerCli, opts),
))
remote.AddControllerCommands(cmd, dockerCli)
}
cmd.RegisterFlagCompletionFunc( //nolint:errcheck


@@ -34,7 +34,7 @@ const defaultTargetName = "default"
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
// inspect the result and debug the cause of that error.
func RunBuild(ctx context.Context, dockerCli command.Cli, in *Options, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
func RunBuild(ctx context.Context, dockerCli command.Cli, in *controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
if in.NoCache && len(in.NoCacheFilter) > 0 {
return nil, nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
}


@@ -5,22 +5,29 @@ import (
"io"
"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
)
type BuildxController interface {
Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (resp *client.SolveResponse, inputs *build.Inputs, err error)
Build(ctx context.Context, options *controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, inputs *build.Inputs, err error)
// Invoke starts an IO session into the specified process.
// If pid doesn't match to any running processes, it starts a new process with the specified config.
// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.
// If pid doesn't matche to any running processes, it starts a new process with the specified config.
// If there is no container running or InvokeConfig.Rollback is speicfied, the process will start in a newly created container.
// NOTE: If needed, in the future, we can split this API into three APIs (NewContainer, NewProcess and Attach).
Invoke(ctx context.Context, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
Invoke(ctx context.Context, ref, pid string, options *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
Kill(ctx context.Context) error
Close() error
ListProcesses(ctx context.Context) (infos []*processes.ProcessInfo, retErr error)
DisconnectProcess(ctx context.Context, pid string) error
Inspect(ctx context.Context) *cbuild.Options
List(ctx context.Context) (refs []string, _ error)
Disconnect(ctx context.Context, ref string) error
ListProcesses(ctx context.Context, ref string) (infos []*controllerapi.ProcessInfo, retErr error)
DisconnectProcess(ctx context.Context, ref, pid string) error
Inspect(ctx context.Context, ref string) (*controllerapi.InspectResponse, error)
}
type ControlOptions struct {
ServerConfig string
Root string
Detach bool
}


@@ -2,12 +2,35 @@ package controller
import (
"context"
"fmt"
"github.com/docker/buildx/controller/control"
"github.com/docker/buildx/controller/local"
"github.com/docker/buildx/controller/remote"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
)
func NewController(ctx context.Context, dockerCli command.Cli) control.BuildxController {
return local.NewLocalBuildxController(ctx, dockerCli)
func NewController(ctx context.Context, opts control.ControlOptions, dockerCli command.Cli, pw progress.Writer) (control.BuildxController, error) {
var name string
if opts.Detach {
name = "remote"
} else {
name = "local"
}
var c control.BuildxController
err := progress.Wrap(fmt.Sprintf("[internal] connecting to %s controller", name), pw.Write, func(l progress.SubLogger) (err error) {
if opts.Detach {
c, err = remote.NewRemoteBuildxController(ctx, dockerCli, opts, l)
} else {
c = local.NewLocalBuildxController(ctx, dockerCli, l)
}
return err
})
if err != nil {
return nil, errors.Wrap(err, "failed to start buildx controller")
}
return c, nil
}


@@ -1,20 +1,48 @@
package errdefs
import (
"io"
"github.com/containerd/typeurl/v2"
"github.com/docker/buildx/util/desktop"
"github.com/moby/buildkit/util/grpcerrors"
)
func init() {
typeurl.Register((*Build)(nil), "github.com/docker/buildx", "errdefs.Build+json")
}
type BuildError struct {
err error
*Build
error
}
func (e *BuildError) Unwrap() error {
return e.err
return e.error
}
func (e *BuildError) Error() string {
return e.err.Error()
func (e *BuildError) ToProto() grpcerrors.TypedErrorProto {
return e.Build
}
func WrapBuild(err error) error {
func (e *BuildError) PrintBuildDetails(w io.Writer) error {
if e.Ref == "" {
return nil
}
ebr := &desktop.ErrorWithBuildRef{
Ref: e.Ref,
Err: e.error,
}
return ebr.Print(w)
}
func WrapBuild(err error, sessionID string, ref string) error {
if err == nil {
return nil
}
return &BuildError{err: err}
return &BuildError{Build: &Build{SessionID: sessionID, Ref: ref}, error: err}
}
func (b *Build) WrapError(err error) error {
return &BuildError{error: err, Build: b}
}


@@ -0,0 +1,157 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.1
// protoc v3.11.4
// source: github.com/docker/buildx/controller/errdefs/errdefs.proto
package errdefs
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Build struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SessionID string `protobuf:"bytes,1,opt,name=SessionID,proto3" json:"SessionID,omitempty"`
Ref string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"`
}
func (x *Build) Reset() {
*x = Build{}
if protoimpl.UnsafeEnabled {
mi := &file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Build) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Build) ProtoMessage() {}
func (x *Build) ProtoReflect() protoreflect.Message {
mi := &file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Build.ProtoReflect.Descriptor instead.
func (*Build) Descriptor() ([]byte, []int) {
return file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescGZIP(), []int{0}
}
func (x *Build) GetSessionID() string {
if x != nil {
return x.SessionID
}
return ""
}
func (x *Build) GetRef() string {
if x != nil {
return x.Ref
}
return ""
}
var File_github_com_docker_buildx_controller_errdefs_errdefs_proto protoreflect.FileDescriptor
var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc = []byte{
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x63,
0x6b, 0x65, 0x72, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72,
0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x2f, 0x65, 0x72,
0x72, 0x64, 0x65, 0x66, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x64, 0x6f, 0x63,
0x6b, 0x65, 0x72, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2e, 0x65, 0x72, 0x72, 0x64, 0x65,
0x66, 0x73, 0x22, 0x37, 0x0a, 0x05, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x53,
0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x42, 0x2d, 0x5a, 0x2b, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72,
0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x78, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c,
0x65, 0x72, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescOnce sync.Once
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData = file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc
)
func file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescGZIP() []byte {
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescOnce.Do(func() {
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData)
})
return file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDescData
}
var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes = []interface{}{
(*Build)(nil), // 0: docker.buildx.errdefs.Build
}
var file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_docker_buildx_controller_errdefs_errdefs_proto_init() }
func file_github_com_docker_buildx_controller_errdefs_errdefs_proto_init() {
if File_github_com_docker_buildx_controller_errdefs_errdefs_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Build); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes,
DependencyIndexes: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs,
MessageInfos: file_github_com_docker_buildx_controller_errdefs_errdefs_proto_msgTypes,
}.Build()
File_github_com_docker_buildx_controller_errdefs_errdefs_proto = out.File
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_rawDesc = nil
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_goTypes = nil
file_github_com_docker_buildx_controller_errdefs_errdefs_proto_depIdxs = nil
}


@@ -0,0 +1,10 @@
syntax = "proto3";
package docker.buildx.errdefs;
option go_package = "github.com/docker/buildx/controller/errdefs";
message Build {
string SessionID = 1;
string Ref = 2;
}


@@ -0,0 +1,241 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
// protoc-gen-go-vtproto version: v0.6.1-0.20240319094008-0393e58bdf10
// source: github.com/docker/buildx/controller/errdefs/errdefs.proto
package errdefs
import (
fmt "fmt"
protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
proto "google.golang.org/protobuf/proto"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
io "io"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
func (m *Build) CloneVT() *Build {
if m == nil {
return (*Build)(nil)
}
r := new(Build)
r.SessionID = m.SessionID
r.Ref = m.Ref
if len(m.unknownFields) > 0 {
r.unknownFields = make([]byte, len(m.unknownFields))
copy(r.unknownFields, m.unknownFields)
}
return r
}
func (m *Build) CloneMessageVT() proto.Message {
return m.CloneVT()
}
func (this *Build) EqualVT(that *Build) bool {
if this == that {
return true
} else if this == nil || that == nil {
return false
}
if this.SessionID != that.SessionID {
return false
}
if this.Ref != that.Ref {
return false
}
return string(this.unknownFields) == string(that.unknownFields)
}
func (this *Build) EqualMessageVT(thatMsg proto.Message) bool {
that, ok := thatMsg.(*Build)
if !ok {
return false
}
return this.EqualVT(that)
}
func (m *Build) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
size := m.SizeVT()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBufferVT(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Build) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
func (m *Build) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
i := len(dAtA)
_ = i
var l int
_ = l
if m.unknownFields != nil {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
if len(m.Ref) > 0 {
i -= len(m.Ref)
copy(dAtA[i:], m.Ref)
i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Ref)))
i--
dAtA[i] = 0x12
}
if len(m.SessionID) > 0 {
i -= len(m.SessionID)
copy(dAtA[i:], m.SessionID)
i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SessionID)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *Build) SizeVT() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.SessionID)
if l > 0 {
n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
}
l = len(m.Ref)
if l > 0 {
n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
}
n += len(m.unknownFields)
return n
}
func (m *Build) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protohelpers.ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Build: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Build: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protohelpers.ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return protohelpers.ErrInvalidLength
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return protohelpers.ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SessionID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protohelpers.ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return protohelpers.ErrInvalidLength
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return protohelpers.ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ref = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := protohelpers.Skip(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return protohelpers.ErrInvalidLength
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
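The generated vtproto code above hand-rolls the proto3 wire format for Build's two string fields: SessionID is field 1 (tag byte 0x0a) and Ref is field 2 (tag byte 0x12), both length-delimited. A minimal standalone sketch of the same framing using the protowire helpers, purely for illustration (the field numbers come from the code above; the function name and values are made up):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// encodeBuild mirrors what Build.MarshalVT emits for the two populated
// string fields: tag 0x0a for SessionID, tag 0x12 for Ref, each followed
// by a varint length and the raw bytes.
func encodeBuild(sessionID, ref string) []byte {
	var b []byte
	if sessionID != "" {
		b = protowire.AppendTag(b, 1, protowire.BytesType)
		b = protowire.AppendString(b, sessionID)
	}
	if ref != "" {
		b = protowire.AppendTag(b, 2, protowire.BytesType)
		b = protowire.AppendString(b, ref)
	}
	return b
}

func main() {
	fmt.Printf("% x\n", encodeBuild("session-1", "ref-1"))
}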


@@ -11,6 +11,7 @@ import (
controllererrors "github.com/docker/buildx/controller/errdefs"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/desktop"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
@@ -18,9 +19,10 @@ import (
"github.com/pkg/errors"
)
func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli) control.BuildxController {
func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli, logger progress.SubLogger) control.BuildxController {
return &localController{
dockerCli: dockerCli,
sessionID: "local",
processes: processes.NewManager(),
}
}
@@ -29,20 +31,21 @@ type buildConfig struct {
// TODO: these two structs should be merged
// Discussion: https://github.com/docker/buildx/pull/1640#discussion_r1113279719
resultCtx *build.ResultHandle
buildOptions *cbuild.Options
buildOptions *controllerapi.BuildOptions
}
type localController struct {
dockerCli command.Cli
sessionID string
buildConfig buildConfig
processes *processes.Manager
buildOnGoing atomic.Bool
}
func (b *localController) Build(ctx context.Context, options *cbuild.Options, in io.ReadCloser, progress progress.Writer) (*client.SolveResponse, *build.Inputs, error) {
func (b *localController) Build(ctx context.Context, options *controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
if !b.buildOnGoing.CompareAndSwap(false, true) {
return nil, nil, errors.New("build ongoing")
return "", nil, nil, errors.New("build ongoing")
}
defer b.buildOnGoing.Store(false)
@@ -54,20 +57,31 @@ func (b *localController) Build(ctx context.Context, options *cbuild.Options, in
buildOptions: options,
}
if buildErr != nil {
buildErr = controllererrors.WrapBuild(buildErr)
var ref string
var ebr *desktop.ErrorWithBuildRef
if errors.As(buildErr, &ebr) {
ref = ebr.Ref
}
buildErr = controllererrors.WrapBuild(buildErr, b.sessionID, ref)
}
}
if buildErr != nil {
return nil, nil, buildErr
return "", nil, nil, buildErr
}
return resp, dockerfileMappings, nil
return b.sessionID, resp, dockerfileMappings, nil
}
func (b *localController) ListProcesses(ctx context.Context) (infos []*processes.ProcessInfo, retErr error) {
func (b *localController) ListProcesses(ctx context.Context, sessionID string) (infos []*controllerapi.ProcessInfo, retErr error) {
if sessionID != b.sessionID {
return nil, errors.Errorf("unknown session ID %q", sessionID)
}
return b.processes.ListProcesses(), nil
}
func (b *localController) DisconnectProcess(ctx context.Context, pid string) error {
func (b *localController) DisconnectProcess(ctx context.Context, sessionID, pid string) error {
if sessionID != b.sessionID {
return errors.Errorf("unknown session ID %q", sessionID)
}
return b.processes.DeleteProcess(pid)
}
@@ -75,7 +89,11 @@ func (b *localController) cancelRunningProcesses() {
b.processes.CancelRunningProcesses()
}
func (b *localController) Invoke(ctx context.Context, pid string, cfg *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
func (b *localController) Invoke(ctx context.Context, sessionID string, pid string, cfg *controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error {
if sessionID != b.sessionID {
return errors.Errorf("unknown session ID %q", sessionID)
}
proc, ok := b.processes.Get(pid)
if !ok {
// Start a new process.
@@ -103,6 +121,11 @@ func (b *localController) Invoke(ctx context.Context, pid string, cfg *controlle
}
}
func (b *localController) Kill(context.Context) error {
b.Close()
return nil
}
func (b *localController) Close() error {
b.cancelRunningProcesses()
if b.buildConfig.resultCtx != nil {
@@ -112,6 +135,18 @@ func (b *localController) Close() error {
return nil
}
func (b *localController) Inspect(ctx context.Context) *cbuild.Options {
return b.buildConfig.buildOptions
func (b *localController) List(ctx context.Context) (res []string, _ error) {
return []string{b.sessionID}, nil
}
func (b *localController) Disconnect(ctx context.Context, key string) error {
b.Close()
return nil
}
func (b *localController) Inspect(ctx context.Context, sessionID string) (*controllerapi.InspectResponse, error) {
if sessionID != b.sessionID {
return nil, errors.Errorf("unknown session ID %q", sessionID)
}
return &controllerapi.InspectResponse{Options: b.buildConfig.buildOptions}, nil
}


@@ -1,11 +1,5 @@
package pb
type Attest struct {
Type string
Disabled bool
Attrs string
}
func CreateAttestations(attests []*Attest) map[string]*string {
result := map[string]*string{}
for _, attest := range attests {


@@ -1,15 +1,6 @@
package pb
import (
"maps"
"github.com/moby/buildkit/client"
)
type CacheOptionsEntry struct {
Type string
Attrs map[string]string
}
import "github.com/moby/buildkit/client"
func CreateCaches(entries []*CacheOptionsEntry) []client.CacheOptionsEntry {
var outs []client.CacheOptionsEntry
@@ -21,7 +12,9 @@ func CreateCaches(entries []*CacheOptionsEntry) []client.CacheOptionsEntry {
Type: entry.Type,
Attrs: map[string]string{},
}
maps.Copy(out.Attrs, entry.Attrs)
for k, v := range entry.Attrs {
out.Attrs[k] = v
}
outs = append(outs, out)
}
return outs
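Both sides of this hunk (and the matching one in export.go further down) copy map entries the same way: maps.Copy from the Go 1.21+ standard library is equivalent to the explicit loop. A standalone sketch of the equivalence, purely for illustration:

package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[string]string{"mode": "max", "dest": "/tmp/cache"}

	a := map[string]string{}
	maps.Copy(a, src) // stdlib helper (Go 1.21+)

	b := map[string]string{}
	for k, v := range src { // equivalent explicit loop
		b[k] = v
	}

	fmt.Println(maps.Equal(a, b)) // true
}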

File diff suppressed because it is too large.


@@ -0,0 +1,250 @@
syntax = "proto3";
package buildx.controller.v1;
import "github.com/moby/buildkit/api/services/control/control.proto";
import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto";
option go_package = "github.com/docker/buildx/controller/pb";
service Controller {
rpc Build(BuildRequest) returns (BuildResponse);
rpc Inspect(InspectRequest) returns (InspectResponse);
rpc Status(StatusRequest) returns (stream StatusResponse);
rpc Input(stream InputMessage) returns (InputResponse);
rpc Invoke(stream Message) returns (stream Message);
rpc List(ListRequest) returns (ListResponse);
rpc Disconnect(DisconnectRequest) returns (DisconnectResponse);
rpc Info(InfoRequest) returns (InfoResponse);
rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse);
rpc DisconnectProcess(DisconnectProcessRequest) returns (DisconnectProcessResponse);
}
message ListProcessesRequest {
string SessionID = 1;
}
message ListProcessesResponse {
repeated ProcessInfo Infos = 1;
}
message ProcessInfo {
string ProcessID = 1;
InvokeConfig InvokeConfig = 2;
}
message DisconnectProcessRequest {
string SessionID = 1;
string ProcessID = 2;
}
message DisconnectProcessResponse {
}
message BuildRequest {
string SessionID = 1;
BuildOptions Options = 2;
}
message BuildOptions {
string ContextPath = 1;
string DockerfileName = 2;
CallFunc CallFunc = 3;
map<string, string> NamedContexts = 4;
repeated string Allow = 5;
repeated Attest Attests = 6;
map<string, string> BuildArgs = 7;
repeated CacheOptionsEntry CacheFrom = 8;
repeated CacheOptionsEntry CacheTo = 9;
string CgroupParent = 10;
repeated ExportEntry Exports = 11;
repeated string ExtraHosts = 12;
map<string, string> Labels = 13;
string NetworkMode = 14;
repeated string NoCacheFilter = 15;
repeated string Platforms = 16;
repeated Secret Secrets = 17;
int64 ShmSize = 18;
repeated SSH SSH = 19;
repeated string Tags = 20;
string Target = 21;
UlimitOpt Ulimits = 22;
string Builder = 23;
bool NoCache = 24;
bool Pull = 25;
bool ExportPush = 26;
bool ExportLoad = 27;
moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 28;
string Ref = 29;
string GroupRef = 30;
repeated string Annotations = 31;
string ProvenanceResponseMode = 32;
}
message ExportEntry {
string Type = 1;
map<string, string> Attrs = 2;
string Destination = 3;
}
message CacheOptionsEntry {
string Type = 1;
map<string, string> Attrs = 2;
}
message Attest {
string Type = 1;
bool Disabled = 2;
string Attrs = 3;
}
message SSH {
string ID = 1;
repeated string Paths = 2;
}
message Secret {
string ID = 1;
string FilePath = 2;
string Env = 3;
}
message CallFunc {
string Name = 1;
string Format = 2;
bool IgnoreStatus = 3;
}
message InspectRequest {
string SessionID = 1;
}
message InspectResponse {
BuildOptions Options = 1;
}
message UlimitOpt {
map<string, Ulimit> values = 1;
}
message Ulimit {
string Name = 1;
int64 Hard = 2;
int64 Soft = 3;
}
message BuildResponse {
map<string, string> ExporterResponse = 1;
}
message DisconnectRequest {
string SessionID = 1;
}
message DisconnectResponse {}
message ListRequest {
string SessionID = 1;
}
message ListResponse {
repeated string keys = 1;
}
message InputMessage {
oneof Input {
InputInitMessage Init = 1;
DataMessage Data = 2;
}
}
message InputInitMessage {
string SessionID = 1;
}
message DataMessage {
bool EOF = 1; // true if eof was reached
bytes Data = 2; // should be chunked smaller than 4MB:
// https://pkg.go.dev/google.golang.org/grpc#MaxRecvMsgSize
}
message InputResponse {}
message Message {
oneof Input {
InitMessage Init = 1;
// FdMessage used from client to server for input (stdin) and
// from server to client for output (stdout, stderr)
FdMessage File = 2;
// ResizeMessage used from client to server for terminal resize events
ResizeMessage Resize = 3;
// SignalMessage is used from client to server to send signal events
SignalMessage Signal = 4;
}
}
message InitMessage {
string SessionID = 1;
// If ProcessID already exists in the server, it tries to connect to it
// instead of invoking the new one. In this case, InvokeConfig will be ignored.
string ProcessID = 2;
InvokeConfig InvokeConfig = 3;
}
message InvokeConfig {
repeated string Entrypoint = 1;
repeated string Cmd = 2;
bool NoCmd = 11; // Do not set cmd but use the image's default
repeated string Env = 3;
string User = 4;
bool NoUser = 5; // Do not set user but use the image's default
string Cwd = 6;
bool NoCwd = 7; // Do not set cwd but use the image's default
bool Tty = 8;
bool Rollback = 9; // Kill all processes in the container and recreate it.
bool Initial = 10; // Run container from the initial state of that stage (supported only on the failed step)
}
message FdMessage {
uint32 Fd = 1; // what fd the data was from
bool EOF = 2; // true if eof was reached
bytes Data = 3; // should be chunked smaller than 4MB:
// https://pkg.go.dev/google.golang.org/grpc#MaxRecvMsgSize
}
message ResizeMessage {
uint32 Rows = 1;
uint32 Cols = 2;
}
message SignalMessage {
// we only send name (ie HUP, INT) because the int values
// are platform dependent.
string Name = 1;
}
message StatusRequest {
string SessionID = 1;
}
message StatusResponse {
repeated moby.buildkit.v1.Vertex vertexes = 1;
repeated moby.buildkit.v1.VertexStatus statuses = 2;
repeated moby.buildkit.v1.VertexLog logs = 3;
repeated moby.buildkit.v1.VertexWarning warnings = 4;
}
message InfoRequest {}
message InfoResponse {
BuildxVersion buildxVersion = 1;
}
message BuildxVersion {
string package = 1;
string version = 2;
string revision = 3;
}
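The service defined above is consumed through the generated Go bindings that follow. A minimal sketch of a caller hitting the Info RPC over the server's unix socket; the socket path here is hypothetical (the real one is derived in controller/remote/commands.go later in this diff), and everything else uses only the generated types shown below:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/buildx/controller/pb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Hypothetical socket path; the real server derives its path from the
	// buildx config dir (see controller/remote/commands.go below).
	conn, err := grpc.NewClient("unix:///tmp/buildx.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	c := pb.NewControllerClient(conn)
	res, err := c.Info(context.Background(), &pb.InfoRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.GetBuildxVersion().GetVersion())
}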


@@ -0,0 +1,452 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.11.4
// source: github.com/docker/buildx/controller/pb/controller.proto
package pb
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Controller_Build_FullMethodName = "/buildx.controller.v1.Controller/Build"
Controller_Inspect_FullMethodName = "/buildx.controller.v1.Controller/Inspect"
Controller_Status_FullMethodName = "/buildx.controller.v1.Controller/Status"
Controller_Input_FullMethodName = "/buildx.controller.v1.Controller/Input"
Controller_Invoke_FullMethodName = "/buildx.controller.v1.Controller/Invoke"
Controller_List_FullMethodName = "/buildx.controller.v1.Controller/List"
Controller_Disconnect_FullMethodName = "/buildx.controller.v1.Controller/Disconnect"
Controller_Info_FullMethodName = "/buildx.controller.v1.Controller/Info"
Controller_ListProcesses_FullMethodName = "/buildx.controller.v1.Controller/ListProcesses"
Controller_DisconnectProcess_FullMethodName = "/buildx.controller.v1.Controller/DisconnectProcess"
)
// ControllerClient is the client API for Controller service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ControllerClient interface {
Build(ctx context.Context, in *BuildRequest, opts ...grpc.CallOption) (*BuildResponse, error)
Inspect(ctx context.Context, in *InspectRequest, opts ...grpc.CallOption) (*InspectResponse, error)
Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StatusResponse], error)
Input(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[InputMessage, InputResponse], error)
Invoke(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Message, Message], error)
List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error)
Disconnect(ctx context.Context, in *DisconnectRequest, opts ...grpc.CallOption) (*DisconnectResponse, error)
Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error)
ListProcesses(ctx context.Context, in *ListProcessesRequest, opts ...grpc.CallOption) (*ListProcessesResponse, error)
DisconnectProcess(ctx context.Context, in *DisconnectProcessRequest, opts ...grpc.CallOption) (*DisconnectProcessResponse, error)
}
type controllerClient struct {
cc grpc.ClientConnInterface
}
func NewControllerClient(cc grpc.ClientConnInterface) ControllerClient {
return &controllerClient{cc}
}
func (c *controllerClient) Build(ctx context.Context, in *BuildRequest, opts ...grpc.CallOption) (*BuildResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(BuildResponse)
err := c.cc.Invoke(ctx, Controller_Build_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) Inspect(ctx context.Context, in *InspectRequest, opts ...grpc.CallOption) (*InspectResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(InspectResponse)
err := c.cc.Invoke(ctx, Controller_Inspect_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StatusResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[0], Controller_Status_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[StatusRequest, StatusResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_StatusClient = grpc.ServerStreamingClient[StatusResponse]
func (c *controllerClient) Input(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[InputMessage, InputResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[1], Controller_Input_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[InputMessage, InputResponse]{ClientStream: stream}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InputClient = grpc.ClientStreamingClient[InputMessage, InputResponse]
func (c *controllerClient) Invoke(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Message, Message], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Controller_ServiceDesc.Streams[2], Controller_Invoke_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[Message, Message]{ClientStream: stream}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InvokeClient = grpc.BidiStreamingClient[Message, Message]
func (c *controllerClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListResponse)
err := c.cc.Invoke(ctx, Controller_List_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) Disconnect(ctx context.Context, in *DisconnectRequest, opts ...grpc.CallOption) (*DisconnectResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DisconnectResponse)
err := c.cc.Invoke(ctx, Controller_Disconnect_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(InfoResponse)
err := c.cc.Invoke(ctx, Controller_Info_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) ListProcesses(ctx context.Context, in *ListProcessesRequest, opts ...grpc.CallOption) (*ListProcessesResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListProcessesResponse)
err := c.cc.Invoke(ctx, Controller_ListProcesses_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *controllerClient) DisconnectProcess(ctx context.Context, in *DisconnectProcessRequest, opts ...grpc.CallOption) (*DisconnectProcessResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DisconnectProcessResponse)
err := c.cc.Invoke(ctx, Controller_DisconnectProcess_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// ControllerServer is the server API for Controller service.
// All implementations should embed UnimplementedControllerServer
// for forward compatibility.
type ControllerServer interface {
Build(context.Context, *BuildRequest) (*BuildResponse, error)
Inspect(context.Context, *InspectRequest) (*InspectResponse, error)
Status(*StatusRequest, grpc.ServerStreamingServer[StatusResponse]) error
Input(grpc.ClientStreamingServer[InputMessage, InputResponse]) error
Invoke(grpc.BidiStreamingServer[Message, Message]) error
List(context.Context, *ListRequest) (*ListResponse, error)
Disconnect(context.Context, *DisconnectRequest) (*DisconnectResponse, error)
Info(context.Context, *InfoRequest) (*InfoResponse, error)
ListProcesses(context.Context, *ListProcessesRequest) (*ListProcessesResponse, error)
DisconnectProcess(context.Context, *DisconnectProcessRequest) (*DisconnectProcessResponse, error)
}
// UnimplementedControllerServer should be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedControllerServer struct{}
func (UnimplementedControllerServer) Build(context.Context, *BuildRequest) (*BuildResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Build not implemented")
}
func (UnimplementedControllerServer) Inspect(context.Context, *InspectRequest) (*InspectResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Inspect not implemented")
}
func (UnimplementedControllerServer) Status(*StatusRequest, grpc.ServerStreamingServer[StatusResponse]) error {
return status.Errorf(codes.Unimplemented, "method Status not implemented")
}
func (UnimplementedControllerServer) Input(grpc.ClientStreamingServer[InputMessage, InputResponse]) error {
return status.Errorf(codes.Unimplemented, "method Input not implemented")
}
func (UnimplementedControllerServer) Invoke(grpc.BidiStreamingServer[Message, Message]) error {
return status.Errorf(codes.Unimplemented, "method Invoke not implemented")
}
func (UnimplementedControllerServer) List(context.Context, *ListRequest) (*ListResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
}
func (UnimplementedControllerServer) Disconnect(context.Context, *DisconnectRequest) (*DisconnectResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Disconnect not implemented")
}
func (UnimplementedControllerServer) Info(context.Context, *InfoRequest) (*InfoResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Info not implemented")
}
func (UnimplementedControllerServer) ListProcesses(context.Context, *ListProcessesRequest) (*ListProcessesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListProcesses not implemented")
}
func (UnimplementedControllerServer) DisconnectProcess(context.Context, *DisconnectProcessRequest) (*DisconnectProcessResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DisconnectProcess not implemented")
}
func (UnimplementedControllerServer) testEmbeddedByValue() {}
// UnsafeControllerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControllerServer will
// result in compilation errors.
type UnsafeControllerServer interface {
mustEmbedUnimplementedControllerServer()
}
func RegisterControllerServer(s grpc.ServiceRegistrar, srv ControllerServer) {
// If the following call panics, it indicates UnimplementedControllerServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&Controller_ServiceDesc, srv)
}
func _Controller_Build_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(BuildRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).Build(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_Build_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).Build(ctx, req.(*BuildRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_Inspect_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InspectRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).Inspect(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_Inspect_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).Inspect(ctx, req.(*InspectRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_Status_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(StatusRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(ControllerServer).Status(m, &grpc.GenericServerStream[StatusRequest, StatusResponse]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_StatusServer = grpc.ServerStreamingServer[StatusResponse]
func _Controller_Input_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ControllerServer).Input(&grpc.GenericServerStream[InputMessage, InputResponse]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InputServer = grpc.ClientStreamingServer[InputMessage, InputResponse]
func _Controller_Invoke_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ControllerServer).Invoke(&grpc.GenericServerStream[Message, Message]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Controller_InvokeServer = grpc.BidiStreamingServer[Message, Message]
func _Controller_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).List(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_List_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).List(ctx, req.(*ListRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_Disconnect_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DisconnectRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).Disconnect(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_Disconnect_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).Disconnect(ctx, req.(*DisconnectRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InfoRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).Info(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_Info_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).Info(ctx, req.(*InfoRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_ListProcesses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListProcessesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).ListProcesses(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_ListProcesses_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).ListProcesses(ctx, req.(*ListProcessesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Controller_DisconnectProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DisconnectProcessRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ControllerServer).DisconnectProcess(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Controller_DisconnectProcess_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControllerServer).DisconnectProcess(ctx, req.(*DisconnectProcessRequest))
}
return interceptor(ctx, in, info, handler)
}
// Controller_ServiceDesc is the grpc.ServiceDesc for Controller service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Controller_ServiceDesc = grpc.ServiceDesc{
ServiceName: "buildx.controller.v1.Controller",
HandlerType: (*ControllerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Build",
Handler: _Controller_Build_Handler,
},
{
MethodName: "Inspect",
Handler: _Controller_Inspect_Handler,
},
{
MethodName: "List",
Handler: _Controller_List_Handler,
},
{
MethodName: "Disconnect",
Handler: _Controller_Disconnect_Handler,
},
{
MethodName: "Info",
Handler: _Controller_Info_Handler,
},
{
MethodName: "ListProcesses",
Handler: _Controller_ListProcesses_Handler,
},
{
MethodName: "DisconnectProcess",
Handler: _Controller_DisconnectProcess_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "Status",
Handler: _Controller_Status_Handler,
ServerStreams: true,
},
{
StreamName: "Input",
Handler: _Controller_Input_Handler,
ClientStreams: true,
},
{
StreamName: "Invoke",
Handler: _Controller_Invoke_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "github.com/docker/buildx/controller/pb/controller.proto",
}
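On the server side, an implementation embeds UnimplementedControllerServer and is registered against this service descriptor. A minimal sketch under the same assumptions as the client example above (hypothetical socket path, illustrative type name), overriding only the Info RPC:

package main

import (
	"context"
	"log"
	"net"

	"github.com/docker/buildx/controller/pb"
	"google.golang.org/grpc"
)

// infoOnlyServer embeds UnimplementedControllerServer by value, as the
// registration check above expects, and overrides a single RPC.
type infoOnlyServer struct {
	pb.UnimplementedControllerServer
}

func (s *infoOnlyServer) Info(ctx context.Context, _ *pb.InfoRequest) (*pb.InfoResponse, error) {
	return &pb.InfoResponse{BuildxVersion: &pb.BuildxVersion{Version: "dev"}}, nil
}

func main() {
	l, err := net.Listen("unix", "/tmp/buildx.sock") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	pb.RegisterControllerServer(srv, &infoOnlyServer{})
	log.Fatal(srv.Serve(l))
}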

File diff suppressed because it is too large.


@@ -2,7 +2,6 @@ package pb
import (
"io"
"maps"
"os"
"strconv"
@@ -11,12 +10,6 @@ import (
"github.com/pkg/errors"
)
type ExportEntry struct {
Type string
Attrs map[string]string
Destination string
}
func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, []string, error) {
var outs []client.ExportEntry
var localPaths []string
@@ -33,7 +26,9 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, []string, erro
Type: entry.Type,
Attrs: map[string]string{},
}
maps.Copy(out.Attrs, entry.Attrs)
for k, v := range entry.Attrs {
out.Attrs[k] = v
}
supportFile := false
supportDir := false


@@ -1,40 +0,0 @@
package pb
import (
"fmt"
"strings"
)
type CallFunc struct {
Name string
Format string
IgnoreStatus bool
}
func (x *CallFunc) String() string {
var elems []string
if x.Name != "" {
elems = append(elems, fmt.Sprintf("Name:%q", x.Name))
}
if x.Format != "" {
elems = append(elems, fmt.Sprintf("Format:%q", x.Format))
}
if x.IgnoreStatus {
elems = append(elems, fmt.Sprintf("IgnoreStatus:%v", x.IgnoreStatus))
}
return strings.Join(elems, " ")
}
type InvokeConfig struct {
Entrypoint []string
Cmd []string
NoCmd bool
Env []string
User string
NoUser bool
Cwd string
NoCwd bool
Tty bool
Rollback bool
Initial bool
}


@@ -1,52 +1,15 @@
package build
package pb
import (
"path/filepath"
"strings"
"github.com/docker/buildx/controller/pb"
sourcepolicy "github.com/moby/buildkit/sourcepolicy/pb"
"github.com/moby/buildkit/util/gitutil"
)
type Options struct {
ContextPath string
DockerfileName string
CallFunc *pb.CallFunc
NamedContexts map[string]string
Allow []string
Attests []*pb.Attest
BuildArgs map[string]string
CacheFrom []*pb.CacheOptionsEntry
CacheTo []*pb.CacheOptionsEntry
CgroupParent string
Exports []*pb.ExportEntry
ExtraHosts []string
Labels map[string]string
NetworkMode string
NoCacheFilter []string
Platforms []string
Secrets []*pb.Secret
ShmSize int64
SSH []*pb.SSH
Tags []string
Target string
Ulimits *pb.UlimitOpt
Builder string
NoCache bool
Pull bool
ExportPush bool
ExportLoad bool
SourcePolicy *sourcepolicy.Policy
Ref string
GroupRef string
Annotations []string
ProvenanceResponseMode string
}
// ResolveOptionPaths resolves all paths contained in BuildOptions
// and replaces them to absolute paths.
func ResolveOptionPaths(options *Options) (_ *Options, err error) {
func ResolveOptionPaths(options *BuildOptions) (_ *BuildOptions, err error) {
localContext := false
if options.ContextPath != "" && options.ContextPath != "-" {
if !isRemoteURL(options.ContextPath) {
@@ -93,7 +56,7 @@ func ResolveOptionPaths(options *Options) (_ *Options, err error) {
}
options.NamedContexts = contexts
var cacheFrom []*pb.CacheOptionsEntry
var cacheFrom []*CacheOptionsEntry
for _, co := range options.CacheFrom {
switch co.Type {
case "local":
@@ -124,7 +87,7 @@ func ResolveOptionPaths(options *Options) (_ *Options, err error) {
}
options.CacheFrom = cacheFrom
var cacheTo []*pb.CacheOptionsEntry
var cacheTo []*CacheOptionsEntry
for _, co := range options.CacheTo {
switch co.Type {
case "local":
@@ -154,7 +117,7 @@ func ResolveOptionPaths(options *Options) (_ *Options, err error) {
}
}
options.CacheTo = cacheTo
var exports []*pb.ExportEntry
var exports []*ExportEntry
for _, e := range options.Exports {
if e.Destination != "" && e.Destination != "-" {
e.Destination, err = filepath.Abs(e.Destination)
@@ -166,7 +129,7 @@ func ResolveOptionPaths(options *Options) (_ *Options, err error) {
}
options.Exports = exports
var secrets []*pb.Secret
var secrets []*Secret
for _, s := range options.Secrets {
if s.FilePath != "" {
s.FilePath, err = filepath.Abs(s.FilePath)
@@ -178,7 +141,7 @@ func ResolveOptionPaths(options *Options) (_ *Options, err error) {
}
options.Secrets = secrets
var ssh []*pb.SSH
var ssh []*SSH
for _, s := range options.SSH {
var ps []string
for _, pt := range s.Paths {


@@ -1,12 +1,12 @@
package build
package pb
import (
"os"
"path/filepath"
"testing"
"github.com/docker/buildx/controller/pb"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
)
func TestResolvePaths(t *testing.T) {
@@ -16,59 +16,59 @@ func TestResolvePaths(t *testing.T) {
require.NoError(t, os.Chdir(tmpwd))
tests := []struct {
name string
options *Options
want *Options
options *BuildOptions
want *BuildOptions
}{
{
name: "contextpath",
options: &Options{ContextPath: "test"},
want: &Options{ContextPath: filepath.Join(tmpwd, "test")},
options: &BuildOptions{ContextPath: "test"},
want: &BuildOptions{ContextPath: filepath.Join(tmpwd, "test")},
},
{
name: "contextpath-cwd",
options: &Options{ContextPath: "."},
want: &Options{ContextPath: tmpwd},
options: &BuildOptions{ContextPath: "."},
want: &BuildOptions{ContextPath: tmpwd},
},
{
name: "contextpath-dash",
options: &Options{ContextPath: "-"},
want: &Options{ContextPath: "-"},
options: &BuildOptions{ContextPath: "-"},
want: &BuildOptions{ContextPath: "-"},
},
{
name: "contextpath-ssh",
options: &Options{ContextPath: "git@github.com:docker/buildx.git"},
want: &Options{ContextPath: "git@github.com:docker/buildx.git"},
options: &BuildOptions{ContextPath: "git@github.com:docker/buildx.git"},
want: &BuildOptions{ContextPath: "git@github.com:docker/buildx.git"},
},
{
name: "dockerfilename",
options: &Options{DockerfileName: "test", ContextPath: "."},
want: &Options{DockerfileName: filepath.Join(tmpwd, "test"), ContextPath: tmpwd},
options: &BuildOptions{DockerfileName: "test", ContextPath: "."},
want: &BuildOptions{DockerfileName: filepath.Join(tmpwd, "test"), ContextPath: tmpwd},
},
{
name: "dockerfilename-dash",
options: &Options{DockerfileName: "-", ContextPath: "."},
want: &Options{DockerfileName: "-", ContextPath: tmpwd},
options: &BuildOptions{DockerfileName: "-", ContextPath: "."},
want: &BuildOptions{DockerfileName: "-", ContextPath: tmpwd},
},
{
name: "dockerfilename-remote",
options: &Options{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
want: &Options{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
options: &BuildOptions{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
want: &BuildOptions{DockerfileName: "test", ContextPath: "git@github.com:docker/buildx.git"},
},
{
name: "contexts",
options: &Options{NamedContexts: map[string]string{
options: &BuildOptions{NamedContexts: map[string]string{
"a": "test1", "b": "test2",
"alpine": "docker-image://alpine@sha256:0123456789", "project": "https://github.com/myuser/project.git",
}},
want: &Options{NamedContexts: map[string]string{
want: &BuildOptions{NamedContexts: map[string]string{
"a": filepath.Join(tmpwd, "test1"), "b": filepath.Join(tmpwd, "test2"),
"alpine": "docker-image://alpine@sha256:0123456789", "project": "https://github.com/myuser/project.git",
}},
},
{
name: "cache-from",
options: &Options{
CacheFrom: []*pb.CacheOptionsEntry{
options: &BuildOptions{
CacheFrom: []*CacheOptionsEntry{
{
Type: "local",
Attrs: map[string]string{"src": "test"},
@@ -79,8 +79,8 @@ func TestResolvePaths(t *testing.T) {
},
},
},
want: &Options{
CacheFrom: []*pb.CacheOptionsEntry{
want: &BuildOptions{
CacheFrom: []*CacheOptionsEntry{
{
Type: "local",
Attrs: map[string]string{"src": filepath.Join(tmpwd, "test")},
@@ -94,8 +94,8 @@ func TestResolvePaths(t *testing.T) {
},
{
name: "cache-to",
options: &Options{
CacheTo: []*pb.CacheOptionsEntry{
options: &BuildOptions{
CacheTo: []*CacheOptionsEntry{
{
Type: "local",
Attrs: map[string]string{"dest": "test"},
@@ -106,8 +106,8 @@ func TestResolvePaths(t *testing.T) {
},
},
},
want: &Options{
CacheTo: []*pb.CacheOptionsEntry{
want: &BuildOptions{
CacheTo: []*CacheOptionsEntry{
{
Type: "local",
Attrs: map[string]string{"dest": filepath.Join(tmpwd, "test")},
@@ -121,8 +121,8 @@ func TestResolvePaths(t *testing.T) {
},
{
name: "exports",
options: &Options{
Exports: []*pb.ExportEntry{
options: &BuildOptions{
Exports: []*ExportEntry{
{
Type: "local",
Destination: "-",
@@ -149,8 +149,8 @@ func TestResolvePaths(t *testing.T) {
},
},
},
want: &Options{
Exports: []*pb.ExportEntry{
want: &BuildOptions{
Exports: []*ExportEntry{
{
Type: "local",
Destination: "-",
@@ -180,8 +180,8 @@ func TestResolvePaths(t *testing.T) {
},
{
name: "secrets",
options: &Options{
Secrets: []*pb.Secret{
options: &BuildOptions{
Secrets: []*Secret{
{
FilePath: "test1",
},
@@ -195,8 +195,8 @@ func TestResolvePaths(t *testing.T) {
},
},
},
want: &Options{
Secrets: []*pb.Secret{
want: &BuildOptions{
Secrets: []*Secret{
{
FilePath: filepath.Join(tmpwd, "test1"),
},
@@ -213,8 +213,8 @@ func TestResolvePaths(t *testing.T) {
},
{
name: "ssh",
options: &Options{
SSH: []*pb.SSH{
options: &BuildOptions{
SSH: []*SSH{
{
ID: "default",
Paths: []string{"test1", "test2"},
@@ -225,8 +225,8 @@ func TestResolvePaths(t *testing.T) {
},
},
},
want: &Options{
SSH: []*pb.SSH{
want: &BuildOptions{
SSH: []*SSH{
{
ID: "default",
Paths: []string{filepath.Join(tmpwd, "test1"), filepath.Join(tmpwd, "test2")},
@@ -244,7 +244,9 @@ func TestResolvePaths(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
got, err := ResolveOptionPaths(tt.options)
require.NoError(t, err)
require.Equal(t, tt.want, got)
if !proto.Equal(tt.want, got) {
t.Fatalf("expected %#v, got %#v", tt.want, got)
}
})
}
}

controller/pb/progress.go (new file, 162 lines)

@@ -0,0 +1,162 @@
package pb
import (
"time"
"github.com/docker/buildx/util/progress"
control "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/opencontainers/go-digest"
"google.golang.org/protobuf/types/known/timestamppb"
)
type writer struct {
ch chan<- *StatusResponse
}
func NewProgressWriter(ch chan<- *StatusResponse) progress.Writer {
return &writer{ch: ch}
}
func (w *writer) Write(status *client.SolveStatus) {
w.ch <- ToControlStatus(status)
}
func (w *writer) WriteBuildRef(target string, ref string) {}
func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
return true
}
func (w *writer) ClearLogSource(interface{}) {}
func ToControlStatus(s *client.SolveStatus) *StatusResponse {
resp := StatusResponse{}
for _, v := range s.Vertexes {
resp.Vertexes = append(resp.Vertexes, &control.Vertex{
Digest: string(v.Digest),
Inputs: digestSliceToPB(v.Inputs),
Name: v.Name,
Started: timestampToPB(v.Started),
Completed: timestampToPB(v.Completed),
Error: v.Error,
Cached: v.Cached,
ProgressGroup: v.ProgressGroup,
})
}
for _, v := range s.Statuses {
resp.Statuses = append(resp.Statuses, &control.VertexStatus{
ID: v.ID,
Vertex: string(v.Vertex),
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: timestamppb.New(v.Timestamp),
Started: timestampToPB(v.Started),
Completed: timestampToPB(v.Completed),
})
}
for _, v := range s.Logs {
resp.Logs = append(resp.Logs, &control.VertexLog{
Vertex: string(v.Vertex),
Stream: int64(v.Stream),
Msg: v.Data,
Timestamp: timestamppb.New(v.Timestamp),
})
}
for _, v := range s.Warnings {
resp.Warnings = append(resp.Warnings, &control.VertexWarning{
Vertex: string(v.Vertex),
Level: int64(v.Level),
Short: v.Short,
Detail: v.Detail,
Url: v.URL,
Info: v.SourceInfo,
Ranges: v.Range,
})
}
return &resp
}
func FromControlStatus(resp *StatusResponse) *client.SolveStatus {
s := client.SolveStatus{}
for _, v := range resp.Vertexes {
s.Vertexes = append(s.Vertexes, &client.Vertex{
Digest: digest.Digest(v.Digest),
Inputs: digestSliceFromPB(v.Inputs),
Name: v.Name,
Started: timestampFromPB(v.Started),
Completed: timestampFromPB(v.Completed),
Error: v.Error,
Cached: v.Cached,
ProgressGroup: v.ProgressGroup,
})
}
for _, v := range resp.Statuses {
s.Statuses = append(s.Statuses, &client.VertexStatus{
ID: v.ID,
Vertex: digest.Digest(v.Vertex),
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp.AsTime(),
Started: timestampFromPB(v.Started),
Completed: timestampFromPB(v.Completed),
})
}
for _, v := range resp.Logs {
s.Logs = append(s.Logs, &client.VertexLog{
Vertex: digest.Digest(v.Vertex),
Stream: int(v.Stream),
Data: v.Msg,
Timestamp: v.Timestamp.AsTime(),
})
}
for _, v := range resp.Warnings {
s.Warnings = append(s.Warnings, &client.VertexWarning{
Vertex: digest.Digest(v.Vertex),
Level: int(v.Level),
Short: v.Short,
Detail: v.Detail,
URL: v.Url,
SourceInfo: v.Info,
Range: v.Ranges,
})
}
return &s
}
func timestampFromPB(ts *timestamppb.Timestamp) *time.Time {
if ts == nil {
return nil
}
t := ts.AsTime()
if t.IsZero() {
return nil
}
return &t
}
func timestampToPB(ts *time.Time) *timestamppb.Timestamp {
if ts == nil {
return nil
}
return timestamppb.New(*ts)
}
func digestSliceFromPB(elems []string) []digest.Digest {
clone := make([]digest.Digest, len(elems))
for i, e := range elems {
clone[i] = digest.Digest(e)
}
return clone
}
func digestSliceToPB(elems []digest.Digest) []string {
clone := make([]string, len(elems))
for i, e := range elems {
clone[i] = string(e)
}
return clone
}
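The file above converts between buildkit's client.SolveStatus and the controller's StatusResponse wire type in both directions. A small illustrative round-trip using only the helpers defined above (the log line is made up):

package main

import (
	"fmt"
	"time"

	"github.com/docker/buildx/controller/pb"
	"github.com/moby/buildkit/client"
)

func main() {
	ch := make(chan *pb.StatusResponse, 1)
	w := pb.NewProgressWriter(ch) // progress.Writer backed by the channel

	// Round-trip a single log line through the gRPC status types.
	w.Write(&client.SolveStatus{
		Logs: []*client.VertexLog{{Data: []byte("hello"), Timestamp: time.Now()}},
	})

	st := pb.FromControlStatus(<-ch) // back to *client.SolveStatus
	fmt.Println(string(st.Logs[0].Data))
}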


@@ -5,12 +5,6 @@ import (
"github.com/moby/buildkit/session/secrets/secretsprovider"
)
type Secret struct {
ID string
FilePath string
Env string
}
func CreateSecrets(secrets []*Secret) (session.Attachable, error) {
fs := make([]secretsprovider.Source, 0, len(secrets))
for _, secret := range secrets {


@@ -1,23 +1,16 @@
package pb
import (
"slices"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/sshforward/sshprovider"
)
type SSH struct {
ID string
Paths []string
}
func CreateSSH(ssh []*SSH) (session.Attachable, error) {
configs := make([]sshprovider.AgentConfig, 0, len(ssh))
for _, ssh := range ssh {
cfg := sshprovider.AgentConfig{
ID: ssh.ID,
Paths: slices.Clone(ssh.Paths),
Paths: append([]string{}, ssh.Paths...),
}
configs = append(configs, cfg)
}


@@ -1,11 +0,0 @@
package pb
type UlimitOpt struct {
Values map[string]*Ulimit
}
type Ulimit struct {
Name string
Hard int64
Soft int64
}


@@ -39,7 +39,7 @@ func (p *Process) Done() <-chan error {
return p.errCh
}
// Manager manages a set of processes.
// Manager manages a set of proceses.
type Manager struct {
container atomic.Value
processes sync.Map
@@ -73,9 +73,9 @@ func (m *Manager) CancelRunningProcesses() {
}
// ListProcesses lists all running processes.
func (m *Manager) ListProcesses() (res []*ProcessInfo) {
func (m *Manager) ListProcesses() (res []*pb.ProcessInfo) {
m.processes.Range(func(key, value any) bool {
res = append(res, &ProcessInfo{
res = append(res, &pb.ProcessInfo{
ProcessID: key.(string),
InvokeConfig: value.(*Process).invokeConfig,
})
@@ -154,8 +154,3 @@ func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *p
return p, nil
}
type ProcessInfo struct {
ProcessID string
InvokeConfig *pb.InvokeConfig
}

controller/remote/client.go (new file, 243 lines)

@@ -0,0 +1,243 @@
package remote
import (
"context"
"io"
"sync"
"time"
"github.com/containerd/containerd/v2/defaults"
"github.com/containerd/containerd/v2/pkg/dialer"
"github.com/docker/buildx/build"
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials/insecure"
)
func NewClient(ctx context.Context, addr string) (*Client, error) {
backoffConfig := backoff.DefaultConfig
backoffConfig.MaxDelay = 3 * time.Second
connParams := grpc.ConnectParams{
Backoff: backoffConfig,
}
gopts := []grpc.DialOption{
//nolint:staticcheck // ignore SA1019: WithBlock is deprecated and does not work with NewClient.
grpc.WithBlock(),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithConnectParams(connParams),
grpc.WithContextDialer(dialer.ContextDialer),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor),
}
//nolint:staticcheck // ignore SA1019: Recommended NewClient has different behavior from DialContext.
conn, err := grpc.DialContext(ctx, dialer.DialAddress(addr), gopts...)
if err != nil {
return nil, err
}
return &Client{conn: conn}, nil
}
type Client struct {
conn *grpc.ClientConn
closeOnce sync.Once
}
func (c *Client) Close() (err error) {
c.closeOnce.Do(func() {
err = c.conn.Close()
})
return
}
func (c *Client) Version(ctx context.Context) (string, string, string, error) {
res, err := c.client().Info(ctx, &pb.InfoRequest{})
if err != nil {
return "", "", "", err
}
v := res.BuildxVersion
return v.Package, v.Version, v.Revision, nil
}
func (c *Client) List(ctx context.Context) (keys []string, retErr error) {
res, err := c.client().List(ctx, &pb.ListRequest{})
if err != nil {
return nil, err
}
return res.Keys, nil
}
func (c *Client) Disconnect(ctx context.Context, sessionID string) error {
if sessionID == "" {
return nil
}
_, err := c.client().Disconnect(ctx, &pb.DisconnectRequest{SessionID: sessionID})
return err
}
func (c *Client) ListProcesses(ctx context.Context, sessionID string) (infos []*pb.ProcessInfo, retErr error) {
res, err := c.client().ListProcesses(ctx, &pb.ListProcessesRequest{SessionID: sessionID})
if err != nil {
return nil, err
}
return res.Infos, nil
}
func (c *Client) DisconnectProcess(ctx context.Context, sessionID, pid string) error {
_, err := c.client().DisconnectProcess(ctx, &pb.DisconnectProcessRequest{SessionID: sessionID, ProcessID: pid})
return err
}
func (c *Client) Invoke(ctx context.Context, sessionID string, pid string, invokeConfig *pb.InvokeConfig, in io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
if sessionID == "" || pid == "" {
return errors.New("build session ID must be specified")
}
stream, err := c.client().Invoke(ctx)
if err != nil {
return err
}
return attachIO(ctx, stream, &pb.InitMessage{SessionID: sessionID, ProcessID: pid, InvokeConfig: invokeConfig}, ioAttachConfig{
stdin: in,
stdout: stdout,
stderr: stderr,
// TODO: Signal, Resize
})
}
func (c *Client) Inspect(ctx context.Context, sessionID string) (*pb.InspectResponse, error) {
return c.client().Inspect(ctx, &pb.InspectRequest{SessionID: sessionID})
}
func (c *Client) Build(ctx context.Context, options *pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, *build.Inputs, error) {
ref := identity.NewID()
statusChan := make(chan *client.SolveStatus)
eg, egCtx := errgroup.WithContext(ctx)
var resp *client.SolveResponse
eg.Go(func() error {
defer close(statusChan)
var err error
resp, err = c.build(egCtx, ref, options, in, statusChan)
return err
})
eg.Go(func() error {
for s := range statusChan {
st := s
progress.Write(st)
}
return nil
})
return ref, resp, nil, eg.Wait()
}
func (c *Client) build(ctx context.Context, sessionID string, options *pb.BuildOptions, in io.ReadCloser, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) {
eg, egCtx := errgroup.WithContext(ctx)
done := make(chan struct{})
var resp *client.SolveResponse
eg.Go(func() error {
defer close(done)
pbResp, err := c.client().Build(egCtx, &pb.BuildRequest{
SessionID: sessionID,
Options: options,
})
if err != nil {
return err
}
resp = &client.SolveResponse{
ExporterResponse: pbResp.ExporterResponse,
}
return nil
})
eg.Go(func() error {
stream, err := c.client().Status(egCtx, &pb.StatusRequest{
SessionID: sessionID,
})
if err != nil {
return err
}
for {
resp, err := stream.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return errors.Wrap(err, "failed to receive status")
}
statusChan <- pb.FromControlStatus(resp)
}
})
if in != nil {
eg.Go(func() error {
stream, err := c.client().Input(egCtx)
if err != nil {
return err
}
if err := stream.Send(&pb.InputMessage{
Input: &pb.InputMessage_Init{
Init: &pb.InputInitMessage{
SessionID: sessionID,
},
},
}); err != nil {
return errors.Wrap(err, "failed to init input")
}
inReader, inWriter := io.Pipe()
eg2, _ := errgroup.WithContext(ctx)
eg2.Go(func() error {
<-done
return inWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(inWriter, in)
inWriter.Close()
}()
eg2.Go(func() error {
for {
buf := make([]byte, 32*1024)
n, err := inReader.Read(buf)
if err != nil {
if err == io.EOF {
break // break loop and send EOF
}
return err
} else if n > 0 {
if err := stream.Send(&pb.InputMessage{
Input: &pb.InputMessage_Data{
Data: &pb.DataMessage{
Data: buf[:n],
},
},
}); err != nil {
return err
}
}
}
return stream.Send(&pb.InputMessage{
Input: &pb.InputMessage_Data{
Data: &pb.DataMessage{
EOF: true,
},
},
})
})
return eg2.Wait()
})
}
return resp, eg.Wait()
}
func (c *Client) client() pb.ControllerClient {
return pb.NewControllerClient(c.conn)
}
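The Client above wraps the generated ControllerClient with dialing, status streaming, and stdin forwarding. A minimal sketch of connecting and checking the server version with only the exported methods shown above; the socket path is hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/buildx/controller/remote"
)

func main() {
	ctx := context.Background()
	// Hypothetical socket path; the real path lives under the buildx config
	// dir and embeds the build revision (see commands.go below). Dialing
	// blocks until the server socket is reachable.
	c, err := remote.NewClient(ctx, "/tmp/buildx.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	pkg, ver, rev, err := c.Version(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg, ver, rev)
}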


@@ -0,0 +1,335 @@
//go:build linux
package remote
import (
"context"
"fmt"
"io"
"net"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strconv"
"syscall"
"time"
"github.com/containerd/log"
"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
"github.com/docker/buildx/controller/control"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pelletier/go-toml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc"
)
const (
serveCommandName = "_INTERNAL_SERVE"
)
var (
defaultLogFilename = fmt.Sprintf("buildx.%s.log", version.Revision)
defaultSocketFilename = fmt.Sprintf("buildx.%s.sock", version.Revision)
defaultPIDFilename = fmt.Sprintf("buildx.%s.pid", version.Revision)
)
type serverConfig struct {
// Specify buildx server root
Root string `toml:"root"`
// LogLevel sets the logging level [trace, debug, info, warn, error, fatal, panic]
LogLevel string `toml:"log_level"`
// Specify file to output buildx server log
LogFile string `toml:"log_file"`
}
func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts control.ControlOptions, logger progress.SubLogger) (control.BuildxController, error) {
rootDir := opts.Root
if rootDir == "" {
rootDir = rootDataDir(dockerCli)
}
serverRoot := filepath.Join(rootDir, "shared")
// connect to buildx server if it is already running
ctx2, cancel := context.WithCancelCause(ctx)
ctx2, _ = context.WithTimeoutCause(ctx2, 1*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
c, err := newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
cancel(errors.WithStack(context.Canceled))
if err != nil {
if !errors.Is(err, context.DeadlineExceeded) {
return nil, errors.Wrap(err, "cannot connect to the buildx server")
}
} else {
return &buildxController{c, serverRoot}, nil
}
// start buildx server via subcommand
err = logger.Wrap("no buildx server found; launching...", func() error {
launchFlags := []string{}
if opts.ServerConfig != "" {
launchFlags = append(launchFlags, "--config", opts.ServerConfig)
}
logFile, err := getLogFilePath(dockerCli, opts.ServerConfig)
if err != nil {
return err
}
wait, err := launch(ctx, logFile, append([]string{serveCommandName}, launchFlags...)...)
if err != nil {
return err
}
go wait()
// wait for buildx server to be ready
ctx2, cancel = context.WithCancelCause(ctx)
ctx2, _ = context.WithTimeoutCause(ctx2, 10*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
cancel(errors.WithStack(context.Canceled))
if err != nil {
return errors.Wrap(err, "cannot connect to the buildx server")
}
return nil
})
if err != nil {
return nil, err
}
return &buildxController{c, serverRoot}, nil
}
func AddControllerCommands(cmd *cobra.Command, dockerCli command.Cli) {
cmd.AddCommand(
serveCmd(dockerCli),
)
}
func serveCmd(dockerCli command.Cli) *cobra.Command {
var serverConfigPath string
cmd := &cobra.Command{
Use: fmt.Sprintf("%s [OPTIONS]", serveCommandName),
Hidden: true,
RunE: func(cmd *cobra.Command, args []string) error {
// Parse config
config, err := getConfig(dockerCli, serverConfigPath)
if err != nil {
return err
}
if config.LogLevel == "" {
logrus.SetLevel(logrus.InfoLevel)
} else {
lvl, err := logrus.ParseLevel(config.LogLevel)
if err != nil {
return errors.Wrap(err, "failed to prepare logger")
}
logrus.SetLevel(lvl)
}
logrus.SetFormatter(&logrus.JSONFormatter{
TimestampFormat: log.RFC3339NanoFixed,
})
root, err := prepareRootDir(dockerCli, config)
if err != nil {
return err
}
pidF := filepath.Join(root, defaultPIDFilename)
if err := os.WriteFile(pidF, []byte(fmt.Sprintf("%d", os.Getpid())), 0600); err != nil {
return err
}
defer func() {
if err := os.Remove(pidF); err != nil {
logrus.Errorf("failed to clean up info file %q: %v", pidF, err)
}
}()
// prepare server
b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, *build.Inputs, error) {
return cbuild.RunBuild(ctx, dockerCli, options, stdin, progress, true)
})
defer b.Close()
// serve server
addr := filepath.Join(root, defaultSocketFilename)
if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { // avoid EADDRINUSE
return err
}
defer func() {
if err := os.Remove(addr); err != nil {
logrus.Errorf("failed to clean up socket %q: %v", addr, err)
}
}()
logrus.Infof("starting server at %q", addr)
l, err := net.Listen("unix", addr)
if err != nil {
return err
}
rpc := grpc.NewServer(
grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor),
grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
)
controllerapi.RegisterControllerServer(rpc, b)
doneCh := make(chan struct{})
errCh := make(chan error, 1)
go func() {
defer close(doneCh)
if err := rpc.Serve(l); err != nil {
errCh <- errors.Wrapf(err, "error on serving via socket %q", addr)
}
}()
var s os.Signal
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, syscall.SIGINT)
signal.Notify(sigCh, syscall.SIGTERM)
select {
case err := <-errCh:
logrus.Errorf("got error %s, exiting", err)
return err
case s = <-sigCh:
logrus.Infof("got signal %s, exiting", s)
return nil
case <-doneCh:
logrus.Infof("rpc server done, exiting")
return nil
}
},
}
flags := cmd.Flags()
flags.StringVar(&serverConfigPath, "config", "", "Specify buildx server config file")
return cmd
}
func getLogFilePath(dockerCli command.Cli, configPath string) (string, error) {
config, err := getConfig(dockerCli, configPath)
if err != nil {
return "", err
}
if config.LogFile == "" {
root, err := prepareRootDir(dockerCli, config)
if err != nil {
return "", err
}
return filepath.Join(root, defaultLogFilename), nil
}
return config.LogFile, nil
}
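// getConfig loads the buildx server configuration (TOML) from configPath; when no path is
// given it falls back to <data-root>/config.toml and a missing default config is not an error.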
func getConfig(dockerCli command.Cli, configPath string) (*serverConfig, error) {
var defaultConfigPath bool
if configPath == "" {
defaultRoot := rootDataDir(dockerCli)
configPath = filepath.Join(defaultRoot, "config.toml")
defaultConfigPath = true
}
var config serverConfig
tree, err := toml.LoadFile(configPath)
if err != nil && !(os.IsNotExist(err) && defaultConfigPath) {
return nil, errors.Wrapf(err, "failed to read config %q", configPath)
} else if err == nil {
if err := tree.Unmarshal(&config); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal config %q", configPath)
}
}
return &config, nil
}
func prepareRootDir(dockerCli command.Cli, config *serverConfig) (string, error) {
rootDir := config.Root
if rootDir == "" {
rootDir = rootDataDir(dockerCli)
}
if rootDir == "" {
return "", errors.New("buildx root dir must be determined")
}
if err := os.MkdirAll(rootDir, 0700); err != nil {
return "", err
}
serverRoot := filepath.Join(rootDir, "shared")
if err := os.MkdirAll(serverRoot, 0700); err != nil {
return "", err
}
return serverRoot, nil
}
func rootDataDir(dockerCli command.Cli) string {
return filepath.Join(confutil.NewConfig(dockerCli).Dir(), "controller")
}
func newBuildxClientAndCheck(ctx context.Context, addr string) (*Client, error) {
c, err := NewClient(ctx, addr)
if err != nil {
return nil, err
}
p, v, r, err := c.Version(ctx)
if err != nil {
return nil, err
}
logrus.Debugf("connected to server (\"%v %v %v\")", p, v, r)
if !(p == version.Package && v == version.Version && r == version.Revision) {
return nil, errors.Errorf("version mismatch (client: \"%v %v %v\", server: \"%v %v %v\")", version.Package, version.Version, version.Revision, p, v, r)
}
return c, nil
}
type buildxController struct {
*Client
serverRoot string
}
func (c *buildxController) Kill(ctx context.Context) error {
pidB, err := os.ReadFile(filepath.Join(c.serverRoot, defaultPIDFilename))
if err != nil {
return err
}
pid, err := strconv.ParseInt(string(pidB), 10, 64)
if err != nil {
return err
}
if pid <= 0 {
return errors.New("no PID is recorded for buildx server")
}
p, err := os.FindProcess(int(pid))
if err != nil {
return err
}
if err := p.Signal(syscall.SIGINT); err != nil {
return err
}
// TODO: Should we send SIGKILL if process doesn't finish?
return nil
}
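// launch re-executes the current binary with the given args as a detached process (its own
// session, working directory /), appending combined output to logFile, and returns the
// command's Wait function.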
func launch(ctx context.Context, logFile string, args ...string) (func() error, error) {
// set absolute path of binary, since we set the working directory to the root
pathname, err := os.Executable()
if err != nil {
return nil, err
}
bCmd := exec.CommandContext(ctx, pathname, args...)
if logFile != "" {
f, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
defer f.Close()
bCmd.Stdout = f
bCmd.Stderr = f
}
bCmd.Stdin = nil
bCmd.Dir = "/"
bCmd.SysProcAttr = &syscall.SysProcAttr{
Setsid: true,
}
if err := bCmd.Start(); err != nil {
return nil, err
}
return bCmd.Wait, nil
}


@@ -0,0 +1,19 @@
//go:build !linux
package remote
import (
"context"
"github.com/docker/buildx/controller/control"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts control.ControlOptions, logger progress.SubLogger) (control.BuildxController, error) {
return nil, errors.New("remote buildx unsupported")
}
func AddControllerCommands(cmd *cobra.Command, dockerCli command.Cli) {}

controller/remote/io.go

@@ -0,0 +1,430 @@
package remote
import (
"context"
"io"
"syscall"
"time"
"github.com/docker/buildx/controller/pb"
"github.com/moby/sys/signal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type msgStream interface {
Send(*pb.Message) error
Recv() (*pb.Message, error)
}
type ioServerConfig struct {
stdin io.WriteCloser
stdout, stderr io.ReadCloser
// signalFn is a callback invoked when a signal message is received from the client.
signalFn func(context.Context, syscall.Signal) error
// resizeFn is a callback invoked when a resize message is received from the client.
resizeFn func(context.Context, winSize) error
}
func serveIO(attachCtx context.Context, srv msgStream, initFn func(*pb.InitMessage) error, ioConfig *ioServerConfig) (err error) {
stdin, stdout, stderr := ioConfig.stdin, ioConfig.stdout, ioConfig.stderr
stream := &debugStream{srv, "server=" + time.Now().String()}
eg, ctx := errgroup.WithContext(attachCtx)
done := make(chan struct{})
msg, err := receive(ctx, stream)
if err != nil {
return err
}
init := msg.GetInit()
if init == nil {
return errors.Errorf("unexpected message: %T; wanted init", msg.GetInput())
}
sessionID := init.SessionID
if sessionID == "" {
return errors.New("no session ID is provided")
}
if err := initFn(init); err != nil {
return errors.Wrap(err, "failed to initialize IO server")
}
if stdout != nil {
stdoutReader, stdoutWriter := io.Pipe()
eg.Go(func() error {
<-done
return stdoutWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(stdoutWriter, stdout)
stdoutWriter.Close()
}()
eg.Go(func() error {
defer stdoutReader.Close()
return copyToStream(1, stream, stdoutReader)
})
}
if stderr != nil {
stderrReader, stderrWriter := io.Pipe()
eg.Go(func() error {
<-done
return stderrWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(stderrWriter, stderr)
stderrWriter.Close()
}()
eg.Go(func() error {
defer stderrReader.Close()
return copyToStream(2, stream, stderrReader)
})
}
msgCh := make(chan *pb.Message)
eg.Go(func() error {
defer close(msgCh)
for {
msg, err := receive(ctx, stream)
if err != nil {
return err
}
select {
case msgCh <- msg:
case <-done:
return nil
case <-ctx.Done():
return nil
}
}
})
eg.Go(func() error {
defer close(done)
for {
var msg *pb.Message
select {
case msg = <-msgCh:
case <-ctx.Done():
return nil
}
if msg == nil {
return nil
}
if file := msg.GetFile(); file != nil {
if file.Fd != 0 {
return errors.Errorf("unexpected fd: %v", file.Fd)
}
if stdin == nil {
continue // no stdin destination is specified so ignore the data
}
if len(file.Data) > 0 {
_, err := stdin.Write(file.Data)
if err != nil {
return err
}
}
if file.EOF {
stdin.Close()
}
} else if resize := msg.GetResize(); resize != nil {
if ioConfig.resizeFn != nil {
ioConfig.resizeFn(ctx, winSize{
cols: resize.Cols,
rows: resize.Rows,
})
}
} else if sig := msg.GetSignal(); sig != nil {
if ioConfig.signalFn != nil {
syscallSignal, ok := signal.SignalMap[sig.Name]
if !ok {
continue
}
ioConfig.signalFn(ctx, syscallSignal)
}
} else {
return errors.Errorf("unexpected message: %T", msg.GetInput())
}
}
})
return eg.Wait()
}
type ioAttachConfig struct {
stdin io.ReadCloser
stdout, stderr io.WriteCloser
signal <-chan syscall.Signal
resize <-chan winSize
}
type winSize struct {
rows uint32
cols uint32
}
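// attachIO is the client-side counterpart of serveIO: it sends the init message, forwards
// local stdin, signal and resize events to the stream, and writes fd data received from the
// server to the configured stdout/stderr.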
func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage, cfg ioAttachConfig) (retErr error) {
eg, ctx := errgroup.WithContext(ctx)
done := make(chan struct{})
if err := stream.Send(&pb.Message{
Input: &pb.Message_Init{
Init: initMessage,
},
}); err != nil {
return errors.Wrap(err, "failed to init")
}
if cfg.stdin != nil {
stdinReader, stdinWriter := io.Pipe()
eg.Go(func() error {
<-done
return stdinWriter.Close()
})
go func() {
// do not wait for read completion but return here and let the caller send EOF
// this allows us to return on ctx.Done() without being blocked by this reader.
io.Copy(stdinWriter, cfg.stdin)
stdinWriter.Close()
}()
eg.Go(func() error {
defer stdinReader.Close()
return copyToStream(0, stream, stdinReader)
})
}
if cfg.signal != nil {
eg.Go(func() error {
names := signalNames()
for {
var sig syscall.Signal
select {
case sig = <-cfg.signal:
case <-done:
return nil
case <-ctx.Done():
return nil
}
name := names[sig]
if name == "" {
continue
}
if err := stream.Send(&pb.Message{
Input: &pb.Message_Signal{
Signal: &pb.SignalMessage{
Name: name,
},
},
}); err != nil {
return errors.Wrap(err, "failed to send signal")
}
}
})
}
if cfg.resize != nil {
eg.Go(func() error {
for {
var win winSize
select {
case win = <-cfg.resize:
case <-done:
return nil
case <-ctx.Done():
return nil
}
if err := stream.Send(&pb.Message{
Input: &pb.Message_Resize{
Resize: &pb.ResizeMessage{
Rows: win.rows,
Cols: win.cols,
},
},
}); err != nil {
return errors.Wrap(err, "failed to send resize")
}
}
})
}
msgCh := make(chan *pb.Message)
eg.Go(func() error {
defer close(msgCh)
for {
msg, err := receive(ctx, stream)
if err != nil {
return err
}
select {
case msgCh <- msg:
case <-done:
return nil
case <-ctx.Done():
return nil
}
}
})
eg.Go(func() error {
eofs := make(map[uint32]struct{})
defer close(done)
for {
var msg *pb.Message
select {
case msg = <-msgCh:
case <-ctx.Done():
return nil
}
if msg == nil {
return nil
}
if file := msg.GetFile(); file != nil {
if _, ok := eofs[file.Fd]; ok {
continue
}
var out io.WriteCloser
switch file.Fd {
case 1:
out = cfg.stdout
case 2:
out = cfg.stderr
default:
return errors.Errorf("unsupported fd %d", file.Fd)
}
if out == nil {
logrus.Warnf("attachIO: no writer for fd %d", file.Fd)
continue
}
if len(file.Data) > 0 {
if _, err := out.Write(file.Data); err != nil {
return err
}
}
if file.EOF {
eofs[file.Fd] = struct{}{}
}
} else {
return errors.Errorf("unexpected message: %T", msg.GetInput())
}
}
})
return eg.Wait()
}
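// receive reads a single message from the stream in a separate goroutine so the wait can be
// interrupted via ctx; an io.EOF from Recv is discarded, in which case the call returns only
// once ctx is done.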
func receive(ctx context.Context, stream msgStream) (*pb.Message, error) {
msgCh := make(chan *pb.Message)
errCh := make(chan error)
go func() {
msg, err := stream.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
return
}
errCh <- err
return
}
msgCh <- msg
}()
select {
case msg := <-msgCh:
return msg, nil
case err := <-errCh:
return nil, err
case <-ctx.Done():
return nil, context.Cause(ctx)
}
}
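// copyToStream forwards data read from r to the stream as FdMessages for the given fd and
// sends a final EOF message once the reader is drained.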
func copyToStream(fd uint32, snd msgStream, r io.Reader) error {
for {
buf := make([]byte, 32*1024)
n, err := r.Read(buf)
if err != nil {
if err == io.EOF {
break // break loop and send EOF
}
return err
} else if n > 0 {
if err := snd.Send(&pb.Message{
Input: &pb.Message_File{
File: &pb.FdMessage{
Fd: fd,
Data: buf[:n],
},
},
}); err != nil {
return err
}
}
}
return snd.Send(&pb.Message{
Input: &pb.Message_File{
File: &pb.FdMessage{
Fd: fd,
EOF: true,
},
},
})
}
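// signalNames inverts moby's signal map so syscall signal values can be translated back to
// their names.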
func signalNames() map[syscall.Signal]string {
m := make(map[syscall.Signal]string, len(signal.SignalMap))
for name, value := range signal.SignalMap {
m[value] = name
}
return m
}
type debugStream struct {
msgStream
prefix string
}
func (s *debugStream) Send(msg *pb.Message) error {
switch m := msg.GetInput().(type) {
case *pb.Message_File:
if m.File.EOF {
logrus.Debugf("|---> File Message (sender:%v) fd=%d, EOF", s.prefix, m.File.Fd)
} else {
logrus.Debugf("|---> File Message (sender:%v) fd=%d, %d bytes", s.prefix, m.File.Fd, len(m.File.Data))
}
case *pb.Message_Resize:
logrus.Debugf("|---> Resize Message (sender:%v): %+v", s.prefix, m.Resize)
case *pb.Message_Signal:
logrus.Debugf("|---> Signal Message (sender:%v): %s", s.prefix, m.Signal.Name)
}
return s.msgStream.Send(msg)
}
func (s *debugStream) Recv() (*pb.Message, error) {
msg, err := s.msgStream.Recv()
if err != nil {
return nil, err
}
switch m := msg.GetInput().(type) {
case *pb.Message_File:
if m.File.EOF {
logrus.Debugf("|<--- File Message (receiver:%v) fd=%d, EOF", s.prefix, m.File.Fd)
} else {
logrus.Debugf("|<--- File Message (receiver:%v) fd=%d, %d bytes", s.prefix, m.File.Fd, len(m.File.Data))
}
case *pb.Message_Resize:
logrus.Debugf("|<--- Resize Message (receiver:%v): %+v", s.prefix, m.Resize)
case *pb.Message_Signal:
logrus.Debugf("|<--- Signal Message (receiver:%v): %s", s.prefix, m.Signal.Name)
}
return msg, nil
}

controller/remote/server.go

@@ -0,0 +1,445 @@
package remote
import (
"context"
"io"
"sync"
"sync/atomic"
"time"
"github.com/docker/buildx/build"
controllererrors "github.com/docker/buildx/controller/errdefs"
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/desktop"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, inp *build.Inputs, err error)
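// NewServer returns a controller gRPC server that delegates build execution to buildFunc.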
func NewServer(buildFunc BuildFunc) *Server {
return &Server{
buildFunc: buildFunc,
}
}
type Server struct {
buildFunc BuildFunc
session map[string]*session
sessionMu sync.Mutex
}
type session struct {
buildOnGoing atomic.Bool
statusChan chan *pb.StatusResponse
cancelBuild func(error)
buildOptions *pb.BuildOptions
inputPipe *io.PipeWriter
result *build.ResultHandle
processes *processes.Manager
}
func (s *session) cancelRunningProcesses() {
s.processes.CancelRunningProcesses()
}
func (m *Server) ListProcesses(ctx context.Context, req *pb.ListProcessesRequest) (res *pb.ListProcessesResponse, err error) {
m.sessionMu.Lock()
defer m.sessionMu.Unlock()
s, ok := m.session[req.SessionID]
if !ok {
return nil, errors.Errorf("unknown session ID %q", req.SessionID)
}
res = new(pb.ListProcessesResponse)
res.Infos = append(res.Infos, s.processes.ListProcesses()...)
return res, nil
}
func (m *Server) DisconnectProcess(ctx context.Context, req *pb.DisconnectProcessRequest) (res *pb.DisconnectProcessResponse, err error) {
m.sessionMu.Lock()
defer m.sessionMu.Unlock()
s, ok := m.session[req.SessionID]
if !ok {
return nil, errors.Errorf("unknown session ID %q", req.SessionID)
}
return res, s.processes.DeleteProcess(req.ProcessID)
}
func (m *Server) Info(ctx context.Context, req *pb.InfoRequest) (res *pb.InfoResponse, err error) {
return &pb.InfoResponse{
BuildxVersion: &pb.BuildxVersion{
Package: version.Package,
Version: version.Version,
Revision: version.Revision,
},
}, nil
}
func (m *Server) List(ctx context.Context, req *pb.ListRequest) (res *pb.ListResponse, err error) {
keys := make(map[string]struct{})
m.sessionMu.Lock()
for k := range m.session {
keys[k] = struct{}{}
}
m.sessionMu.Unlock()
var keysL []string
for k := range keys {
keysL = append(keysL, k)
}
return &pb.ListResponse{
Keys: keysL,
}, nil
}
func (m *Server) Disconnect(ctx context.Context, req *pb.DisconnectRequest) (res *pb.DisconnectResponse, err error) {
sessionID := req.SessionID
if sessionID == "" {
return nil, errors.New("disconnect: empty session ID")
}
m.sessionMu.Lock()
if s, ok := m.session[sessionID]; ok {
if s.cancelBuild != nil {
s.cancelBuild(errors.WithStack(context.Canceled))
}
s.cancelRunningProcesses()
if s.result != nil {
s.result.Done()
}
}
delete(m.session, sessionID)
m.sessionMu.Unlock()
return &pb.DisconnectResponse{}, nil
}
func (m *Server) Close() error {
m.sessionMu.Lock()
for k := range m.session {
if s, ok := m.session[k]; ok {
if s.cancelBuild != nil {
s.cancelBuild(errors.WithStack(context.Canceled))
}
s.cancelRunningProcesses()
}
}
m.sessionMu.Unlock()
return nil
}
func (m *Server) Inspect(ctx context.Context, req *pb.InspectRequest) (*pb.InspectResponse, error) {
sessionID := req.SessionID
if sessionID == "" {
return nil, errors.New("inspect: empty session ID")
}
var bo *pb.BuildOptions
m.sessionMu.Lock()
if s, ok := m.session[sessionID]; ok {
bo = s.buildOptions
} else {
m.sessionMu.Unlock()
return nil, errors.Errorf("inspect: unknown key %v", sessionID)
}
m.sessionMu.Unlock()
return &pb.InspectResponse{Options: bo}, nil
}
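// Build runs buildFunc for the given session, streaming progress through the session's status
// channel and retaining the result handle for later Invoke calls; only one build may be in
// flight per session.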
func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResponse, error) {
sessionID := req.SessionID
if sessionID == "" {
return nil, errors.New("build: empty session ID")
}
// Prepare status channel and session
m.sessionMu.Lock()
if m.session == nil {
m.session = make(map[string]*session)
}
s, ok := m.session[sessionID]
if ok {
if !s.buildOnGoing.CompareAndSwap(false, true) {
m.sessionMu.Unlock()
return &pb.BuildResponse{}, errors.New("build ongoing")
}
s.cancelRunningProcesses()
s.result = nil
} else {
s = &session{}
s.buildOnGoing.Store(true)
}
s.processes = processes.NewManager()
statusChan := make(chan *pb.StatusResponse)
s.statusChan = statusChan
inR, inW := io.Pipe()
defer inR.Close()
s.inputPipe = inW
m.session[sessionID] = s
m.sessionMu.Unlock()
defer func() {
close(statusChan)
m.sessionMu.Lock()
s, ok := m.session[sessionID]
if ok {
s.statusChan = nil
s.buildOnGoing.Store(false)
}
m.sessionMu.Unlock()
}()
pw := pb.NewProgressWriter(statusChan)
// Build the specified request
ctx, cancel := context.WithCancelCause(ctx)
defer func() { cancel(errors.WithStack(context.Canceled)) }()
resp, res, _, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
m.sessionMu.Lock()
if s, ok := m.session[sessionID]; ok {
// NOTE: buildFunc can return *build.ResultHandle even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
if res != nil {
s.result = res
s.cancelBuild = cancel
s.buildOptions = req.Options
m.session[sessionID] = s
if buildErr != nil {
var ref string
var ebr *desktop.ErrorWithBuildRef
if errors.As(buildErr, &ebr) {
ref = ebr.Ref
}
buildErr = controllererrors.WrapBuild(buildErr, sessionID, ref)
}
}
} else {
m.sessionMu.Unlock()
return nil, errors.Errorf("build: unknown session ID %v", sessionID)
}
m.sessionMu.Unlock()
if buildErr != nil {
return nil, buildErr
}
if resp == nil {
resp = &client.SolveResponse{}
}
return &pb.BuildResponse{
ExporterResponse: resp.ExporterResponse,
}, nil
}
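// Status streams build progress for the given session to the client, polling until Build has
// prepared the session's status channel.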
func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer) error {
sessionID := req.SessionID
if sessionID == "" {
return errors.New("status: empty session ID")
}
// Wait and get status channel prepared by Build()
var statusChan <-chan *pb.StatusResponse
for {
// TODO: timeout?
m.sessionMu.Lock()
if _, ok := m.session[sessionID]; !ok || m.session[sessionID].statusChan == nil {
m.sessionMu.Unlock()
time.Sleep(time.Millisecond) // TODO: wait Build without busy loop and make it cancellable
continue
}
statusChan = m.session[sessionID].statusChan
m.sessionMu.Unlock()
break
}
// forward status
for ss := range statusChan {
if ss == nil {
break
}
if err := stream.Send(ss); err != nil {
return err
}
}
return nil
}
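// Input forwards the client's input stream (for example a build context sent over stdin) into
// the pipe that Build reads from.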
func (m *Server) Input(stream pb.Controller_InputServer) (err error) {
// Get the target ref from init message
msg, err := stream.Recv()
if err != nil {
if !errors.Is(err, io.EOF) {
return err
}
return nil
}
init := msg.GetInit()
if init == nil {
return errors.Errorf("unexpected message: %T; wanted init", msg.GetInit())
}
sessionID := init.SessionID
if sessionID == "" {
return errors.New("input: no session ID is provided")
}
// Wait and get input stream pipe prepared by Build()
var inputPipeW *io.PipeWriter
for {
// TODO: timeout?
m.sessionMu.Lock()
if _, ok := m.session[sessionID]; !ok || m.session[sessionID].inputPipe == nil {
m.sessionMu.Unlock()
time.Sleep(time.Millisecond) // TODO: wait Build without busy loop and make it cancellable
continue
}
inputPipeW = m.session[sessionID].inputPipe
m.sessionMu.Unlock()
break
}
// Forward input stream
eg, ctx := errgroup.WithContext(context.TODO())
done := make(chan struct{})
msgCh := make(chan *pb.InputMessage)
eg.Go(func() error {
defer close(msgCh)
for {
msg, err := stream.Recv()
if err != nil {
if !errors.Is(err, io.EOF) {
return err
}
return nil
}
select {
case msgCh <- msg:
case <-done:
return nil
case <-ctx.Done():
return nil
}
}
})
eg.Go(func() (retErr error) {
defer close(done)
defer func() {
if retErr != nil {
inputPipeW.CloseWithError(retErr)
return
}
inputPipeW.Close()
}()
for {
var msg *pb.InputMessage
select {
case msg = <-msgCh:
case <-ctx.Done():
return context.Cause(ctx)
}
if msg == nil {
return nil
}
if data := msg.GetData(); data != nil {
if len(data.Data) > 0 {
_, err := inputPipeW.Write(data.Data)
if err != nil {
return err
}
}
if data.EOF {
return nil
}
}
}
})
return eg.Wait()
}
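// Invoke attaches the client's IO stream to a process in the session's container, starting a
// new process with the provided invoke config if the given process ID is not yet known.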
func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
containerIn, containerOut := ioset.Pipe()
defer func() { containerOut.Close(); containerIn.Close() }()
initDoneCh := make(chan *processes.Process)
initErrCh := make(chan error)
eg, egCtx := errgroup.WithContext(context.TODO())
srvIOCtx, srvIOCancel := context.WithCancelCause(egCtx)
eg.Go(func() error {
defer srvIOCancel(errors.WithStack(context.Canceled))
return serveIO(srvIOCtx, srv, func(initMessage *pb.InitMessage) (retErr error) {
defer func() {
if retErr != nil {
initErrCh <- retErr
}
}()
sessionID := initMessage.SessionID
cfg := initMessage.InvokeConfig
m.sessionMu.Lock()
s, ok := m.session[sessionID]
if !ok {
m.sessionMu.Unlock()
return errors.Errorf("invoke: unknown session ID %v", sessionID)
}
m.sessionMu.Unlock()
pid := initMessage.ProcessID
if pid == "" {
return errors.Errorf("invoke: specify process ID")
}
proc, ok := s.processes.Get(pid)
if !ok {
// Start a new process.
if cfg == nil {
return errors.New("no container config is provided")
}
var err error
proc, err = s.processes.StartProcess(pid, s.result, cfg)
if err != nil {
return err
}
}
// Attach containerIn to this process
proc.ForwardIO(&containerIn, srvIOCancel)
initDoneCh <- proc
return nil
}, &ioServerConfig{
stdin: containerOut.Stdin,
stdout: containerOut.Stdout,
stderr: containerOut.Stderr,
// TODO: signal, resize
})
})
eg.Go(func() (rErr error) {
defer srvIOCancel(errors.WithStack(context.Canceled))
// Wait for init done
var proc *processes.Process
select {
case p := <-initDoneCh:
proc = p
case err := <-initErrCh:
return err
case <-egCtx.Done():
return egCtx.Err()
}
// Wait for IO done
select {
case <-srvIOCtx.Done():
return srvIOCtx.Err()
case err := <-proc.Done():
return err
case <-egCtx.Done():
return egCtx.Err()
}
})
return eg.Wait()
}


@@ -48,8 +48,6 @@ target "lint" {
"linux/s390x",
"linux/ppc64le",
"linux/riscv64",
"netbsd/amd64",
"netbsd/arm64",
"openbsd/amd64",
"openbsd/arm64",
"windows/amd64",
@@ -169,8 +167,6 @@ target "binaries-cross" {
"linux/ppc64le",
"linux/riscv64",
"linux/s390x",
"netbsd/amd64",
"netbsd/arm64",
"openbsd/amd64",
"openbsd/arm64",
"windows/amd64",


@@ -347,22 +347,18 @@ is defined in https://golang.org/pkg/path/#Match.
```console
$ docker buildx bake --set target.args.mybuildarg=value
$ docker buildx bake --set target.platform=linux/arm64
$ docker buildx bake --set foo*.args.mybuildarg=value # overrides build arg for all targets starting with 'foo'
$ docker buildx bake --set *.platform=linux/arm64 # overrides platform for all targets
$ docker buildx bake --set foo*.no-cache # bypass caching only for targets starting with 'foo'
$ docker buildx bake --set target.platform+=linux/arm64 # appends 'linux/arm64' to the platform list
$ docker buildx bake --set foo*.args.mybuildarg=value # overrides build arg for all targets starting with 'foo'
$ docker buildx bake --set *.platform=linux/arm64 # overrides platform for all targets
$ docker buildx bake --set foo*.no-cache # bypass caching only for targets starting with 'foo'
```
You can override the following fields:
* `annotations`
* `attest`
* `args`
* `cache-from`
* `cache-to`
* `context`
* `dockerfile`
* `entitlements`
* `labels`
* `load`
* `no-cache`
@@ -375,20 +371,3 @@ You can override the following fields:
* `ssh`
* `tags`
* `target`
You can append using the `+=` operator for the following fields (see the example below):
* `annotations`
* `attest`
* `cache-from`
* `cache-to`
* `entitlements`
* `no-cache-filter`
* `output`
* `platform`
* `secrets`
* `ssh`
* `tags`
> [!NOTE]
> ¹ These fields already append by default.
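As a minimal illustration (hypothetical target name `app` and tag), appending instead of overriding with `--set` could look like:

```console
$ docker buildx bake --set app.platform+=linux/arm64 --set app.tags+=myorg/app:edge
```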


@@ -28,6 +28,7 @@ Start a build
| [`--cgroup-parent`](#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build |
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
| [`-f`](#file), [`--file`](#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
| `--iidfile` | `string` | | Write the image ID to a file |
| `--label` | `stringArray` | | Set metadata for an image |
@@ -43,8 +44,10 @@ Start a build
| `--pull` | `bool` | | Always attempt to pull all referenced images |
| [`--push`](#push) | `bool` | | Shorthand for `--output=type=registry` |
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
| [`--sbom`](#sbom) | `string` | | Shorthand for `--attest=type=sbom` |
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
| [`--shm-size`](#shm-size) | `bytes` | `0` | Shared memory size for build containers |
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
@@ -941,7 +944,7 @@ $ docker buildx build --secret [type=file,]id=<ID>[,src=<FILEPATH>] .
###### `type=file` usage
In the following example, `type=file` is automatically detected because no
environment variable matching `aws` (the ID) is set.
environment variable mathing `aws` (the ID) is set.
```console
$ docker buildx build --secret id=aws,src=$HOME/.aws/credentials .


@@ -12,13 +12,16 @@ Start debugger (EXPERIMENTAL)
### Options
| Name | Type | Default | Description |
|:----------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--invoke` | `string` | | Launch a monitor with executing specified command (EXPERIMENTAL) |
| `--on` | `string` | `error` | When to launch the monitor ([always, error]) (EXPERIMENTAL) |
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`) for the monitor. Use plain to show container output |
| Name | Type | Default | Description |
|:------------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--detach` | `bool` | `true` | Detach buildx server for the monitor (supported only on linux) (EXPERIMENTAL) |
| `--invoke` | `string` | | Launch a monitor with executing specified command (EXPERIMENTAL) |
| `--on` | `string` | `error` | When to launch the monitor ([always, error]) (EXPERIMENTAL) |
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`) for the monitor. Use plain to show container output |
| `--root` | `string` | | Specify root directory of server to connect for the monitor (EXPERIMENTAL) |
| `--server-config` | `string` | | Specify buildx server config file for the monitor (used only when launching new server) (EXPERIMENTAL) |
<!---MARKER_GEN_END-->


@@ -24,6 +24,7 @@ Start a build
| `--cgroup-parent` | `string` | | Set the parent cgroup for the `RUN` instructions during build |
| `--check` | `bool` | | Shorthand for `--call=check` |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) |
| `-f`, `--file` | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
| `--iidfile` | `string` | | Write the image ID to a file |
| `--label` | `stringArray` | | Set metadata for an image |
@@ -39,8 +40,10 @@ Start a build
| `--pull` | `bool` | | Always attempt to pull all referenced images |
| `--push` | `bool` | | Shorthand for `--output=type=registry` |
| `-q`, `--quiet` | `bool` | | Suppress the build output and print image ID on success |
| `--root` | `string` | | Specify root directory of server to connect (EXPERIMENTAL) |
| `--sbom` | `string` | | Shorthand for `--attest=type=sbom` |
| `--secret` | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
| `--server-config` | `string` | | Specify buildx server config file (used only when launching new server) (EXPERIMENTAL) |
| `--shm-size` | `bytes` | `0` | Shared memory size for build containers |
| `--ssh` | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
| `-t`, `--tag` | `stringArray` | | Name and optionally a tag (format: `name:tag`) |


@@ -7,8 +7,6 @@ Commands to work on build records
| Name | Description |
|:---------------------------------------|:-----------------------------------------------|
| [`export`](buildx_history_export.md) | Export a build into Docker Desktop bundle |
| [`import`](buildx_history_import.md) | Import a build into Docker Desktop |
| [`inspect`](buildx_history_inspect.md) | Inspect a build |
| [`logs`](buildx_history_logs.md) | Print the logs of a build |
| [`ls`](buildx_history_ls.md) | List build records |
@@ -27,32 +25,3 @@ Commands to work on build records
<!---MARKER_GEN_END-->
### Build references
Most `buildx history` subcommands accept a build reference to identify which
build to act on. You can specify the build in two ways:
- By build ID, fetched by `docker buildx history ls`:
```console
docker buildx history export qu2gsuo8ejqrwdfii23xkkckt --output build.dockerbuild
```
- By relative offset, to refer to recent builds:
```console
docker buildx history export ^1 --output build.dockerbuild
```
- `^0` or no reference targets the most recent build
- `^1` refers to the build before the most recent
- `^2` refers to two builds back, and so on
Offset references are supported in the following `buildx history` commands:
- `logs`
- `inspect`
- `open`
- `trace`
- `export`
- `rm`


@@ -1,81 +0,0 @@
# docker buildx history export
<!---MARKER_GEN_START-->
Export a build into Docker Desktop bundle
### Options
| Name | Type | Default | Description |
|:---------------------------------------|:---------|:--------|:-----------------------------------------|
| [`--all`](#all) | `bool` | | Export all records for the builder |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`-D`](#debug), [`--debug`](#debug) | `bool` | | Enable debug logging |
| [`-o`](#output), [`--output`](#output) | `string` | | Output file path |
<!---MARKER_GEN_END-->
## Description
Export one or more build records to `.dockerbuild` archive files. These archives
contain metadata, logs, and build outputs, and can be imported into Docker
Desktop or shared across environments.
## Examples
### <a name="output"></a> Export a single build to a custom file (--output)
```console
docker buildx history export qu2gsuo8ejqrwdfii23xkkckt --output mybuild.dockerbuild
```
You can find build IDs by running:
```console
docker buildx history ls
```
### <a name="o"></a> Export multiple builds to individual `.dockerbuild` files (-o)
To export two builds to separate files:
```console
# Using build IDs
docker buildx history export qu2gsuo8ejqrwdfii23xkkckt qsiifiuf1ad9pa9qvppc0z1l3 -o multi.dockerbuild
# Or using relative offsets
docker buildx history export ^1 ^2 -o multi.dockerbuild
```
Or use shell redirection:
```console
docker buildx history export ^1 > mybuild.dockerbuild
docker buildx history export ^2 > backend-build.dockerbuild
```
### <a name="all"></a> Export all build records to a file (--all)
Use the `--all` flag and redirect the output:
```console
docker buildx history export --all > all-builds.dockerbuild
```
Or use the `--output` flag:
```console
docker buildx history export --all -o all-builds.dockerbuild
```
### <a name="builder"></a> Use a specific builder instance (--builder)
```console
docker buildx history export --builder builder0 ^1 -o builder0-build.dockerbuild
```
### <a name="debug"></a> Enable debug logging (--debug)
```console
docker buildx history export --debug qu2gsuo8ejqrwdfii23xkkckt -o debug-build.dockerbuild
```


@@ -1,47 +0,0 @@
# docker buildx history import
<!---MARKER_GEN_START-->
Import a build into Docker Desktop
### Options
| Name | Type | Default | Description |
|:---------------------------------|:--------------|:--------|:-----------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Import from a file path |
<!---MARKER_GEN_END-->
## Description
Import a build record from a `.dockerbuild` archive into Docker Desktop. This
lets you view, inspect, and analyze builds created in other environments or CI
pipelines.
## Examples
### Import a `.dockerbuild` archive from standard input
```console
docker buildx history import < mybuild.dockerbuild
```
### <a name="file"></a> Import a build archive from a file (--file)
```console
docker buildx history import --file ./artifacts/backend-build.dockerbuild
```
### Open a build manually
By default, the `import` command automatically opens the imported build in Docker
Desktop. You don't need to run `open` unless you're opening a specific build
or re-opening it later.
If you've imported multiple builds, you can open one manually:
```console
docker buildx history open ci-build
```


@@ -21,61 +21,13 @@ Inspect a build
<!---MARKER_GEN_END-->
## Description
Inspect a build record to view metadata such as duration, status, build inputs,
platforms, outputs, and attached artifacts. You can also use flags to extract
provenance, SBOMs, or other detailed information.
## Examples
### Inspect the most recent build
```console
$ docker buildx history inspect
Name: buildx (binaries)
Context: .
Dockerfile: Dockerfile
VCS Repository: https://github.com/crazy-max/buildx.git
VCS Revision: f15eaa1ee324ffbbab29605600d27a84cab86361
Target: binaries
Platforms: linux/amd64
Keep Git Dir: true
Started: 2025-02-07 11:56:24
Duration: 1m 1s
Build Steps: 16/16 (25% cached)
Image Resolve Mode: local
Materials:
URI DIGEST
pkg:docker/docker/dockerfile@1 sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25
pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64 sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037
pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64 sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3
Attachments:
DIGEST PLATFORM TYPE
sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3 https://slsa.dev/provenance/v0.2
```
### Inspect a specific build
```console
# Using a build ID
docker buildx history inspect qu2gsuo8ejqrwdfii23xkkckt
# Or using a relative offset
docker buildx history inspect ^1
```
### <a name="format"></a> Format the output (--format)
The `--format` option pretty-prints the output as `pretty` (default) or `json`, or
formats it using a Go template.
**Pretty output**
```console
$ docker buildx history inspect
Name: buildx (binaries)
@@ -105,7 +57,6 @@ sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3
Print build logs: docker buildx history logs g9808bwrjrlkbhdamxklx660b
```
**JSON output**
```console
$ docker buildx history inspect --format json
@@ -160,8 +111,6 @@ $ docker buildx history inspect --format json
}
```
**Go template output**
```console
$ docker buildx history inspect --format "{{.Name}}: {{.VCSRepository}} ({{.VCSRevision}})"
buildx (binaries): https://github.com/crazy-max/buildx.git (f15eaa1ee324ffbbab29605600d27a84cab86361)


@@ -5,78 +5,13 @@ Inspect a build attachment
### Options
| Name | Type | Default | Description |
|:------------------|:---------|:--------|:-----------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--platform` | `string` | | Platform of attachment |
| [`--type`](#type) | `string` | | Type of attachment |
| Name | Type | Default | Description |
|:----------------|:---------|:--------|:-----------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--platform` | `string` | | Platform of attachment |
| `--type` | `string` | | Type of attachment |
<!---MARKER_GEN_END-->
## Description
Inspect a specific attachment from a build record, such as a provenance file or
SBOM. Attachments are optional artifacts stored with the build and may be
platform-specific.
## Examples
### <a name="type"></a> Inspect a provenance attachment from a build (--type)
Supported types include `provenance` and `sbom`.
```console
$ docker buildx history inspect attachment qu2gsuo8ejqrwdfii23xkkckt --type provenance
{
"_type": "https://slsa.dev/provenance/v0.2",
"buildDefinition": {
"buildType": "https://build.docker.com/BuildKit@v1",
"externalParameters": {
"target": "app",
"platforms": ["linux/amd64"]
}
},
"runDetails": {
"builder": "docker",
"by": "ci@docker.com"
}
}
```
### Inspect a SBOM for linux/amd64
```console
$ docker buildx history inspect attachment ^0 \
--type sbom \
--platform linux/amd64
{
"bomFormat": "CycloneDX",
"specVersion": "1.5",
"version": 1,
"components": [
{
"type": "library",
"name": "alpine",
"version": "3.18.2"
}
]
}
```
### Inspect an attachment by digest
You can inspect an attachment directly using its digest, which you can get from
the `inspect` output:
```console
# Using a build ID
docker buildx history inspect attachment qu2gsuo8ejqrwdfii23xkkckt sha256:abcdef123456...
# Or using a relative offset
docker buildx history inspect attachment ^0 sha256:abcdef123456...
```
Use `--type sbom` or `--type provenance` to filter attachments by type. To
inspect a specific attachment by digest, omit the `--type` flag.


@@ -5,61 +5,12 @@ Print the logs of a build
### Options
| Name | Type | Default | Description |
|:--------------------------|:---------|:--------|:--------------------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| [`--progress`](#progress) | `string` | `plain` | Set type of progress output (plain, rawjson, tty) |
| Name | Type | Default | Description |
|:----------------|:---------|:--------|:--------------------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--progress` | `string` | `plain` | Set type of progress output (plain, rawjson, tty) |
<!---MARKER_GEN_END-->
## Description
Print the logs for a completed build. The output appears in the same format as
`--progress=plain`, showing the full logs for each step.
By default, this shows logs for the most recent build on the current builder.
You can also specify an earlier build using an offset. For example:
- `^1` shows logs for the build before the most recent
- `^2` shows logs for the build two steps back
## Examples
### Print logs for the most recent build
```console
$ docker buildx history logs
#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 31B done
#1 DONE 0.0s
#2 [internal] load .dockerignore
#2 transferring context: 2B done
#2 DONE 0.0s
...
```
By default, this shows logs for the most recent build on the current builder.
### Print logs for a specific build
To print logs for a specific build, use a build ID or offset:
```console
# Using a build ID
docker buildx history logs qu2gsuo8ejqrwdfii23xkkckt
# Or using a relative offset
docker buildx history logs ^1
```
### <a name="progress"></a> Set type of progress output (--progress)
```console
$ docker buildx history logs ^1 --progress rawjson
{"id":"buildx_step_1","status":"START","timestamp":"2024-05-01T12:34:56.789Z","detail":"[internal] load build definition from Dockerfile"}
{"id":"buildx_step_1","status":"COMPLETE","timestamp":"2024-05-01T12:34:57.001Z","duration":212000000}
...
```


@@ -5,98 +5,13 @@ List build records
### Options
| Name | Type | Default | Description |
|:--------------------------|:--------------|:--------|:---------------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| [`--filter`](#filter) | `stringArray` | | Provide filter values (e.g., `status=error`) |
| [`--format`](#format) | `string` | `table` | Format the output |
| [`--local`](#local) | `bool` | | List records for current repository only |
| [`--no-trunc`](#no-trunc) | `bool` | | Don't truncate output |
| Name | Type | Default | Description |
|:----------------|:---------|:--------|:-----------------------------------------|
| `--builder` | `string` | | Override the configured builder instance |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| `--format` | `string` | `table` | Format the output |
| `--no-trunc` | `bool` | | Don't truncate output |
<!---MARKER_GEN_END-->
## Description
List completed builds recorded by the active builder. Each entry includes the
build ID, name, status, timestamp, and duration.
By default, only records for the current builder are shown. You can filter
results using flags.
## Examples
### List all build records for the current builder
```console
$ docker buildx history ls
BUILD ID NAME STATUS CREATED AT DURATION
qu2gsuo8ejqrwdfii23xkkckt .dev/2850 Completed 3 days ago 1.4s
qsiifiuf1ad9pa9qvppc0z1l3 .dev/2850 Completed 3 days ago 1.3s
g9808bwrjrlkbhdamxklx660b .dev/3120 Completed 5 days ago 2.1s
```
### <a name="filter"></a> List failed builds (--filter)
```console
docker buildx history ls --filter status=error
```
You can filter the list using the `--filter` flag. Supported filters include:
| Filter | Supported comparisons | Example |
|:-------|:----------------------|:--------|
| `ref`, `repository`, `status` | Support `=` and `!=` comparisons | `--filter status!=success` |
| `startedAt`, `completedAt`, `duration` | Support `<` and `>` comparisons with time values | `--filter duration>30s` |
You can combine multiple filters by repeating the `--filter` flag:
```console
docker buildx history ls --filter status=error --filter duration>30s
```
### <a name="local"></a> List builds from the current project (--local)
```console
docker buildx history ls --local
```
### <a name="no-trunc"></a> Display full output without truncation (--no-trunc)
```console
docker buildx history ls --no-trunc
```
### <a name="format"></a> Format output (--format)
**JSON output**
```console
$ docker buildx history ls --format json
[
{
"ID": "qu2gsuo8ejqrwdfii23xkkckt",
"Name": ".dev/2850",
"Status": "Completed",
"CreatedAt": "2025-04-15T12:33:00Z",
"Duration": "1.4s"
},
{
"ID": "qsiifiuf1ad9pa9qvppc0z1l3",
"Name": ".dev/2850",
"Status": "Completed",
"CreatedAt": "2025-04-15T12:29:00Z",
"Duration": "1.3s"
}
]
```
**Go template output**
```console
$ docker buildx history ls --format '{{.Name}} - {{.Duration}}'
.dev/2850 - 1.4s
.dev/2850 - 1.3s
.dev/3120 - 2.1s
```


@@ -13,27 +13,3 @@ Open a build in Docker Desktop
<!---MARKER_GEN_END-->
## Description
Open a build record in Docker Desktop for visual inspection. This requires
Docker Desktop to be installed and running on the host machine.
## Examples
### Open the most recent build in Docker Desktop
```console
docker buildx history open
```
By default, this opens the most recent build on the current builder.
### Open a specific build
```console
# Using a build ID
docker buildx history open qu2gsuo8ejqrwdfii23xkkckt
# Or using a relative offset
docker buildx history open ^1
```


@@ -14,36 +14,3 @@ Remove build records
<!---MARKER_GEN_END-->
## Description
Remove one or more build records from the current builder's history. You can
remove specific builds by ID or offset, or delete all records at once using
the `--all` flag.
## Examples
### Remove a specific build
```console
# Using a build ID
docker buildx history rm qu2gsuo8ejqrwdfii23xkkckt
# Or using a relative offset
docker buildx history rm ^1
```
### Remove multiple builds
```console
# Using build IDs
docker buildx history rm qu2gsuo8ejqrwdfii23xkkckt qsiifiuf1ad9pa9qvppc0z1l3
# Or using relative offsets
docker buildx history rm ^1 ^2
```
### Remove all build records from the current builder
```console
docker buildx history rm --all
```


@@ -5,65 +5,13 @@ Show the OpenTelemetry trace of a build record
### Options
| Name | Type | Default | Description |
|:------------------------|:---------|:--------------|:-----------------------------------------|
| [`--addr`](#addr) | `string` | `127.0.0.1:0` | Address to bind the UI server |
| `--builder` | `string` | | Override the configured builder instance |
| [`--compare`](#compare) | `string` | | Compare with another build reference |
| `-D`, `--debug` | `bool` | | Enable debug logging |
| Name | Type | Default | Description |
|:----------------|:---------|:--------------|:-----------------------------------------|
| `--addr` | `string` | `127.0.0.1:0` | Address to bind the UI server |
| `--builder` | `string` | | Override the configured builder instance |
| `--compare` | `string` | | Compare with another build reference |
| `-D`, `--debug` | `bool` | | Enable debug logging |
<!---MARKER_GEN_END-->
## Description
View the OpenTelemetry trace for a completed build. This command loads the
trace into a Jaeger UI viewer and opens it in your browser.
This helps analyze build performance, step timing, and internal execution flows.
## Examples
### Open the OpenTelemetry trace for the most recent build
This command starts a temporary Jaeger UI server and opens your default browser
to view the trace.
```console
docker buildx history trace
```
### Open the trace for a specific build
```console
# Using a build ID
docker buildx history trace qu2gsuo8ejqrwdfii23xkkckt
# Or using a relative offset
docker buildx history trace ^1
```
### <a name="addr"></a> Run the Jaeger UI on a specific port (--addr)
```console
# Using a build ID
docker buildx history trace qu2gsuo8ejqrwdfii23xkkckt --addr 127.0.0.1:16686
# Or using a relative offset
docker buildx history trace ^1 --addr 127.0.0.1:16686
```
### <a name="compare"></a> Compare two build traces (--compare)
Compare two specific builds by name:
```console
# Using build IDs
docker buildx history trace --compare=qu2gsuo8ejqrwdfii23xkkckt qsiifiuf1ad9pa9qvppc0z1l3
# Or using a single relative offset
docker buildx history trace --compare=^1
```
When you use a single reference with `--compare`, it compares that build
against the most recent one.


@@ -24,10 +24,11 @@ import (
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/system"
"github.com/docker/docker/errdefs"
dockerarchive "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stdcopy"
"github.com/moby/buildkit/client"
mobyarchive "github.com/moby/go-archive"
"github.com/pkg/errors"
)
@@ -55,7 +56,6 @@ type Driver struct {
restartPolicy container.RestartPolicy
env []string
defaultLoad bool
gpus []container.DeviceRequest
}
func (d *Driver) IsMobyDriver() bool {
@@ -158,9 +158,6 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
if d.cpusetMems != "" {
hc.Resources.CpusetMems = d.cpusetMems
}
if len(d.gpus) > 0 && d.hasGPUCapability(ctx, cfg.Image, d.gpus) {
hc.Resources.DeviceRequests = d.gpus
}
if info, err := d.DockerAPI.Info(ctx); err == nil {
if info.CgroupDriver == "cgroupfs" {
// Place all buildkit containers inside this cgroup by default so limits can be attached
@@ -249,8 +246,8 @@ func (d *Driver) copyToContainer(ctx context.Context, files map[string][]byte) e
if srcPath != "" {
defer os.RemoveAll(srcPath)
}
srcArchive, err := mobyarchive.TarWithOptions(srcPath, &mobyarchive.TarOptions{
ChownOpts: &mobyarchive.ChownOpts{UID: 0, GID: 0},
srcArchive, err := dockerarchive.TarWithOptions(srcPath, &dockerarchive.TarOptions{
ChownOpts: &idtools.Identity{UID: 0, GID: 0},
})
if err != nil {
return err
@@ -423,7 +420,6 @@ func (d *Driver) Features(ctx context.Context) map[driver.Feature]bool {
driver.DockerExporter: true,
driver.CacheExport: true,
driver.MultiPlatform: true,
driver.DirectPush: true,
driver.DefaultLoad: d.defaultLoad,
}
}
@@ -432,31 +428,6 @@ func (d *Driver) HostGatewayIP(ctx context.Context) (net.IP, error) {
return nil, errors.New("host-gateway is not supported by the docker-container driver")
}
// hasGPUCapability checks if docker daemon has GPU capability. We need to run
// a dummy container with GPU device to check if the daemon has this capability
// because there is no API to check it yet.
func (d *Driver) hasGPUCapability(ctx context.Context, image string, gpus []container.DeviceRequest) bool {
cfg := &container.Config{
Image: image,
Entrypoint: []string{"/bin/true"},
}
hc := &container.HostConfig{
NetworkMode: container.NetworkMode(container.IPCModeNone),
AutoRemove: true,
Resources: container.Resources{
DeviceRequests: gpus,
},
}
resp, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, "")
if err != nil {
return false
}
if err := d.DockerAPI.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
return false
}
return true
}
func demuxConn(c net.Conn) net.Conn {
pr, pw := io.Pipe()
// TODO: rewrite parser with Reader() to avoid goroutine switch


@@ -51,12 +51,6 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
InitConfig: cfg,
restartPolicy: rp,
}
var gpus dockeropts.GpuOpts
if err := gpus.Set("all"); err == nil {
if v := gpus.Value(); len(v) > 0 {
d.gpus = v
}
}
for k, v := range cfg.DriverOpts {
switch {
case k == "network":


@@ -93,7 +93,6 @@ func (d *Driver) Features(ctx context.Context) map[driver.Feature]bool {
driver.DockerExporter: useContainerdSnapshotter,
driver.CacheExport: useContainerdSnapshotter,
driver.MultiPlatform: useContainerdSnapshotter,
driver.DirectPush: useContainerdSnapshotter,
driver.DefaultLoad: true,
}
})


@@ -7,6 +7,5 @@ const DockerExporter Feature = "Docker exporter"
const CacheExport Feature = "Cache export"
const MultiPlatform Feature = "Multi-platform build"
const DirectPush Feature = "Direct push"
const DefaultLoad Feature = "Automatically load images to the Docker Engine image store"


@@ -35,10 +35,10 @@ func testEndpoint(server, defaultNamespace string, ca, cert, key []byte, skipTLS
}
var testStoreCfg = store.NewConfig(
func() any {
return &map[string]any{}
func() interface{} {
return &map[string]interface{}{}
},
store.EndpointTypeGetter(KubernetesEndpoint, func() any { return &EndpointMeta{} }),
store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
)
func TestSaveLoadContexts(t *testing.T) {
@@ -197,7 +197,7 @@ func checkClientConfig(t *testing.T, ep Endpoint, server, namespace string, ca,
func save(s store.Writer, ep Endpoint, name string) error {
meta := store.Metadata{
Endpoints: map[string]any{
Endpoints: map[string]interface{}{
KubernetesEndpoint: ep.EndpointMeta,
},
Name: name,


@@ -43,7 +43,7 @@ type Endpoint struct {
func init() {
command.RegisterDefaultStoreEndpoints(
store.EndpointTypeGetter(KubernetesEndpoint, func() any { return &EndpointMeta{} }),
store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
)
}
@@ -96,7 +96,7 @@ func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {
// ResolveDefault returns endpoint metadata for the default Kubernetes
// endpoint, which is derived from the env-based kubeconfig.
func (c *EndpointMeta) ResolveDefault() (any, *store.EndpointTLSData, error) {
func (c *EndpointMeta) ResolveDefault() (interface{}, *store.EndpointTLSData, error) {
kubeconfig := os.Getenv("KUBECONFIG")
if kubeconfig == "" {
kubeconfig = filepath.Join(homedir.Get(), ".kube/config")


@@ -238,7 +238,6 @@ func (d *Driver) Features(_ context.Context) map[driver.Feature]bool {
driver.DockerExporter: d.DockerAPI != nil,
driver.CacheExport: true,
driver.MultiPlatform: true, // Untested (needs multiple Driver instances)
driver.DirectPush: true,
driver.DefaultLoad: d.defaultLoad,
}
}


@@ -90,7 +90,7 @@ func ListRunningPods(ctx context.Context, client clientcorev1.PodInterface, depl
for i := range podList.Items {
pod := &podList.Items[i]
if pod.Status.Phase == corev1.PodRunning {
logrus.Debugf("pod running: %q", pod.Name)
logrus.Debugf("pod runnning: %q", pod.Name)
runningPods = append(runningPods, pod)
}
}


@@ -25,7 +25,7 @@ func GenerateNodeName(builderName string, txn *store.Txn) (string, error) {
}
var name string
for range 6 {
for i := 0; i < 6; i++ {
name, err = randomName()
if err != nil {
return "", err

Some files were not shown because too many files have changed in this diff.